source: trunk/gsdl/perllib/plugins/HTMLPlug.pm @ 1431

Last change on this file since 1431 was 1431, checked in by sjboddie, 24 years ago

Made a few minor adjustments to perl building code for use with
collectoraction

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 15.4 KB
###########################################################################
#
# HTMLPlug.pm -- basic html plugin
#
# A component of the Greenstone digital library software
# from the New Zealand Digital Library Project at the
# University of Waikato, New Zealand.
#
# Copyright (C) 1999 New Zealand Digital Library Project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###########################################################################

#
# Note that this plugin handles frames only in a very simple way
# i.e. each frame is treated as a separate document. This means
# search results will contain links to individual frames rather
# than linking to the top level frameset.
# There may also be some problems caused by the _parent target
# (it's removed by this plugin)
# To use frames properly you'll need to use the WebPlug plugin.
#

package HTMLPlug;

use ConvertToBasPlug;
use ghtml;
use util;
use parsargv;

sub BEGIN {
    @ISA = ('ConvertToBasPlug');
}

sub print_usage {
    print STDERR "\n  usage: plugin HTMLPlug [options]\n\n";
    print STDERR "  options:\n";
    print STDERR "   -nolinks              Don't make any attempt to trap links (setting this flag may\n";
    print STDERR "                         improve speed of building/importing but any relative links within\n";
    print STDERR "                         documents will be broken).\n";
    print STDERR "   -keep_head            Don't remove headers from html files.\n";
    print STDERR "   -no_metadata          Don't attempt to extract any metadata from files.\n";
    print STDERR "   -metadata_fields      Comma separated list of metadata fields to attempt to extract.\n";
    print STDERR "                         Defaults to 'Title'.\n";
    print STDERR "                         Use 'first200' to get the first 200 characters of the body.\n";
    print STDERR "                         Use 'H1' to get the text inside the first <H1> and </H1> tags in the text.\n";
    print STDERR "   -w3mir                Set if w3mir was used to generate input file structure.\n";
    print STDERR "   -assoc_files          Perl regular expression of file extensions to associate with\n";
    print STDERR "                         html documents. Defaults to '(?i)\.(jpe?g|gif|png|css|pdf)\$'\n";
    print STDERR "   -rename_assoc_files   Renames files associated with documents (e.g. images). Also\n";
    print STDERR "                         creates much shallower directory structure (useful when creating\n";
    print STDERR "                         collections to go on cd-rom).\n\n";
    print STDERR "   -title_sub            Substitution expression to modify string stored as Title.\n";
    print STDERR "                         Used by, for example, PDFHtml to remove Page 1 etc from text\n";
    print STDERR "                         chosen to be used as the title.\n";
}

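# Illustrative only -- not part of the original file. In a collection's
# collect.cfg these options go on the plugin line, so a configuration using
# several of the flags above might look something like:
#
#   plugin HTMLPlug -metadata_fields Title,first200,H1 -rename_assoc_files
#
# Which options make sense depends on how the source documents were gathered
# (e.g. add -w3mir when the files were mirrored with w3mir).
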
sub new {
    my $class = shift (@_);
    my $self = new ConvertToBasPlug ($class, @_);

    if (!parsargv::parse(\@_,
                         q^nolinks^, \$self->{'nolinks'},
                         q^keep_head^, \$self->{'keep_head'},
                         q^no_metadata^, \$self->{'no_metadata'},
                         q^metadata_fields/.*/Title^, \$self->{'metadata_fields'},
                         q^w3mir^, \$self->{'w3mir'},
                         q^assoc_files/.*/(?i)\.(jpe?g|gif|png|css|pdf)$^, \$self->{'assoc_files'},
                         q^rename_assoc_files^, \$self->{'rename_assoc_files'},
                         q^title_sub/.*/^, \$self->{'title_sub'},
                         "allow_extra_options")) {

        print STDERR "\nIncorrect options passed to HTMLPlug, check your collect.cfg configuration file\n";
        &print_usage();
        die "\n";
    }

    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    $self->{'convert_to'} = "HTML";
    $self->{'convert_to_ext'} = "html";

    return bless $self, $class;
}

sub get_default_block_exp {
    my $self = shift (@_);

    return q^(?i)\.(gif|jpe?g|png|pdf|rtf|css)$^;
}

sub get_default_process_exp {
    my $self = shift (@_);

    # the last option is an attempt to encode the concept of an html query ...
    return q^(?i)(\.html?|\.shtml|\.shm|\.asp|\.php|\.cgi|.+\?.+=.*)$^;
}

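# Illustrative only -- not part of the original file. The default process
# expression accepts ordinary html filenames as well as query-style names via
# the trailing .+\?.+=.* alternative, for example:
#
#   "index.html"          matches (via \.html?)
#   "search.cgi?q=kiwi"   matches (via .+\?.+=.*)
#   "logo.gif"            does not match; it falls under the block expression
#                         and the -assoc_files handling instead
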
# do plugin specific processing of doc_obj
sub process {
    my $self = shift (@_);
    my ($textref, $pluginfo, $base_dir, $file, $metadata, $doc_obj) = @_;
    my $outhandle = $self->{'outhandle'};

    print $outhandle "HTMLPlug: processing $file\n"
        if $self->{'verbosity'} > 1;

    my $cursection = $doc_obj->get_top_section();

    $self->extract_metadata ($textref, $metadata, $doc_obj, $cursection)
        unless $self->{'no_metadata'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).
    my $web_url = "http://$file";
    $web_url =~ s/\\/\//g; # for windows
    $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);

    # remove header and footer
    if (!$self->{'keep_head'}) {
        $$textref =~ s/^.*?<body[^>]*>//is;
        $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
    }

    # trap links
    if (!$self->{'nolinks'}) {

        # usemap="./#index" not handled correctly => change to "#index"
        $$textref =~ s/(<img[^>]*?usemap\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_usemap_links($1, $2, $3)/isge;

        $$textref =~ s/(<(?:a|area|frame|link)\s+[^>]*?\s*(?:href|src)\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
    }

    # trap images
    $$textref =~ s/(<img[^>]*? src\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
        $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;

    # add text to document object
    $doc_obj->add_utf8_text($cursection, $$textref);

    return 1;
}

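# Illustrative only -- not part of the original file. Unless -keep_head is
# set, the two "remove header and footer" substitutions in process() reduce a
# page such as
#
#   <html><head><title>Kiwi</title></head><body bgcolor="#ffffff">Some text</body></html>
#
# to just "Some text" before the link, image and text handling is applied.
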
sub replace_images {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir,
        $file, $doc_obj, $section) = @_;

    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
    return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
}

sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back =~ s/target=\"?_parent\"?//is;

    return $front . $link . $back if $link =~ /^\#/s;
    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
    # href may use '\'s where '/'s should be on Windows
    $href =~ s/\\/\//g;

    my ($filename) = $href =~ /^(?:.*?):(?:\/\/)?(.*)/;

    ##### leave all these links alone (they won't be picked up by intermediate
    ##### pages). I think that's safest when dealing with frames, targets etc.
    ##### (at least until I think of a better way to do it). Problems occur with
    ##### mailto links from within small frames, the intermediate page is displayed
    ##### within that frame and can't be seen. There is still potential for this to
    ##### happen even with html pages - the solution seems to be to somehow tell
    ##### the browser from the server side to display the page being sent (i.e.
    ##### the intermediate page) in the top level window - I'm not sure if that's
    ##### possible - the following line should probably be deleted if that can be done
    return $front . $link . $back if $href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/is;

    if (($rl == 0) || ($filename =~ /$self->{'process_exp'}/) ||
        ($href =~ /\/$/) || ($href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
        &ghtml::urlsafe ($href);
        return $front . "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part . $back;

    } else {
        # link is to some other type of file (image, pdf etc.) so we'll
        # need to associate that file
        return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
    }
}

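# Illustrative only -- not part of the original file. For a relative link in
# a mirrored document, replace_href_links typically rewrites
#
#   <a href="chapter2.html">
#
# into something like
#
#   <a href="_httpextlink_&rl=1&href=http://.../chapter2.html">
#
# (with the href made url-safe by ghtml::urlsafe), while a link to a file
# matching -assoc_files (an image, pdf, etc.) is instead handed to add_file below.
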
sub add_file {
    my $self = shift (@_);
    my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;
    my ($newname);

    my $filename = $href;
    $filename =~ s/^[^:]*:\/\///;
    $filename = &util::filename_cat($base_dir, $filename);

    my ($ext) = $filename =~ /(\.[^\.]*)$/;

    if ((!defined $ext) || ($ext !~ /$self->{'assoc_files'}/)) {
        return "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part;
    }

    if ($self->{'rename_assoc_files'}) {
        if (defined $self->{'aux_files'}->{$href}) {
            $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
                $self->{'aux_files'}->{$href}->{'file_num'} . $ext;
        } else {
            $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
            $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
            $self->inc_filecount ();
        }
        $doc_obj->associate_file($filename, $newname, undef, $section);
        return "_httpcollimg_/$newname";

    } else {
        ($newname) = $filename =~ /([^\/\\]*)$/;
        $doc_obj->associate_file($filename, $newname, undef, $section);
        return "_httpdocimg_/$newname";
    }
}

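# Illustrative only -- not part of the original file. With -rename_assoc_files
# set, associated files are renumbered into the shallow dir_num/file_num
# layout built above, e.g. the first few become
#
#   0/0.jpg, 0/1.gif, 0/2.css, ...
#
# with inc_filecount starting a new directory after roughly a thousand files;
# without the option each associated file simply keeps its own basename.
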
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    my ($before_hash, $hash_part) = $link =~ /^([^\#]*)(\#?.*)$/;
    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ /[\w\.\/]/) {
        my $outhandle = $self->{'outhandle'};
        print $outhandle "HTMLPlug: ERROR - badly formatted tag ignored ($link)\n"
            if $self->{'verbosity'};
        return ($link, "", 0);
    }

    if ($before_hash =~ s/^((?:http|ftp|file):\/\/)//i) {
        my $type = $1;

        if ($link =~ /^(http|ftp):/i) {
            # Turn url (using /) into file name (possibly using \ on windows)
            my @http_dir_split = split('/', $before_hash);
            $before_hash = &util::filename_cat(@http_dir_split);
        }

        $before_hash = $self->eval_dir_dots($before_hash);

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

        my $rl = 0;
        $rl = 1 if (-e $linkfilename);

        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ($type . $before_hash, $hash_part, $rl);

    } elsif ($link !~ /^(mailto|news|gopher|nntp|telnet|javascript):/i) {

        if ($before_hash =~ s/^\///) {
            # the first directory will be the domain name if w3mir was used
            # to generate archives, otherwise we'll assume all files are
            # from the same site and base_dir is the root
            if ($self->{'w3mir'}) {
                my @dirs = split /[\/\\]/, $file;
                my $domname = shift (@dirs);
                $before_hash = &util::filename_cat($domname, $before_hash);
                $before_hash =~ s/\\/\//g; # for windows
            } else {
                # see if link shares directory with source document
                # => turn into relative link if this is so!
                my $before_hash_fix = &util::filename_cat("", $before_hash);
                $before_hash_fix =~ s/^$base_dir(\\|\/)//;
                $before_hash = $before_hash_fix;
            }

        } else {
            # Turn relative file path into full path
            my $dirname = &File::Basename::dirname($file);
            $before_hash = &util::filename_cat($dirname, $before_hash);
            $before_hash = $self->eval_dir_dots($before_hash);
        }

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ("http://" . $before_hash, $hash_part, 1);

    } else {
        # mailto, news, nntp, telnet, javascript or gopher link
        return ($before_hash, "", 0);
    }
}

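# Illustrative only -- not part of the original file. Some representative
# results from format_link for a document at $file = "www.nzdl.org/index.html"
# (hypothetical paths; the exact rl value and any trailing slash depend on
# what exists under $base_dir):
#
#   "http://www.nzdl.org/about.html"  ->  ("http://www.nzdl.org/about.html", "", 1 if mirrored locally, else 0)
#   "docs/ch1.html#sec2"              ->  ("http://www.nzdl.org/docs/ch1.html", "#sec2", 1)
#   "mailto:info@nzdl.org"            ->  ("mailto:info@nzdl.org", "", 0)
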
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;

    return if (!defined $self->{'metadata_fields'});

    foreach my $field (split /,/, $self->{'metadata_fields'}) {

        # don't need to extract field if it was passed in from a previous
        # (recursive) plugin
        next if defined $metadata->{$field};

        # see if there's a <meta> tag for this field
        if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$field\"?([^>]*)/is) {
            my $content = $1 . $2;
            if ($content =~ /content\s*=\s*\"?(.*?)\"?/is) {
                if (defined $1) {
                    my $value = $1;
                    $value =~ s/\s+/ /gs;
                    $doc_obj->add_utf8_metadata($section, $field, $value);
                    next;
                }
            }
        }

        # TITLE: extract the document title
        if ($field =~ /^title$/i) {

            # see if there's a <title> tag
            if ($$textref =~ /<title[^>]*>([^<]*)<\/title[^>]*>/is) {
                if (defined $1) {
                    my $title = $1;
                    if ($title =~ /\w/) {
                        $title =~ s/\s+/ /gs;
                        $title =~ s/^\s+//;
                        $title =~ s/\s+$//;
                        $doc_obj->add_utf8_metadata ($section, $field, $title);
                        next;
                    }
                }
            }

            # if no title use first 100 characters
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            $tmptext =~ s/$self->{'title_sub'}// if (defined $self->{'title_sub'});
            $tmptext =~ s/<[^>]*>//g;
            $tmptext = substr ($tmptext, 0, 100);
            $tmptext =~ s/^\s+//;
            $tmptext =~ s/\s+$//;
            $tmptext =~ s/\s\S*$/.../;
            $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            next;
        }

        # FIRST200: extract the first 200 characters as metadata
        if ($field =~ /^first200$/i) {
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            $tmptext =~ s/.*<body[^>]*>//i;
            $tmptext =~ s/$self->{'title_sub'}// if (defined $self->{'title_sub'});
            $tmptext =~ s/<[^>]*>//g;
            $tmptext = substr ($tmptext, 0, 200);
            $tmptext =~ s/^\s+//;
            $tmptext =~ s/\s+$//;
            $tmptext =~ s/\s\S*$/.../;
            $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            next;
        }

        # H1: extract the text between the first <H1> and </H1> tags
        if ($field =~ /^H1$/i) {
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            if ($tmptext =~ /<H1[^>]*>/i) {
                $tmptext =~ s/.*<H1[^>]*>//i;
                $tmptext =~ s/<\/H1[^>]*>.*//i;
                $tmptext =~ s/^\s+//;
                $tmptext =~ s/\s+$//;
                $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            }
            next;
        }
    }
}

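# Illustrative only -- not part of the original file. With
# -metadata_fields Title,first200 the loop above stores a Title value for
# each document, taken from a matching <meta> tag or, more commonly, from the
# page's <title> element, e.g.
#
#   <title> Greenstone Digital Library </title>   ->   Title = "Greenstone Digital Library"
#
# plus a first200 value holding (roughly) the first 200 characters of the
# body text with markup stripped.
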
# evaluate any "../" to next directory up
# evaluate any "./" as here
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $dirsep_os = &util::get_os_dirsep();
    my @dirsep = split(/$dirsep_os/, $filename);

    my @eval_dirs = ();
    foreach my $d (@dirsep) {
        if ($d eq "..") {
            pop(@eval_dirs);

        } elsif ($d eq ".") {
            # do nothing!

        } else {
            push(@eval_dirs, $d);
        }
    }

    return &util::filename_cat(@eval_dirs);
}

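# Illustrative only -- not part of the original file. eval_dir_dots simply
# collapses "." and ".." path components, so on a Unix-style filesystem
#
#   $self->eval_dir_dots("docs/../images/./logo.gif")   returns "images/logo.gif"
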
sub replace_usemap_links {
    my $self = shift (@_);
    my ($front, $link, $back) = @_;

    $link =~ s/^\.\///;
    return $front . $link . $back;
}

sub inc_filecount {
    my $self = shift (@_);

    if ($self->{'file_num'} == 1000) {
        $self->{'dir_num'} ++;
        $self->{'file_num'} = 0;
    } else {
        $self->{'file_num'} ++;
    }
}

1;