source: trunk/gsdl/perllib/plugins/HTMLPlug.pm@ 1231

Last change on this file since 1231 was 1231, checked in by gwp, 24 years ago

Bug fix on the H1 metadata option: if the file has no <H1> tag,
nothing is added.

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 15.9 KB
Line 
1###########################################################################
2#
3# HTMLPlug.pm -- basic html plugin
4#
5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34# To use frames properly you'll need to use the WebPlug plugin.
35#
36
37
38package HTMLPlug;
39
40use BasPlug;
41use ghtml;
42use util;
43use parsargv;
44
# Establish inheritance before "use strict" below takes effect:
# HTMLPlug derives from BasPlug (@ISA is a package global).
sub BEGIN {
    @ISA = ('BasPlug');
}
48
49use strict;
50
# Print a usage/help message describing HTMLPlug's configuration
# options to STDERR.  Called from new() when option parsing fails.
#
# Fixes: the assoc_files default previously contained an unescaped $'
# inside a double-quoted string, which interpolated Perl's postmatch
# variable instead of printing a literal "$'"; a stray " w3mir \n"
# fragment line has also been removed.
sub print_usage {
    print STDERR "\nIncorrect options passed to HTMLPlug, check your collect.cfg configuration file\n";

    print STDERR "\n usage: plugin HTMLPlug [options]\n\n";
    print STDERR " options:\n";
    print STDERR " -process_exp A perl regular expression to match against filenames.\n";
    print STDERR " Matching filenames will be processed by this plugin.\n";
    print STDERR " Defaults to '(?i)\.html?\$' i.e. all documents ending in\n";
    print STDERR " .htm or .html (case-insensitive).\n";
    print STDERR " -nolinks Don't make any attempt to trap links (setting this flag may\n";
    print STDERR " improve speed of building/importing but any relative links within\n";
    print STDERR " documents will be broken).\n";
    print STDERR " -block_exp Files matching this regular expression will be blocked from\n";
    print STDERR " being passed to any further plugins in the list. By default\n";
    print STDERR " HTMLPlug blocks any files with .gif, .jpg, .jpeg, .png, .pdf,\n";
    print STDERR " .rtf or .css file extensions.\n";
    print STDERR " -keep_head Don't remove headers from html files.\n";
    print STDERR " -no_metadata Don't attempt to extract any metadata from files.\n";
    print STDERR " -metadata_fields Comma separated list of metadata fields to attempt to extract.\n";
    print STDERR " Defaults to 'Title'.\n";
    print STDERR " Use `first200` to get the first 200 characters of the body.\n";
    print STDERR " Use `H1` to get the text inside the first <H1> and </H1> tags in the text.\n";
    print STDERR " -w3mir Set if w3mir was used to generate input file structure.\n";
    print STDERR " -assoc_files Perl regular expression of file extensions to associate with\n";
    print STDERR " html documents. Defaults to '(?i)\.(jpe?g|gif|png|css|pdf)\$'\n";
    print STDERR " -rename_assoc_files Renames files associated with documents (e.g. images). Also\n";
    print STDERR " creates much shallower directory structure (useful when creating\n";
    print STDERR " collections to go on cd-rom).\n\n";
}
81
# Constructor.  Builds on BasPlug, then parses HTMLPlug's own options
# (see print_usage for their meaning) into $self.  Dies with a usage
# message if the options are malformed.
sub new {
    my $class = shift (@_);
    my $self = new BasPlug (@_);

    # Each q^name/regex/default^ entry binds an option to a field in
    # $self; the pairing and order of these arguments is significant
    # to parsargv::parse.
    if (!parsargv::parse(\@_,
                         q^process_exp/.*/(?i)\.html?$^, \$self->{'process_exp'},
                         q^nolinks^, \$self->{'nolinks'},
                         q^block_exp/.*/(?i)\.(gif|jpe?g|png|pdf|rtf|css)$^, \$self->{'block_exp'},
                         q^keep_head^, \$self->{'keep_head'},
                         q^no_metadata^, \$self->{'no_metadata'},
                         q^metadata_fields/.*/Title^, \$self->{'metadata_fields'},
                         q^w3mir^, \$self->{'w3mir'},
                         q^assoc_files/.*/(?i)\.(jpe?g|gif|png|css|pdf)$^, \$self->{'assoc_files'},
                         q^rename_assoc_files^, \$self->{'rename_assoc_files'},
                         "allow_extra_options")) {

        &print_usage();
        die "\n";
    }

    # Bookkeeping used by add_file/inc_filecount when
    # -rename_assoc_files is set.
    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    return bless $self, $class;
}
108
sub is_recursive {
    my $self = shift (@_);

    # HTMLPlug handles each file individually; it never recurses
    # into directories itself.
    return 0;
}
114
# Process a single html file.
# Returns 1 (number of files processed) on success, 0 if the file was
# blocked or contained no text, or undef if this plugin can't process
# it.  Note that $base_dir might be "" and that $file might
# include directories.
sub read {
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $metadata, $processor) = @_;

    my $filename = &util::filename_cat($base_dir, $file);
    # a blocked file counts as handled, so no later plugin sees it
    return 0 if $filename =~ /$self->{'block_exp'}/;
    if ($filename !~ /$self->{'process_exp'}/ || !-f $filename) {
        return undef;
    }
    # strip leading slashes/backslashes so $file is a relative path
    $file =~ s/^[\/\\]+//;

    $self->{'verbosity'} = $processor->{'verbosity'};
    print STDERR "HTMLPlug: processing $file\n"
        if $self->{'verbosity'} > 1;

    # create a new document
    my $doc_obj = new doc ($file, "indexed_doc");
    my $cursection = $doc_obj->get_top_section();

    # read in HTML file ($text will be in utf8)
    my $text = "";
    $self->read_file ($filename, \$text);

    # refuse documents with no word characters at all
    if ($text !~ /\w/) {
        print STDERR "HTMLPlug: ERROR: $file contains no text\n" if $self->{'verbosity'};
        return 0;
    }

    # metadata passed in from the caller, then metadata mined from
    # the text itself (unless -no_metadata was given)
    $self->extra_metadata ($doc_obj, $cursection, $metadata);
    $self->extract_metadata (\$text, $metadata, $doc_obj, $cursection)
        unless $self->{'no_metadata'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).
    my $web_url = "http://$file";
    $web_url =~ s/\\/\//g; # for windows
    $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);

    # remove header and footer
    if (!$self->{'keep_head'}) {
        $text =~ s/^.*?<body[^>]*>//is;
        $text =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
    }

    # trap links
    if (!$self->{'nolinks'}) {

        # usemap="./#index" not handled correctly => change to "#index"
        $text =~ s/(<img[^>]*?usemap\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_usemap_links($1, $2, $3)/isge;

        # rewrite href/src in a, area, frame and link tags
        $text =~ s/(<(?:a|area|frame|link)\s+[^>]*?(?:href|src)\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
    }

    # trap images (done even when -nolinks is set)
    $text =~ s/(<img[^>]*?src\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
        $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;

    $doc_obj->add_utf8_text ($cursection, $text);

    # add an OID
    $doc_obj->set_OID();

    # process the document
    $processor->process($doc_obj);

    return 1; # processed the file
}
189
# Rewrite an <img> src attribute.  $pre and $post are the tag text on
# either side of the link value; the link is resolved via format_link
# and handed to add_file, which associates the image with the document
# (or leaves it as an external link).
sub replace_images {
    my $self = shift (@_);
    my ($pre, $src, $post, $base_dir,
        $file, $doc_obj, $section) = @_;

    # embedded newlines inside the src value would confuse link handling
    $src =~ tr/\n/ /;

    my ($href, $hash_part, $rl) = $self->format_link ($src, $base_dir, $file);
    my $replacement =
        $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section);
    return $pre . $replacement . $post;
}
200
# Rewrite an href/src attribute found in an <a>, <area>, <frame> or
# <link> tag.  $front and $back are the tag text either side of the
# link value ($link).  Page-internal anchors and mailto/news/etc.
# links are returned unchanged; links into the collection become
# _httpextlink_ macros; links to other file types (images, pdf etc.)
# are handed to add_file for association with the document.
sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back =~ s/target=\"?_parent\"?//is;

    # within-page anchors ("#...") are left untouched
    return $front . $link . $back if $link =~ /^\#/s;
    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # strip "proto:" (and any "//") to get a bare file name
    # NOTE(review): $filename stays undef when $href has no "proto:"
    # prefix; the match further below then runs against undef (warns
    # under -w, never matches) - confirm that is the intended fallback.
    my ($filename) = $href =~ /^(?:.*?):(?:\/\/)?(.*)/;

    ##### leave all these links alone (they won't be picked up by intermediate
    ##### pages). I think that's safest when dealing with frames, targets etc.
    ##### (at least until I think of a better way to do it). Problems occur with
    ##### mailto links from within small frames, the intermediate page is displayed
    ##### within that frame and can't be seen. There is still potential for this to
    ##### happen even with html pages - the solution seems to be to somehow tell
    ##### the browser from the server side to display the page being sent (i.e.
    ##### the intermediate page) in the top level window - I'm not sure if that's
    ##### possible - the following line should probably be deleted if that can be done
    return $front . $link . $back if $href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/is;


    if (($rl == 0) || ($filename =~ /$self->{'process_exp'}/) ||
        ($href =~ /\/$/) || ($href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
        # NOTE(review): the mailto|news|... alternative above is
        # unreachable - those links already returned a few lines up.
        &ghtml::urlsafe ($href);
        return $front . "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part . $back;

    } else {
        # link is to some other type of file (image, pdf etc.) so we'll
        # need to associate that file
        return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
    }
}
244
# Associate the file referred to by $href with the current document
# (when its extension matches -assoc_files), returning the macro text
# that should replace the link.  Non-matching extensions are turned
# into _httpextlink_ macros instead.  With -rename_assoc_files set,
# associated files are renumbered into a shallow dir_num/file_num
# layout and remembered in aux_files so repeat links reuse the name.
sub add_file {
    my $self = shift (@_);
    my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;

    # strip any "proto://" prefix, then locate the file under $base_dir
    my $filename = $href;
    $filename =~ s/^[^:]*:\/\///;
    $filename = &util::filename_cat ($base_dir, $filename);

    my ($ext) = $filename =~ /(\.[^\.]*)$/;

    # extensions outside -assoc_files stay as plain external links
    unless (defined $ext && $ext =~ /$self->{'assoc_files'}/) {
        return "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part;
    }

    my $newname;
    if ($self->{'rename_assoc_files'}) {
        my $seen = $self->{'aux_files'}->{$href};
        if (defined $seen) {
            # this file was associated earlier - reuse its number
            $newname = $seen->{'dir_num'} . "/" . $seen->{'file_num'} . $ext;
        } else {
            $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
            $self->{'aux_files'}->{$href} =
                {'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
            $self->inc_filecount ();
        }
        $doc_obj->associate_file($filename, $newname, undef, $section);
        return "_httpcollimg_/$newname";
    }

    # keep the original basename when not renaming
    ($newname) = $filename =~ /([^\/\\]*)$/;
    $doc_obj->associate_file($filename, $newname, undef, $section);
    return "_httpdocimg_/$newname";
}
277
278
# Normalise a link extracted from the html.
# Returns ($href, $hash_part, $rl): the cleaned link, its "#anchor"
# suffix (or ""), and a flag that is 1 when the link points at a file
# that exists within the collection ("relative link") and 0 when it
# is external.
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    # split off the "#anchor" part, if any
    my ($before_hash, $hash_part) = $link =~ /^([^\#]*)(\#?.*)$/;
    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ /[\w\.\/]/) {
        print STDERR "HTMLPlug: ERROR - badly formatted tag ignored ($link)\n"
            if $self->{'verbosity'};
        return ($link, "", 0);
    }

    if ($before_hash =~ s/^((?:http|ftp|file):\/\/)//i) {
        # absolute http/ftp/file URL
        my $type = $1;

        if ($link =~ /^(http|ftp):/i) {
            # Turn url (using /) into file name (possibly using \ on windows)
            my @http_dir_split = split('/', $before_hash);
            $before_hash = &util::filename_cat(@http_dir_split);
        }

        $before_hash = $self->eval_dir_dots($before_hash);

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

        # relative only if the target actually exists on disk locally
        my $rl = 0;
        $rl = 1 if (-e $linkfilename);

        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ($type . $before_hash, $hash_part, $rl);

    } elsif ($link !~ /^(mailto|news|gopher|nntp|telnet|javascript):/i) {
        # path without a protocol - treat as within the collection

        if ($before_hash =~ s/^\///) {
            # the first directory will be the domain name if w3mir was used
            # to generate archives, otherwise we'll assume all files are
            # from the same site and base_dir is the root
            if ($self->{'w3mir'}) {
                my @dirs = split /[\/\\]/, $file;
                my $domname = shift (@dirs);
                $before_hash = &util::filename_cat($domname, $before_hash);
                $before_hash =~ s/\\/\//g; # for windows
            }

        } else {
            # Turn relative file path into full path
            my $dirname = &File::Basename::dirname($file);
            $before_hash = &util::filename_cat($dirname, $before_hash);
            $before_hash = $self->eval_dir_dots($before_hash);
        }

        # make sure there's a slash on the end if it's a directory
        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ("http://" . $before_hash, $hash_part, 1);

    } else {
        # mailto, news, nntp, telnet, javascript or gopher link
        return ($before_hash, "", 0);
    }
}
347
# Extract the metadata fields named in $self->{'metadata_fields'}
# (comma separated) from the html text and add them to $doc_obj at
# $section.  Fields already present in $metadata (e.g. supplied by a
# recursive plugin) are skipped.  Sources tried, in order: a matching
# <meta> tag; then the special fields "Title" (<title> contents,
# falling back to the first 100 characters), "first200" (first 200
# characters of the body) and "H1" (contents of the first <H1>
# element, added only when an <H1> tag is present).
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;

    foreach my $field (split /,/, $self->{'metadata_fields'}) {

        # don't need to extract field if it was passed in from a previous
        # (recursive) plugin
        next if defined $metadata->{$field};

        # see if there's a <meta> tag for this field
        if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$field\"?([^>]*)/is) {
            my $content = $1 . $2;
            # Bug fix: the old pattern /content\s*=\s*\"?(.*?)\"?/ always
            # captured the empty string, because the optional quote and
            # the non-greedy group can both match nothing - so <meta>
            # content was never actually extracted.  Capture either a
            # quoted value or an unquoted token instead.
            if ($content =~ /content\s*=\s*(?:\"([^\"]*)\"|([^\s>]+))/is) {
                my $value = defined $1 ? $1 : $2;
                if (defined $value) {
                    $value =~ s/\s+/ /gs;
                    $doc_obj->add_utf8_metadata($section, $field, $value);
                    next;
                }
            }
        }

        # TITLE: extract the document title

        if ($field =~ /^title$/i) {

            # see if there's a <title> tag
            if ($$textref =~ /<title[^>]*>([^<]*)<\/title[^>]*>/is) {
                if (defined $1) {
                    my $title = $1;
                    if ($title =~ /\w/) {
                        # collapse whitespace and trim
                        $title =~ s/\s+/ /gs;
                        $title =~ s/^\s+//;
                        $title =~ s/\s+$//;
                        $doc_obj->add_utf8_metadata ($section, $field, $title);
                        next;
                    }
                }
            }

            # if no title use first 100 characters
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            $tmptext =~ s/<[^>]*>//g;
            $tmptext = substr ($tmptext, 0, 100);
            $tmptext =~ s/^\s+//;
            $tmptext =~ s/\s+$//;
            $tmptext =~ s/\s\S*$/.../;
            $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            next;
        }

        # FIRST200: extract the first 200 characters as metadata

        if ($field =~ /^first200$/i) {
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            $tmptext =~ s/.*<body[^>]*>//i;
            $tmptext =~ s/<[^>]*>//g;
            $tmptext = substr ($tmptext, 0, 200);
            $tmptext =~ s/^\s+//;
            $tmptext =~ s/\s+$//;
            $tmptext =~ s/\s\S*$/.../;
            $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            next;
        }

        # H1: extract the text between the first <H1> and </H1> tags
        if ($field =~ /^H1$/i) {
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            # only add the metadata when the file actually has an <H1>
            if ($tmptext =~ /<H1[^>]*>/i) {
                $tmptext =~ s/.*<H1[^>]*>//i;
                $tmptext =~ s/<\/H1[^>]*>.*//i;
                $tmptext =~ s/^\s+//;
                $tmptext =~ s/\s+$//;
                $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            }
            next;
        }
    }
}
431
432
# Resolve path components in $filename:
# any "../" steps up to the next directory; any "./" means "here".
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $dirsep_os = &util::get_os_dirsep();
    my @components = split(/$dirsep_os/, $filename);

    my @resolved = ();
    for my $component (@components) {
        if ($component eq ".") {
            next;                       # "here" - drop it
        }
        if ($component eq "..") {
            pop(@resolved);             # step up one directory
            next;
        }
        push(@resolved, $component);
    }

    return &util::filename_cat(@resolved);
}
457
# usemap="./#index" is not handled correctly by browsers once the
# document is served, so strip a leading "./" from the map reference.
sub replace_usemap_links {
    my $self = shift (@_);
    my ($front, $link, $back) = @_;

    my $cleaned = $link;
    $cleaned =~ s/^\.\///;
    return join('', $front, $cleaned, $back);
}
465
# Advance the dir_num/file_num counters used when renaming associated
# files: once file_num reaches 1000, start a new subdirectory and
# restart the file counter at 0.
sub inc_filecount {
    my $self = shift (@_);

    unless ($self->{'file_num'} == 1000) {
        $self->{'file_num'}++;
        return;
    }
    $self->{'dir_num'}++;
    $self->{'file_num'} = 0;
}
476
4771;
Note: See TracBrowser for help on using the repository browser.