source: trunk/gsdl/perllib/plugins/HTMLPlug.pm@ 965

Last change on this file since 965 was 965, checked in by sjboddie, 24 years ago

fixed bug - added assoc_files option

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 14.8 KB
RevLine 
[585]1###########################################################################
2#
3# HTMLPlug.pm -- basic html plugin
[808]4#
[585]5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
[808]27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34# To use frames properly you'll need to use the WebPlug plugin.
35#
[585]36
[808]37
[585]38package HTMLPlug;
39
40use BasPlug;
[721]41use html;
[585]42use util;
[808]43use parsargv;
[585]44
45sub BEGIN {
46 @ISA = ('BasPlug');
47}
48
[850]49use strict;
50
# Print HTMLPlug's usage information to STDERR.
# Called from new() when parsargv fails to parse the plugin options.
# Fix: removed a stray leftover line that printed a meaningless
# " w3mir \n" fragment into the usage text.
sub print_usage {
    print STDERR "\nIncorrect options passed to HTMLPlug, check your collect.cfg configuration file\n";

    print STDERR "\n usage: plugin HTMLPlug [options]\n\n";
    print STDERR " options:\n";
    print STDERR "   -process_exp         A perl regular expression to match against filenames.\n";
    print STDERR "                        Matching filenames will be processed by this plugin.\n";
    print STDERR "                        Defaults to '(?i)\.html?\$' i.e. all documents ending in\n";
    print STDERR "                        .htm or .html (case-insensitive).\n";
    print STDERR "   -nolinks             Don't make any attempt to trap links (setting this flag may\n";
    print STDERR "                        improve speed of building/importing but any relative links within\n";
    print STDERR "                        documents will be broken).\n";
    print STDERR "   -block_exp           Files matching this regular expression will be blocked from\n";
    print STDERR "                        being passed to any further plugins in the list. By default\n";
    print STDERR "                        HTMLPlug blocks any files with .gif, .jpg, .jpeg, .png, .pdf,\n";
    print STDERR "                        .rtf or .css file extensions.\n";
    print STDERR "   -keep_head           Don't remove headers from html files.\n";
    print STDERR "   -no_metadata         Don't attempt to extract any metadata from files.\n";
    print STDERR "   -metadata_fields     Comma separated list of metadata fields to attempt to extract.\n";
    print STDERR "                        Defaults to 'Title'\n";
    print STDERR "   -w3mir               Set if w3mir was used to generate input file structure.\n";
    print STDERR "   -assoc_files         Perl regular expression of file extensions to associate with\n";
    print STDERR "                        html documents. Defaults to '(?i)\.(jpe?g|gif|png|css|pdf)\$'\n";
    print STDERR "   -rename_assoc_files  Renames files associated with documents (e.g. images). Also\n";
    print STDERR "                        creates much shallower directory structure (useful when creating\n";
    print STDERR "                        collections to go on cd-rom).\n\n";
}
79
# Constructor.  Parses the plugin's configuration options (as given in
# collect.cfg / the plugin list) into fields on $self and initialises
# the state used for renaming associated files.
sub new {
    my $class = shift (@_);
    my $self = new BasPlug ();

    # Each parsargv spec is option-name[/value-pattern/default]; the
    # parsed value is written through the reference that follows it.
    if (!parsargv::parse(\@_,
			 q^process_exp/.*/(?i)\.html?$^, \$self->{'process_exp'},
			 q^nolinks^, \$self->{'nolinks'},
			 q^block_exp/.*/(?i)\.(gif|jpe?g|png|pdf|rtf|css)$^, \$self->{'block_exp'},
			 q^keep_head^, \$self->{'keep_head'},
			 q^no_metadata^, \$self->{'no_metadata'},
			 q^metadata_fields/.*/Title^, \$self->{'metadata_fields'},
			 q^w3mir^, \$self->{'w3mir'},
			 q^assoc_files/.*/(?i)\.(jpe?g|gif|png|css|pdf)$^, \$self->{'assoc_files'},
			 q^rename_assoc_files^, \$self->{'rename_assoc_files'})) {
	&print_usage();
	die "\n";
    }

    # State for -rename_assoc_files: 'aux_files' maps an href to the
    # dir/file numbers already assigned to it; 'dir_num'/'file_num' are
    # the counters used to generate new names (see inc_filecount).
    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    return bless $self, $class;
}
104
# HTMLPlug does not recurse into directories itself, so this always
# answers false (0) when the plugin framework asks.
sub is_recursive {
    my $self = shift (@_);
    return 0;
}
110
# Process a single file: slurp the HTML, extract metadata, trap links
# and images, and hand the resulting document to the processor.
# Returns 1 if the file was processed, 0 if it was blocked or empty,
# and undef if this plugin can't process it.
# Note that $base_dir might be "" and that $file might
# include directories.
sub read {
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $metadata, $processor) = @_;

    my $filename = &util::filename_cat($base_dir, $file);
    return 0 if $filename =~ /$self->{'block_exp'}/;
    if ($filename !~ /$self->{'process_exp'}/ || !-f $filename) {
	return undef;
    }
    $file =~ s/^[\/\\]+//;

    $self->{'verbosity'} = $processor->{'verbosity'};
    print STDERR "HTMLPlug: processing $file\n"
	if $self->{'verbosity'} > 1;

    # create a new document
    my $doc_obj = new doc ($file, "indexed_doc");
    my $cursection = $doc_obj->get_top_section();

    # Read in the HTML file.  Use a lexical filehandle with the
    # three-argument form of open (the old two-argument open with a
    # bareword handle allowed mode characters in $filename to change
    # the open mode), and localise $/ so slurp mode can't leak out and
    # clobber the input record separator for other code.
    my $text;
    {
	open (my $fh, '<', $filename) || die "HTMLPlug::read - can't open $filename\n";
	local $/ = undef;	# slurp the whole file
	$text = <$fh>;
	close $fh;
    }
    if (!defined $text || $text !~ /\w/) {
	print STDERR "HTMLPlug: ERROR: $file contains no text\n" if $self->{'verbosity'};
	return 0;
    }

    $self->extra_metadata ($doc_obj, $cursection, $metadata);
    $self->extract_metadata (\$text, $metadata, $doc_obj, $cursection)
	unless $self->{'no_metadata'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).
    my $web_url = "http://$file";
    $web_url =~ s/\\/\//g; # for windows
    $doc_obj->add_metadata($cursection, "URL", $web_url);

    # remove header and footer
    if (!$self->{'keep_head'}) {
	$text =~ s/^.*?<body[^>]*>//is;
	$text =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
    }

    # trap links
    if (!$self->{'nolinks'}) {

	# usemap="./#index" not handled correctly => change to "#index"
	$text =~ s/(<img[^>]*?usemap\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
	    $self->replace_usemap_links($1, $2, $3)/isge;

	$text =~ s/(<(?:a|area|frame|link)\s+[^>]*?(?:href|src)\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
	    $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
    }

    # trap images
    $text =~ s/(<img[^>]*?src\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
	$self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;

    $doc_obj->add_text ($cursection, $text);

    # add an OID
    $doc_obj->set_OID();

    # process the document
    $processor->process($doc_obj);

    return 1; # processed the file
}
187
# Rewrite an <img ... src=...> reference so the image can be associated
# with the document.  $pre and $post are the tag text before and after
# the src value; $url is the src value itself.
sub replace_images {
    my $self = shift (@_);
    my ($pre, $url, $post, $base_dir,
	$file, $doc_obj, $section) = @_;

    # src values may have been split over several lines within the tag
    $url =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($url, $base_dir, $file);
    my $replacement = $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section);
    return $pre . $replacement . $post;
}
198
# Rewrite an href/src attribute found in an <a>, <area>, <frame> or
# <link> tag.  $front and $back are the tag text either side of the
# link value ($link).  Links to other pages become _httpextlink_
# references; links to associated files (images, pdfs etc.) are passed
# to add_file.
sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back =~ s/target=\"?_parent\"?//is;

    # within-page anchors ("#...") are left untouched
    return $front . $link . $back if $link =~ /^\#/s;
    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # the filename part of the href, with any "proto://" prefix stripped
    my ($filename) = $href =~ /^(?:.*?):(?:\/\/)?(.*)/;

    ##### leave all these links alone (they won't be picked up by intermediate
    ##### pages). I think that's safest when dealing with frames, targets etc.
    ##### (at least until I think of a better way to do it). Problems occur with
    ##### mailto links from within small frames, the intermediate page is displayed
    ##### within that frame and can't be seen. There is still potential for this to
    ##### happen even with html pages - the solution seems to be to somehow tell
    ##### the browser from the server side to display the page being sent (i.e.
    ##### the intermediate page) in the top level window - I'm not sure if that's
    ##### possible - the following line should probably be deleted if that can be done
    return $front . $link . $back if $href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/is;


    # External links ($rl == 0), links to other processable documents,
    # and directory links go through the external-link mechanism.
    # NOTE(review): the mailto/news/... alternative below looks
    # unreachable - those links already returned above.
    if (($rl == 0) || ($filename =~ /$self->{'process_exp'}/) ||
	($href =~ /\/$/) || ($href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
	&html::urlsafe ($href);
	return $front . "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part . $back;

    } else {
	# link is to some other type of file (image, pdf etc.) so we'll
	# need to associate that file
	return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
    }
}
[721]242
# Associate a file (image, pdf, css etc.) with the current document and
# return the URL fragment that should replace the original link text.
# Files whose extension doesn't match the assoc_files expression are
# left as external links instead.
sub add_file {
    my $self = shift (@_);
    my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;

    # strip any "proto://" prefix and anchor the path under base_dir
    my $assoc_filename = $href;
    $assoc_filename =~ s/^[^:]*:\/\///;
    $assoc_filename = &util::filename_cat ($base_dir, $assoc_filename);

    my ($ext) = $assoc_filename =~ /(\.[^\.]*)$/;
    unless (defined ($ext) && $ext =~ /$self->{'assoc_files'}/) {
	# not a file type we associate - leave it as an external link
	return "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part;
    }

    my $target;
    if ($self->{'rename_assoc_files'}) {
	# Give the file a short numeric name in a shallow directory
	# structure, remembering the assignment so repeated references
	# to the same href reuse the same name.
	my $seen = $self->{'aux_files'}->{$href};
	if (defined $seen) {
	    $target = $seen->{'dir_num'} . "/" . $seen->{'file_num'} . $ext;
	} else {
	    $target = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
	    $self->{'aux_files'}->{$href} =
		{'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
	    $self->inc_filecount ();
	}
	$doc_obj->associate_file($assoc_filename, $target, undef, $section);
	return "_httpcollimg_/$target";
    }

    # keep the file's original (base) name
    ($target) = $assoc_filename =~ /([^\/\\]*)$/;
    $doc_obj->associate_file($assoc_filename, $target, undef, $section);
    return "_httpcollimg_/_thisOID_/$target";
}
[585]275
[721]276
# Normalise a link taken from an html document.
# Returns ($href, $hash_part, $rl): $href is the normalised link
# (local links get an "http://" prefix), $hash_part is any trailing
# "#anchor", and $rl is 1 when the link resolves to something that
# exists under $base_dir (a "relative" / in-collection link), else 0.
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    # split off any "#anchor" suffix
    my ($before_hash, $hash_part) = $link =~ /^([^\#]*)(\#?.*)$/;
    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ /[\w\.\/]/) {
	print STDERR "HTMLPlug: ERROR - badly formatted tag ignored ($link)\n"
	    if $self->{'verbosity'};
	return ($link, "", 0);
    }

    # absolute http/ftp/file links: strip the protocol (kept in $type)
    # and see whether the target exists inside the collection
    if ($before_hash =~ s/^((?:http|ftp|file):\/\/)//i) {
	my $type = $1;

	if ($link =~ /^(http|ftp):/i) {
	    # Turn url (using /) into file name (possibly using \ on windows)
	    my @http_dir_split = split('/', $before_hash);
	    $before_hash = &util::filename_cat(@http_dir_split);
	}

	$before_hash = $self->eval_dir_dots($before_hash);

	my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

	# relative iff the linked file exists locally
	my $rl = 0;
	$rl = 1 if (-e $linkfilename);

	# make sure there's a slash on the end if it's a directory
	if ($before_hash !~ /\/$/) {
	    $before_hash .= "/" if (-d $linkfilename);
	}

	return ($type . $before_hash, $hash_part, $rl);

    } elsif ($link !~ /^(mailto|news|gopher|nntp|telnet|javascript):/i) {

	# site-relative or document-relative link
	if ($before_hash =~ s/^\///) {
	    # the first directory will be the domain name if w3mir was used
	    # to generate archives, otherwise we'll assume all files are
	    # from the same site and base_dir is the root
	    if ($self->{'w3mir'}) {
		my @dirs = split /[\/\\]/, $file;
		my $domname = shift (@dirs);
		$before_hash = &util::filename_cat($domname, $before_hash);
		$before_hash =~ s/\\/\//g; # for windows
	    }

	} else {
	    # Turn relative file path into full path
	    my $dirname = &File::Basename::dirname($file);
	    $before_hash = &util::filename_cat($dirname, $before_hash);
	    $before_hash = $self->eval_dir_dots($before_hash);
	}

	# make sure there's a slash on the end if it's a directory
	my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
	if ($before_hash !~ /\/$/) {
	    $before_hash .= "/" if (-d $linkfilename);
	}

	return ("http://" . $before_hash, $hash_part, 1);

    } else {
	# mailto, news, nntp, telnet, javascript or gopher link
	return ($before_hash, "", 0);
    }
}
[721]345
# Extract the metadata fields named in the metadata_fields option from
# the html source ($$textref).  Each field is looked for in a <meta>
# tag first; for Title there is a fallback to the <title> tag and then
# to the first 100 characters of the document text.
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;

    foreach my $field (split /,/, $self->{'metadata_fields'}) {

	# don't need to extract field if it was passed in from a previous
	# (recursive) plugin
	next if defined $metadata->{$field};

	# see if there's a <meta> tag for this field
	if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$field\"?([^>]*)/is) {
	    my $content = $1 . $2;
	    # Bug fix: the old pattern /content\s*=\s*\"?(.*?)\"?/ always
	    # captured an empty string, because everything after the lazy
	    # group was optional so the regex could succeed without
	    # consuming the value.  Capture the quoted value (or an
	    # unquoted run of non-whitespace) explicitly instead.
	    if ($content =~ /content\s*=\s*(?:\"([^\"]*)\"|([^\s>]+))/is) {
		my $value = defined $1 ? $1 : $2;
		if (defined $value) {
		    $value =~ s/\s+/ /gs;	# collapse whitespace runs
		    $doc_obj->add_metadata($section, $field, $value);
		    next;
		}
	    }
	}

	# special case for Title metadata - try <title> tags
	# then first 100 characters of text

	if ($field =~ /^title$/i) {

	    # see if there's a <title> tag
	    if ($$textref =~ /<title[^>]*>([^<]*)<\/title[^>]*>/is) {
		if (defined $1) {
		    my $title = $1;
		    if ($title =~ /\w/) {
			$title =~ s/\s+/ /gs;
			$doc_obj->add_metadata ($section, $field, $title);
			next;
		    }
		}
	    }

	    # if no title use first 100 characters
	    my $tmptext = $$textref;
	    $tmptext =~ s/<[^>]*>//g;	# strip all tags
	    my $title = substr ($tmptext, 0, 100);
	    $title =~ s/\s+/ /gs;
	    $doc_obj->add_metadata ($section, $field, $title);
	}
    }
}
395
# Resolve "." and ".." components in $filename:
# ".." discards the previous path component, "." is simply dropped.
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $dirsep_os = &util::get_os_dirsep();
    my @components = split(/$dirsep_os/, $filename);

    my @resolved = ();
    foreach my $component (@components) {
	if ($component eq "..") {
	    # back up one directory level
	    pop(@resolved);
	} elsif ($component ne ".") {
	    push(@resolved, $component);
	}
	# "." needs no action at all
    }

    return &util::filename_cat(@resolved);
}
420
# usemap="./#index" is not handled correctly, so strip any leading
# "./" from a usemap target and reassemble the tag text.
sub replace_usemap_links {
    my $self = shift (@_);
    my ($front, $link, $back) = @_;

    $link =~ s/^\.\///;
    return join ('', $front, $link, $back);
}
428
# Advance the counters used to name renamed associated files (see
# add_file).  Files are numbered 0..999 within each directory; once a
# directory is full the directory counter advances and the file
# counter resets.
# Fix: the old test of file_num == 1000 ran *after* that number had
# already been handed out, so each directory received 1001 files
# (0 through 1000) instead of 1000.
sub inc_filecount {
    my $self = shift (@_);

    if ($self->{'file_num'} >= 999) {
	$self->{'dir_num'} ++;
	$self->{'file_num'} = 0;
    } else {
	$self->{'file_num'} ++;
    }
}
[721]439
[585]4401;
Note: See TracBrowser for help on using the repository browser.