source: trunk/gsdl/perllib/plugins/HTMLPlug.pm@ 847

Last change on this file since 847 was 808, checked in by sjboddie, 25 years ago

New html plugin with options

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 13.1 KB
Line 
1###########################################################################
2#
3# HTMLPlug.pm -- basic html plugin
4#
5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34# To use frames properly you'll need to use the WebPlug plugin.
35#
36
37
38package HTMLPlug;
39
40use BasPlug;
41use html;
42use util;
43use parsargv;
44
# Set up inheritance at compile time: HTMLPlug extends BasPlug.
# @ISA is assigned inside BEGIN so the parent relationship is in
# place before any method dispatch on this package occurs.
sub BEGIN {
    @ISA = ('BasPlug');
}
48
# Print usage/help text for the plugin's collect.cfg options on STDERR.
# An interpolating here-document has the same escape semantics as the
# original double-quoted strings, so the output is unchanged.
sub print_usage {
    print STDERR <<"END_OF_USAGE";

Incorrect options passed to HTMLPlug, check your collect.cfg configuration file

 usage: plugin HTMLPlug [options]

 options:
 -process_exp A perl regular expression to match against filenames.
 Matching filenames will be processed by this plugin.
 Defaults to '(?i)\.html?\$' i.e. all documents ending in
 .htm or .html (case-insensitive).
 -nolinks Don't make any attempt to trap links (setting this flag may
 improve speed of building/importing but any relative links within
 documents will be broken).
 -block_exp Files matching this regular expression will be blocked from
 being passed to any further plugins in the list. By default
 HTMLPlug blocks any files with .gif, .jpg, .jpeg, .png, .pdf
 or .rtf file extensions.
 -keep_head Don't remove headers from html files.
 -no_metadata Don't attempt to extract any metadata from files.
 -metadata_fields Comma separated list of metadata fields to attempt to extract.
 Defaults to 'Title'
 -w3mir Set if w3mir was used to generate input file structure.

END_OF_USAGE
}
71
# Constructor.  Builds on a BasPlug instance, then parses the plugin
# options passed from collect.cfg (via parsargv).  Dies with a usage
# message if the options are malformed.
sub new {
    my $class = shift (@_);
    # was: $self = new BasPlug () - an undeclared package global and
    # indirect-object syntax; use a lexical and an explicit class call
    my $self = BasPlug->new ();

    if (!parsargv::parse(\@_,
                         q^process_exp/.*/(?i)\.html?$^, \$self->{'process_exp'},
                         q^nolinks^, \$self->{'nolinks'},
                         q^block_exp/.*/(?i)\.(gif|jpe?g|png|pdf|rtf)$^, \$self->{'block_exp'},
                         q^keep_head^, \$self->{'keep_head'},
                         q^no_metadata^, \$self->{'no_metadata'},
                         q^metadata_fields/.*/Title^, \$self->{'metadata_fields'},
                         q^w3mir^, \$self->{'w3mir'})) {
        &print_usage();
        die "\n";
    }

    # bookkeeping for associated (auxiliary) files - see add_file()
    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    return bless $self, $class;
}
94
# HTMLPlug handles individual files only; it never recurses into
# directories itself, so report false to the plugin framework.
sub is_recursive {
    my ($self) = @_;
    return 0;
}
100
# return number of files processed, undef if can't process
# Note that $base_dir might be "" and that $file might
# include directories
sub read {
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $metadata, $processor) = @_;

    # Decide whether this plugin handles the file: blocked files stop
    # the plugin pipeline (return 0); files that don't match
    # process_exp (or aren't plain files) are passed on to the next
    # plugin (return undef).
    my $filename = &util::filename_cat($base_dir, $file);
    return 0 if $filename =~ /$self->{'block_exp'}/;
    if ($filename !~ /$self->{'process_exp'}/ || !-f $filename) {
        return undef;
    }
    # strip leading path separators so $file is relative
    $file =~ s/^[\/\\]+//;

    $self->{'verbosity'} = $processor->{'verbosity'};
    print STDERR "HTMLPlug: processing $file\n"
        if $self->{'verbosity'} > 1;

    # create a new document
    my $doc_obj = new doc ($file, "indexed_doc");
    my $cursection = $doc_obj->get_top_section();

    # read in HTML file
    # slurp the whole file by clearing the input record separator
    # NOTE(review): 2-arg open with a bareword filehandle; modern code
    # would use 3-arg open with a lexical handle
    open (FILE, $filename) || die "HTMLPlug::read - can't open $filename\n";
    undef $/;
    my $text = <FILE>;
    $/ = "\n";
    close FILE;
    if ($text !~ /\w/) {
        print STDERR "HTMLPlug: ERROR: $file contains no text\n" if $self->{'verbosity'};
        return 0;
    }

    # metadata supplied by an earlier (recursive) plugin takes
    # precedence; only extract from the text when allowed
    $self->add_external_metadata ($metadata, $doc_obj, $cursection);
    $self->extract_metadata (\$text, $metadata, $doc_obj, $cursection)
        unless $self->{'no_metadata'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).
    my $web_url = "http://$file";
    $web_url =~ s/\\/\//g; # for windows
    $doc_obj->add_metadata($cursection, "URL", $web_url);

    # remove header and footer
    if (!$self->{'keep_head'}) {
        # keep only what follows <body ...>, and drop </body>/</html>
        $text =~ s/^.*?<body[^>]*>//is;
        $text =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
    }

    # trap links
    if (!$self->{'nolinks'}) {

        # usemap="./#index" not handled correctly => change to "#index"
        $text =~ s/(<img[^>]*?usemap\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_usemap_links($1, $2, $3)/isge;

        # rewrite href/src on anchors, image maps and frames
        $text =~ s/(<(?:a|area|frame)\s+[^>]*?(?:href|src)\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj)/isge;
    }

    # trap images - associate each one with the document
    $text =~ s/(<img[^>]*?src\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
        $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj)/isge;

    $doc_obj->add_text ($cursection, $text);

    # add an OID
    $doc_obj->set_OID();

    # process the document
    $processor->process($doc_obj);

    return 1; # processed the file
}
177
# Rewrite an <img src=...> tag: the image file is associated with the
# document and the src is redirected to the collection's image area.
# $pre/$post are the pieces of tag text before and after the src value.
sub replace_images {
    my $self = shift (@_);
    my ($pre, $url, $post, $base_dir, $file, $doc_obj) = @_;

    # attribute values may span lines; treat embedded newlines as spaces
    $url =~ tr/\n/ /;

    my ($href, $anchor, $is_local) = $self->format_link ($url, $base_dir, $file);
    my $new_src = $self->add_file ($href, $base_dir, $doc_obj);
    return join ('', $pre, $new_src, $post);
}
187
# Rewrite the href/src attribute of an <a>, <area> or <frame> tag.
# $front and $back are the surrounding tag text.  Document-type links
# become _httpextlink_ macros (resolved at serve time); other file
# types (images, pdfs, ...) are associated with the document.
sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj) = @_;

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back =~ s/target=\"?_parent\"?//is;

    # within-page anchors ("#section") are left untouched
    return $front . $link . $back if $link =~ /^\#/s;
    # attribute values may contain newlines
    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # $rl is 1 when format_link found the target locally.  External
    # links, links to other processable documents, directory links and
    # special-scheme links all go through the _httpextlink_ macro.
    if (($rl == 0) || ($href =~ /$self->{'process_exp'}/) ||
        ($href =~ /\/$/) || ($href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
        $link = $href . $hash_part;
        &html::urlsafe ($link);
        return $front . "_httpextlink_&href=" . $link . "&rl=" . $rl . $back;

    } else {
        # link is to some other type of file (image, pdf etc.) so we'll
        # need to associate that file
        return $front . $self->add_file ($href, $base_dir, $doc_obj) . $back;
    }
}
218
# Associate an auxiliary file (image, pdf, ...) with the document and
# return the "_httpcollimg_/..." path it will be served from.  Files
# are renamed <dir_num>/<file_num><ext> to avoid name clashes.
sub add_file {
    my $self = shift (@_);
    my ($href, $base_dir, $doc_obj) = @_;
    my ($newname);

    my $filename = $href;
    $filename =~ s/^[^:]*:\/\///;   # strip any protocol:// prefix
    $filename = &util::filename_cat ($base_dir, $filename);
    my ($ext) = $filename =~ /(\.[^\.]*)$/;
    # links without an extension previously produced an undef warning
    # and mangled names; use an empty extension instead
    $ext = "" unless defined $ext;
    if (defined $self->{'aux_files'}->{$href}) {
        # seen this href before - reuse the name it was given
        $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
            $self->{'aux_files'}->{$href}->{'file_num'} . $ext;
    } else {
        $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
        # record the mapping so later references to the same href
        # resolve to the same associated file (previously nothing was
        # ever stored in aux_files, so every reference got a new name)
        $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'},
                                         'file_num' => $self->{'file_num'}};
        $self->inc_filecount ();
    }
    $doc_obj->associate_file($filename, $newname);
    return "_httpcollimg_/$newname";
}
238
239
# Normalise a link extracted from the HTML.  Returns a three element
# list ($href, $hash_part, $rl): the link with any "#anchor" split off
# into $hash_part, and $rl set to 1 when the target exists within the
# collection's file structure ("relative link"), 0 otherwise.
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    # split off a trailing "#anchor" part, if any
    my ($before_hash, $hash_part) = $link =~ /^([^\#]*)(\#?.*)$/;
    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ /[\w\.\/]/) {
        # was "($front$link$back)" - $front and $back are not defined
        # in this sub (they are lexicals of replace_href_links), so the
        # message interpolated undefined package globals
        print STDERR "HTMLPlug: ERROR - badly formatted tag ignored ($link)\n"
            if $self->{'verbosity'};
        return ($link, "", 0);
    }

    if ($before_hash =~ s/^((?:http|ftp|file):\/\/)//i) {
        my $type = $1;

        if ($link =~ /^(http|ftp):/i) {
            # Turn url (using /) into file name (possibly using \ on windows)
            my @http_dir_split = split('/', $before_hash);
            $before_hash = &util::filename_cat(@http_dir_split);
        }

        $before_hash = $self->eval_dir_dots($before_hash);

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

        # rl=1 only if the (mirrored) target actually exists on disk
        my $rl = 0;
        $rl = 1 if (-e $linkfilename);

        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ($type . $before_hash, $hash_part, $rl);

    } elsif ($link !~ /^(mailto|news|gopher|nntp|telnet|javascript):/i) {

        if ($before_hash =~ s/^\///) {
            # the first directory will be the domain name if w3mir was used
            # to generate archives, otherwise we'll assume all files are
            # from the same site and base_dir is the root
            if ($self->{'w3mir'}) {
                my @dirs = split /[\/\\]/, $file;
                my $domname = shift (@dirs);
                $before_hash = &util::filename_cat($domname, $before_hash);
                $before_hash =~ s/\\/\//g; # for windows
            }

        } else {
            # Turn relative file path into full path
            my $dirname = &File::Basename::dirname($file);
            $before_hash = &util::filename_cat($dirname, $before_hash);
            $before_hash = $self->eval_dir_dots($before_hash);
        }

        # make sure there's a slash on the end if it's a directory
        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ("http://" . $before_hash, $hash_part, 1);

    } else {
        # mailto, news, nntp, telnet, javascript or gopher link
        return ($before_hash, "", 0);
    }
}
308
# Add metadata that has been provided externally (i.e. by
# a previous (recursive) plugin
sub add_external_metadata {
    my $self = shift (@_);
    my ($metadata, $doc_obj, $section) = @_;

    # lexical loop variable (was a leaking package global)
    foreach my $field (keys (%$metadata)) {
        # $metadata->{$field} may be a scalar or an array reference
        # holding several values for the same field
        if (ref ($metadata->{$field}) eq "ARRAY") {
            # plain loop instead of map-in-void-context
            foreach my $value (@{$metadata->{$field}}) {
                $doc_obj->add_metadata ($section, $field, $value);
            }
        } else {
            $doc_obj->add_metadata ($section, $field, $metadata->{$field});
        }
    }
}
326
# Extract each field listed in -metadata_fields from the document
# text.  Values are taken from matching <meta> tags; "Title"
# additionally falls back to the <title> tag and then to the first
# 100 characters of the tag-stripped text.
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;

    # lexical loop variable (was a leaking package global)
    foreach my $field (split /,/, $self->{'metadata_fields'}) {

        # don't need to extract field if it was passed in from a previous
        # (recursive) plugin
        next if defined $metadata->{$field};

        # see if there's a <meta> tag for this field
        if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$field\"?([^>]*)/is) {
            my $content = $1 . $2;
            # was content\s*=\s*\"?(.*?)\"? - the non-greedy capture in
            # front of an optional quote always matched the empty
            # string, so the extracted value was always ""
            if ($content =~ /content\s*=\s*\"?([^\"]*)\"?/is) {
                if (defined $1) {
                    my $value = $1;
                    $value =~ s/\s+/ /gs;
                    $doc_obj->add_metadata($section, $field, $value);
                    next;
                }
            }
        }

        # special case for Title metadata - try <title> tags
        # then first 100 characters of text

        if ($field =~ /^title$/i) {

            # see if there's a <title> tag
            if ($$textref =~ /<title[^>]*>([^<]*)<\/title[^>]*>/is) {
                if (defined $1) {
                    my $title = $1;
                    if ($title =~ /\w/) {
                        $title =~ s/\s+/ /gs;
                        $doc_obj->add_metadata ($section, $field, $title);
                        next;
                    }
                }
            }

            # if no title use first 100 characters
            my $tmptext = $$textref;
            $tmptext =~ s/<[^>]*>//g;
            my $title = substr ($tmptext, 0, 100);
            $title =~ s/\s+/ /gs;
            $doc_obj->add_metadata ($section, $field, $title);
        }
    }
}
376
# Canonicalise a path: resolve ".." components against the directory
# stack built so far and drop "." components entirely.
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $sep = &util::get_os_dirsep();

    my @resolved;
    foreach my $component (split (/$sep/, $filename)) {
        if ($component eq "..") {
            # step back up one directory
            pop (@resolved);
        } elsif ($component ne ".") {
            push (@resolved, $component);
        }
        # "." components are simply discarded
    }

    return &util::filename_cat (@resolved);
}
401
# usemap values like "./#index" are not handled correctly once the
# page is served from the collection, so strip a leading "./" from
# the map reference.  $prefix/$suffix are the surrounding tag text.
sub replace_usemap_links {
    my ($self, $prefix, $target, $suffix) = @_;

    $target =~ s/^\.\///;
    return "$prefix$target$suffix";
}
409
# Advance the (dir_num, file_num) counters used to name associated
# files, rolling over into a new directory after 1000 files so that
# no single directory grows too large.
sub inc_filecount {
    my $self = shift (@_);

    # was == 1000, which let 1001 files (numbered 0 .. 1000)
    # accumulate in each directory before rolling over
    if ($self->{'file_num'} == 999) {
        $self->{'dir_num'} ++;
        $self->{'file_num'} = 0;
    } else {
        $self->{'file_num'} ++;
    }
}
420
4211;
Note: See TracBrowser for help on using the repository browser.