########################################################################### # # HTMLPlug.pm -- basic html plugin # # A component of the Greenstone digital library software # from the New Zealand Digital Library Project at the # University of Waikato, New Zealand. # # Copyright (C) 1999 New Zealand Digital Library Project # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # ########################################################################### # # Note that this plugin handles frames only in a very simple way # i.e. each frame is treated as a separate document. This means # search results will contain links to individual frames rather # than linking to the top level frameset. # There may also be some problems caused by the _parent target # (it's removed by this plugin) # To use frames properly you'll need to use the WebPlug plugin. 
#
package HTMLPlug;

use BasPlug;
use ghtml;
use unicode;
use util;
use parsargv;
use File::Basename;   # format_link() calls File::Basename::dirname()

sub BEGIN {
    @ISA = ('BasPlug');
}

# Print a summary of this plugin's command-line options to STDERR.
# Called from new() when option parsing fails.
sub print_usage {
    print STDERR "\n  usage: plugin HTMLPlug [options]\n\n";
    print STDERR "  options:\n";
    print STDERR "   -nolinks               Don't make any attempt to trap links (setting this flag may\n";
    print STDERR "                          improve speed of building/importing but any relative links within\n";
    print STDERR "                          documents will be broken).\n";
    print STDERR "   -keep_head             Don't remove headers from html files.\n";
    print STDERR "   -no_metadata           Don't attempt to extract any metadata from files.\n";
    print STDERR "   -metadata_fields       Comma separated list of metadata fields to attempt to extract.\n";
    print STDERR "                          Defaults to 'Title'.\n";
    print STDERR "                          Use 'tag<tagname>' to have the contents of the first <tagname>\n";
    print STDERR "                          pair put in a metadata element called 'tagname'. Capitalise\n";
    print STDERR "                          'tagname' as you want the metadata capitalised in the GML\n";
    print STDERR "                          file, since the tag extraction is case insensitive.\n";
    print STDERR "   -hunt_creator_metadata Find as much metadata as possible on authorship and place it in the\n";
    print STDERR "                          'Creator' field. Requires the -metadata_fields flag.\n";
    print STDERR "   -file_is_url           Set if input filenames make up url of original source documents\n";
    print STDERR "                          e.g. if a web mirroring tool was used to create the import\n";
    print STDERR "                          directory structure\n";
    print STDERR "   -assoc_files           Perl regular expression of file extensions to associate with\n";
    print STDERR "                          html documents. Defaults to '(?i)\.(jpe?g|gif|png|css)\$'\n";
    print STDERR "   -rename_assoc_files    Renames files associated with documents (e.g. images). Also\n";
    print STDERR "                          creates much shallower directory structure (useful when creating\n";
    print STDERR "                          collections to go on cd-rom).\n\n";
    print STDERR "   -title_sub             Substitution expression to modify string stored as Title.\n";
    print STDERR "                          Used by, for example, PDFHtml to remove Page 1 etc from text\n";
    print STDERR "                          chosen to be used as the title.\n";
}

# Constructor.  Parses HTMLPlug-specific options (anything unrecognised
# is passed through to BasPlug via "allow_extra_options") and initialises
# the bookkeeping used when renaming associated files.
sub new {
    my $class = shift (@_);
    my $self = new BasPlug ($class, @_);

    if (!parsargv::parse(\@_,
                         q^nolinks^, \$self->{'nolinks'},
                         q^keep_head^, \$self->{'keep_head'},
                         q^no_metadata^, \$self->{'no_metadata'},
                         q^metadata_fields/.*/Title^, \$self->{'metadata_fields'},
                         q^hunt_creator_metadata^, \$self->{'hunt_creator_metadata'},
                         q^w3mir^, \$self->{'w3mir'},
                         q^file_is_url^, \$self->{'file_is_url'},
                         q^assoc_files/.*/(?i)\.(jpe?g|gif|png|css)$^, \$self->{'assoc_files'},
                         q^rename_assoc_files^, \$self->{'rename_assoc_files'},
                         q^title_sub/.*/^, \$self->{'title_sub'},
                         "allow_extra_options")) {
        print STDERR "\nIncorrect options passed to HTMLPlug, check your collect.cfg configuration file\n";
        &print_usage();
        die "\n";
    }

    # retain this for backward compatibility (w3mir option was replaced by
    # file_is_url)
    if ($self->{'w3mir'}) {
        $self->{'file_is_url'} = 1;
    }

    # state used by add_file()/inc_filecount() when -rename_assoc_files
    # is set: maps original href -> {dir_num, file_num}
    $self->{'aux_files'} = {};
    $self->{'dir_num'}  = 0;
    $self->{'file_num'} = 0;

    return bless $self, $class;
}

# Default regexp of files this plugin blocks (images/stylesheets are
# handled as associated files, not as documents in their own right).
sub get_default_block_exp {
    my $self = shift (@_);

    return q^(?i)\.(gif|jpe?g|png|css)$^;
}

# Default regexp of filenames this plugin processes.
sub get_default_process_exp {
    my $self = shift (@_);

    # the last option is an attempt to encode the concept of an html query ...
    return q^(?i)(\.html?|\.shtml|\.shm|\.asp|\.php|\.cgi|.+\?.+=.*)$^;
}

# do plugin specific processing of doc_obj:
# extract metadata, record the source URL, strip the HTML header/footer,
# rewrite links/usemaps/images, and add the text to the document object.
# Returns 1 on success.
sub process {
    my $self = shift (@_);
    my ($textref, $pluginfo, $base_dir, $file, $metadata, $doc_obj) = @_;
    my $outhandle = $self->{'outhandle'};

    print $outhandle "HTMLPlug: processing $file\n"
        if $self->{'verbosity'} > 1;

    my $cursection = $doc_obj->get_top_section();

    $self->extract_metadata ($textref, $metadata, $doc_obj, $cursection)
        unless $self->{'no_metadata'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).
    my $web_url = "http://$file";
    $web_url =~ s/\\/\//g; # for windows
    $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);

    # remove header and footer
    # (the <body...> pattern had been lost from this file; restored so the
    # substitution actually strips everything up to and including <body>)
    if (!$self->{'keep_head'}) {
        $$textref =~ s/^.*?<body[^>]*>//is;
        $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
    }

    # trap links
    if (!$self->{'nolinks'}) {
        # usemap="./#index" not handled correctly => change to "#index"
        $$textref =~ s/(<img[^>]*?usemap\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_usemap_links($1, $2, $3)/isge;

        $$textref =~ s/(<(?:a|area|frame|link)\s+[^>]*?\s*(?:href|src)\s*=\s*\"?)([^\">\s]+)(\"?[^>]*>)/
            $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
    }

    # trap images
    # allow spaces if inside quotes - jrm21
    $$textref =~ s/(<img[^>]*?src\s*=\s*)(\"[^\"]+\"|[^\s>]+)([^>]*>)/
        $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;

    # add text to document object
    $doc_obj->add_utf8_text($cursection, $$textref);

    return 1;
}

# Rewrite one <img src=...> occurrence: resolve the link and either turn
# it into an external-link macro or associate the image file with the
# document (via add_file).  Returns the replacement tag text.
sub replace_images {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # remove quotes from link at start and end if necessary
    if ($link =~ /^\"/) {
        $link =~ s/^\"//;
        $link =~ s/\"$//;
        $front .= '"';
        $back = "\"$back";
    }

    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
    return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
}

# Rewrite one <a|area|frame|link href/src=...> occurrence.  Internal
# document links become _httpextlink_ macros; links to associatable files
# (e.g. images) are handled by add_file.  Returns the replacement text.
sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back  =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back  =~ s/target=\"?_parent\"?//is;

    # same-page anchors are left alone
    return $front . $link . $back if $link =~ /^\#/s;

    $link =~ s/\n/ /g;

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # href may use '\'s where '/'s should be on Windows
    $href =~ s/\\/\//g;

    my ($filename) = $href =~ /^(?:.*?):(?:\/\/)?(.*)/;

    ##### leave all these links alone (they won't be picked up by intermediate
    ##### pages). I think that's safest when dealing with frames, targets etc.
    ##### (at least until I think of a better way to do it). Problems occur with
    ##### mailto links from within small frames, the intermediate page is displayed
    ##### within that frame and can't be seen. There is still potential for this to
    ##### happen even with html pages - the solution seems to be to somehow tell
    ##### the browser from the server side to display the page being sent (i.e.
    ##### the intermediate page) in the top level window - I'm not sure if that's
    ##### possible - the following line should probably be deleted if that can be done
    return $front . $link . $back
        if $href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/is;

    if (($rl == 0) || ($filename =~ /$self->{'process_exp'}/) ||
        ($href =~ /\/$/) || ($href =~ /^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
        &ghtml::urlsafe ($href);
        return $front . "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part . $back;
    } else {
        # link is to some other type of file (eg image) so we'll
        # need to associate that file
        return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
    }
}

# Associate a file (usually an image) with the document and return the
# macro-ised path to use in its place.  Files whose extension doesn't
# match -assoc_files fall back to an external-link macro.  When
# -rename_assoc_files is set, files get short numeric dir/file names.
sub add_file {
    my $self = shift (@_);
    my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;
    my ($newname);

    my $filename = $href;
    $filename =~ s/^[^:]*:\/\///;   # strip any scheme:// prefix
    $filename = &util::filename_cat($base_dir, $filename);
    my ($ext) = $filename =~ /(\.[^\.]*)$/;

    if ((!defined $ext) || ($ext !~ /$self->{'assoc_files'}/)) {
        return "_httpextlink_&rl=" . $rl . "&href=" . $href . $hash_part;
    }

    if ($self->{'rename_assoc_files'}) {
        if (defined $self->{'aux_files'}->{$href}) {
            # already seen this file - reuse its assigned name
            $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
                $self->{'aux_files'}->{$href}->{'file_num'} . $ext;
        } else {
            $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
            $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'},
                                             'file_num' => $self->{'file_num'}};
            $self->inc_filecount ();
        }
        $doc_obj->associate_file($filename, $newname, undef, $section);
        return "_httpcollimg_/$newname";

    } else {
        ($newname) = $filename =~ /([^\/\\]*)$/;
        $doc_obj->associate_file($filename, $newname, undef, $section);
        return "_httpdocimg_/$newname";
    }
}

# Normalise a link found in $file (relative to $base_dir) into
# ($href, $hash_part, $rl) where $rl is 1 if the link target exists
# locally (i.e. the link is "relative" to the collection).
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    my ($before_hash, $hash_part) = $link =~ /^([^\#]*)(\#?.*)$/;
    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ /[\w\.\/]/) {
        my $outhandle = $self->{'outhandle'};
        print $outhandle "HTMLPlug: ERROR - badly formatted tag ignored ($link)\n"
            if $self->{'verbosity'};
        return ($link, "", 0);
    }

    if ($before_hash =~ s/^((?:http|ftp|file):\/\/)//i) {
        my $type = $1;

        if ($link =~ /^(http|ftp):/i) {
            # Turn url (using /) into file name (possibly using \ on windows)
            my @http_dir_split = split('/', $before_hash);
            $before_hash = &util::filename_cat(@http_dir_split);
        }

        $before_hash = $self->eval_dir_dots($before_hash);

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);

        my $rl = 0;
        $rl = 1 if (-e $linkfilename);

        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }

        return ($type . $before_hash, $hash_part, $rl);

    } elsif ($link !~ /^(mailto|news|gopher|nntp|telnet|javascript):/i) {

        if ($before_hash =~ s/^\/// || $before_hash =~ /\\/) {

            # the first directory will be the domain name if file_is_url
            # to generate archives, otherwise we'll assume all files are
            # from the same site and base_dir is the root

            # This is not called in Windows
            if ($self->{'file_is_url'}) {
                my @dirs = split /[\/\\]/, $file;
                my $domname = shift (@dirs);
                $before_hash = &util::filename_cat($domname, $before_hash);
                $before_hash =~ s/\\/\//g; # for windows
            } else {
                # see if link shares directory with source document
                # => turn into relative link if this is so!
                my $before_hash_fix = "";
                # filename_cat appends / or \ to the beginning of the
                # file pathname but in Windows we don't want to do that
                if ($ENV{'GSDLOS'} =~ /^windows/i) {
                    $before_hash_fix = $before_hash;
                } else {
                    $before_hash_fix = &util::filename_cat("", $before_hash);
                }
                my $base_dir_win_match = $base_dir;
                $base_dir_win_match =~ s/\\/\\\\/g;
                $before_hash_fix =~ s/^$base_dir_win_match(\\|\/)//;
                $before_hash = $before_hash_fix;
            }
        } else {
            # Turn relative file path into full path
            my $dirname = &File::Basename::dirname($file);
            $before_hash = &util::filename_cat($dirname, $before_hash);
            $before_hash = $self->eval_dir_dots($before_hash);
        }

        my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
        # make sure there's a slash on the end if it's a directory
        if ($before_hash !~ /\/$/) {
            $before_hash .= "/" if (-d $linkfilename);
        }
        return ("http://" . $before_hash, $hash_part, 1);

    } else {
        # mailto, news, nntp, telnet, javascript or gopher link
        return ($before_hash, "", 0);
    }
}

# For each size N in $self->{'first'} (comma-separated), store the first
# N characters of the document's plain text as "FirstN" metadata.
# NOTE(review): 'first' is not parsed in this plugin's new(); presumably
# it is set up by BasPlug - confirm before relying on it.
sub extract_first_NNNN_characters {
    my $self = shift (@_);
    my ($textref, $doc_obj, $thissection) = @_;

    foreach my $size (split /,/, $self->{'first'}) {
        my $tmptext = $$textref;
        $tmptext =~ s/.*<body[^>]*>//i;   # drop everything up to <body>
        $tmptext =~ s/$self->{'title_sub'}// if (defined $self->{'title_sub'});
        $tmptext =~ s/<[^>]*>/ /g;        # strip remaining tags
        $tmptext =~ s/&nbsp;/ /g;
        $tmptext =~ s/^\s+//;
        $tmptext =~ s/\s+$//;
        $tmptext =~ s/\s+/ /gs;
        $tmptext = substr ($tmptext, 0, $size);
        $tmptext =~ s/\s\S*$/.../;        # don't end mid-word
        $doc_obj->add_utf8_metadata ($thissection, "First$size", $tmptext);
    }
}

# Extract the metadata fields named in -metadata_fields from the HTML:
# from <meta> tags, from the <title> tag (or first 100 characters of text
# as a fallback), or from the first <tagname>...</tagname> pair for
# 'tagXXX' style fields.  Optionally hunts authorship metadata into the
# 'Creator' field first.
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;
    my $outhandle = $self->{'outhandle'};

    # if we don't want metadata, we may as well not be here ...
    return if (!defined $self->{'metadata_fields'});

    # hunt for an author look in the <meta> elements:
    if (defined $self->{'hunt_creator_metadata'}) {
        for my $name (split /,/, "AUTHOR,AUTHOR.EMAIL,CREATOR,DC.CREATOR,DC.CREATOR.CORPORATENAME") {
            if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$name\"?([^>]*)/is) {
                my $content = $1 . $2;
                if ($content =~ /content\s*=\s*\"?(.*)\"?/is) {
                    if (defined $1) {
                        my $value = $1;
                        $value =~ s/\"$//;
                        $value =~ s/\s+/ /gs;
                        $doc_obj->add_utf8_metadata($section, "Creator", $value);
                        print $outhandle " extracted Creator metadata \"$value\"\n"
                            if ($self->{'verbosity'} > 2);
                        next;
                    }
                }
            }
        }
    }

    foreach my $field (split /,/, $self->{'metadata_fields'}) {

        # don't need to extract field if it was passed in from a previous
        # (recursive) plugin
        next if defined $metadata->{$field};

        # see if there's a <meta> tag for this field
        if ($$textref =~ /<meta(.*?)(?:name|http-equiv)\s*=\s*\"?$field\"?([^>]*)/is) {
            my $content = $1 . $2;
            if ($content =~ /content\s*=\s*\"?(.*)\"?/is) {
                if (defined $1) {
                    my $value = $1;
                    $value =~ s/\"$//;
                    $value =~ s/\s+/ /gs;
                    $value =~ s/\".*//gs;
                    $doc_obj->add_utf8_metadata($section, $field, $value);
                    print $outhandle " extracted \"$field\" metadata \"$value\"\n"
                        if ($self->{'verbosity'} > 2);
                    next;
                }
            }
        }

        # TITLE: extract the document title
        if ($field =~ /^title$/i) {

            # see if there's a <title> tag
            if ($$textref =~ /<title[^>]*>([^<]*)<\/title[^>]*>/is) {
                if (defined $1) {
                    my $title = $1;
                    if ($title =~ /\w/) {
                        $title =~ s/<[^>]*>/ /g;
                        $title =~ s/&nbsp;/ /g;
                        $title =~ s/\s+/ /gs;
                        $title =~ s/^\s+//;
                        $title =~ s/\s+$//;
                        $doc_obj->add_utf8_metadata ($section, $field, $title);
                        print $outhandle " extracted \"$field\" metadata \"$title\"\n"
                            if ($self->{'verbosity'} > 2);
                        next;
                    }
                }
            }

            # if no title use first 100 characters
            my $tmptext = $$textref;
            $tmptext =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
            $tmptext =~ s/<[^>]*>/ /g;
            $tmptext =~ s/&nbsp;/ /g;
            $tmptext =~ s/^\s+//s;
            $tmptext =~ s/\s+$//;
            $tmptext =~ s/\s+/ /gs;
            $tmptext =~ s/$self->{'title_sub'}// if (defined $self->{'title_sub'});
            $tmptext = substr ($tmptext, 0, 100);
            $tmptext =~ s/\s\S*$/.../;
            $doc_obj->add_utf8_metadata ($section, $field, $tmptext);
            print $outhandle " extracted \"$field\" metadata \"$tmptext\"\n"
                if ($self->{'verbosity'} > 2);
            next;
        }

        # tagXXX: extract the text between the first <XXX> and </XXX> tags
        if ($field =~ /^tag[a-z0-9]+$/i) {
            my $tag = $field;
            $tag =~ s/^tag//i;
            my $tmptext = $$textref;
            $tmptext =~ s/\s+/ /gs;
            if ($tmptext =~ /<$tag[^>]*>/i) {
                foreach my $word ($tmptext =~ m/<$tag[^>]*>(.*?)<\/$tag[^>]*>/g) {
                    $word =~ s/&nbsp;/ /g;
                    $word =~ s/<[^>]*>/ /g;
                    $word =~ s/^\s+//;
                    $word =~ s/\s+$//;
                    $word =~ s/\s+/ /gs;
                    if ($word ne "") {
                        $doc_obj->add_utf8_metadata ($section, $tag, $word);
                        print $outhandle " extracted \"$tag\" metadata \"$word\"\n"
                            if ($self->{'verbosity'} > 2);
                    }
                }
            }
            next;
        }
    }
}

# evaluate any "../" to next directory up
# evaluate any "./" as here
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $dirsep_os = &util::get_os_dirsep();
    my @dirsep = split(/$dirsep_os/, $filename);

    my @eval_dirs = ();
    foreach my $d (@dirsep) {
        if ($d eq "..") {
            pop(@eval_dirs);
        } elsif ($d eq ".") {
            # do nothing!
        } else {
            push(@eval_dirs, $d);
        }
    }

    return &util::filename_cat(@eval_dirs);
}

# Strip a leading "./" from a usemap target so "./#index" becomes "#index".
sub replace_usemap_links {
    my $self = shift (@_);
    my ($front, $link, $back) = @_;

    $link =~ s/^\.\///;
    return $front . $link . $back;
}

# Advance the (dir_num, file_num) counters used to name renamed
# associated files, rolling over to a new directory every 1000 files.
sub inc_filecount {
    my $self = shift (@_);

    if ($self->{'file_num'} == 1000) {
        $self->{'dir_num'} ++;
        $self->{'file_num'} = 0;
    } else {
        $self->{'file_num'} ++;
    }
}

# Extend the BasPlug read_file so that entities like &eacute; are
# converted to UTF8 internally.
#
# We don't convert &lt; or &gt; or &amp; or &quot; in case
# they interfere with the GML files
sub read_file {
    my ($self, $filename, $encoding, $textref) = @_;

    &BasPlug::read_file($self, $filename, $encoding, $textref);

    # turn \ into \\ so that the rest of greenstone doesn't think there
    # is an escape code following.
    $$textref =~ s/\\/\\\\/go;

    # Convert entities to their UTF8 equivalents, shielding the four
    # markup-significant entities first and restoring them afterwards.
    $$textref =~ s/&(lt|gt|amp|quot);/&z$1;/go;
    $$textref =~ s/&([^;]+);/&unicode::ascii2utf8(\&ghtml::getcharequiv($1,1))/gseo;
    $$textref =~ s/&z(lt|gt|amp|quot);/&$1;/go;
}

1;