source: main/trunk/greenstone2/perllib/plugins/HTMLPlugin.pm@ 31415

Last change on this file since 31415 was 31415, checked in by Georgiy Litvinov, 7 years ago

Modified HTML links that point to a different section within the same document.

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 68.1 KB
RevLine 
[14665]1###########################################################################
2#
[15872]3# HTMLPlugin.pm -- basic html plugin
[14665]4#
5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34#
35
[15872]36package HTMLPlugin;
[14665]37
[22842]38use Encode;
[23387]39use Unicode::Normalize 'normalize';
[22842]40
[15872]41use ReadTextFile;
42use HBPlugin;
[14665]43use ghtml;
44use unicode;
45use util;
[27306]46use FileUtils;
[14665]47use XMLParser;
48
[14913]49use File::Copy;
[14665]50
51sub BEGIN {
[15872]52 @HTMLPlugin::ISA = ('ReadTextFile', 'HBPlugin');
[28319]53 die "GSDLHOME not set\n" unless defined $ENV{'GSDLHOME'};
54 unshift (@INC, "$ENV{'GSDLHOME'}/perllib/cpan"); # for Image/Size.pm
[14665]55}
56
[28319]57use Image::Size;
58
[14665]59use strict; # every perl program should have this!
60no strict 'refs'; # make an exception so we can use variables as filehandles
61
62my $arguments =
63 [ { 'name' => "process_exp",
[15872]64 'desc' => "{BasePlugin.process_exp}",
[14665]65 'type' => "regexp",
66 'deft' => &get_default_process_exp() },
67 { 'name' => "block_exp",
[15872]68 'desc' => "{BasePlugin.block_exp}",
[14665]69 'type' => 'regexp',
70 'deft' => &get_default_block_exp() },
71 { 'name' => "nolinks",
[15872]72 'desc' => "{HTMLPlugin.nolinks}",
[14665]73 'type' => "flag" },
74 { 'name' => "keep_head",
[15872]75 'desc' => "{HTMLPlugin.keep_head}",
[14665]76 'type' => "flag" },
77 { 'name' => "no_metadata",
[15872]78 'desc' => "{HTMLPlugin.no_metadata}",
[14665]79 'type' => "flag" },
80 { 'name' => "metadata_fields",
[15872]81 'desc' => "{HTMLPlugin.metadata_fields}",
[14665]82 'type' => "string",
83 'deft' => "Title" },
[21800]84 { 'name' => "metadata_field_separator",
85 'desc' => "{HTMLPlugin.metadata_field_separator}",
86 'type' => "string",
87 'deft' => "" },
[14665]88 { 'name' => "hunt_creator_metadata",
[15872]89 'desc' => "{HTMLPlugin.hunt_creator_metadata}",
[14665]90 'type' => "flag" },
91 { 'name' => "file_is_url",
[15872]92 'desc' => "{HTMLPlugin.file_is_url}",
[14665]93 'type' => "flag" },
94 { 'name' => "assoc_files",
[15872]95 'desc' => "{HTMLPlugin.assoc_files}",
[14665]96 'type' => "regexp",
97 'deft' => &get_default_block_exp() },
98 { 'name' => "rename_assoc_files",
[15872]99 'desc' => "{HTMLPlugin.rename_assoc_files}",
[14665]100 'type' => "flag" },
101 { 'name' => "title_sub",
[15872]102 'desc' => "{HTMLPlugin.title_sub}",
[14665]103 'type' => "string",
104 'deft' => "" },
105 { 'name' => "description_tags",
[15872]106 'desc' => "{HTMLPlugin.description_tags}",
[14665]107 'type' => "flag" },
108 # retain this for backward compatibility (w3mir option was replaced by
109 # file_is_url)
110 { 'name' => "w3mir",
[15872]111# 'desc' => "{HTMLPlugin.w3mir}",
[14665]112 'type' => "flag",
113 'hiddengli' => "yes"},
114 { 'name' => "no_strip_metadata_html",
[15872]115 'desc' => "{HTMLPlugin.no_strip_metadata_html}",
[14665]116 'type' => "string",
117 'deft' => "",
118 'reqd' => "no"},
119 { 'name' => "sectionalise_using_h_tags",
[15872]120 'desc' => "{HTMLPlugin.sectionalise_using_h_tags}",
[14665]121 'type' => "flag" },
[14913]122 { 'name' => "use_realistic_book",
[15872]123 'desc' => "{HTMLPlugin.tidy_html}",
[14665]124 'type' => "flag"},
[15872]125 { 'name' => "old_style_HDL",
126 'desc' => "{HTMLPlugin.old_style_HDL}",
[20791]127 'type' => "flag"},
128 {'name' => "processing_tmp_files",
129 'desc' => "{BasePlugin.processing_tmp_files}",
130 'type' => "flag",
131 'hiddengli' => "yes"}
[14665]132 ];
133
[15872]134my $options = { 'name' => "HTMLPlugin",
135 'desc' => "{HTMLPlugin.desc}",
[14665]136 'abstract' => "no",
137 'inherits' => "yes",
138 'args' => $arguments };
139
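# A minimal, purely illustrative collect.cfg line using options from the argument
# list above (the chosen option values are assumptions, not recommendations):
#
#   plugin HTMLPlugin -description_tags -metadata_fields Title,Author<Creator>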
140
141sub new {
142 my ($class) = shift (@_);
143 my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
144 push(@$pluginlist, $class);
145
[15872]146 push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
147 push(@{$hashArgOptLists->{"OptList"}},$options);
[16024]148
[14665]149
[15872]150 my $self = new ReadTextFile($pluginlist,$inputargs,$hashArgOptLists);
[14665]151
152 if ($self->{'w3mir'}) {
153 $self->{'file_is_url'} = 1;
154 }
155 $self->{'aux_files'} = {};
156 $self->{'dir_num'} = 0;
157 $self->{'file_num'} = 0;
158
159 return bless $self, $class;
160}
161
162# may want to use (?i)\.(gif|jpe?g|jpe|png|css|js(?:@.*)?)$
163# if have eg <script language="javascript" src="img/lib.js@123">
[20791]164# blocking is now done by reading through the file and recording all the
165# images and other files
[14665]166sub get_default_block_exp {
167 my $self = shift (@_);
168
[16392]169 #return q^(?i)\.(gif|jpe?g|jpe|jpg|png|css)$^;
170 return "";
[14665]171}
172
173sub get_default_process_exp {
174 my $self = shift (@_);
175
176 # the last option is an attempt to encode the concept of an html query ...
177 return q^(?i)(\.html?|\.shtml|\.shm|\.asp|\.php\d?|\.cgi|.+\?.+=.*)$^;
178}
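# Illustrative matches for the default expression above (assumed example names,
# not an exhaustive list): "index.html", "page.shtml", "guide.php3", and
# query-style URLs such as "search.cgi?q=greenstone".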
179
180sub store_block_files
181{
182 my $self =shift (@_);
[16392]183 my ($filename_full_path, $block_hash) = @_;
184
185 my $html_fname = $filename_full_path;
[14665]186
[23335]187 my ($language, $content_encoding) = $self->textcat_get_language_encoding ($filename_full_path);
188 $self->{'store_content_encoding'}->{$filename_full_path} = $content_encoding;
[14665]189
190 # read in file ($text will be in utf8)
[16769]191 my $raw_text = "";
[23363]192 $self->read_file_no_decoding($filename_full_path, \$raw_text);
[16769]193
194 my $textref = \$raw_text;
[14665]195 my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
196 my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
197 $$textref =~ s/$opencom(.*?)$closecom//gs;
198
[23363]199 # Convert entities to their UTF8 equivalents
200 $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
201 $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,0)/gseo; # on this occasion, want it left as utf8
202 $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;
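# Illustrative effect of the three substitutions above: an entity such as "&eacute;"
# is replaced by its UTF-8 character equivalent, while "&amp;" is temporarily renamed
# to "&zamp;" and then restored, so the five markup-significant entities survive the
# conversion untouched.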
203
[14665]204 my $attval = "\\\"[^\\\"]+\\\"|[^\\s>]+";
205 my @img_matches = ($$textref =~ m/<img[^>]*?src\s*=\s*($attval)[^>]*>/igs);
206 my @usemap_matches = ($$textref =~ m/<img[^>]*?usemap\s*=\s*($attval)[^>]*>/igs);
207 my @link_matches = ($$textref =~ m/<link[^>]*?href\s*=\s*($attval)[^>]*>/igs);
208 my @embed_matches = ($$textref =~ m/<embed[^>]*?src\s*=\s*($attval)[^>]*>/igs);
[17127]209 my @tabbg_matches = ($$textref =~ m/<(?:body|table|tr|td)[^>]*?background\s*=\s*($attval)[^>]*>/igs);
[16638]210 my @script_matches = ($$textref =~ m/<script[^>]*?src\s*=\s*($attval)[^>]*>/igs);
[14665]211
[23387]212 if(!defined $self->{'unicode_to_original_filename'}) {
[16769]213 # maps from utf8 converted link name -> original filename referred to by (possibly URL-encoded) src url
[23387]214 $self->{'unicode_to_original_filename'} = {};
[16769]215 }
216
[23387]217 foreach my $raw_link (@img_matches, @usemap_matches, @link_matches, @embed_matches, @tabbg_matches, @script_matches) {
[14665]218
219 # remove quotes from link at start and end if necessary
[23387]220 if ($raw_link =~ m/^\"/) {
221 $raw_link =~ s/^\"//;
222 $raw_link =~ s/\"$//;
[14665]223 }
224
[23371]225 # remove any anchor names, e.g. foo.html#name becomes foo.html
226 # but watch out for any #'s that are part of entities, such as &#x3B1;
[23387]227 $raw_link =~ s/([^&])\#.*$/$1/s;
[23371]228
[16638]229 # some links may just be anchor names
[23387]230 next unless ($raw_link =~ /\S+/);
[14665]231
[23415]232 if ($raw_link !~ m@^/@ && $raw_link !~ m/^([A-Z]:?)\\/i) {
[14665]233 # Turn relative file path into full path
[16392]234 my $dirname = &File::Basename::dirname($filename_full_path);
[27306]235 $raw_link = &FileUtils::filenameConcatenate($dirname, $raw_link);
[14665]236 }
[23387]237 $raw_link = $self->eval_dir_dots($raw_link);
[16638]238
[16769]239 # this is the actual filename on the filesystem (that the link refers to)
[23387]240 my $url_original_filename = $self->opt_url_decode($raw_link);
[16769]241
[23387]242 my ($uses_bytecodes,$exceeds_bytecodes) = &unicode::analyze_raw_string($url_original_filename);
[16769]243
[23387]244 if ($exceeds_bytecodes) {
245 # We have a link to a file name that is more complicated than a raw byte filename
246 # What we do next depends on the operating system we are on
[16769]247
[23387]248 if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
249 # Assume we're dealing with a UTF-8 encoded filename
250 $url_original_filename = encode("utf8", $url_original_filename);
251 }
252 elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
253 # HFS+ is UTF8 with decomposition
254 $url_original_filename = encode("utf8", $url_original_filename);
255 $url_original_filename = normalize('D', $url_original_filename); # Normalization Form D (decomposition)
256 }
257 elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
258 # Don't need to do anything as later code maps Windows
259 # unicode filenames to DOS short filenames when needed
260 }
261 else {
262 my $outhandle = $self->{'outhandle'};
263 print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
264 print $outhandle " in raw file system encoding of: $raw_link\n";
265 print $outhandle " Assuming filesystem is UTF-8 based.\n";
266 $url_original_filename = encode("utf8", $url_original_filename);
267 }
268 }
269
270 # Convert the (currently raw) link into its Unicode version.
271 # Store the Unicode link along with the url_original_filename
272 my $unicode_url_original_filename = "";
273 $self->decode_text($raw_link,$content_encoding,$language,\$unicode_url_original_filename);
274
275
276 $self->{'unicode_to_original_filename'}->{$unicode_url_original_filename} = $url_original_filename;
277
278
279 if ($url_original_filename ne $unicode_url_original_filename) {
[17088]280 my $outhandle = $self->{'outhandle'};
[23387]281
[17088]282 print $outhandle "URL Encoding $url_original_filename\n";
[23387]283 print $outhandle " ->$unicode_url_original_filename\n";
[17088]284
[30022]285 # make sure not to block the file itself, as happens when an html file links to itself
286 # e.g. if the current file is mary-boleyn/index.html and contains <link rel="canonical" href="index.html" />
287 my $unicode_html_fname = "";
288 $self->decode_text($html_fname,$content_encoding,$language,\$unicode_html_fname);
289 if($unicode_url_original_filename ne $unicode_html_fname) {
290 # Allow for possibility of raw byte version and Unicode versions of file
291 &util::block_filename($block_hash,$unicode_url_original_filename);
292 }
[23387]293 }
[23363]294
[23418]295 # $url_original_filename = &util::upgrade_if_dos_filename($url_original_filename);
[30022]296 &util::block_filename($block_hash,$url_original_filename) if $url_original_filename ne $html_fname;
297
298 # but only add the linked file to the blocklist if the current html file does not link to itself
[23418]299
[14665]300 }
301}
302
[16769]303# Given a filename in any encoding, will URL decode it to get back the original filename
304# in the original encoding. Because this method is intended to work out the *original*
[18320]305# filename, it does not URL decode any filename if a file by the name of the *URL-encoded*
[16769]306# string already exists in the local folder.
[23363]307#
[16769]308sub opt_url_decode {
309 my $self = shift (@_);
[23387]310 my ($raw_link) = @_;
[16024]311
[23387]312
[16769]313 # Replace %XX's in URL with decoded value if required.
314 # Note that the filename may include the %XX in some situations
[23387]315
316## if ($raw_link =~ m/\%[A-F0-9]{2}/i) {
317
318 if (($raw_link =~ m/\%[A-F0-9]{2}/i) || ($raw_link =~ m/\&\#x[0-9A-F]+;/i) || ($raw_link =~ m/\&\#[0-9]+;/i)) {
319 if (!-e $raw_link) {
320 $raw_link = &unicode::url_decode($raw_link,1);
[16769]321 }
322 }
[23387]323
324 return $raw_link;
[16769]325}
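# Illustrative example (filenames assumed): a link written as "my%20photo.jpg" is
# left as-is when a file literally named "my%20photo.jpg" exists alongside the HTML
# file, and is otherwise URL-decoded to "my photo.jpg" to recover the original name.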
326
[20774]327sub read_into_doc_obj
328{
329 my $self = shift (@_);
330 my ($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli) = @_;
331
[22330]332 my ($filename_full_path, $filename_no_path) = &util::get_full_filenames($base_dir, $file);
[23335]333
334 # Lookup content_encoding worked out in file_block pass for this file
335 # Store it under the local name 'content_encoding' so it's nice and
336 # easy to access
337 $self->{'content_encoding'} = $self->{'store_content_encoding'}->{$filename_full_path};
338
[20774]339 # get the input file
340 my $input_filename = $file;
341 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
342 $suffix = lc($suffix);
[22330]343 my $tidy_filename;
[20774]344 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
345 {
346 # because the document has to be sectionalized, set the description tags
347 $self->{'description_tags'} = 1;
348
349 # set the file to be tidied
[27306]350 $input_filename = &FileUtils::filenameConcatenate($base_dir,$file) if $base_dir =~ m/\w/;
[20774]351
352 # get the tidied file
353 #my $tidy_filename = $self->tmp_tidy_file($input_filename);
[22330]354 $tidy_filename = $self->convert_tidy_or_oldHDL_file($input_filename);
[20774]355
356 # derive tmp filename from input filename
357 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($tidy_filename, "\\.[^\\.]+\$");
358
359 # set the new input file and base_dir to be from the tidied file
360 $file = "$tailname$suffix";
361 $base_dir = $dirname;
362 }
363
364 # call the parent read_into_doc_obj
365 my ($process_status,$doc_obj) = $self->SUPER::read_into_doc_obj($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli);
[22330]366 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
367 {
368 # now we need to reset the filenames in the doc obj so that the converted filenames are not used
369 my $collect_file = &util::filename_within_collection($filename_full_path);
370 $doc_obj->set_source_filename ($collect_file, $self->{'file_rename_method'});
371 ## set_source_filename does not set the doc_obj source_path which is used in archives dbs for incremental
[23363]372 # build. So set it manually.
373 $doc_obj->set_source_path($filename_full_path);
[22330]374 my $collect_conv_file = &util::filename_within_collection($tidy_filename);
375 $doc_obj->set_converted_filename($collect_conv_file);
[23349]376
377 my $plugin_filename_encoding = $self->{'filename_encoding'};
[23352]378 my $filename_encoding = $self->deduce_filename_encoding($file,$metadata,$plugin_filename_encoding);
379 $self->set_Source_metadata($doc_obj, $filename_full_path, $filename_encoding);
[22330]380 }
[23335]381
382 delete $self->{'store_content_encoding'}->{$filename_full_path};
383 $self->{'content_encoding'} = undef;
384
[20774]385 return ($process_status,$doc_obj);
386}
[16769]387
[14665]388# do plugin specific processing of doc_obj
389sub process {
390 my $self = shift (@_);
391 my ($textref, $pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;
392 my $outhandle = $self->{'outhandle'};
393
[16769]394 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
[16024]395 # this makes life so much easier... perl can cope with unix-style '/'s.
[23371]396 $base_dir =~ s@(\\)+@/@g;
397 $file =~ s@(\\)+@/@g;
[14665]398 }
[23371]399
[27306]400 my $filename = &FileUtils::filenameConcatenate($base_dir,$file);
[23371]401 my $upgraded_base_dir = &util::upgrade_if_dos_filename($base_dir);
402 my $upgraded_filename = &util::upgrade_if_dos_filename($filename);
403
404 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
405 # And again
406 $upgraded_base_dir =~ s@(\\)+@/@g;
407 $upgraded_filename =~ s@(\\)+@/@g;
408
409 # Need to make sure there is a '/' on the end of upgraded_base_dir
[23387]410 if (($upgraded_base_dir ne "") && ($upgraded_base_dir !~ m/\/$/)) {
[23371]411 $upgraded_base_dir .= "/";
412 }
413 }
414 my $upgraded_file = &util::filename_within_directory($upgraded_filename,$upgraded_base_dir);
[14665]415
416 # reset per-doc stuff...
417 $self->{'aux_files'} = {};
418 $self->{'dir_num'} = 0;
419 $self->{'file_num'} = 0;
420
421 # process an HTML file where sections are divided by heading tags (H1, H2 ...)
422 # you can also include metadata in the format (X can be any number)
423 # <hX>Title<!--gsdl-metadata
424 # <Metadata name="name1">value1</Metadata>
425 # ...
426 # <Metadata name="nameN">valueN</Metadata>
427 #--></hX>
428 if ($self->{'sectionalise_using_h_tags'}) {
429 # description_tags should always be activated because we convert headings to description tags
430 $self->{'description_tags'} = 1;
431
432 my $arrSections = [];
[23371]433 $$textref =~ s/<h([0-9]+)[^>]*>(.*?)<\/h[0-9]+>/$self->process_heading($1, $2, $arrSections, $upgraded_file)/isge;
[14665]434
435 if (scalar(@$arrSections)) {
436 my $strMetadata = $self->update_section_data($arrSections, -1);
437 if (length($strMetadata)) {
438 $strMetadata = '<!--' . $strMetadata . "\n-->\n</body>";
439 $$textref =~ s/<\/body>/$strMetadata/ig;
440 }
441 }
442 }
443
444 my $cursection = $doc_obj->get_top_section();
445
446 $self->extract_metadata ($textref, $metadata, $doc_obj, $cursection)
447 unless $self->{'no_metadata'} || $self->{'description_tags'};
448
449 # Store URL for page as metadata - this can be used for an
450 # AltaVista-style search interface. The URL won't be valid
451 # unless the file structure contains the domain name (i.e.
452 # like when w3mir is used to download a website).
453
454 # URL metadata (even invalid ones) are used to support internal
455 # links, so even if 'file_is_url' is off, still need to store info
456
[23371]457 my ($tailname,$dirname) = &File::Basename::fileparse($upgraded_file);
[23347]458
[23335]459# my $utf8_file = $self->filename_to_utf8_metadata($file);
460# $utf8_file =~ s/&\#095;/_/g;
[23835]461# variable below used to be utf8_file
462
[23387]463 my $url_encoded_file = &unicode::raw_filename_to_url_encoded($tailname);
464 my $utf8_url_encoded_file = &unicode::raw_filename_to_utf8_url_encoded($tailname);
[23335]465
[16735]466 my $web_url = "http://";
[23387]467 my $utf8_web_url = "http://";
[16735]468 if(defined $dirname) { # local directory
[22689]469 # Check for "ftp" in the domain name of the directory
470 # structure to determine if this URL should be a ftp:// URL
471 # This check is not infallible, but better than omitting the
472 # check, which would cause all files downloaded from ftp sites
473 # via mirroring with wget to have potentially erroneous http:// URLs
474 # assigned in their metadata
475 if ($dirname =~ /^[^\/]*ftp/i)
476 {
477 $web_url = "ftp://";
[23387]478 $utf8_web_url = "ftp://";
[22689]479 }
[16836]480 $dirname = $self->eval_dir_dots($dirname);
[18626]481 $dirname .= &util::get_dirsep() if $dirname ne ""; # if there's a directory, it should end on "/"
[23387]482
483 $web_url = $web_url.$dirname.$url_encoded_file;
484 $utf8_web_url = $utf8_web_url.$dirname.$utf8_url_encoded_file;
[16735]485 } else {
[23387]486 $web_url = $web_url.$url_encoded_file;
487 $utf8_web_url = $utf8_web_url.$utf8_url_encoded_file;
[16735]488 }
[19983]489 $web_url =~ s/\\/\//g;
[23387]490 $utf8_web_url =~ s/\\/\//g;
[23371]491
492 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]493 print STDERR "*******DEBUG: upgraded_file: $upgraded_file\n";
494 print STDERR "*******DEBUG: adding URL metadata: $utf8_url_encoded_file\n";
[23371]495 }
496
497
[15872]498 $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);
[23387]499 $doc_obj->add_utf8_metadata($cursection, "UTF8URL", $utf8_web_url);
[15872]500
[14665]501 if ($self->{'file_is_url'}) {
502 $doc_obj->add_metadata($cursection, "weblink", "<a href=\"$web_url\">");
503 $doc_obj->add_metadata($cursection, "webicon", "_iconworld_");
504 $doc_obj->add_metadata($cursection, "/weblink", "</a>");
505 }
506
507 if ($self->{'description_tags'}) {
508 # remove the html header - note that doing this here means any
509 # sections defined within the header will be lost (so all <Section>
510 # tags must appear within the body of the HTML)
511 my ($head_keep) = ($$textref =~ m/^(.*?)<body[^>]*>/is);
512
513 $$textref =~ s/^.*?<body[^>]*>//is;
514 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
515
516 my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
517 my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
518
519 my $lt = '(?:<|&lt;)';
520 my $gt = '(?:>|&gt;)';
521 my $quot = '(?:"|&quot;|&rdquo;|&ldquo;)';
522
523 my $dont_strip = '';
524 if ($self->{'no_strip_metadata_html'}) {
525 ($dont_strip = $self->{'no_strip_metadata_html'}) =~ s{,}{|}g;
526 }
527
528 my $found_something = 0; my $top = 1;
529 while ($$textref =~ s/^(.*?)$opencom(.*?)$closecom//s) {
530 my $text = $1;
531 my $comment = $2;
532 if (defined $text) {
533 # text before a comment - note that getting to here
534 # doesn't necessarily mean there are Section tags in
535 # the document
[23371]536 $self->process_section(\$text, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[14665]537 }
538 while ($comment =~ s/$lt(.*?)$gt//s) {
539 my $tag = $1;
540 if ($tag eq "Section") {
541 $found_something = 1;
542 $cursection = $doc_obj->insert_section($doc_obj->get_end_child($cursection)) unless $top;
543 $top = 0;
544 } elsif ($tag eq "/Section") {
545 $found_something = 1;
546 $cursection = $doc_obj->get_parent_section ($cursection);
[16769]547 } elsif ($tag =~ m/^Metadata name=$quot(.*?)$quot/s) {
[14665]548 my $metaname = $1;
[16769]549 my $accumulate = $tag =~ m/mode=${quot}accumulate${quot}/ ? 1 : 0;
[14665]550 $comment =~ s/^(.*?)$lt\/Metadata$gt//s;
551 my $metavalue = $1;
552 $metavalue =~ s/^\s+//;
553 $metavalue =~ s/\s+$//;
554 # assume that no metadata value intentionally includes
555 # carriage returns or HTML tags (if they're there they
556 # were probably introduced when converting to HTML from
557 # some other format).
558 # actually some people want to have html tags in their
559 # metadata.
560 $metavalue =~ s/[\cJ\cM]/ /sg;
561 $metavalue =~ s/<[^>]+>//sg
[16769]562 unless $dont_strip && ($dont_strip eq 'all' || $metaname =~ m/^($dont_strip)$/);
[14665]563 $metavalue =~ s/\s+/ /sg;
[22348]564 if ($metaname =~ /\./) { # has a namespace
565 $metaname = "ex.$metaname";
566 }
[14665]567 if ($accumulate) {
568 $doc_obj->add_utf8_metadata($cursection, $metaname, $metavalue);
569 } else {
570 $doc_obj->set_utf8_metadata_element($cursection, $metaname, $metavalue);
571 }
572 } elsif ($tag eq "Description" || $tag eq "/Description") {
573 # do nothing with containing Description tags
574 } else {
575 # simple HTML tag (probably created by the conversion
576 # to HTML from some other format) - we'll ignore it and
577 # hope for the best ;-)
578 }
579 }
580 }
581 if ($cursection ne "") {
[23371]582 print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains unmatched <Section></Section> tags\n";
[14665]583 }
584
585 $$textref =~ s/^.*?<body[^>]*>//is;
586 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
[16769]587 if ($$textref =~ m/\S/) {
[14665]588 if (!$found_something) {
589 if ($self->{'verbosity'} > 2) {
[23371]590 print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags so\n";
[14665]591 print $outhandle " will be processed as a single section document\n";
592 }
593
594 # go ahead and process single-section document
[23371]595 $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[14665]596
597 # if document contains no Section tags we'll go ahead
598 # and extract metadata (this won't have been done
599 # above as the -description_tags option prevents it)
600 my $complete_text = $head_keep.$doc_obj->get_text($cursection);
601 $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
602 unless $self->{'no_metadata'};
603
604 } else {
[23371]605 print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains the following text outside\n";
[14665]606 print $outhandle " of the final closing </Section> tag. This text will\n";
607 print $outhandle " be ignored.";
608
609 my ($text);
610 if (length($$textref) > 30) {
611 $text = substr($$textref, 0, 30) . "...";
612 } else {
613 $text = $$textref;
614 }
615 $text =~ s/\n/ /isg;
616 print $outhandle " ($text)\n";
617 }
618 } elsif (!$found_something) {
619
620 if ($self->{'verbosity'} > 2) {
621 # may get to here if document contained no valid Section
622 # tags but did contain some comments. The text will have
623 # been processed already but we should print the warning
624 # as above and extract metadata
[23371]625 print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags and\n";
[14665]626 print $outhandle " is blank or empty. Metadata will be assigned if present.\n";
627 }
628
629 my $complete_text = $head_keep.$doc_obj->get_text($cursection);
630 $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
631 unless $self->{'no_metadata'};
632 }
[31415]633 $self->replace_section_links($doc_obj);
[14665]634 } else {
635
636 # remove header and footer
637 if (!$self->{'keep_head'} || $self->{'description_tags'}) {
638 $$textref =~ s/^.*?<body[^>]*>//is;
639 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
640 }
641
[25555]642 $self->{'css_assoc_files'} = {};
643
[14665]644 # single section document
[23371]645 $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[25555]646
647 #my $upgraded_filename_dirname = &File::Basename::dirname($upgraded_filename);
648
649 $self->acquire_css_associated_files($doc_obj, $cursection);
650
651 $self->{'css_assoc_files'} = {};
[14665]652 }
[23335]653
[14665]654 return 1;
655}
656
657
658sub process_heading
659{
660 my ($self, $nHeadNo, $strHeadingText, $arrSections, $file) = @_;
661 $strHeadingText = '' if (!defined($strHeadingText));
662
663 my $strMetadata = $self->update_section_data($arrSections, int($nHeadNo));
664
665 my $strSecMetadata = '';
666 while ($strHeadingText =~ s/<!--gsdl-metadata(.*?)-->//is)
667 {
668 $strSecMetadata .= $1;
669 }
670
671 $strHeadingText =~ s/^\s+//g;
672 $strHeadingText =~ s/\s+$//g;
673 $strSecMetadata =~ s/^\s+//g;
674 $strSecMetadata =~ s/\s+$//g;
675
676 $strMetadata .= "\n<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">" . $strHeadingText . "</Metadata>\n";
677
678 if (length($strSecMetadata)) {
679 $strMetadata .= "\t\t" . $strSecMetadata . "\n";
680 }
681
682 $strMetadata .= "\t</Description>\n";
683
684 return "<!--" . $strMetadata . "-->";
685}
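# Rough illustration (heading text assumed): the first "<h1>Introduction</h1>" in a
# document is rewritten by this routine as
#   <!--
#   <Section>
#       <Description>
#           <Metadata name="Title">Introduction</Metadata>
#       </Description>
#   -->
# A subsequent heading at the same or a shallower level first emits the matching
# </Section> tag(s) via update_section_data below.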
686
687
688sub update_section_data
689{
690 my ($self, $arrSections, $nCurTocNo) = @_;
691 my ($strBuffer, $nLast, $nSections) = ('', 0, scalar(@$arrSections));
692
693 if ($nSections == 0) {
694 push @$arrSections, $nCurTocNo;
695 return $strBuffer;
696 }
697 $nLast = $arrSections->[$nSections - 1];
698 if ($nCurTocNo > $nLast) {
699 push @$arrSections, $nCurTocNo;
700 return $strBuffer;
701 }
702 for(my $i = $nSections - 1; $i >= 0; $i--) {
703 if ($nCurTocNo <= $arrSections->[$i]) {
704 $strBuffer .= "\n</Section>";
705 pop @$arrSections;
706 }
707 }
708 push @$arrSections, $nCurTocNo;
709 return $strBuffer;
710}
711
712
713# note that process_section may be called multiple times for a single
714# section (relying on the fact that add_utf8_text appends the text to any
715# that may exist already).
716sub process_section {
717 my $self = shift (@_);
718 my ($textref, $base_dir, $file, $doc_obj, $cursection) = @_;
[25555]719
720 my @styleTagsText = ($$textref =~ m/<style[^>]*>([^<]*)<\/style>/sg);
721 if(scalar(@styleTagsText) > 0)
722 {
[27306]723 my $css_filename_dirname = &File::Basename::dirname(&FileUtils::filenameConcatenate($base_dir, $file));
[25555]724 foreach my $styleText (@styleTagsText)
725 {
726 $self->acquire_css_associated_files_from_text_block($styleText, $css_filename_dirname);
727 }
728 }
729
[14665]730 # trap links
731 if (!$self->{'nolinks'}) {
732 # usemap="./#index" not handled correctly => change to "#index"
[16769]733## $$textref =~ s/(<img[^>]*?usemap\s*=\s*[\"\']?)([^\"\'>\s]+)([\"\']?[^>]*>)/
734
[23392]735## my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
736## my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
[23387]737
[16769]738 $$textref =~ s/(<img[^>]*?usemap\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
[14665]739 $self->replace_usemap_links($1, $2, $3)/isge;
740
[23463]741 $$textref =~ s/(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
742 $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
[23387]743
[23392]744## $$textref =~ s/($opencom.*?)?+(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)(.*?$closecom)?+/
745# $self->replace_href_links ($1, $2, $3, $4, $5, $base_dir, $file, $doc_obj, $cursection)/isge;
[14665]746 }
747
748 # trap images
749
[15872]750 # Previously, by default, HTMLPlugin would embed <img> tags inside anchor tags
[15176]751 # i.e. <a href="image"><img src="image"></a> in order to overcome a problem that
752 # turned regular text succeeding images into links. That is, by embedding <img>s
753 # inside <a href=""></a>, the text following images was no longer misbehaving.
754 # However, there would be many occasions whereby images were not meant to link
755 # to their source images but where the images would link to another web page.
756 # To allow this, the no_image_links option was introduced: it would prevent
757 # the behaviour of embedding images into links that referenced the source images.
758
759 # Somewhere along the line, the problem of normal text turning into links when
760 # such text followed images which were not embedded in <a href=""></a> ceased
761 # to occur. This is why the following lines have been commented out (as well as
762 # two lines in replace_images). They appear to no longer apply.
763
764 # If at any time, there is a need for having images embedded in <a> anchor tags,
[15872]765 # then it might be better to turn that into an HTMLPlugin option rather than make
[15176]766 # it the default behaviour. Also, eventually, no_image_links needs to become
[15872]767 # a deprecated option for HTMLPlugin as it has now become the default behaviour.
[15176]768
769 #if(!$self->{'no_image_links'}){
[16247]770 $$textref =~ s/(<(?:img|embed|table|tr|td)[^>]*?(?:src|background)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
[15872]771 $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
[15176]772 #}
773
[14665]774 # add text to document object
775 # turn \ into \\ so that the rest of greenstone doesn't think there
776 # is an escape code following. (Macro parsing loses them...)
777 $$textref =~ s/\\/\\\\/go;
778
779 $doc_obj->add_utf8_text($cursection, $$textref);
780}
781
782sub replace_images {
783 my $self = shift (@_);
784 my ($front, $link, $back, $base_dir,
785 $file, $doc_obj, $section) = @_;
786
787 # remove quotes from link at start and end if necessary
788 if ($link=~/^[\"\']/) {
[15838]789 $link=~s/^[\"\']//;
790 $link=~s/[\"\']$//;
[14665]791 $front.='"';
792 $back="\"$back";
793 }
[15872]794
[14665]795 $link =~ s/\n/ /g;
796
797 # Hack to overcome Windows wv 0.7.1 bug that causes embedded images to be broken
798 # If the Word file path has spaces in it, wv messes up and you end up with
799 # absolute paths for the images, and without the "file://" prefix
800 # So check for this special case and massage the data to be correct
[16769]801 if ($ENV{'GSDLOS'} =~ m/^windows/i && $self->{'plugin_type'} eq "WordPlug" && $link =~ m/^[A-Za-z]\:\\/) {
[14665]802 $link =~ s/^.*\\([^\\]+)$/$1/;
803 }
[16632]804
[14665]805 my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
806
807 my $img_file = $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section);
808
[27703]809# print STDERR "**** link = $link\n**** href = $href\n**** img_file = $img_file, rl = $rl\n\n";
[16632]810
[14665]811 my $anchor_name = $img_file;
812 #$anchor_name =~ s/^.*\///;
813 #$anchor_name = "<a name=\"$anchor_name\" ></a>";
814
815 my $image_link = $front . $img_file .$back;
[15176]816 return $image_link;
[14665]817
[15176]818 # The reasons for why the following two lines are no longer necessary can be
819 # found in subroutine process_section
820 #my $anchor_link = "<a href=\"$img_file\" >".$image_link."</a>";
821 #return $anchor_link;
822
[14665]823 #return $front . $img_file . $back . $anchor_name;
824}
825
826sub replace_href_links {
827 my $self = shift (@_);
[23392]828 my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;
[25555]829
830 if($front =~ m/^<link / && $link =~ m/\.css"$/)
831 {
832 my $actual_link = $link;
833 $actual_link =~ s/^"(.*)"$/$1/;
834
835 my $directory = &File::Basename::dirname($file);
836
[27306]837 my $css_filename = &FileUtils::filenameConcatenate($base_dir, $directory, $actual_link);
[25555]838 $self->retrieve_css_associated_files($css_filename);
839 }
840
[16769]841 # remove quotes from link at start and end if necessary
842 if ($link=~/^[\"\']/) {
843 $link=~s/^[\"\']//;
844 $link=~s/[\"\']$//;
845 $front.='"';
846 $back="\"$back";
847 }
848
[14665]849 # attempt to sort out targets - frames are not handled
850 # well in this plugin and some cases will screw things
851 # up - e.g. the _parent target (so we'll just remove
852 # them all ;-)
853 $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
854 $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
855 $front =~ s/target=\"?_parent\"?//is;
856 $back =~ s/target=\"?_parent\"?//is;
857
[25673]858 if($link =~ m/^\#/s)
859 {
860 return $front . "_httpsamepagelink_" . $link . $back;
861 }
862
[14665]863 $link =~ s/\n/ /g;
864
[16769]865 # Find file referred to by $link on file system
866 # This is more complicated than it sounds when character encodings
867 # are taken into account
[14665]868 my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
[23835]869
[14665]870 # href may use '\'s where '/'s should be on Windows
871 $href =~ s/\\/\//g;
[16769]872 my ($filename) = $href =~ m/^(?:.*?):(?:\/\/)?(.*)/;
[14665]873
874 ##### leave all these links alone (they won't be picked up by intermediate
875 ##### pages). I think that's safest when dealing with frames, targets etc.
876 ##### (at least until I think of a better way to do it). Problems occur with
877 ##### mailto links from within small frames, the intermediate page is displayed
878 ##### within that frame and can't be seen. There is still potential for this to
879 ##### happen even with html pages - the solution seems to be to somehow tell
880 ##### the browser from the server side to display the page being sent (i.e.
881 ##### the intermediate page) in the top level window - I'm not sure if that's
882 ##### possible - the following line should probably be deleted if that can be done
[16769]883 return $front . $link . $back if $href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/is;
[14665]884
[16769]885 if (($rl == 0) || ($filename =~ m/$self->{'process_exp'}/) ||
886 ($href =~ m/\/$/) || ($href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
[23335]887
[23371]888 if ($ENV{'GSDLOS'} =~ m/^windows$/) {
[23335]889
[23371]890 # Don't do any encoding for now, as it is not clear what
891 # the right thing to do is to support filename
892 # encoding on Windows when filenames are not UTF-16
893 #
[23347]894 }
[23371]895 else {
896 # => Unix-based system
[23335]897
[23371]898 # If web page didn't give encoding, then default to utf8
899 my $content_encoding= $self->{'content_encoding'} || "utf8";
[23835]900
[23371]901 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
902 print STDERR "**** Encoding with '$content_encoding', href: $href\n";
903 }
[23335]904
[23835]905 # on Darwin, the unicode filenames are stored on the file
906 # system in decomposed form, so any href link (including when
907 # URL-encoded) should refer to the decomposed name of the file
908 if ($ENV{'GSDLOS'} =~ /^darwin$/i) {
909 $href = normalize('D', $href); # Normalization Form D (decomposition)
910 }
911
[23371]912 $href = encode($content_encoding,$href);
913 }
914
[23835]915 $href = &unicode::raw_filename_to_utf8_url_encoded($href);
[23335]916 $href = &unicode::filename_to_url($href);
917
[16812]918 &ghtml::urlsafe ($href);
[23371]919
[23347]920 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]921 print STDERR "******DEBUG: href=$href\n";
[23347]922 }
[23335]923
[23347]924
[18521]925 return $front . "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part . $back;
[14665]926 } else {
[23335]927 # link is to some other type of file (e.g., an image) so we'll
[14665]928 # need to associate that file
929 return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
930 }
931}
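# Rough illustration (link targets assumed): a relative link such as
# <a href="chapter2.html"> to another processed HTML page is rewritten into an
# internal link of the form <a href="_httpextlink_&amp;rl=1&amp;href=<resolved URL>">,
# while a link to a non-HTML file such as "figure1.jpg" falls through to add_file()
# and ends up served as an associated file.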
932
[25555]933sub retrieve_css_associated_files {
934 my $self = shift (@_);
935 my ($css_filename) = @_;
936
937 my $css_filename_dirname = &File::Basename::dirname($css_filename);
938
939 open (CSSFILE, $css_filename) || return;
940 sysread (CSSFILE, my $file_string, -s CSSFILE);
941
942 $self->acquire_css_associated_files_from_text_block($file_string, $css_filename_dirname) unless !defined $file_string;
943
944 close CSSFILE;
945}
946
947sub acquire_css_associated_files_from_text_block {
948 my $self = shift (@_);
949 my ($text, $css_filename_dirname) = @_;
950
951 my @image_urls = ($text =~ m/background-image:\s*url[^;]*;/sg);
952 foreach my $img_url (@image_urls)
953 {
954 $img_url =~ s/^.*url.*\((.*)\).*$/$1/;
955 $img_url =~ s/^\s*"?([^"]*)"?\s*$/$1/;
956
[27306]957 $self->{'css_assoc_files'}->{&FileUtils::filenameConcatenate($css_filename_dirname, $img_url)} = $img_url;
[25555]958 }
959}
960
961sub acquire_css_associated_files {
962 my $self = shift(@_);
963
964 my ($doc_obj, $section) = @_;
965
966 foreach my $image_filename (keys %{$self->{'css_assoc_files'}})
967 {
968 $doc_obj->associate_file($image_filename, $self->{'css_assoc_files'}->{$image_filename}, undef, $section);
969 }
970}
971
[14665]972sub add_file {
973 my $self = shift (@_);
974 my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;
975 my ($newname);
976
977 my $filename = $href;
978 if ($base_dir eq "") {
[23387]979 if ($ENV{'GSDLOS'} =~ m/^windows$/i) {
980 # remove http://
981 $filename =~ s/^[^:]*:\/\///;
982 }
983 else {
984 # remove http:/ thereby leaving one slash at the start as
985 # part of full pathname
986 $filename =~ s/^[^:]*:\///;
987 }
[14665]988 }
989 else {
990 # remove http://
991 $filename =~ s/^[^:]*:\/\///;
992 }
[27703]993
994 if ($ENV{'GSDLOS'} =~ m/^windows$/i) {
995 $filename =~ s@\/@\\@g;
996 }
997
[27306]998 $filename = &FileUtils::filenameConcatenate($base_dir, $filename);
[23363]999
[22355]1000 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'})) {
1001 # we are processing a tidytmp file - want paths to be in import
1002 $filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
1003 }
[23335]1004
1005 # Replace %XX's in URL with decoded value if required. Note that the
1006 # filename may include the %XX in some situations. If the *original*
1007 # file's name was in URL encoding, the following method will not decode
1008 # it.
[23387]1009 my $unicode_filename = $filename;
1010 my $opt_decode_unicode_filename = $self->opt_url_decode($unicode_filename);
[16769]1011
[23387]1012 # wvWare can generate <img src="StrangeNoGraphicData"> tags, but with no
1013 # (it seems) accompanying file
1014 if ($opt_decode_unicode_filename =~ m/StrangeNoGraphicData$/) { return ""; }
1015
[23335]1016 my $content_encoding= $self->{'content_encoding'} || "utf8";
1017
[23387]1018 if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
1019 # The filenames that come through the HTML file have been decoded
1020 # into Unicode aware Perl strings. Need to convert them back
1021 # to their initial raw-byte encoding to match the file that
1022 # exists on the file system
1023 $filename = encode($content_encoding, $opt_decode_unicode_filename);
1024 }
1025 elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
1026 # HFS+ is UTF8 with decomposition
1027 $filename = encode($content_encoding, $opt_decode_unicode_filename);
1028 $filename = normalize('D', $filename); # Normalization Form D (decomposition)
[23335]1029
[23387]1030 }
1031 elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
1032 my $long_filename = Win32::GetLongPathName($opt_decode_unicode_filename);
1033
1034 if (defined $long_filename) {
1035 my $short_filename = Win32::GetShortPathName($long_filename);
1036 $filename = $short_filename;
1037 }
1038# else {
1039# print STDERR "***** failed to map href to real file:\n";
1040# print STDERR "****** $href -> $opt_decode_unicode_filename\n";
1041# }
1042 }
1043 else {
1044 my $outhandle = $self->{'outhandle'};
1045 print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
1046 print $outhandle " in file system encoding of href: $href\n";
1047 print $outhandle " No character encoding done.\n";
1048 }
1049
1050
[16769]1051 # some special processing if the intended filename was converted to utf8, but
1052 # the actual file still needs to be renamed
[27306]1053 #if (!&util::fd_exists($filename)) {
1054 if (!&FileUtils::fileExists($filename)) {
[16769]1055 # try the original filename stored in map
[23347]1056 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23363]1057 print STDERR "******!! orig filename did not exist: $filename\n";
[23347]1058 }
[23335]1059
[23387]1060## print STDERR "**** trying to look up unicode_filename: $unicode_filename\n";
[23363]1061
[23387]1062 my $original_filename = $self->{'unicode_to_original_filename'}->{$unicode_filename};
[23335]1063
[23347]1064 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]1065 print STDERR "****** From lookup unicode_filename, now trying for: $original_filename\n";
[23347]1066 }
[23335]1067
[16920]1068 if (defined $original_filename && -e $original_filename) {
[23347]1069 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23363]1070 print STDERR "****** Found match!\n";
[23347]1071 }
[16769]1072 $filename = $original_filename;
[14665]1073 }
1074 }
[16769]1075
1076 my ($ext) = $filename =~ m/(\.[^\.]*)$/;
[14665]1077
1078 if ($rl == 0) {
[16769]1079 if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
[18521]1080 return "_httpextlink_&amp;rl=0&amp;el=prompt&amp;href=" . $href . $hash_part;
[14665]1081 }
1082 else {
[18521]1083 return "_httpextlink_&amp;rl=0&amp;el=direct&amp;href=" . $href . $hash_part;
[14665]1084 }
1085 }
1086
[16769]1087 if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
[18521]1088 return "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part;
[14665]1089 }
[20778]1090 # add the original image file as a source file
[20791]1091 if (!$self->{'processing_tmp_files'} ) {
1092 $doc_obj->associate_source_file($filename);
1093 }
[14665]1094 if ($self->{'rename_assoc_files'}) {
1095 if (defined $self->{'aux_files'}->{$href}) {
1096 $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
1097 $self->{'aux_files'}->{$href}->{'file_num'} . $ext;
1098 } else {
1099 $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
1100 $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
1101 $self->inc_filecount ();
1102 }
1103 $doc_obj->associate_file($filename, $newname, undef, $section);
1104 return "_httpdocimg_/$newname";
1105 } else {
[23387]1106 if(&unicode::is_url_encoded($unicode_filename)) {
[16904]1107 # use the possibly-decoded filename instead to avoid double URL encoding
1108 ($newname) = $filename =~ m/([^\/\\]*)$/;
1109 } else {
[23387]1110 ($newname) = $unicode_filename =~ m/([^\/\\]*)$/;
[16904]1111 }
[16935]1112
[18320]1113 # Make sure this name uses only ASCII characters.
1114 # We use either base64 or URL encoding, as these preserve original encoding
1115 $newname = &util::rename_file($newname, $self->{'file_rename_method'});
[16632]1116
[23363]1117### print STDERR "***** associating $filename (raw-byte/utf8)-> $newname\n";
[14665]1118 $doc_obj->associate_file($filename, $newname, undef, $section);
[16632]1119
[16769]1120 # Since the generated image will be URL-encoded to avoid file-system/browser mess-ups
1121 # of filenames, URL-encode the additional percent signs of the URL-encoded filename
[16632]1122 my $newname_url = $newname;
[18404]1123 $newname_url = &unicode::filename_to_url($newname_url);
[16769]1124 return "_httpdocimg_/$newname_url";
[14665]1125 }
1126}
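# Illustrative example (file names assumed): with -rename_assoc_files set, the first
# associated image of a document might be stored as "0/0.gif" and referenced in the
# text as "_httpdocimg_/0/0.gif"; without it, the (URL-encoded) original filename is
# kept and associated under that name.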
1127
[31415]1128sub replace_section_links {
1129 my $self = shift(@_);
1130 my ($doc_obj) = @_;
1131 my %anchors;
1132 my $top_section = $doc_obj->get_top_section();
1133 my $thissection = $doc_obj->get_next_section($top_section);
1134 while ( defined $thissection ) {
1135 my $text = $doc_obj->get_text($thissection);
1136 while ( $text =~ /(?:(?:id|name)\s*=\s*[\'\"])([^\'\"]+)/gi ) {
1137 $anchors{$1} = $thissection;
1138 }
1139 $thissection = $doc_obj->get_next_section($thissection);
1140 }
1141 $thissection = $top_section;
1142 while (defined $thissection) {
1143 my $text = $doc_obj->get_text($thissection);
1144 $text =~ s/(href\s*=\s*[\"\'])(_httpsamepagelink_#)([^\'\"]+)/$self->replace_link_to_anchor($1,$2,$3,$thissection,$anchors{$3})/ige;
1145 $doc_obj->delete_text( $thissection);
1146 $doc_obj->add_utf8_text( $thissection, $text );
1147 $thissection = $doc_obj->get_next_section ($thissection);
1148 }
1149}
1150sub replace_link_to_anchor {
1151 my $self = shift(@_);
1152 my ($href_part,$old_link,$identifier,$current_section,$target_section) = @_;
1153 if (length $target_section && $current_section ne $target_section){
1154 return $href_part . "javascript:goToAnchor(\'" . $target_section . "\',\'" . $identifier . "\');" ;
1155 }
1156 return $href_part . $old_link . $identifier ;
1157}
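# Illustrative example (anchor and section names assumed): if some other section of
# the document contains <a name="results">, then a link that process_section earlier
# rewrote as href="_httpsamepagelink_#results" is replaced here with
# href="javascript:goToAnchor('<that section id>','results');". A link whose target
# anchor lies in the same section keeps the _httpsamepagelink_ form unchanged.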
[14665]1158
1159sub format_link {
1160 my $self = shift (@_);
1161 my ($link, $base_dir, $file) = @_;
1162
[23371]1163 # strip off hash part, e.g. #foo, but watch out for any entities, e.g. &#x3B1;
1164 my ($before_hash, $hash_part) = $link =~ m/^(.*?[^&])(\#.*)?$/;
[23463]1165
[14665]1166 $hash_part = "" if !defined $hash_part;
[16769]1167 if (!defined $before_hash || $before_hash !~ m/[\w\.\/]/) {
[23463]1168 my $outhandle = $self->{'outhandle'};
1169 print $outhandle "HTMLPlugin: ERROR - badly formatted tag ignored ($link)\n"
1170 if $self->{'verbosity'};
1171 return ($link, "", 0);
[14665]1172 }
[23463]1173
[20576]1174 if ($before_hash =~ s@^((?:http|https|ftp|file|mms)://)@@i) {
[23463]1175 my $type = $1;
[27703]1176 my $before_hash_file = $before_hash;
1177
[16769]1178 if ($link =~ m/^(http|ftp):/i) {
[27703]1179
[14665]1180 # Turn url (using /) into file name (possibly using \ on windows)
[27703]1181 my @http_dir_split = split('/', $before_hash_file);
1182 $before_hash_file = &FileUtils::filenameConcatenate(@http_dir_split);
[14665]1183 }
[16024]1184
[27703]1185 # want to maintain two versions of "before_hash": one representing the URL, the other using the filesystem-specific directory separator
1186 $before_hash_file = $self->eval_dir_dots($before_hash_file);
1187 my $before_hash_url = $before_hash_file;
1188 if ($ENV{'GSDLOS'} =~ /^windows$/i) {
1189 $before_hash_url =~ s@\\@\/@g;
1190 }
[14665]1191
[27703]1192 my $linkfilename = &FileUtils::filenameConcatenate($base_dir, $before_hash_file);
1193
[14665]1194 my $rl = 0;
1195 $rl = 1 if (-e $linkfilename);
1196
1197 # make sure there's a slash on the end if it's a directory
[27703]1198 if ($before_hash_url !~ m/\/$/) {
1199 $before_hash_url .= "/" if (-d $linkfilename);
[14665]1200 }
[27703]1201 return ($type . $before_hash_url, $hash_part, $rl);
[16024]1202
[16769]1203 } elsif ($link !~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i && $link !~ m/^\//) {
[14665]1204
[16769]1205 if ($before_hash =~ s@^/@@ || $before_hash =~ m/\\/) {
1206
[14665]1207 # the first directory will be the domain name if file_is_url
1208 # to generate archives, otherwise we'll assume all files are
1209 # from the same site and base_dir is the root
1210
1211 if ($self->{'file_is_url'}) {
1212 my @dirs = split /[\/\\]/, $file;
1213 my $domname = shift (@dirs);
[27306]1214 $before_hash = &FileUtils::filenameConcatenate($domname, $before_hash);
[14665]1215 $before_hash =~ s@\\@/@g; # for windows
1216 }
1217 else
1218 {
1219 # see if link shares directory with source document
1220 # => turn into relative link if this is so!
1221
[16769]1222 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
[14665]1223 # too difficult doing a pattern match with embedded '\'s...
1224 my $win_before_hash=$before_hash;
1225 $win_before_hash =~ s@(\\)+@/@g;
1226 # $base_dir is already similarly "converted" on windows.
1227 if ($win_before_hash =~ s@^$base_dir/@@o) {
[16024]1228 # if this is true, we removed a prefix
1229 $before_hash=$win_before_hash;
[14665]1230 }
1231 }
1232 else {
1233 # before_hash has lost leading slash by this point,
1234 # -> add back in prior to substitution with $base_dir
1235 $before_hash = "/$before_hash";
1236
[27306]1237 $before_hash = &FileUtils::filenameConcatenate("",$before_hash);
[14665]1238 $before_hash =~ s@^$base_dir/@@;
1239 }
1240 }
1241 } else {
1242 # Turn relative file path into full path
1243 my $dirname = &File::Basename::dirname($file);
[27306]1244 $before_hash = &FileUtils::filenameConcatenate($dirname, $before_hash);
[27703]1245 $before_hash = $self->eval_dir_dots($before_hash);
1246 $before_hash =~ s@\\@/@g; # for windows
[14665]1247 }
1248
[27306]1249 my $linkfilename = &FileUtils::filenameConcatenate($base_dir, $before_hash);
[23387]1250
1251
1252# print STDERR "**** linkfilename = $linkfilename\n";
1253# if (!&util::fd_exists($linkfilename)) {
1254# print STDERR "***** Warning: Could not find $linkfilename\n";
1255# }
1256
1257
[14665]1258 # make sure there's a slash on the end if it's a directory
[16769]1259 if ($before_hash !~ m/\/$/) {
[14665]1260 $before_hash .= "/" if (-d $linkfilename);
1261 }
[23387]1262
1263# print STDERR "*** returning: $before_hash\n";
1264
[14665]1265 return ("http://" . $before_hash, $hash_part, 1);
1266 } else {
1267 # mailto, news, nntp, telnet, javascript or gopher link
1268 return ($before_hash, "", 0);
1269 }
1270}
1271
1272sub extract_first_NNNN_characters {
1273 my $self = shift (@_);
1274 my ($textref, $doc_obj, $thissection) = @_;
1275
1276 foreach my $size (split /,/, $self->{'first'}) {
1277 my $tmptext = $$textref;
1278 # skip to the body
1279 $tmptext =~ s/.*<body[^>]*>//i;
1280 # remove javascript
1281 $tmptext =~ s@<script.*?</script>@ @sig;
1282 $tmptext =~ s/<[^>]*>/ /g;
1283 $tmptext =~ s/&nbsp;/ /g;
1284 $tmptext =~ s/^\s+//;
1285 $tmptext =~ s/\s+$//;
1286 $tmptext =~ s/\s+/ /gs;
1287 $tmptext = &unicode::substr ($tmptext, 0, $size);
1288 $tmptext =~ s/\s\S*$/&#8230;/; # adds an ellipsis (...)
1289 $doc_obj->add_utf8_metadata ($thissection, "First$size", $tmptext);
1290 }
1291}
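# Illustrative example (assumes a 'first' setting of "50,100" supplied by the
# configuration): this would add First50 and First100 metadata, each holding at most
# that many characters of stripped body text, ending in an ellipsis.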
1292
1293
1294sub extract_metadata {
1295 my $self = shift (@_);
1296 my ($textref, $metadata, $doc_obj, $section) = @_;
1297 my $outhandle = $self->{'outhandle'};
1298 # if we don't want metadata, we may as well not be here ...
1299 return if (!defined $self->{'metadata_fields'});
[22348]1300
[21800]1301 my $separator = $self->{'metadata_field_separator'};
1302 if ($separator eq "") {
1303 undef $separator;
1304 }
1305
[14665]1306 # metadata fields to extract/save. 'key' is the (lowercase) name of the
1307 # html meta, 'value' is the metadata name for greenstone to use
1308 my %find_fields = ();
1309
1310 my %creator_fields = (); # short-cut for lookups
1311
1312
1313 foreach my $field (split /,/, $self->{'metadata_fields'}) {
1314 $field =~ s/^\s+//; # remove leading whitespace
1315 $field =~ s/\s+$//; # remove trailing whitespace
[22348]1316
[14665]1317 # support tag<tagname>
[20689]1318 if ($field =~ m/^(.*?)\s*<(.*?)>$/) {
[14665]1319 # "$2" is the user's preferred gs metadata name
1320 $find_fields{lc($1)}=$2; # lc = lowercase
1321 } else { # no <tagname> for mapping
1322 # "$field" is the user's preferred gs metadata name
1323 $find_fields{lc($field)}=$field; # lc = lowercase
1324 }
1325 }
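# e.g. (illustrative): -metadata_fields "Title,Author<Creator>" records the HTML
# <meta name="Title"> value as Title metadata and maps <meta name="Author"> onto the
# Greenstone Creator field via the tag<tagname> syntax handled above.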
1326
1327 if (defined $self->{'hunt_creator_metadata'} &&
1328 $self->{'hunt_creator_metadata'} == 1 ) {
1329 my @extra_fields =
1330 (
1331 'author',
1332 'author.email',
1333 'creator',
1334 'dc.creator',
1335 'dc.creator.corporatename',
1336 );
1337
1338 # add the creator_metadata fields to search for
1339 foreach my $field (@extra_fields) {
1340 $creator_fields{$field}=0; # add to lookup hash
1341 }
1342 }
1343
1344
1345 # find the header in the html file, which has the meta tags
1346 $$textref =~ m@<head>(.*?)</head>@si;
1347
1348 my $html_header=$1;
1349
1350 # go through every <meta... tag defined in the html and see if it is
1351 # one of the tags we want to match.
1352
1353 # special case for title - we want to remember if its been found
1354 my $found_title = 0;
1355 # this assumes that ">" won't appear. (I don't think it's allowed to...)
[16769]1356 $html_header =~ m/^/; # match the start of the string, for \G assertion
[16024]1357
[14665]1358 while ($html_header =~ m/\G.*?<meta(.*?)>/sig) {
1359 my $metatag=$1;
1360 my ($tag, $value);
1361
1362 # find the tag name
[16769]1363 $metatag =~ m/(?:name|http-equiv)\s*=\s*([\"\'])?(.*?)\1/is;
[14665]1364 $tag=$2;
1365 # in case they're not using " or ', but they should...
1366 if (! $tag) {
[16769]1367 $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
[14665]1368 $tag=$1;
1369 }
1370
1371 if (!defined $tag) {
[15872]1372 print $outhandle "HTMLPlugin: can't find NAME in \"$metatag\"\n";
[14665]1373 next;
1374 }
1375
1376 # don't need to assign this field if it was passed in from a previous
1377 # (recursive) plugin
1378 if (defined $metadata->{$tag}) {next}
1379
1380 # find the tag content
[16769]1381 $metatag =~ m/content\s*=\s*([\"\'])?(.*?)\1/is;
[14665]1382 $value=$2;
1383
[24431]1384 # The following code assigns the metaname to value if value is
1385 # empty. Why would we do this?
1386 #if (! $value) {
1387 # $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
1388 # $value=$1;
1389 #}
1390 if (!defined $value || $value eq "") {
1391 print $outhandle "HTMLPlugin: can't find VALUE in <meta $metatag >\n" if ($self->{'verbosity'} > 2);
[14665]1392 next;
1393 }
[22348]1394
[14665]1395 # clean up and add
1396 $value =~ s/\s+/ /gs;
1397 chomp($value); # remove trailing \n, if any
1398 if (exists $creator_fields{lc($tag)}) {
1399 # map this value onto greenstone's "Creator" metadata
1400 $tag='Creator';
1401 } elsif (!exists $find_fields{lc($tag)}) {
[16024]1402 next; # don't want this tag
[14665]1403 } else {
1404 # get the user's preferred capitalisation
1405 $tag = $find_fields{lc($tag)};
1406 }
1407 if (lc($tag) eq "title") {
1408 $found_title = 1;
1409 }
[18521]1410
1411 if ($self->{'verbosity'} > 2) {
1412 print $outhandle " extracted \"$tag\" metadata \"$value\"\n";
[14665]1413 }
[18521]1414
[22348]1415 if ($tag =~ /\./) {
1416 # there is a . so has a namespace, add ex.
1417 $tag = "ex.$tag";
1418 }
[21800]1419 if (defined $separator) {
1420 my @values = split($separator, $value);
1421 foreach my $v (@values) {
1422 $doc_obj->add_utf8_metadata($section, $tag, $v) if $v =~ /\S/;
1423 }
1424 }
1425 else {
1426 $doc_obj->add_utf8_metadata($section, $tag, $value);
1427 }
[14665]1428 }
1429
1430 # TITLE: extract the document title
1431 if (exists $find_fields{'title'} && !$found_title) {
1432 # we want a title, and didn't find one in the meta tags
1433 # see if there's a <title> tag
1434 my $title;
1435 my $from = ""; # for debugging output only
[16769]1436 if ($html_header =~ m/<title[^>]*>([^<]+)<\/title[^>]*>/is) {
[14665]1437 $title = $1;
1438 $from = "<title> tags";
1439 }
1440
1441 if (!defined $title) {
1442 $from = "first 100 chars";
1443 # if no title use first 100 or so characters
1444 $title = $$textref;
1445 $title =~ s/^\xFE\xFF//; # Remove unicode byte order mark
1446 $title =~ s/^.*?<body>//si;
1447 # ignore javascript!
1448 $title =~ s@<script.*?</script>@ @sig;
1449 $title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
1450 $title =~ s/<[^>]*>/ /g; # remove all HTML tags
[27742]1451 $title =~ s@\r@@g; # remove Windows carriage returns to ensure that titles of pdftohtml docs are consistent (the same 100 chars) across windows and linux
[14665]1452 $title = substr ($title, 0, 100);
1453 $title =~ s/\s\S*$/.../;
1454 }
1455 $title =~ s/<[^>]*>/ /g; # remove html tags
1456 $title =~ s/&nbsp;/ /g;
1457 $title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
1458 $title =~ s/\s+/ /gs; # collapse multiple spaces
1459 $title =~ s/^\s*//; # remove leading spaces
1460 $title =~ s/\s*$//; # remove trailing spaces
1461
1462 $title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
1463 $title =~ s/^\s+//s; # in case title_sub introduced any...
[23335]1464 $doc_obj->add_utf8_metadata ($section, "Title", $title);
[14665]1465 print $outhandle " extracted Title metadata \"$title\" from $from\n"
1466 if ($self->{'verbosity'} > 2);
1467 }
1468
1469 # add FileFormat metadata
1470 $doc_obj->add_metadata($section,"FileFormat", "HTML");
1471
1472 # Special, for metadata names such as tagH1 - extracts
1473 # the text between the first <H1> and </H1> tags into "H1" metadata.
1474
1475 foreach my $field (keys %find_fields) {
[16769]1476 if ($field !~ m/^tag([a-z0-9]+)$/i) {next}
[14665]1477 my $tag = $1;
1478 if ($$textref =~ m@<$tag[^>]*>(.*?)</$tag[^>]*>@g) {
1479 my $content = $1;
1480 $content =~ s/&nbsp;/ /g;
1481 $content =~ s/<[^>]*>/ /g;
1482 $content =~ s/^\s+//;
1483 $content =~ s/\s+$//;
1484 $content =~ s/\s+/ /gs;
1485 if ($content) {
1486 $tag=$find_fields{"tag$tag"}; # get the user's capitalisation
1487 $tag =~ s/^tag//i;
1488 $doc_obj->add_utf8_metadata ($section, $tag, $content);
1489 print $outhandle " extracted \"$tag\" metadata \"$content\"\n"
1490 if ($self->{'verbosity'} > 2);
1491 }
1492 }
1493 }
1494}
1495
1496
1497# evaluate any "../" to next directory up
1498# evaluate any "./" as here
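# e.g. (with "/" as the directory separator) "archive/../import/./docs"
# evaluates to "import/docs"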
1499sub eval_dir_dots {
1500 my $self = shift (@_);
1501 my ($filename) = @_;
1502 my $dirsep_os = &util::get_os_dirsep();
1503 my @dirsep = split(/$dirsep_os/,$filename);
1504
1505 my @eval_dirs = ();
1506 foreach my $d (@dirsep) {
1507 if ($d eq "..") {
1508 pop(@eval_dirs);
1509
1510 } elsif ($d eq ".") {
1511 # do nothing!
1512
1513 } else {
1514 push(@eval_dirs,$d);
1515 }
1516 }
1517
1518 # Need to fiddle with number of elements in @eval_dirs if the
1519 # first one is the empty string. This is because of a
[27306]1520 # modification to FileUtils::filenameConcatenate that suppresses the addition
[14665]1521 # of a leading '/' character (or \ if windows) (intended to help
1522 # filename cat with relative paths) if the first entry in the
1523 # array is the empty string. Making the array start with *two*
1524 # empty strings is a way to defeat this "smart" option.
1525 #
1526 if (scalar(@eval_dirs) > 0) {
1527 if ($eval_dirs[0] eq ""){
1528 unshift(@eval_dirs,"");
1529 }
1530 }
[16836]1531
[27306]1532 my $evaluated_filename = (scalar @eval_dirs > 0) ? &FileUtils::filenameConcatenate(@eval_dirs) : "";
[16836]1533 return $evaluated_filename;
[14665]1534}
1535
1536sub replace_usemap_links {
1537 my $self = shift (@_);
1538 my ($front, $link, $back) = @_;
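 # e.g. turns usemap="./map1" into usemap="map1"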
1539
[16769]1540 # remove quotes from link at start and end if necessary
1541 if ($link=~/^[\"\']/) {
1542 $link=~s/^[\"\']//;
1543 $link=~s/[\"\']$//;
1544 $front.='"';
1545 $back="\"$back";
1546 }
1547
[14665]1548 $link =~ s/^\.\///;
1549 return $front . $link . $back;
1550}
1551
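# bump the file counter, starting a new numbered directory after every
# 1000 files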
1552sub inc_filecount {
1553 my $self = shift (@_);
1554
1555 if ($self->{'file_num'} == 1000) {
1556 $self->{'dir_num'} ++;
1557 $self->{'file_num'} = 0;
1558 } else {
1559 $self->{'file_num'} ++;
1560 }
1561}
1562
1563
[15872]1564# Extend read_file so that strings like &eacute; are
[14665]1565# converted to UTF8 internally.
1566#
1567# We don't convert &lt; or &gt; or &amp; or &quot; in case
1568# they interfere with the GML files
1569
1570sub read_file {
[15872]1571 my $self = shift(@_);
1572 my ($filename, $encoding, $language, $textref) = @_;
[14665]1573
[15872]1574 $self->SUPER::read_file($filename, $encoding, $language, $textref);
[14665]1575
[23363]1576 # Convert entities to their Unicode code-point equivalents
[14665]1577 $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
[22951]1578 $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,1)/gseo;
[14665]1579 $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;
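 # The three substitutions above first rename the entities we want to keep
 # (&lt; -> &zlt; etc.), then convert every remaining entity to its Unicode
 # character equivalent, and finally restore the protected entities.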
[22842]1580
[14665]1581}
1582
[20774]1583sub HB_read_html_file {
1584 my $self = shift (@_);
1585 my ($htmlfile, $text) = @_;
1586
1587 # load in the file
1588 if (!open (FILE, $htmlfile)) {
1589 print STDERR "ERROR - could not open $htmlfile\n";
1590 return;
1591 }
1592
1593 my $foundbody = 0;
1594 $self->HB_gettext (\$foundbody, $text, "FILE");
1595 close FILE;
1596
1597 # just in case there was no <body> tag
1598 if (!$foundbody) {
1599 $foundbody = 1;
1600 open (FILE, $htmlfile) || return;
1601 $self->HB_gettext (\$foundbody, $text, "FILE");
1602 close FILE;
1603 }
1604 # text is in utf8
1605}
1606
1607 # converts the text to utf8, since ghtml does that for &eacute; etc.
1608sub HB_gettext {
1609 my $self = shift (@_);
1610 my ($foundbody, $text, $handle) = @_;
1611
1612 my $line = "";
1613 while (defined ($line = <$handle>)) {
1614 # look for body tag
1615 if (!$$foundbody) {
1616 if ($line =~ s/^.*<body[^>]*>//i) {
1617 $$foundbody = 1;
1618 } else {
1619 next;
1620 }
1621 }
1622
1623 # check for symbol fonts
1624 if ($line =~ m/<font [^>]*?face\s*=\s*\"?(\w+)\"?/i) {
1625 my $font = $1;
1626 print STDERR "HBPlug::HB_gettext - warning removed font $font\n"
1627 if ($font !~ m/^arial$/i);
1628 }
1629
1630 $$text .= $line;
1631 }
1632
1633 if ($self->{'input_encoding'} eq "iso_8859_1") {
1634 # convert to utf-8
1635 $$text=&unicode::unicode2utf8(&unicode::convert2unicode("iso_8859_1", $text));
1636 }
1637 # convert any alphanumeric character entities to their utf-8
1638 # equivalent for indexing purposes
1639 #&ghtml::convertcharentities ($$text);
1640
1641 $$text =~ s/\s+/ /g; # remove \n's
[22857]1642
1643 # At this point $$text is a binary byte string
1644 # => turn it into a Unicode aware string, so full
1645 # Unicode aware pattern matching can be used.
1646 # For instance: 's/\x{0101}//g' or '[[:upper:]]'
1647 #
1648
1649 $$text = decode("utf8",$$text);
[20774]1650}
1651
1652sub HB_clean_section {
1653 my $self = shift (@_);
1654 my ($section) = @_;
1655
1656 # remove tags without a starting tag from the section
1657 my ($tag, $tagstart);
1658 while ($section =~ m/<\/([^>]{1,10})>/) {
1659 $tag = $1;
1660 $tagstart = index($section, "<$tag");
1661 last if (($tagstart >= 0) && ($tagstart < index($section, "<\/$tag")));
1662 $section =~ s/<\/$tag>//;
1663 }
1664
1665 # remove extra paragraph tags
1666 while ($section =~ s/<p\b[^>]*>\s*<p\b/<p/ig) {}
1667
1668 # remove extra stuff at the end of the section
1669 while ($section =~ s/(<u>|<i>|<b>|<p\b[^>]*>|&nbsp;|\s)$//i) {}
1670
1671 # add a newline at the beginning of each paragraph
1672 $section =~ s/(.)\s*<p\b/$1\n\n<p/gi;
1673
1674 # add a newline every 80 characters at a word boundary
1675 # Note: this regular expression puts a line feed before
1676 # the last word in each section, even when it is not
1677 # needed.
1678 $section =~ s/(.{1,80})\s/$1\n/g;
1679
1680 # fix up the image links
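 # e.g. <img src="fig1.png"> becomes <center><img src="fig1.png" /></center><br/>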
1681 $section =~ s/<img[^>]*?src=\"?([^\">]+)\"?[^>]*>/
1682 <center><img src=\"$1\" \/><\/center><br\/>/ig;
1683 $section =~ s/&lt;&lt;I&gt;&gt;\s*([^\.]+\.(png|jpg|gif))/
1684 <center><img src=\"$1\" \/><\/center><br\/>/ig;
1685
1686 return $section;
1687}
1688
1689# Will convert the oldHDL format to the new HDL format (using the Section tag)
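# In the old format a section heading is marked up in the HTML text itself,
# e.g. "&lt;&lt;TOC2&gt;&gt; Chapter 1" starts a level-2 section titled "Chapter 1".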
1690sub convert_to_newHDLformat
1691{
1692 my $self = shift (@_);
1693 my ($file,$cnfile) = @_;
1694 my $input_filename = $file;
1695 my $tmp_filename = $cnfile;
1696
1697 # write HTML tmp file with new HDL format
1698 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1699
1700 # read in the file and do basic html cleaning (removing header etc)
1701 my $html = "";
1702 $self->HB_read_html_file ($input_filename, \$html);
1703
1704 # process the file one section at a time
1705 my $curtoclevel = 1;
1706 my $firstsection = 1;
1707 my $toclevel = 0;
1708 while (length ($html) > 0) {
1709 if ($html =~ s/^.*?(?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC(\d+)&gt;&gt;\s*(.*?)<p\b/<p/i) {
1710 $toclevel = $3;
1711 my $title = $4;
1712 my $sectiontext = "";
1713 if ($html =~ s/^(.*?)((?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC\d+&gt;&gt;)/$2/i) {
1714 $sectiontext = $1;
1715 } else {
1716 $sectiontext = $html;
1717 $html = "";
1718 }
1719
1720 # remove tags and extra spaces from the title
1721 $title =~ s/<\/?[^>]+>//g;
1722 $title =~ s/^\s+|\s+$//g;
1723
1724 # close any sections below the current level and
1725 # create a new section (special case for the firstsection)
1726 print PROD "<!--\n";
1727 while (($curtoclevel > $toclevel) ||
1728 (!$firstsection && $curtoclevel == $toclevel)) {
1729 $curtoclevel--;
1730 print PROD "</Section>\n";
1731 }
1732 if ($curtoclevel+1 < $toclevel) {
1733 print STDERR "WARNING - jump in toc levels in $input_filename " .
1734 "from $curtoclevel to $toclevel\n";
1735 }
1736 while ($curtoclevel < $toclevel) {
1737 $curtoclevel++;
1738 }
1739
1740 if ($curtoclevel == 1) {
1741 # add the header tag
1742 print PROD "-->\n";
1743 print PROD "<HTML>\n<HEAD>\n<TITLE>$title</TITLE>\n</HEAD>\n<BODY>\n";
1744 print PROD "<!--\n";
1745 }
1746
1747 print PROD "<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">$title</Metadata>\n\t</Description>\n";
1748
1749 print PROD "-->\n";
1750
1751 # clean up the section html
1752 $sectiontext = $self->HB_clean_section($sectiontext);
1753
1754 print PROD "$sectiontext\n";
1755
1756 } else {
1757 print STDERR "WARNING - leftover text\n" , $self->shorten($html),
1758 "\nin $input_filename\n";
1759 last;
1760 }
1761 $firstsection = 0;
1762 }
1763
1764 print PROD "<!--\n";
1765 while ($curtoclevel > 0) {
1766 $curtoclevel--;
1767 print PROD "</Section>\n";
1768 }
1769 print PROD "-->\n";
1770
1771 close (PROD) || die("Error Closing File: $tmp_filename $!");
1772
1773 return $tmp_filename;
1774}
1775
1776sub shorten {
1777 my $self = shift (@_);
1778 my ($text) = @_;
1779
1780 return "\"$text\"" if (length($text) < 100);
1781
1782 return "\"" . substr ($text, 0, 50) . "\" ... \"" .
1783 substr ($text, length($text)-50) . "\"";
1784}
1785
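# Copies the input file into the collection's "tidytmp" directory, optionally
# converting old-style HDL files to the new Section-tag format and/or running
# HTML Tidy over them, and returns the path of the file that should be processed.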
1786sub convert_tidy_or_oldHDL_file
1787{
1788 my $self = shift (@_);
1789 my ($file) = @_;
1790 my $input_filename = $file;
1791
1792 if (-d $input_filename)
1793 {
1794 return $input_filename;
1795 }
1796
1797 # get the input filename
1798 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
1799 my $base_dirname = $dirname;
1800 $suffix = lc($suffix);
1801
1802 # derive tmp filename from input filename
1803 # Remove any white space from filename -- no risk of name collision, and
1804 # makes later conversion by utils simpler. Leave spaces in path...
1805 # strip out spaces, dots and hyphens from the filename
1806 $tailname =~ s/\s+//g;
1807 $tailname =~ s/\.+//g;
1808 $tailname =~ s/\-+//g;
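 # e.g. "My Report v1.2-final" becomes "MyReportv12final"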
1809 # convert to utf-8 otherwise we have problems with the doc.xml file
1810 # later on
1811 &unicode::ensure_utf8(\$tailname);
1812
1813 # softlink to collection tmp dir
[27306]1814 my $tmp_dirname = &FileUtils::filenameConcatenate($ENV{'GSDLCOLLECTDIR'}, "tidytmp");
[28196]1815 &FileUtils::makeDirectory($tmp_dirname) if (!-e $tmp_dirname);
[20774]1816
1817 my $test_dirname = "";
1818 my $f_separator = &util::get_os_dirsep();
1819
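 # if the file lives below an import directory, recreate its sub-directory
 # structure under tidytmp ($` and $' are the pre-match and post-match of
 # the preceding regular expression)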
1820 if ($dirname =~ m/import$f_separator/)
1821 {
1822 $test_dirname = $'; #'
1823
1824 #print STDERR "init $'\n";
1825
1826 while ($test_dirname =~ m/[$f_separator]/)
1827 {
1828 my $folderdirname = $`;
[27306]1829 $tmp_dirname = &FileUtils::filenameConcatenate($tmp_dirname,$folderdirname);
[28196]1830 &FileUtils::makeDirectory($tmp_dirname) if (!-e $tmp_dirname);
[20774]1831 $test_dirname = $'; #'
1832 }
1833 }
1834
[27306]1835 my $tmp_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");
[20774]1836
1837 # tidy or convert the input file if it is an HTML-like file or is accepted by the process_exp
1838 if (($suffix eq ".htm") || ($suffix eq ".html") || ($suffix eq ".shtml"))
1839 {
1840 #convert the input file to a new style HDL
1841 my $hdl_output_filename = $input_filename;
1842 if ($self->{'old_style_HDL'})
1843 {
[27306]1844 $hdl_output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");
[20774]1845 $hdl_output_filename = $self->convert_to_newHDLformat($input_filename,$hdl_output_filename);
1846 }
1847
1848 # copy any other files from the base dir to the tmp dir if they are not already there
1849 opendir(DIR,$base_dirname) or die "Can't open base directory : $base_dirname!";
1850 my @files = grep {!/^\.+$/} readdir(DIR);
1851 close(DIR);
1852
1853 foreach my $file (@files)
1854 {
[27306]1855 my $src_file = &FileUtils::filenameConcatenate($base_dirname,$file);
1856 my $dest_file = &FileUtils::filenameConcatenate($tmp_dirname,$file);
[20774]1857 if ((!-e $dest_file) && (!-d $src_file))
1858 {
1859 # just copy the original file back to the tmp directory
1860 copy($src_file,$dest_file) or die "Can't copy file $src_file to $dest_file $!";
1861 }
1862 }
1863
1864 # tidy the input file
1865 my $tidy_output_filename = $hdl_output_filename;
1866 if ($self->{'use_realistic_book'})
1867 {
[27306]1868 $tidy_output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");
[20774]1869 $tidy_output_filename = $self->tmp_tidy_file($hdl_output_filename,$tidy_output_filename);
1870 }
1871 $tmp_filename = $tidy_output_filename;
1872 }
1873 else
1874 {
1875 if (!-e $tmp_filename)
1876 {
1877 # just copy the original file back to the tmp directory
1878 copy($input_filename,$tmp_filename) or die "Can't copy file $input_filename to $tmp_filename $!";
1879 }
1880 }
1881
1882 return $tmp_filename;
1883}
1884
1885
1886 # Turns the HTML input file into a proper XML file, with font tags removed and
1887 # image sizes added to the img tags.
1888# The tidying process takes place in a collection specific 'tmp' directory so
1889# that we don't accidentally damage the input.
1890sub tmp_tidy_file
1891{
1892 my $self = shift (@_);
1893 my ($file,$cnfile) = @_;
1894 my $input_filename = $file;
1895 my $tmp_filename = $cnfile;
1896
1897 # get the input filename
1898 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
1899
1900 require HTML::TokeParser::Simple;
1901
1902 # create HTML parser to decode the input file
1903 my $parser = HTML::TokeParser::Simple->new($input_filename);
1904
1905 # write the HTML tmp file with font tags removed and image sizes added to the img tags
1906 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1907 while (my $token = $parser->get_token())
1908 {
1909 # is it an img tag
1910 if ($token->is_start_tag('img'))
1911 {
1912 # get the attributes
1913 my $attr = $token->return_attr;
1914
1915 # get the full path to the image
[27306]1916 my $img_file = &FileUtils::filenameConcatenate($dirname,$attr->{src});
[20774]1917
1918 # set the width and height attribute
1919 ($attr->{width}, $attr->{height}) = imgsize($img_file);
1920
1921 # recreate the tag
1922 print PROD "<img";
1923 print PROD map { qq { $_="$attr->{$_}"} } keys %$attr;
1924 print PROD ">";
1925 }
1926 # is it a font tag
1927 else
1928 {
1929 if (($token->is_start_tag('font')) || ($token->is_end_tag('font')))
1930 {
1931 # remove font tag
1932 print PROD "";
1933 }
1934 else
1935 {
1936 # print without changes
1937 print PROD $token->as_is;
1938 }
1939 }
1940 }
1941 close (PROD) || die("Error Closing File: $tmp_filename $!");
1942
1943 # run html-tidy on the tmp file to make it a proper XML file
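 # (-q suppresses non-essential output, -raw passes bytes above 127 through
 # unchanged, -wrap 0 disables line wrapping, -asxml converts the output to
 # well-formed XHTML)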
1944
[22594]1945 my $outhandle = $self->{'outhandle'};
1946 print $outhandle "Converting HTML to be XML compliant:\n";
1947
1948 my $tidy_cmd = "tidy";
1949 $tidy_cmd .= " -q" if ($self->{'verbosity'} <= 2);
[22636]1950 $tidy_cmd .= " -raw -wrap 0 -asxml \"$tmp_filename\"";
[22594]1951 if ($self->{'verbosity'} <= 2) {
1952 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
1953 $tidy_cmd .= " 2>nul";
1954 }
1955 else {
1956 $tidy_cmd .= " 2>/dev/null";
1957 }
1958 print $outhandle " => $tidy_cmd\n";
1959 }
1960
1961 my $tidyfile = `$tidy_cmd`;
1962
[20774]1963 # write result back to the tmp file
1964 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1965 print PROD $tidyfile;
1966 close (PROD) || die("Error Closing File: $tmp_filename $!");
1967
1968 # return the output filename
1969 return $tmp_filename;
1970}
1971
[22355]1972sub associate_cover_image
1973{
1974 my $self = shift(@_);
1975 my ($doc_obj, $filename) = @_;
1976 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
1977 {
1978 # we will have cover image in tidytmp, but want it from import
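 # e.g. .../tidytmp/ch1/doc.html -> .../import/ch1/doc.html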
1979 $filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
1980 }
1981 $self->SUPER::associate_cover_image($doc_obj, $filename);
1982}
1983
1984
[14665]19851;