source: main/trunk/greenstone2/perllib/plugins/HTMLPlugin.pm@ 31440

Last change on this file since 31440 was 31440, checked in by kjdon, 4 years ago

nearly there for handling russian etc subfolders in import. need to test on windows though. still one part to be worked out.

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 70.5 KB
Line 
1###########################################################################
2#
3# HTMLPlugin.pm -- basic html plugin
4#
5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34#
35
36package HTMLPlugin;
37
38use Encode;
39use Unicode::Normalize 'normalize';
40
41use ReadTextFile;
42use HBPlugin;
43use ghtml;
44use unicode;
45use util;
46use FileUtils;
47use XMLParser;
48
49use File::Copy;
50
# Compile-time setup: establish inheritance and verify the Greenstone
# environment before the rest of the module is compiled.
sub BEGIN {
    # HTMLPlugin reads text files, and can also behave like HBPlugin
    # (used for the "realistic book" / old-style HDL processing paths).
    @HTMLPlugin::ISA = ('ReadTextFile', 'HBPlugin');
    # A missing GSDLHOME means Greenstone is not set up; fail fast.
    die "GSDLHOME not set\n" unless defined $ENV{'GSDLHOME'};
    unshift (@INC, "$ENV{'GSDLHOME'}/perllib/cpan"); # for Image/Size.pm
}
56
57use Image::Size;
58
59use strict; # every perl program should have this!
60no strict 'refs'; # make an exception so we can use variables as filehandles
61
# Option declarations for HTMLPlugin, merged into the plugin framework's
# ArgList by new().  Each entry gives the option name, its GLI description
# resource key ('desc'), its type, and optionally a default ('deft').
my $arguments =
    [ { 'name' => "process_exp",
        'desc' => "{BasePlugin.process_exp}",
        'type' => "regexp",
        'deft' => &get_default_process_exp() },
      { 'name' => "block_exp",
        'desc' => "{BasePlugin.block_exp}",
        'type' => 'regexp',
        'deft' => &get_default_block_exp() },
      { 'name' => "nolinks",
        'desc' => "{HTMLPlugin.nolinks}",
        'type' => "flag" },
      { 'name' => "keep_head",
        'desc' => "{HTMLPlugin.keep_head}",
        'type' => "flag" },
      { 'name' => "no_metadata",
        'desc' => "{HTMLPlugin.no_metadata}",
        'type' => "flag" },
      { 'name' => "metadata_fields",
        'desc' => "{HTMLPlugin.metadata_fields}",
        'type' => "string",
        'deft' => "Title" },
      { 'name' => "metadata_field_separator",
        'desc' => "{HTMLPlugin.metadata_field_separator}",
        'type' => "string",
        'deft' => "" },
      { 'name' => "hunt_creator_metadata",
        'desc' => "{HTMLPlugin.hunt_creator_metadata}",
        'type' => "flag" },
      { 'name' => "file_is_url",
        'desc' => "{HTMLPlugin.file_is_url}",
        'type' => "flag" },
      { 'name' => "assoc_files",
        'desc' => "{HTMLPlugin.assoc_files}",
        'type' => "regexp",
        'deft' => &get_default_block_exp() },
      { 'name' => "rename_assoc_files",
        'desc' => "{HTMLPlugin.rename_assoc_files}",
        'type' => "flag" },
      { 'name' => "title_sub",
        'desc' => "{HTMLPlugin.title_sub}",
        'type' => "string",
        'deft' => "" },
      { 'name' => "description_tags",
        'desc' => "{HTMLPlugin.description_tags}",
        'type' => "flag" },
      # retain this for backward compatibility (w3mir option was replaced by
      # file_is_url)
      { 'name' => "w3mir",
#       'desc' => "{HTMLPlugin.w3mir}",
        'type' => "flag",
        'hiddengli' => "yes"},
      { 'name' => "no_strip_metadata_html",
        'desc' => "{HTMLPlugin.no_strip_metadata_html}",
        'type' => "string",
        'deft' => "",
        'reqd' => "no"},
      { 'name' => "sectionalise_using_h_tags",
        'desc' => "{HTMLPlugin.sectionalise_using_h_tags}",
        'type' => "flag" },
      # NOTE(review): the desc key below reuses {HTMLPlugin.tidy_html} rather
      # than a use_realistic_book-specific key -- confirm this is intentional.
      { 'name' => "use_realistic_book",
        'desc' => "{HTMLPlugin.tidy_html}",
        'type' => "flag"},
      { 'name' => "old_style_HDL",
        'desc' => "{HTMLPlugin.old_style_HDL}",
        'type' => "flag"},
      {'name' => "processing_tmp_files",
       'desc' => "{BasePlugin.processing_tmp_files}",
       'type' => "flag",
       'hiddengli' => "yes"}
      ];

# Plugin descriptor handed to the framework's OptList by new().
my $options = { 'name' => "HTMLPlugin",
                'desc' => "{HTMLPlugin.desc}",
                'abstract' => "no",
                'inherits' => "yes",
                'args' => $arguments };
140
# Constructor.  Registers this plugin's argument and option declarations
# with the framework, chains construction up to ReadTextFile, honours the
# legacy -w3mir flag (an old alias for -file_is_url), and initialises
# per-document bookkeeping state.
sub new {
    my ($class) = shift (@_);
    my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
    push(@$pluginlist, $class);

    push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
    push(@{$hashArgOptLists->{"OptList"}},$options);

    # Direct method-call syntax in place of the fragile indirect object
    # notation ("new ReadTextFile ...") originally used here; behaviour
    # is identical but parsing is unambiguous.
    my $self = ReadTextFile->new($pluginlist,$inputargs,$hashArgOptLists);

    # Backward compatibility: -w3mir used to be how "filenames are URLs"
    # was expressed.
    if ($self->{'w3mir'}) {
        $self->{'file_is_url'} = 1;
    }

    # Per-document associated-file bookkeeping (reset again in process()).
    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    return bless $self, $class;
}
161
# Default -block_exp is empty: blocking of images, stylesheets and other
# files referenced by an HTML document is instead done dynamically, by
# store_block_files() reading each document and recording what it links to.
#
# (If a static expression were wanted, something like
#  (?i)\.(gif|jpe?g|jpe|png|css|js(?:@.*)?)$ would catch cases such as
#  <script language="javascript" src="img/lib.js@123">.)
sub get_default_block_exp {
    my $self = shift (@_);

    # Historic static default, kept for reference:
    #   q^(?i)\.(gif|jpe?g|jpe|jpg|png|css)$^
    my $empty_exp = "";
    return $empty_exp;
}
172
# Default -process_exp: case-insensitively matches filenames with common
# HTML / server-page extensions.  The final alternative is an attempt to
# encode the concept of an html query (e.g. "lookup.cgi?id=3").
sub get_default_process_exp {
    my $self = shift (@_);

    my $default_exp = q^(?i)(\.html?|\.shtml|\.shm|\.asp|\.php\d?|\.cgi|.+\?.+=.*)$^;
    return $default_exp;
}
179
# First-pass ("file blocking") scan of one HTML file.  Detects and caches
# the file's language/encoding for later use by read_into_doc_obj, then
# finds every file this document references (img/usemap/link/embed/
# background/script attributes) and records those names in $block_hash so
# they are not processed as documents in their own right.
sub store_block_files
{
    my $self =shift (@_);
    my ($filename_full_path, $block_hash) = @_;

    my $html_fname = $filename_full_path;

    # Work out language + encoding once here and cache it keyed by full
    # path; read_into_doc_obj consumes (and deletes) the entry later.
    my ($language, $content_encoding) = $self->textcat_get_language_encoding ($filename_full_path);
    $self->{'store_content_encoding'}->{$filename_full_path} = [$content_encoding, $language];


    # read in file ($text will be in the filesystem encoding)
    my $raw_text = "";
    $self->read_file_no_decoding($filename_full_path, \$raw_text);

    my $textref = \$raw_text;
    # Strip HTML comments (literal or entity-escaped delimiters) so that
    # commented-out links do not get blocked.
    my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
    my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
    $$textref =~ s/$opencom(.*?)$closecom//gs;

    # Convert entities to their UTF8 equivalents.  The five core entities
    # are temporarily renamed (&zlt; etc.) so they survive the general
    # conversion pass, then restored.
    $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
    $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,0)/gseo; # on this occasion, want it left as utf8
    $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;

    # An attribute value: a double-quoted string, or a bare token.
    my $attval = "\\\"[^\\\"]+\\\"|[^\\s>]+";
    my @img_matches = ($$textref =~ m/<img[^>]*?src\s*=\s*($attval)[^>]*>/igs);
    my @usemap_matches = ($$textref =~ m/<img[^>]*?usemap\s*=\s*($attval)[^>]*>/igs);
    my @link_matches = ($$textref =~ m/<link[^>]*?href\s*=\s*($attval)[^>]*>/igs);
    my @embed_matches = ($$textref =~ m/<embed[^>]*?src\s*=\s*($attval)[^>]*>/igs);
    my @tabbg_matches = ($$textref =~ m/<(?:body|table|tr|td)[^>]*?background\s*=\s*($attval)[^>]*>/igs);
    my @script_matches = ($$textref =~ m/<script[^>]*?src\s*=\s*($attval)[^>]*>/igs);

    if(!defined $self->{'unicode_to_original_filename'}) {
        # maps from utf8 converted link name -> original filename referred to by (possibly URL-encoded) src url
        $self->{'unicode_to_original_filename'} = {};
    }

    foreach my $raw_link (@img_matches, @usemap_matches, @link_matches, @embed_matches, @tabbg_matches, @script_matches) {

        # remove quotes from link at start and end if necessary
        if ($raw_link =~ m/^\"/) {
            $raw_link =~ s/^\"//;
            $raw_link =~ s/\"$//;
        }

        # remove any anchor names, e.g. foo.html#name becomes foo.html
        # but watch out for any #'s that are part of entities, such as &#x3B1;
        # (NOTE(review): a link *starting* with '#' is not stripped by this
        # pattern, but anchor-only links are skipped by the test just below)
        $raw_link =~ s/([^&])\#.*$/$1/s;

        # some links may just be anchor names
        next unless ($raw_link =~ /\S+/);

        if ($raw_link !~ m@^/@ && $raw_link !~ m/^([A-Z]:?)\\/i) {
            # Turn relative file path into full path
            my $dirname = &File::Basename::dirname($filename_full_path);
            $raw_link = &FileUtils::filenameConcatenate($dirname, $raw_link);
        }
        $raw_link = $self->eval_dir_dots($raw_link);

        # this is the actual filename on the filesystem (that the link refers to)
        my $url_original_filename = $self->opt_url_decode($raw_link);

        my ($uses_bytecodes,$exceeds_bytecodes) = &unicode::analyze_raw_string($url_original_filename);

        if ($exceeds_bytecodes) {
            # We have a link to a file name that is more complicated than a raw byte filename
            # What we do next depends on the operating system we are on

            if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
                # Assume we're dealing with a UTF-8 encoded filename
                $url_original_filename = encode("utf8", $url_original_filename);
            }
            elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
                # HFS+ is UTF8 with decomposition
                $url_original_filename = encode("utf8", $url_original_filename);
                $url_original_filename = normalize('D', $url_original_filename); # Normalization Form D (decomposition)
            }
            elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
                # Don't need to do anything as later code maps Windows
                # unicode filenames to DOS short filenames when needed
            }
            else {
                my $outhandle = $self->{'outhandle'};
                print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
                print $outhandle " in raw file system encoding of: $raw_link\n";
                print $outhandle " Assuming filesystem is UTF-8 based.\n";
                $url_original_filename = encode("utf8", $url_original_filename);
            }
        }

        # Convert the (currently raw) link into its Unicode version.
        # Store the Unicode link along with the url_original_filename
        my $unicode_url_original_filename = "";
        $self->decode_text($raw_link,$content_encoding,$language,\$unicode_url_original_filename);


        $self->{'unicode_to_original_filename'}->{$unicode_url_original_filename} = $url_original_filename;


        if ($url_original_filename ne $unicode_url_original_filename) {
            my $outhandle = $self->{'outhandle'};

            print $outhandle "URL Encoding $url_original_filename\n";
            print $outhandle " ->$unicode_url_original_filename\n";

            # make sure not to block the file itself, as happens when an html file links to itself
            # e.g. if the current file is mary-boleyn/index.html and contains <link rel="canonical" href="index.html" />
            my $unicode_html_fname = "";
            $self->decode_text($html_fname,$content_encoding,$language,\$unicode_html_fname);
            if($unicode_url_original_filename ne $unicode_html_fname) {
                # Allow for possibility of raw byte version and Unicode versions of file
                &util::block_filename($block_hash,$unicode_url_original_filename);
            }
        }

        # $url_original_filename = &util::upgrade_if_dos_filename($url_original_filename);
        &util::block_filename($block_hash,$url_original_filename) if $url_original_filename ne $html_fname;

        # but only add the linked file to the blocklist if the current html file does not link to itself

    }
}
303
# Given a filename in any encoding, URL-decode it to recover the original
# filename in its original encoding.  Because the aim is to work out the
# *original* filename, nothing is decoded if a file whose name is the
# URL-encoded string itself already exists in the local folder.
#
sub opt_url_decode {
    my $self = shift (@_);
    my ($raw_link) = @_;

    # Only decode when the link contains %XX escapes or numeric character
    # entities (&#xHH; / &#DD;).  Note that the filename may legitimately
    # include such sequences, hence the existence check before decoding.
    my $looks_encoded =
           ($raw_link =~ m/\%[A-F0-9]{2}/i)
        || ($raw_link =~ m/\&\#x[0-9A-F]+;/i)
        || ($raw_link =~ m/\&\#[0-9]+;/i);

    if ($looks_encoded && !-e $raw_link) {
        $raw_link = &unicode::url_decode($raw_link,1);
    }

    return $raw_link;
}
327
# Read one file into a document object.  Picks up the language/encoding
# cached by store_block_files, optionally converts the file via tidy (for
# -use_realistic_book / -old_style_HDL) before delegating to the parent
# ReadTextFile::read_into_doc_obj, then restores the original (untidied)
# source filename metadata on the resulting doc_obj.
# Returns ($process_status, $doc_obj) as the parent does.
sub read_into_doc_obj
{
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli) = @_;

    my ($filename_full_path, $filename_no_path) = &util::get_full_filenames($base_dir, $file);

    # Lookup content_encoding and language worked out in file_block pass for this file
    # Store them under local names so they are nice and easy to access
    $self->{'content_encoding'} = $self->{'store_content_encoding'}->{$filename_full_path}[0];
    $self->{'language'} = $self->{'store_content_encoding'}->{$filename_full_path}[1];

    # get the input file
    my $input_filename = $file;
    my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
    $suffix = lc($suffix);
    my $tidy_filename;
    if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
    {
        # because the document has to be sectionalized set the description tags
        $self->{'description_tags'} = 1;

        # set the file to be tidied
        $input_filename = &FileUtils::filenameConcatenate($base_dir,$file) if $base_dir =~ m/\w/;

        # get the tidied file
        #my $tidy_filename = $self->tmp_tidy_file($input_filename);
        $tidy_filename = $self->convert_tidy_or_oldHDL_file($input_filename);

        # derive tmp filename from input filename
        my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($tidy_filename, "\\.[^\\.]+\$");

        # set the new input file and base_dir to be from the tidied file
        $file = "$tailname$suffix";
        $base_dir = $dirname;
    }

    # call the parent read_into_doc_obj
    my ($process_status,$doc_obj) = $self->SUPER::read_into_doc_obj($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli);
    if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
    {
        # now we need to reset the filenames in the doc obj so that the converted filenames are not used
        my $collect_file = &util::filename_within_collection($filename_full_path);
        $doc_obj->set_source_filename ($collect_file, $self->{'file_rename_method'});
        ## set_source_filename does not set the doc_obj source_path which is used in archives dbs for incremental
        # build. So set it manually.
        $doc_obj->set_source_path($filename_full_path);
        my $collect_conv_file = &util::filename_within_collection($tidy_filename);
        $doc_obj->set_converted_filename($collect_conv_file);

        my $plugin_filename_encoding = $self->{'filename_encoding'};
        my $filename_encoding = $self->deduce_filename_encoding($file,$metadata,$plugin_filename_encoding);
        $self->set_Source_metadata($doc_obj, $filename_full_path, $filename_encoding);
    }

    # The cached encoding entry is single-use: drop it now that this file
    # has been processed.
    delete $self->{'store_content_encoding'}->{$filename_full_path};
    $self->{'content_encoding'} = undef;

    return ($process_status,$doc_obj);
}
388
# do plugin specific processing of doc_obj
#
# Sets URL/UTF8URL metadata for the page, optionally sectionalises the
# document from <hN> headings (-sectionalise_using_h_tags), then either
# walks embedded <Section>/<Metadata> comment markup (-description_tags)
# or processes the file as a single-section document.  Always returns 1.
sub process {
    my $self = shift (@_);
    my ($textref, $pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;
    my $outhandle = $self->{'outhandle'};

    if ($ENV{'GSDLOS'} =~ m/^windows/i) {
        # this makes life so much easier... perl can cope with unix-style '/'s.
        $base_dir =~ s@(\\)+@/@g;
        $file =~ s@(\\)+@/@g;
    }

    my $filename = &FileUtils::filenameConcatenate($base_dir,$file);
    # "upgraded" names have DOS short filenames expanded where applicable
    my $upgraded_base_dir = &util::upgrade_if_dos_filename($base_dir);
    my $upgraded_filename = &util::upgrade_if_dos_filename($filename);

    if ($ENV{'GSDLOS'} =~ m/^windows/i) {
        # And again
        $upgraded_base_dir =~ s@(\\)+@/@g;
        $upgraded_filename =~ s@(\\)+@/@g;

        # Need to make sure there is a '/' on the end of upgraded_base_dir
        if (($upgraded_base_dir ne "") && ($upgraded_base_dir !~ m/\/$/)) {
            $upgraded_base_dir .= "/";
        }
    }
    my $upgraded_file = &util::filename_within_directory($upgraded_filename,$upgraded_base_dir);

    # reset per-doc stuff...
    $self->{'aux_files'} = {};
    $self->{'dir_num'} = 0;
    $self->{'file_num'} = 0;

    # process an HTML file where sections are divided by headings tags (H1, H2 ...)
    # you can also include metadata in the format (X can be any number)
    # <hX>Title<!--gsdl-metadata
    # <Metadata name="name1">value1</Metadata>
    # ...
    # <Metadata name="nameN">valueN</Metadata>
    #--></hX>
    if ($self->{'sectionalise_using_h_tags'}) {
        # description_tags should always be activated because we convert headings to description tags
        $self->{'description_tags'} = 1;

        my $arrSections = [];
        $$textref =~ s/<h([0-9]+)[^>]*>(.*?)<\/h[0-9]+>/$self->process_heading($1, $2, $arrSections, $upgraded_file)/isge;

        # close any sections still open when the document ends
        if (scalar(@$arrSections)) {
            my $strMetadata = $self->update_section_data($arrSections, -1);
            if (length($strMetadata)) {
                $strMetadata = '<!--' . $strMetadata . "\n-->\n</body>";
                $$textref =~ s/<\/body>/$strMetadata/ig;
            }
        }
    }

    my $cursection = $doc_obj->get_top_section();

    $self->extract_metadata ($textref, $metadata, $doc_obj, $cursection)
        unless $self->{'no_metadata'} || $self->{'description_tags'};

    # Store URL for page as metadata - this can be used for an
    # altavista style search interface. The URL won't be valid
    # unless the file structure contains the domain name (i.e.
    # like when w3mir is used to download a website).

    # URL metadata (even invalid ones) are used to support internal
    # links, so even if 'file_is_url' is off, still need to store info

    my ($tailname,$dirname) = &File::Basename::fileparse($upgraded_file);

#    my $utf8_file = $self->filename_to_utf8_metadata($file);
#    $utf8_file =~ s/&\#095;/_/g;
#    variable below used to be utf8_file

    my $url_encoded_file = &unicode::raw_filename_to_url_encoded($tailname);
    my $utf8_url_encoded_file = &unicode::raw_filename_to_utf8_url_encoded($tailname);

    my $web_url = "http://";
    my $utf8_web_url = "http://";

    if(defined $dirname) { # local directory

        # Check for "ftp" in the domain name of the directory
        # structure to determine if this URL should be a ftp:// URL
        # This check is not infallible, but better than omitting the
        # check, which would cause all files downloaded from ftp sites
        # via mirroring with wget to have potentially erroneous http:// URLs
        # assigned in their metadata
        if ($dirname =~ /^[^\/]*ftp/i)
        {
            $web_url = "ftp://";
            $utf8_web_url = "ftp://";
        }
        $dirname = $self->eval_dir_dots($dirname);
        $dirname .= &util::get_dirsep() if $dirname ne ""; # if there's a directory, it should end on "/"

        # this local directory in import may need to be URL encoded like the file
        my $url_encoded_dir = &unicode::raw_filename_to_url_encoded($dirname);
        my $utf8_url_encoded_dir = &unicode::raw_filename_to_utf8_url_encoded($dirname);

        # changed here
        $web_url = $web_url.$url_encoded_dir.$url_encoded_file;
        $utf8_web_url = $utf8_web_url.$utf8_url_encoded_dir.$utf8_url_encoded_file;
    } else {
        $web_url = $web_url.$url_encoded_file;
        $utf8_web_url = $utf8_web_url.$utf8_url_encoded_file;
    }
    $web_url =~ s/\\/\//g;
    $utf8_web_url =~ s/\\/\//g;

    if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
        print STDERR "*******DEBUG: upgraded_file: $upgraded_file\n";
        print STDERR "*******DEBUG: adding URL metadata: $utf8_url_encoded_file\n";
        print STDERR "*******DEBUG: web url: $web_url\n";
        print STDERR "*******DEBUG: utf8 web url: $utf8_web_url\n";
    }


    $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);
    $doc_obj->add_utf8_metadata($cursection, "UTF8URL", $utf8_web_url);

    if ($self->{'file_is_url'}) {
        $doc_obj->add_metadata($cursection, "weblink", "<a href=\"$web_url\">");
        $doc_obj->add_metadata($cursection, "webicon", "_iconworld_");
        $doc_obj->add_metadata($cursection, "/weblink", "</a>");
    }

    if ($self->{'description_tags'}) {
        # remove the html header - note that doing this here means any
        # sections defined within the header will be lost (so all <Section>
        # tags must appear within the body of the HTML)
        my ($head_keep) = ($$textref =~ m/^(.*?)<body[^>]*>/is);

        $$textref =~ s/^.*?<body[^>]*>//is;
        $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;

        # comment delimiters may appear literally or as escaped entities
        my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
        my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';

        my $lt = '(?:<|&lt;)';
        my $gt = '(?:>|&gt;)';
        my $quot = '(?:"|&quot;|&rdquo;|&ldquo;)';

        my $dont_strip = '';
        if ($self->{'no_strip_metadata_html'}) {
            ($dont_strip = $self->{'no_strip_metadata_html'}) =~ s{,}{|}g;
        }

        my $found_something = 0; my $top = 1;
        # consume the text comment-by-comment: hand the text before each
        # comment to process_section, then interpret any <Section> /
        # </Section> / <Metadata> markup inside the comment itself
        while ($$textref =~ s/^(.*?)$opencom(.*?)$closecom//s) {
            my $text = $1;
            my $comment = $2;
            if (defined $text) {
                # text before a comment - note that getting to here
                # doesn't necessarily mean there are Section tags in
                # the document
                $self->process_section(\$text, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
            }
            while ($comment =~ s/$lt(.*?)$gt//s) {
                my $tag = $1;
                if ($tag eq "Section") {
                    $found_something = 1;
                    $cursection = $doc_obj->insert_section($doc_obj->get_end_child($cursection)) unless $top;
                    $top = 0;
                } elsif ($tag eq "/Section") {
                    $found_something = 1;
                    $cursection = $doc_obj->get_parent_section ($cursection);
                } elsif ($tag =~ m/^Metadata name=$quot(.*?)$quot/s) {
                    my $metaname = $1;
                    my $accumulate = $tag =~ m/mode=${quot}accumulate${quot}/ ? 1 : 0;
                    $comment =~ s/^(.*?)$lt\/Metadata$gt//s;
                    my $metavalue = $1;
                    $metavalue =~ s/^\s+//;
                    $metavalue =~ s/\s+$//;
                    # assume that no metadata value intentionally includes
                    # carriage returns or HTML tags (if they're there they
                    # were probably introduced when converting to HTML from
                    # some other format).
                    # actually some people want to have html tags in their
                    # metadata.
                    $metavalue =~ s/[\cJ\cM]/ /sg;
                    $metavalue =~ s/<[^>]+>//sg
                        unless $dont_strip && ($dont_strip eq 'all' || $metaname =~ m/^($dont_strip)$/);
                    $metavalue =~ s/\s+/ /sg;
                    if ($metaname =~ /\./) { # has a namespace
                        $metaname = "ex.$metaname";
                    }
                    if ($accumulate) {
                        $doc_obj->add_utf8_metadata($cursection, $metaname, $metavalue);
                    } else {
                        $doc_obj->set_utf8_metadata_element($cursection, $metaname, $metavalue);
                    }
                } elsif ($tag eq "Description" || $tag eq "/Description") {
                    # do nothing with containing Description tags
                } else {
                    # simple HTML tag (probably created by the conversion
                    # to HTML from some other format) - we'll ignore it and
                    # hope for the best ;-)
                }
            }
        }
        if ($cursection ne "") {
            print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains unmatched <Section></Section> tags\n";
        }

        $$textref =~ s/^.*?<body[^>]*>//is;
        $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
        if ($$textref =~ m/\S/) {
            if (!$found_something) {
                if ($self->{'verbosity'} > 2) {
                    print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags so\n";
                    print $outhandle " will be processed as a single section document\n";
                }

                # go ahead and process single-section document
                $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);

                # if document contains no Section tags we'll go ahead
                # and extract metadata (this won't have been done
                # above as the -description_tags option prevents it)
                my $complete_text = $head_keep.$doc_obj->get_text($cursection);
                $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
                    unless $self->{'no_metadata'};

            } else {
                print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains the following text outside\n";
                print $outhandle " of the final closing </Section> tag. This text will\n";
                print $outhandle " be ignored.";

                my ($text);
                if (length($$textref) > 30) {
                    $text = substr($$textref, 0, 30) . "...";
                } else {
                    $text = $$textref;
                }
                $text =~ s/\n/ /isg;
                print $outhandle " ($text)\n";
            }
        } elsif (!$found_something) {

            if ($self->{'verbosity'} > 2) {
                # may get to here if document contained no valid Section
                # tags but did contain some comments. The text will have
                # been processed already but we should print the warning
                # as above and extract metadata
                print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags and\n";
                print $outhandle " is blank or empty. Metadata will be assigned if present.\n";
            }

            my $complete_text = $head_keep.$doc_obj->get_text($cursection);
            $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
                unless $self->{'no_metadata'};
        }
        $self->replace_section_links($doc_obj);
    } else {

        # remove header and footer
        if (!$self->{'keep_head'} || $self->{'description_tags'}) {
            $$textref =~ s/^.*?<body[^>]*>//is;
            $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
        }

        $self->{'css_assoc_files'} = {};

        # single section document
        $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);

        #my $upgraded_filename_dirname = &File::Basename::dirname($upgraded_filename);

        $self->acquire_css_associated_files($doc_obj, $cursection);

        $self->{'css_assoc_files'} = {};
    }

    return 1;
}
666
667
# Substitution callback for the -sectionalise_using_h_tags pass: converts
# one <hN> heading into Greenstone <Section>/<Description> comment markup.
# Any <!--gsdl-metadata ...--> blocks embedded in the heading text are
# extracted and emitted as additional metadata for the new section.
sub process_heading
{
    my ($self, $nHeadNo, $strHeadingText, $arrSections, $file) = @_;
    $strHeadingText = '' unless defined $strHeadingText;

    # First close any sections at the same or a deeper level than this
    # heading, updating the open-section stack as we go.
    my $strMetadata = $self->update_section_data($arrSections, int($nHeadNo));

    # Pull embedded <!--gsdl-metadata ...--> blocks out of the heading.
    my $strSecMetadata = '';
    $strSecMetadata .= $1
        while $strHeadingText =~ s/<!--gsdl-metadata(.*?)-->//is;

    # Trim surrounding whitespace from both pieces.
    $strHeadingText =~ s/^\s+//;
    $strHeadingText =~ s/\s+$//;
    $strSecMetadata =~ s/^\s+//;
    $strSecMetadata =~ s/\s+$//;

    # Open the new section, with the heading text as its Title metadata.
    $strMetadata .= "\n<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">"
        . $strHeadingText . "</Metadata>\n";

    $strMetadata .= "\t\t" . $strSecMetadata . "\n" if length($strSecMetadata);

    $strMetadata .= "\t</Description>\n";

    return "<!--" . $strMetadata . "-->";
}
696
697
# Maintain the stack of currently-open heading levels ($arrSections) when a
# heading of level $nCurTocNo is encountered.  Returns the "</Section>"
# markup needed to close any sections open at the same or a deeper level;
# the new level is always pushed onto the stack before returning.
sub update_section_data
{
    my ($self, $arrSections, $nCurTocNo) = @_;

    my $strBuffer = '';
    my $nSections = scalar(@$arrSections);

    # Empty stack, or a strictly deeper heading: nothing to close.
    if ($nSections == 0 || $nCurTocNo > $arrSections->[-1]) {
        push @$arrSections, $nCurTocNo;
        return $strBuffer;
    }

    # Same-or-shallower heading: close each open section whose level is at
    # or below the new heading's level, popping it off the stack.
    for my $i (reverse 0 .. $nSections - 1) {
        if ($nCurTocNo <= $arrSections->[$i]) {
            $strBuffer .= "\n</Section>";
            pop @$arrSections;
        }
    }

    push @$arrSections, $nCurTocNo;
    return $strBuffer;
}
721
722
# note that process_section may be called multiple times for a single
# section (relying on the fact that add_utf8_text appends the text to any
# that may exist already).
#
# Rewrites usemap/href/src/background references in one section's text so
# they work inside the built collection, harvests CSS referenced from
# inline <style> blocks, then appends the text to the doc_obj.
sub process_section {
    my $self = shift (@_);
    my ($textref, $base_dir, $file, $doc_obj, $cursection) = @_;

    # collect url(...) references from inline <style> blocks so their
    # assets can be associated with the document later
    my @styleTagsText = ($$textref =~ m/<style[^>]*>([^<]*)<\/style>/sg);
    if(scalar(@styleTagsText) > 0)
    {
        my $css_filename_dirname = &File::Basename::dirname(&FileUtils::filenameConcatenate($base_dir, $file));
        foreach my $styleText (@styleTagsText)
        {
            $self->acquire_css_associated_files_from_text_block($styleText, $css_filename_dirname);
        }
    }

    # trap links
    if (!$self->{'nolinks'}) {
        # usemap="./#index" not handled correctly => change to "#index"
##      $$textref =~ s/(<img[^>]*?usemap\s*=\s*[\"\']?)([^\"\'>\s]+)([\"\']?[^>]*>)/

##      my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
##      my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';

        $$textref =~ s/(<img[^>]*?usemap\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
            $self->replace_usemap_links($1, $2, $3)/isge;

        $$textref =~ s/(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
            $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;

##      $$textref =~ s/($opencom.*?)?+(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)(.*?$closecom)?+/
#           $self->replace_href_links ($1, $2, $3, $4, $5, $base_dir, $file, $doc_obj, $cursection)/isge;
    }

    # trap images

    # Previously, by default, HTMLPlugin would embed <img> tags inside anchor tags
    # i.e. <a href="image><img src="image"></a> in order to overcome a problem that
    # turned regular text succeeding images into links. That is, by embedding <imgs>
    # inside <a href=""></a>, the text following images were no longer misbehaving.
    # However, there would be many occasions whereby images were not meant to link
    # to their source images but where the images would link to another web page.
    # To allow this, the no_image_links option was introduced: it would prevent
    # the behaviour of embedding images into links that referenced the source images.

    # Somewhere along the line, the problem of normal text turning into links when
    # such text followed images which were not embedded in <a href=""></a> ceased
    # to occur. This is why the following lines have been commented out (as well as
    # two lines in replace_images). They appear to no longer apply.

    # If at any time, there is a need for having images embedded in <a> anchor tags,
    # then it might be better to turn that into an HTMLPlugin option rather than make
    # it the default behaviour. Also, eventually, no_image_links needs to become
    # a deprecated option for HTMLPlugin as it has now become the default behaviour.

    #if(!$self->{'no_image_links'}){
    $$textref =~ s/(<(?:img|embed|table|tr|td)[^>]*?(?:src|background)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
        $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
    #}

    # add text to document object
    # turn \ into \\ so that the rest of greenstone doesn't think there
    # is an escape code following. (Macro parsing loses them...)
    $$textref =~ s/\\/\\\\/go;

    $doc_obj->add_utf8_text($cursection, $$textref);
}
791
# Substitution callback (from process_section) for <img>/<embed>/table
# background references.  $front / $link / $back are the three captured
# pieces of the matched tag.  Associates the referenced file with the
# document via add_file and returns the rewritten tag text.
sub replace_images {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir,
        $file, $doc_obj, $section) = @_;

    # remove quotes from link at start and end if necessary
    # (re-quote the surrounding attribute with double quotes)
    if ($link=~/^[\"\']/) {
        $link=~s/^[\"\']//;
        $link=~s/[\"\']$//;
        $front.='"';
        $back="\"$back";
    }

    # embedded newlines in an attribute value become spaces
    $link =~ s/\n/ /g;

    # Hack to overcome Windows wv 0.7.1 bug that causes embedded images to be broken
    # If the Word file path has spaces in it, wv messes up and you end up with
    # absolute paths for the images, and without the "file://" prefix
    # So check for this special case and massage the data to be correct
    if ($ENV{'GSDLOS'} =~ m/^windows/i && $self->{'plugin_type'} eq "WordPlug" && $link =~ m/^[A-Za-z]\:\\/) {
        # keep only the last path component of the bogus absolute path
        $link =~ s/^.*\\([^\\]+)$/$1/;
    }

    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # associate the image with the document; returns the name to use in the tag
    my $img_file = $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section);

#    print STDERR "**** link = $link\n**** href = $href\n**** img_file = $img_file, rl = $rl\n\n";

    my $anchor_name = $img_file;
    #$anchor_name =~ s/^.*\///;
    #$anchor_name = "<a name=\"$anchor_name\" ></a>";

    my $image_link = $front . $img_file .$back;
    return $image_link;

    # The reasons for why the following two lines are no longer necessary can be
    # found in subroutine process_section
    #my $anchor_link = "<a href=\"$img_file\" >".$image_link."</a>";
    #return $anchor_link;

    #return $front . $img_file . $back . $anchor_name;
}
835
# Rewrite one href (or similar) attribute captured from the document text.
# $front/$back are the attribute text before and after the link value and
# $link is the raw value (possibly still quoted).  Depending on what the
# link points at it is left alone, turned into a Greenstone
# _httpextlink_/_httpsamepagelink_ macro, or the target is attached to the
# document as an associated file.  Returns the replacement attribute text.
sub replace_href_links {
    my $self = shift (@_);
    my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;

    # A <link ... href="....css"> stylesheet reference: also harvest any
    # images the stylesheet itself refers to.
    if($front =~ m/^<link / && $link =~ m/\.css"$/)
    {
	my $actual_link = $link;
	$actual_link =~ s/^"(.*)"$/$1/;

	my $directory = &File::Basename::dirname($file);

	my $css_filename = &FileUtils::filenameConcatenate($base_dir, $directory, $actual_link);
	$self->retrieve_css_associated_files($css_filename);
    }

    # remove quotes from link at start and end if necessary
    if ($link=~/^[\"\']/) {
	$link=~s/^[\"\']//;
	$link=~s/[\"\']$//;
	$front.='"';
	$back="\"$back";
    }

    # can't remember adding this :-( must have had a reason though...
    if ($link =~ /^\_http/ || $link =~ /^\_libraryname\_/) {
	# assume it is a greenstone one and leave alone
	return $front . $link . $back;
    }

    # attempt to sort out targets - frames are not handled
    # well in this plugin and some cases will screw things
    # up - e.g. the _parent target (so we'll just remove
    # them all ;-)
    $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
    $front =~ s/target=\"?_parent\"?//is;
    $back =~ s/target=\"?_parent\"?//is;

    # Same-page anchor link (#...): mark it for the later
    # replace_section_links pass.
    if($link =~ m/^\#/s)
    {
	return $front . "_httpsamepagelink_" . $link . $back;
    }

    $link =~ s/\n/ /g;

    # Find file referred to by $link on file system
    # This is more complicated than it sounds when char encodings
    # is taken in to account
    my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);

    # href may use '\'s where '/'s should be on Windows
    $href =~ s/\\/\//g;

    # Strip the protocol (and optional //) to get a bare filename for the
    # process_exp test below.
    my ($filename) = $href =~ m/^(?:.*?):(?:\/\/)?(.*)/;

    ##### leave all these links alone (they won't be picked up by intermediate
    ##### pages). I think that's safest when dealing with frames, targets etc.
    ##### (at least until I think of a better way to do it). Problems occur with
    ##### mailto links from within small frames, the intermediate page is displayed
    ##### within that frame and can't be seen. There is still potential for this to
    ##### happen even with html pages - the solution seems to be to somehow tell
    ##### the browser from the server side to display the page being sent (i.e.
    ##### the intermediate page) in the top level window - I'm not sure if that's
    ##### possible - the following line should probably be deleted if that can be done
    return $front . $link . $back if $href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/is;

    # External link, a link to another document this plugin would process,
    # or a directory link: emit an _httpextlink_ macro.
    if (($rl == 0) || ($filename =~ m/$self->{'process_exp'}/) ||
	($href =~ m/\/$/) || ($href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i)) {

	# NOTE(review): this GSDLOS test has no /i modifier unlike the
	# other checks in this file; GSDLOS appears to always be lowercase
	# "windows" so the branch still matches — confirm before changing.
	if ($ENV{'GSDLOS'} =~ m/^windows$/) {

	    # Don't do any encoding for now, as not clear what
	    # the right thing to do is to support filename
	    # encoding on Windows when they are not UTF16
	    #
	}
	else {
	    # => Unix-based system

	    # If web page didn't give encoding, then default to utf8
	    my $content_encoding= $self->{'content_encoding'} || "utf8";

	    if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
		print STDERR "**** Encoding with '$content_encoding', href: $href\n";
	    }

	    # on Darwin, the unicode filenames are stored on the file
	    # system in decomposed form, so any href link (including when
	    # URL-encoded) should refer to the decomposed name of the file
	    if ($ENV{'GSDLOS'} =~ /^darwin$/i) {
		$href = normalize('D', $href); # Normalization Form D (decomposition)
	    }

	    # Convert the Perl unicode-aware string back to the raw bytes
	    # used on the file system.
	    $href = encode($content_encoding,$href);
	}

	# URL-encode the raw filename, then escape any '%' so the result
	# survives a second round of URL decoding in the browser.
	$href = &unicode::raw_filename_to_utf8_url_encoded($href);
	$href = &unicode::filename_to_url($href);

	&ghtml::urlsafe ($href);

	if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
	    print STDERR "******DEBUG: href=$href\n";
	}

	#TODO here
#	if ($rl ==1) {
	    # have a relative link, we need to do URL encoding etc so it matches what has happened for that file
	    #$href = &util::rename_file($href, $self->{'file_rename_method'});
#	    $href = &unicode::raw_filename_to_url_encoded($href);
	    # then, this might be url encoded, so we replace % with %25
#	    $href = &unicode::filename_to_url($href);
#	    print STDERR "DEBUG: url encoded href = $href\n";
#	}

	return $front . "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part . $back;
    } else {
	# link is to some other type of file (e.g., an image) so we'll
	# need to associate that file
	return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
    }
}
957
# Read a CSS file referenced by the document and record any images it
# refers to (background-image declarations) so they can later be attached
# to the document as associated files.  Silently returns if the CSS file
# cannot be opened (a missing stylesheet is not fatal to import).
sub retrieve_css_associated_files {
    my $self = shift (@_);
    my ($css_filename) = @_;

    my $css_filename_dirname = &File::Basename::dirname($css_filename);

    # Use a lexical filehandle with 3-arg open: the previous bareword
    # CSSFILE handle with 2-arg open was vulnerable to mode injection
    # through the filename and leaked a global handle.
    open (my $css_fh, '<', $css_filename) || return;

    # Slurp the whole stylesheet in one read (-s gives its size in bytes).
    sysread ($css_fh, my $file_string, -s $css_fh);

    $self->acquire_css_associated_files_from_text_block($file_string, $css_filename_dirname) unless !defined $file_string;

    close $css_fh;
}
971
# Scan a block of CSS text for background-image url(...) declarations and
# record each referenced image in $self->{'css_assoc_files'}, keyed by its
# full path (rooted at the stylesheet's directory) with the original url
# text as the value.
sub acquire_css_associated_files_from_text_block {
    my $self = shift (@_);
    my ($text, $css_filename_dirname) = @_;

    # Each element is a whole "background-image: url(...);" declaration.
    my @url_decls = ($text =~ m/background-image:\s*url[^;]*;/sg);

    foreach my $decl (@url_decls)
    {
	# Reduce the declaration to the bare url argument, stripping the
	# parentheses, surrounding whitespace and any double quotes.
	$decl =~ s/^.*url.*\((.*)\).*$/$1/;
	$decl =~ s/^\s*"?([^"]*)"?\s*$/$1/;

	$self->{'css_assoc_files'}->{&FileUtils::filenameConcatenate($css_filename_dirname, $decl)} = $decl;
    }
}
985
# Attach every image discovered in the collection's CSS files (recorded by
# acquire_css_associated_files_from_text_block) to the document as an
# associated file of the given section.
sub acquire_css_associated_files {
    my $self = shift(@_);
    my ($doc_obj, $section) = @_;

    my $css_files = $self->{'css_assoc_files'};
    foreach my $image_filename (keys %{$css_files})
    {
	$doc_obj->associate_file($image_filename, $css_files->{$image_filename}, undef, $section);
    }
}
996
# Given a resolved link ($href, with optional $hash_part fragment and the
# $rl relative-link flag from format_link), locate the real file on disk,
# attach it to the document as an associated file, and return the text to
# substitute into the document: either an _httpdocimg_/... path for an
# associated file or an _httpextlink_ macro for anything that should stay
# an external link.
sub add_file {
    my $self = shift (@_);
    my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;
    my ($newname);

    # Strip the protocol prefix off the href to get a file-system path.
    my $filename = $href;
    if ($base_dir eq "") {
	if ($ENV{'GSDLOS'} =~ m/^windows$/i) {
	    # remove http://
	    $filename =~ s/^[^:]*:\/\///;
	}
	else {
	    # remove http:/ thereby leaving one slash at the start as
	    # part of full pathname
	    $filename =~ s/^[^:]*:\///;
	}
    }
    else {
	# remove http://
	$filename =~ s/^[^:]*:\/\///;
    }

    # Use native directory separators on Windows.
    if ($ENV{'GSDLOS'} =~ m/^windows$/i) {
	$filename =~ s@\/@\\@g;
    }

    $filename = &FileUtils::filenameConcatenate($base_dir, $filename);

    if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'})) {
	# we are processing a tidytmp file - want paths to be in import
	$filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
    }

    # Replace %XX's in URL with decoded value if required. Note that the
    # filename may include the %XX in some situations. If the *original*
    # file's name was in URL encoding, the following method will not decode
    # it.
    my $unicode_filename = $filename;
    my $opt_decode_unicode_filename = $self->opt_url_decode($unicode_filename);

    # wvWare can generate <img src="StrangeNoGraphicData"> tags, but with no
    # (it seems) accompanying file
    if ($opt_decode_unicode_filename =~ m/StrangeNoGraphicData$/) { return ""; }

    my $content_encoding= $self->{'content_encoding'} || "utf8";

    # Map the Perl unicode-aware filename string back to the raw bytes
    # actually stored on the file system; this is OS-specific.
    if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
	# The filenames that come through the HTML file have been decoded
	# into Unicode aware Perl strings. Need to convert them back
	# to their initial raw-byte encoding to match the file that
	# exists on the file system
	$filename = encode($content_encoding, $opt_decode_unicode_filename);

    }
    elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
	# HFS+ is UTF8 with decompostion
	$filename = encode($content_encoding, $opt_decode_unicode_filename);
	$filename = normalize('D', $filename); # Normalization Form D (decomposition)

    }
    elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
	my $long_filename = Win32::GetLongPathName($opt_decode_unicode_filename);

	if (defined $long_filename) {
	    # NOTE(review): this calls GetLongPathName a second time even
	    # though the variable is named short_filename — possibly
	    # Win32::GetShortPathName was intended; confirm before
	    # changing (commit log says Windows handling is still WIP).
	    my $short_filename = Win32::GetLongPathName($long_filename);
	    $filename = $short_filename;
	}
#	else {
#	    print STDERR "***** failed to map href to real file:\n";
#	    print STDERR "****** $href -> $opt_decode_unicode_filename\n";
#	}
    }
    else {
	my $outhandle = $self->{'outhandle'};
	print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
	print $outhandle "         in file system encoding of href: $href\n";
	print $outhandle "         No character encoding done.\n";
    }


    # some special processing if the intended filename was converted to utf8, but
    # the actual file still needs to be renamed
    if (!&FileUtils::fileExists($filename)) {
	# try the original filename stored in map
	if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
	    print STDERR "******!! orig filename did not exist: $filename\n";
	}

##	print STDERR "**** trying to look up unicode_filename: $unicode_filename\n";

	my $original_filename = $self->{'unicode_to_original_filename'}->{$unicode_filename};

	if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
	    print STDERR "****** From lookup unicode_filename, now trying for: $original_filename\n";
	}

	if (defined $original_filename && -e $original_filename) {
	    if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
		print STDERR "****** Found match!\n";
	    }
	    $filename = $original_filename;
	}
    }

    # File extension decides whether this becomes an associated file or
    # stays an external link (assoc_files is a regexp of extensions).
    my ($ext) = $filename =~ m/(\.[^\.]*)$/;

    if ($rl == 0) {
	if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
	    return "_httpextlink_&amp;rl=0&amp;el=prompt&amp;href=" . $href . $hash_part;
	}
	else {
	    return "_httpextlink_&amp;rl=0&amp;el=direct&amp;href=" . $href . $hash_part;
	}
    }

    if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
	return "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part;
    }
    # add the original image file as a source file
    if (!$self->{'processing_tmp_files'} ) {
	$doc_obj->associate_source_file($filename);
    }
    if ($self->{'rename_assoc_files'}) {
	# Numbered-directory renaming scheme: reuse the number already
	# allocated to this href, or allocate the next one.
	if (defined $self->{'aux_files'}->{$href}) {
	    $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
		$self->{'aux_files'}->{$href}->{'file_num'} . $ext;
	} else {
	    $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
	    $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
	    $self->inc_filecount ();
	}
	$doc_obj->associate_file($filename, $newname, undef, $section);
	return "_httpdocimg_/$newname";
    } else {
	if(&unicode::is_url_encoded($unicode_filename)) {
	    # use the possibly-decoded filename instead to avoid double URL encoding
	    ($newname) = $filename =~ m/([^\/\\]*)$/;
	} else {
	    ($newname) = $unicode_filename =~ m/([^\/\\]*)$/;
	}

	# Make sure this name uses only ASCII characters.
	# We use either base64 or URL encoding, as these preserve original encoding
	$newname = &util::rename_file($newname, $self->{'file_rename_method'});

###	print STDERR "***** associating $filename (raw-byte/utf8)-> $newname\n";
	$doc_obj->associate_file($filename, $newname, undef, $section);

	# Since the generated image will be URL-encoded to avoid file-system/browser mess-ups
	# of filenames, URL-encode the additional percent signs of the URL-encoded filename
	my $newname_url = $newname;
	$newname_url = &unicode::filename_to_url($newname_url);
	return "_httpdocimg_/$newname_url";
    }
}
1152
# Two-pass fix-up of same-page anchor links across the whole document.
# Pass 1 records which section declares each id/name anchor; pass 2
# rewrites every _httpsamepagelink_ href (planted by replace_href_links)
# so that links whose target anchor lives in a different section go
# through javascript goToAnchor() instead.
sub replace_section_links {
    my $self = shift(@_);
    my ($doc_obj) = @_;

    my $top_section = $doc_obj->get_top_section();

    # Pass 1: map anchor identifier -> declaring section (top section is
    # skipped, matching the section traversal used at build time).
    my %anchor_in_section;
    for (my $sec = $doc_obj->get_next_section($top_section);
	 defined $sec;
	 $sec = $doc_obj->get_next_section($sec)) {
	my $sec_text = $doc_obj->get_text($sec);
	while ($sec_text =~ /(?:(?:id|name)\s*=\s*[\'\"])([^\'\"]+)/gi) {
	    $anchor_in_section{$1} = $sec;
	}
    }

    # Pass 2: rewrite the links section by section, replacing each
    # section's stored text with the fixed-up version.
    for (my $sec = $top_section;
	 defined $sec;
	 $sec = $doc_obj->get_next_section($sec)) {
	my $sec_text = $doc_obj->get_text($sec);
	$sec_text =~ s/(href\s*=\s*[\"\'])(_httpsamepagelink_#)([^\'\"]+)/$self->replace_link_to_anchor($1,$2,$3,$sec,$anchor_in_section{$3})/ige;
	$doc_obj->delete_text($sec);
	$doc_obj->add_utf8_text($sec, $sec_text);
    }
}
# Build the replacement for one same-page anchor link.  If the target
# anchor was found in a different section, emit a javascript
# goToAnchor(section, anchor) call; otherwise (same section, or anchor not
# found) restore the original link text unchanged.
sub replace_link_to_anchor {
    my $self = shift(@_);
    my ($prefix, $orig_link, $anchor_id, $cur_sec, $dest_sec) = @_;

    # Cross-section anchor: route through the javascript helper.
    if (length $dest_sec && $cur_sec ne $dest_sec) {
	return $prefix . "javascript:goToAnchor('$dest_sec','$anchor_id');";
    }

    # Same-section (or unresolved) anchor: leave the link as it was.
    return $prefix . $orig_link . $anchor_id;
}
1183
# Resolve a raw link value from the document into
# ($href, $hash_part, $rl): $href is the link rewritten with a protocol
# prefix, $hash_part is any trailing #fragment, and $rl is 1 when the link
# resolves to a file that exists under $base_dir (a "relative link"),
# 0 otherwise.  NOTE(review): the print STDERR calls below look like
# leftover work-in-progress debugging (see the TODO markers); they are
# preserved here unchanged.
sub format_link {
    my $self = shift (@_);
    my ($link, $base_dir, $file) = @_;

    # strip off hash part, e.g. #foo, but watch out for any entities, e.g. &#x3B1;
    my ($before_hash, $hash_part) = $link =~ m/^(.*?[^&])(\#.*)?$/;

    $hash_part = "" if !defined $hash_part;
    if (!defined $before_hash || $before_hash !~ m/[\w\.\/]/) {
	my $outhandle = $self->{'outhandle'};
	print $outhandle "HTMLPlugin: ERROR - badly formatted tag ignored ($link)\n"
	    if $self->{'verbosity'};
	return ($link, "", 0);
    }

#    my $dirname;

    # Case 1: the link already carries an explicit protocol.
    if ($before_hash =~ s@^((?:http|https|ftp|file|mms)://)@@i) {
	my $type = $1;
	my $before_hash_file = $before_hash;

	if ($link =~ m/^(http|ftp):/i) {

	    # Turn url (using /) into file name (possibly using \ on windows)
	    my @http_dir_split = split('/', $before_hash_file);
	    $before_hash_file = &FileUtils::filenameConcatenate(@http_dir_split);
	}

	# want to maintain two version of "before_hash": one representing the URL, the other using filesystem specific directory separator
	$before_hash_file = $self->eval_dir_dots($before_hash_file);
	my $before_hash_url = $before_hash_file;
	if ($ENV{'GSDLOS'} =~ /^windows$/i) {
	    $before_hash_url =~ s@\\@\/@g;
	}

	######## TODO need to check this for encoding stufff
	my $linkfilename = &FileUtils::filenameConcatenate($base_dir, $before_hash_file);
	print STDERR "chekcing for existence whether relative link or not $linkfilename\n";
	# A protocol-prefixed link is still "relative" if the named file
	# exists inside the collection's import area.
	my $rl = 0;
	$rl = 1 if (-e $linkfilename);
	if (-e $linkfilename) {

	    print STDERR "DOES exist $linkfilename\n";
	} else {
	    print STDERR "DOESN'T exist $linkfilename\n";
	}
	# make sure there's a slash on the end if it's a directory
	if ($before_hash_url !~ m/\/$/) {
	    $before_hash_url .= "/" if (-d $linkfilename);
	}
	return ($type . $before_hash_url, $hash_part, $rl);

    # Case 2: a path without a protocol that is not a mailto/news/etc
    # pseudo-protocol and does not start with '/' after stripping.
    } elsif ($link !~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i && $link !~ m/^\//) {

	#### TODO whst is this test doing???
	if ($before_hash =~ s@^/@@ || $before_hash =~ m/\\/) {

	    # the first directory will be the domain name if file_is_url
	    # to generate archives, otherwise we'll assume all files are
	    # from the same site and base_dir is the root

	    if ($self->{'file_is_url'}) {
		my @dirs = split /[\/\\]/, $file;
		my $domname = shift (@dirs);
		$before_hash = &FileUtils::filenameConcatenate($domname, $before_hash);
		$before_hash =~ s@\\@/@g; # for windows
	    }
	    else
	    {
		# see if link shares directory with source document
		# => turn into relative link if this is so!

		if ($ENV{'GSDLOS'} =~ m/^windows/i) {
		    # too difficult doing a pattern match with embedded '\'s...
		    my $win_before_hash=$before_hash;
		    $win_before_hash =~ s@(\\)+@/@g;
		    # $base_dir is already similarly "converted" on windows.
		    if ($win_before_hash =~ s@^$base_dir/@@o) {
			# if this is true, we removed a prefix
			$before_hash=$win_before_hash;
		    }
		}
		else {
		    # before_hash has lost leading slash by this point,
		    # -> add back in prior to substitution with $base_dir
		    $before_hash = "/$before_hash";

		    $before_hash = &FileUtils::filenameConcatenate("",$before_hash);
		    $before_hash =~ s@^$base_dir/@@;
		}
	    }
	} else {

	    # Turn relative file path into full path (inside import dir)
	    my $dirname = &File::Basename::dirname($file);

	    # we want to add dirname (which is raw filesystem path) onto $before_hash, (which is perl unicode aware string). Convert dirname to perl string

	    my $unicode_dirname ="";
	    #my $content_encoding = $self->{'content_encoding'};
	    #my $language = $self->{'language'};

	    # actually I think this is wrong. why should we use content encoding?
	    #$self->decode_text($dirname, $content_encoding, $language, \$unicode_dirname);
	    #my $filename_encoding = $self->{'filename_encoding'};
	    # filename_encoding might be auto...

	    # TODO what is the best thing to do here?????
	    # try and guess default filesystem encoding, similar to deduce_filename_encoding, but without a file?
	    my $filename_encoding = "utf8";
	    # copied this from set_Source_metadata in BasePlugin
	    if ((defined $filename_encoding) && ($filename_encoding ne "ascii")) {
		# Use filename_encoding to map raw filename to a Perl unicode-aware string
		$unicode_dirname = decode($filename_encoding,$dirname);
	    }
	    else {
		# otherwise generate %xx encoded version of filename for char > 127
		$unicode_dirname = &unicode::raw_filename_to_url_encoded($dirname);
	    }

	    $before_hash = &FileUtils::filenameConcatenate($unicode_dirname, $before_hash);
	    $before_hash = $self->eval_dir_dots($before_hash);
	    $before_hash =~ s@\\@/@g; # for windows
	}

	my $linkfilename = &FileUtils::filenameConcatenate($base_dir, $before_hash);

	# make sure there's a slash on the end if it's a directory
	if ($before_hash !~ m/\/$/) {
	    $before_hash .= "/" if (-d $linkfilename);
	}
	# Relative links are always reported with an http:// prefix and rl=1.
	return ("http://" . $before_hash, $hash_part, 1);
    } else {
	# mailto, news, nntp, telnet, javascript or gopher link
	return ($before_hash, "", 0);
    }
}
1320
# For each size N in the comma-separated 'first' option, store the first
# N characters of the document's plain text as "FirstN" metadata on the
# given section, replacing the final (possibly cut-off) word with an
# ellipsis entity.
sub extract_first_NNNN_characters {
    my $self = shift (@_);
    my ($textref, $doc_obj, $thissection) = @_;

    foreach my $len (split /,/, $self->{'first'}) {
	my $excerpt = $$textref;

	# Reduce the HTML to collapsed plain text: skip to the body, drop
	# scripts and tags, then normalise all whitespace to single spaces.
	$excerpt =~ s/.*<body[^>]*>//i;
	$excerpt =~ s@<script.*?</script>@ @sig;
	$excerpt =~ s/<[^>]*>/ /g;
	$excerpt =~ s/&nbsp;/ /g;
	$excerpt =~ s/^\s+//;
	$excerpt =~ s/\s+$//;
	$excerpt =~ s/\s+/ /gs;

	# Truncate (unicode-aware) and swap the trailing partial word for
	# an ellipsis (&#8230; is "...").
	$excerpt = &unicode::substr ($excerpt, 0, $len);
	$excerpt =~ s/\s\S*$/&#8230;/;

	$doc_obj->add_utf8_metadata ($thissection, "First$len", $excerpt);
    }
}
1341
1342
# Extract metadata from the HTML <head>'s <meta> tags (and, failing that
# for Title, the <title> tag or the first 100 characters of text), guided
# by the plugin's metadata_fields option.  Also handles the
# hunt_creator_metadata option (mapping author-like tags to "Creator"),
# an optional value separator, and the special "tagH1"-style fields that
# capture the body of a named HTML element.
sub extract_metadata {
    my $self = shift (@_);
    my ($textref, $metadata, $doc_obj, $section) = @_;
    my $outhandle = $self->{'outhandle'};
    # if we don't want metadata, we may as well not be here ...
    return if (!defined $self->{'metadata_fields'});

    # An empty separator option means "no splitting of values".
    my $separator = $self->{'metadata_field_separator'};
    if ($separator eq "") {
	undef $separator;
    }

    # metadata fields to extract/save. 'key' is the (lowercase) name of the
    # html meta, 'value' is the metadata name for greenstone to use
    my %find_fields = ();

    my %creator_fields = (); # short-cut for lookups


    foreach my $field (split /,/, $self->{'metadata_fields'}) {
	$field =~ s/^\s+//; # remove leading whitespace
	$field =~ s/\s+$//; # remove trailing whitespace

	# support tag<tagname>
	if ($field =~ m/^(.*?)\s*<(.*?)>$/) {
	    # "$2" is the user's preferred gs metadata name
	    $find_fields{lc($1)}=$2; # lc = lowercase
	} else { # no <tagname> for mapping
	    # "$field" is the user's preferred gs metadata name
	    $find_fields{lc($field)}=$field; # lc = lowercase
	}
    }

    if (defined $self->{'hunt_creator_metadata'} &&
	$self->{'hunt_creator_metadata'} == 1 ) {
	my @extra_fields =
	(
	 'author',
	 'author.email',
	 'creator',
	 'dc.creator',
	 'dc.creator.corporatename',
	);

	# add the creator_metadata fields to search for
	foreach my $field (@extra_fields) {
	    $creator_fields{$field}=0; # add to lookup hash
	}
    }


    # find the header in the html file, which has the meta tags
    # NOTE(review): if there is no <head>...</head>, $1 is unset and
    # $html_header below is undef — the while loop then simply does not
    # run; confirm this is the intended behaviour.
    $$textref =~ m@<head>(.*?)</head>@si;

    my $html_header=$1;

    # go through every <meta... tag defined in the html and see if it is
    # one of the tags we want to match.

    # special case for title - we want to remember if its been found
    my $found_title = 0;
    # this assumes that ">" won't appear. (I don't think it's allowed to...)
    $html_header =~ m/^/; # match the start of the string, for \G assertion

    while ($html_header =~ m/\G.*?<meta(.*?)>/sig) {
	my $metatag=$1;
	my ($tag, $value);

	# find the tag name
	$metatag =~ m/(?:name|http-equiv)\s*=\s*([\"\'])?(.*?)\1/is;
	$tag=$2;
	# in case they're not using " or ', but they should...
	if (! $tag) {
	    $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
	    $tag=$1;
	}

	if (!defined $tag) {
	    print $outhandle "HTMLPlugin: can't find NAME in \"$metatag\"\n";
	    next;
	}

	# don't need to assign this field if it was passed in from a previous
	# (recursive) plugin
	if (defined $metadata->{$tag}) {next}

	# find the tag content
	$metatag =~ m/content\s*=\s*([\"\'])?(.*?)\1/is;
	$value=$2;

	# The following code assigns the metaname to value if value is
	# empty. Why would we do this?
	#if (! $value) {
	#    $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
	#    $value=$1;
	#}
	if (!defined $value || $value eq "") {
	    print $outhandle "HTMLPlugin: can't find VALUE in <meta $metatag >\n" if ($self->{'verbosity'} > 2);
	    next;
	}

	# clean up and add
	$value =~ s/\s+/ /gs;
	chomp($value); # remove trailing \n, if any
	if (exists $creator_fields{lc($tag)}) {
	    # map this value onto greenstone's "Creator" metadata
	    $tag='Creator';
	} elsif (!exists $find_fields{lc($tag)}) {
	    next; # don't want this tag
	} else {
	    # get the user's preferred capitalisation
	    $tag = $find_fields{lc($tag)};
	}
	if (lc($tag) eq "title") {
	    $found_title = 1;
	}

	if ($self->{'verbosity'} > 2) {
	    print $outhandle " extracted \"$tag\" metadata \"$value\"\n";
	}

	if ($tag =~ /\./) {
	    # there is a . so has a namespace, add ex.
	    $tag = "ex.$tag";
	}
	# Split multi-valued fields on the configured separator; otherwise
	# store the whole value as one metadata entry.
	if (defined $separator) {
	    my @values = split($separator, $value);
	    foreach my $v (@values) {
		$doc_obj->add_utf8_metadata($section, $tag, $v) if $v =~ /\S/;
	    }
	}
	else {
	    $doc_obj->add_utf8_metadata($section, $tag, $value);
	}
    }

    # TITLE: extract the document title
    if (exists $find_fields{'title'} && !$found_title) {
	# we want a title, and didn't find one in the meta tags
	# see if there's a <title> tag
	my $title;
	my $from = ""; # for debugging output only
	if ($html_header =~ m/<title[^>]*>([^<]+)<\/title[^>]*>/is) {
	    $title = $1;
	    $from = "<title> tags";
	}

	if (!defined $title) {
	    $from = "first 100 chars";
	    # if no title use first 100 or so characters
	    $title = $$textref;
	    $title =~ s/^\xFE\xFF//; # Remove unicode byte order mark
	    $title =~ s/^.*?<body>//si;
	    # ignore javascript!
	    $title =~ s@<script.*?</script>@ @sig;
	    $title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
	    $title =~ s/<[^>]*>/ /g; # remove all HTML tags
	    $title =~ s@\r@@g; # remove Windows carriage returns to ensure that titles of pdftohtml docs are consistent (the same 100 chars) across windows and linux
	    $title = substr ($title, 0, 100);
	    $title =~ s/\s\S*$/.../;
	}
	$title =~ s/<[^>]*>/ /g; # remove html tags
	# NOTE(review): the plain &nbsp; substitution below is subsumed by
	# the alternation on the next line — harmless but redundant.
	$title =~ s/&nbsp;/ /g;
	$title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
	$title =~ s/\s+/ /gs; # collapse multiple spaces
	$title =~ s/^\s*//;   # remove leading spaces
	$title =~ s/\s*$//;   # remove trailing spaces

	$title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
	$title =~ s/^\s+//s; # in case title_sub introduced any...
	$doc_obj->add_utf8_metadata ($section, "Title", $title);
	print $outhandle " extracted Title metadata \"$title\" from $from\n"
	    if ($self->{'verbosity'} > 2);
    }

    # add FileFormat metadata
    $doc_obj->add_metadata($section,"FileFormat", "HTML");

    # Special, for metadata names such as tagH1 - extracts
    # the text between the first <H1> and </H1> tags into "H1" metadata.

    foreach my $field (keys %find_fields) {
	if ($field !~ m/^tag([a-z0-9]+)$/i) {next}
	my $tag = $1;
	if ($$textref =~ m@<$tag[^>]*>(.*?)</$tag[^>]*>@g) {
	    my $content = $1;
	    # Strip markup and collapse whitespace in the captured body.
	    $content =~ s/&nbsp;/ /g;
	    $content =~ s/<[^>]*>/ /g;
	    $content =~ s/^\s+//;
	    $content =~ s/\s+$//;
	    $content =~ s/\s+/ /gs;
	    if ($content) {
		$tag=$find_fields{"tag$tag"}; # get the user's capitalisation
		$tag =~ s/^tag//i;
		$doc_obj->add_utf8_metadata ($section, $tag, $content);
		print $outhandle " extracted \"$tag\" metadata \"$content\"\n"
		    if ($self->{'verbosity'} > 2);
	    }
	}
    }
}
1544
1545
# Resolve "." and ".." components in a path purely textually (no file
# system access): ".." discards the previous component and "." is
# dropped.  Returns the re-joined path, or "" for an empty result.
sub eval_dir_dots {
    my $self = shift (@_);
    my ($filename) = @_;

    my $dirsep_os = &util::get_os_dirsep();
    my @components = split(/$dirsep_os/, $filename);

    my @resolved = ();
    foreach my $comp (@components) {
	if ($comp eq "..") {
	    pop(@resolved);
	}
	elsif ($comp ne ".") {
	    push(@resolved, $comp);
	}
	# a bare "." contributes nothing
    }

    # FileUtils::filenameConcatenate suppresses the leading separator when
    # the first array element is the empty string (a convenience for
    # relative-path concatenation).  An absolute path reaches here with a
    # single leading empty component, so double it up to defeat that
    # "smart" behaviour and keep the leading '/' (or '\' on windows).
    if (scalar(@resolved) > 0) {
	if ($resolved[0] eq "") {
	    unshift(@resolved, "");
	}
    }

    my $evaluated_filename = (scalar @resolved > 0) ? &FileUtils::filenameConcatenate(@resolved) : "";
    return $evaluated_filename;
}
1584
# Rewrite a usemap attribute value: strip surrounding quotes (re-emitting
# normalised double quotes in the surrounding text), and drop a leading
# "./" path component from the map reference.
sub replace_usemap_links {
    my $self = shift (@_);
    my ($prefix, $target, $suffix) = @_;

    # Quoted value: unquote it and put double quotes back around the
    # rewritten value via the prefix/suffix text.
    if ($target =~ /^[\"\']/) {
	$target =~ s/^[\"\']//;
	$target =~ s/[\"\']$//;
	$prefix .= '"';
	$suffix = '"' . $suffix;
    }

    # Drop a redundant leading "./".
    $target =~ s/^\.\///;

    return $prefix . $target . $suffix;
}
1600
# Advance the counters used by the rename_assoc_files numbered-directory
# scheme: bump the per-directory file counter, rolling over into a fresh
# directory once it reaches 1000.
sub inc_filecount {
    my $self = shift (@_);

    if ($self->{'file_num'} != 1000) {
	$self->{'file_num'} ++;
    } else {
	$self->{'dir_num'} ++;
	$self->{'file_num'} = 0;
    }
}
1611
1612
# Extend read_file so that strings like &eacute; are
# converted to UTF8 internally.
#
# We don't convert &lt; or &gt; or &amp; or &quot; in case
# they interfere with the GML files

sub read_file {
    my $self = shift(@_);
    my ($filename, $encoding, $language, $textref) = @_;

    # Let the parent class do the actual reading/decoding.
    $self->SUPER::read_file($filename, $encoding, $language, $textref);

    # Convert entities to their Unicode code-point equivalents.
    # The five protected entities are temporarily renamed (&lt; -> &zlt;)
    # so the general conversion on the middle line skips them, then they
    # are restored afterwards.
    $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
    $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,1)/gseo;
    $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;

}
1631
# Read an HB-format HTML file, appending its body text (converted to
# utf8 by HB_gettext) onto the string referenced by $text.  If no <body>
# tag is found on the first pass, the file is re-read from the top with
# the body requirement relaxed.
sub HB_read_html_file {
    my $self = shift (@_);
    my ($htmlfile, $text) = @_;

    # Use a lexical filehandle with 3-arg open: the old bareword FILE
    # handle opened with 2-arg open allowed mode injection through the
    # filename and leaked a global handle.  HB_gettext reads from
    # whatever handle it is given, so passing the lexical handle directly
    # (instead of the old "FILE" name) is transparent to it.
    my $fh;
    if (!open ($fh, '<', $htmlfile)) {
	print STDERR "ERROR - could not open $htmlfile\n";
	return;
    }

    my $foundbody = 0;
    $self->HB_gettext (\$foundbody, $text, $fh);
    close $fh;

    # just in case there was no <body> tag
    if (!$foundbody) {
	$foundbody = 1;
	open ($fh, '<', $htmlfile) || return;
	$self->HB_gettext (\$foundbody, $text, $fh);
	close $fh;
    }
    # text is in utf8
}
1655
1656# converts the text to utf8, as ghtml does that for &eacute; etc.
# Read lines from $handle and append them to the string referenced by
# $text, starting at the <body> tag (skipping everything before it unless
# $$foundbody is already true).  Warns about non-arial <font> faces,
# converts iso_8859_1 input to utf-8, collapses whitespace, and finally
# upgrades $$text to a Unicode-aware Perl string.
# converts the text to utf8, as ghtml does that for &eacute; etc.
sub HB_gettext {
    my $self = shift (@_);
    my ($foundbody, $text, $handle) = @_;

    my $line = "";
    while (defined ($line = <$handle>)) {
	# look for body tag
	if (!$$foundbody) {
	    if ($line =~ s/^.*<body[^>]*>//i) {
		$$foundbody = 1;
	    } else {
		next;
	    }
	}

	# check for symbol fonts
	if ($line =~ m/<font [^>]*?face\s*=\s*\"?(\w+)\"?/i) {
	    my $font = $1;
	    print STDERR "HBPlug::HB_gettext - warning removed font $font\n"
		if ($font !~ m/^arial$/i);
	}

	$$text .= $line;
    }

    if ($self->{'input_encoding'} eq "iso_8859_1") {
	# convert to utf-8
	$$text=&unicode::unicode2utf8(&unicode::convert2unicode("iso_8859_1", $text));
    }
    # convert any alphanumeric character entities to their utf-8
    # equivalent for indexing purposes
    #&ghtml::convertcharentities ($$text);

    $$text =~ s/\s+/ /g; # remove \n's

    # At this point $$text is a binary byte string
    # => turn it into a Unicode aware string, so full
    # Unicode aware pattern matching can be used.
    # For instance: 's/\x{0101}//g' or '[[:upper:]]'
    #

    $$text = decode("utf8",$$text);
}
1700
# Tidy one HB section's HTML: drop closing tags that have no earlier
# opening tag, collapse doubled-up <p> tags, trim trailing markup and
# whitespace, re-insert paragraph breaks, hard-wrap at ~80 columns, and
# centre any images.  Returns the cleaned section text.
sub HB_clean_section {
    my $self = shift (@_);
    my ($sec) = @_;

    # Remove each closing tag whose opening tag never appears before it.
    while ($sec =~ m/<\/([^>]{1,10})>/) {
	my $tag = $1;
	my $open_pos = index($sec, "<$tag");
	my $close_pos = index($sec, "<\/$tag");
	last if ($open_pos >= 0) && ($open_pos < $close_pos);
	$sec =~ s/<\/$tag>//;
    }

    # Collapse consecutive paragraph tags into one.
    1 while $sec =~ s/<p\b[^>]*>\s*<p\b/<p/ig;

    # Strip trailing formatting tags, &nbsp; and whitespace off the end.
    1 while $sec =~ s/(<u>|<i>|<b>|<p\b[^>]*>|&nbsp;|\s)$//i;

    # Put a blank line before each paragraph.
    $sec =~ s/(.)\s*<p\b/$1\n\n<p/gi;

    # Hard-wrap at a word boundary every ~80 characters.  (Also puts a
    # line feed before the final word of the section, even when not
    # strictly needed.)
    $sec =~ s/(.{1,80})\s/$1\n/g;

    # Centre images, whether given as <img> tags or as the HB
    # "<<I>> filename" convention.
    $sec =~ s/<img[^>]*?src=\"?([^\">]+)\"?[^>]*>/
	<center><img src=\"$1\" \/><\/center><br\/>/ig;
    $sec =~ s/&lt;&lt;I&gt;&gt;\s*([^\.]+\.(png|jpg|gif))/
	<center><img src=\"$1\" \/><\/center><br\/>/ig;

    return $sec;
}
1737
# Will convert the oldHDL format to the new HDL format (using the Section tag).
# Old-style HDL marks section starts with "<<TOCn>>" markers (HTML-escaped in
# the input as &lt;&lt;TOCn&gt;&gt;), where n is the section depth. This
# rewrites each marker as a nested <Section> element; the Section markup is
# emitted inside HTML comments (<!-- ... -->) so the output is still viewable
# HTML. Takes the input filename and the tmp filename to write to; returns
# the tmp filename.
sub convert_to_newHDLformat
{
    my $self = shift (@_);
    my ($file,$cnfile) = @_;
    my $input_filename = $file;
    my $tmp_filename = $cnfile;

    # write HTML tmp file with new HDL format
    open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");

    # read in the file and do basic html cleaning (removing header etc)
    my $html = "";
    $self->HB_read_html_file ($input_filename, \$html);

    # process the file one section at a time; each iteration consumes the
    # text up to (and including) the next <<TOC>> marker from $html
    my $curtoclevel = 1;
    my $firstsection = 1;
    my $toclevel = 0;
    while (length ($html) > 0) {
	# match the next <<TOCn>> marker, possibly wrapped in <p>/<b>/<i>/<u>
	# tags: $3 is the section depth n, $4 the section title text
	if ($html =~ s/^.*?(?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC(\d+)&gt;&gt;\s*(.*?)<p\b/<p/i) {
	    $toclevel = $3;
	    my $title = $4;
	    my $sectiontext = "";
	    # the section body runs up to the NEXT <<TOC>> marker (which is
	    # put back for the following iteration), or to end of input
	    if ($html =~ s/^(.*?)((?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC\d+&gt;&gt;)/$2/i) {
		$sectiontext = $1;
	    } else {
		$sectiontext = $html;
		$html = "";
	    }

	    # remove tags and extra spaces from the title
	    $title =~ s/<\/?[^>]+>//g;
	    $title =~ s/^\s+|\s+$//g;

	    # close any sections below the current level and
	    # create a new section (special case for the firstsection)
	    print PROD "<!--\n";
	    while (($curtoclevel > $toclevel) ||
		   (!$firstsection && $curtoclevel == $toclevel)) {
		$curtoclevel--;
		print PROD "</Section>\n";
	    }
	    # a jump of more than one level (e.g. TOC1 straight to TOC3) is
	    # tolerated but reported, since it implies a malformed document
	    if ($curtoclevel+1 < $toclevel) {
		print STDERR "WARNING - jump in toc levels in $input_filename " .
		    "from $curtoclevel to $toclevel\n";
	    }
	    while ($curtoclevel < $toclevel) {
		$curtoclevel++;
	    }

	    if ($curtoclevel == 1) {
		# add the header tag (only once, for the first top-level section)
		print PROD "-->\n";
		print PROD "<HTML>\n<HEAD>\n<TITLE>$title</TITLE>\n</HEAD>\n<BODY>\n";
		print PROD "<!--\n";
	    }

	    print PROD "<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">$title</Metadata>\n\t</Description>\n";

	    print PROD "-->\n";

	    # clean up the section html
	    $sectiontext = $self->HB_clean_section($sectiontext);

	    print PROD "$sectiontext\n";

	} else {
	    # remaining text contains no further <<TOC>> marker: report a
	    # shortened excerpt and stop processing
	    print STDERR "WARNING - leftover text\n" , $self->shorten($html),
		"\nin $input_filename\n";
	    last;
	}
	$firstsection = 0;
    }

    # close all still-open sections before finishing the document
    print PROD "<!--\n";
    while ($curtoclevel > 0) {
	$curtoclevel--;
	print PROD "</Section>\n";
    }
    print PROD "-->\n";

    close (PROD) || die("Error Closing File: $tmp_filename $!");

    return $tmp_filename;
}
1824
# Produce a quoted excerpt of $text suitable for warning messages:
# text under 100 characters is quoted whole, anything longer is
# abbreviated to its first and last 50 characters.
sub shorten {
    my ($self, $text) = @_;

    my $len = length($text);
    if ($len >= 100) {
	my $head = substr($text, 0, 50);
	my $tail = substr($text, $len - 50);
	return "\"$head\" ... \"$tail\"";
    }

    return "\"$text\"";
}
1834
# Prepare an input file for realistic-book / old-style-HDL processing.
# Directories are returned untouched. HTML-like files are (optionally)
# converted from the old HDL format to the new Section-tag format and run
# through tidy; everything else is copied verbatim. All work happens in
# the collection's "tidytmp" directory, mirroring any sub-folder structure
# found below "import", so the original input is never modified.
# Returns the path of the file to process (inside tidytmp).
sub convert_tidy_or_oldHDL_file
{
    my $self = shift (@_);
    my ($file) = @_;
    my $input_filename = $file;

    if (-d $input_filename)
    {
	return $input_filename;
    }

    # get the input filename
    my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
    my $base_dirname = $dirname;
    $suffix = lc($suffix);

    # derive tmp filename from input filename
    # Remove any white space from filename -- no risk of name collision, and
    # makes later conversion by utils simpler. Leave spaces in path...
    # tidy up the filename with space, dot, hyphen between
    $tailname =~ s/\s+//g;
    $tailname =~ s/\.+//g;
    $tailname =~ s/\-+//g;
    # convert to utf-8 otherwise we have problems with the doc.xml file
    # later on
    &unicode::ensure_utf8(\$tailname);

    # softlink to collection tmp dir
    my $tmp_dirname = &FileUtils::filenameConcatenate($ENV{'GSDLCOLLECTDIR'}, "tidytmp");
    &FileUtils::makeDirectory($tmp_dirname) if (!-e $tmp_dirname);

    my $test_dirname = "";
    my $f_separator = &util::get_os_dirsep();

    # recreate, under tidytmp, any folder hierarchy the input had below
    # "import".  Explicit captures are used instead of the $` / $'
    # pre/post-match variables, which impose a global regex performance
    # penalty; the split-on-first-separator semantics are unchanged.
    if ($dirname =~ m/import$f_separator(.*)$/s)
    {
	$test_dirname = $1;

	while ($test_dirname =~ m/^(.*?)[$f_separator](.*)$/s)
	{
	    my $folderdirname = $1;
	    $tmp_dirname = &FileUtils::filenameConcatenate($tmp_dirname,$folderdirname);
	    &FileUtils::makeDirectory($tmp_dirname) if (!-e $tmp_dirname);
	    $test_dirname = $2;
	}
    }

    my $tmp_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");

    # tidy or convert the input file if it is a HTML-like file or it is accepted by the process_exp
    if (($suffix eq ".htm") || ($suffix eq ".html") || ($suffix eq ".shtml"))
    {
	# convert the input file to a new style HDL
	my $hdl_output_filename = $input_filename;
	if ($self->{'old_style_HDL'})
	{
	    $hdl_output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");
	    $hdl_output_filename = $self->convert_to_newHDLformat($input_filename,$hdl_output_filename);
	}

	# copy every other file (e.g. images) from the base dir to the tmp
	# dir if it does not already exist there.
	# NOTE: this previously called close(DIR), which does not close a
	# handle opened with opendir; closedir on a lexical handle is the
	# correct pairing.
	opendir(my $base_dir_handle, $base_dirname) or die "Can't open base directory : $base_dirname!";
	my @files = grep {!/^\.+$/} readdir($base_dir_handle);
	closedir($base_dir_handle);

	foreach my $file (@files)
	{
	    my $src_file = &FileUtils::filenameConcatenate($base_dirname,$file);
	    my $dest_file = &FileUtils::filenameConcatenate($tmp_dirname,$file);
	    if ((!-e $dest_file) && (!-d $src_file))
	    {
		# just copy the original file back to the tmp directory
		copy($src_file,$dest_file) or die "Can't copy file $src_file to $dest_file $!";
	    }
	}

	# tidy the input file
	my $tidy_output_filename = $hdl_output_filename;
	if ($self->{'use_realistic_book'})
	{
	    $tidy_output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "$tailname$suffix");
	    $tidy_output_filename = $self->tmp_tidy_file($hdl_output_filename,$tidy_output_filename);
	}
	$tmp_filename = $tidy_output_filename;
    }
    else
    {
	if (!-e $tmp_filename)
	{
	    # just copy the original file back to the tmp directory
	    copy($input_filename,$tmp_filename) or die "Can't copy file $input_filename to $tmp_filename $!";
	}
    }

    return $tmp_filename;
}
1933
1934
# Will make the html input file a proper XML file, with font tags removed
# and image sizes added to the img tags.
# The tidying process takes place in a collection specific 'tmp' directory
# so that we don't accidentally damage the input.
# Takes the input filename and the tmp filename to write; returns the tmp
# filename, whose contents are the output of running html-tidy (-asxml)
# over the rewritten token stream.
sub tmp_tidy_file
{
    my $self = shift (@_);
    my ($file,$cnfile) = @_;
    my $input_filename = $file;
    my $tmp_filename = $cnfile;

    # split the input path so the directory holding the images is known
    my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");

    require HTML::TokeParser::Simple;

    # create HTML parser to decode the input file
    my $parser = HTML::TokeParser::Simple->new($input_filename);

    # write the HTML tmp file with font tags dropped and width/height added
    # to img tags.  Three-arg open with a lexical handle replaces the old
    # 2-arg bareword form, which misparses filenames containing mode
    # characters such as '>'.
    open (my $prod_fh, '>', $tmp_filename) || die("Error Writing to File: $tmp_filename $!");
    while (my $token = $parser->get_token())
    {
	# is it an img tag
	if ($token->is_start_tag('img'))
	{
	    # get the attributes
	    my $attr = $token->return_attr;

	    # get the full path to the image
	    my $img_file = &FileUtils::filenameConcatenate($dirname,$attr->{src});

	    # set the width and height attribute
	    ($attr->{width}, $attr->{height}) = imgsize($img_file);

	    # recreate the tag
	    print {$prod_fh} "<img";
	    print {$prod_fh} map { qq { $_="$attr->{$_}"} } keys %$attr;
	    print {$prod_fh} ">";
	}
	elsif (($token->is_start_tag('font')) || ($token->is_end_tag('font')))
	{
	    # drop font tags entirely (nothing is written)
	}
	else
	{
	    # print without changes
	    print {$prod_fh} $token->as_is;
	}
    }
    close ($prod_fh) || die("Error Closing File: $tmp_filename $!");

    # run html-tidy on the tmp file to make it a proper XML file

    my $outhandle = $self->{'outhandle'};
    print $outhandle "Converting HTML to be XML compliant:\n";

    my $tidy_cmd = "tidy";
    $tidy_cmd .= " -q" if ($self->{'verbosity'} <= 2);
    $tidy_cmd .= " -raw -wrap 0 -asxml \"$tmp_filename\"";
    if ($self->{'verbosity'} <= 2) {
	# suppress tidy's stderr chatter at low verbosity
	if ($ENV{'GSDLOS'} =~ m/^windows/i) {
	    $tidy_cmd .= " 2>nul";
	}
	else {
	    $tidy_cmd .= " 2>/dev/null";
	}
	print $outhandle " => $tidy_cmd\n";
    }

    my $tidyfile = `$tidy_cmd`;

    # write result back to the tmp file
    open (my $out_fh, '>', $tmp_filename) || die("Error Writing to File: $tmp_filename $!");
    print {$out_fh} $tidyfile;
    close ($out_fh) || die("Error Closing File: $tmp_filename $!");

    # return the output filename
    return $tmp_filename;
}
2020
# Associate a cover image with the document.  When the source was
# pre-processed into the collection's tidytmp working directory
# (realistic-book or old-style-HDL mode), the cover image still lives in
# the original import area, so the path is mapped back before delegating
# to the base class implementation.
sub associate_cover_image
{
    my ($self, $doc_obj, $filename) = @_;

    my $built_from_tidytmp =
	$self->{'use_realistic_book'} || $self->{'old_style_HDL'};

    if ($built_from_tidytmp)
    {
	# swap the first ".../tidytmp/..." path component for ".../import/..."
	$filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
    }

    $self->SUPER::associate_cover_image($doc_obj, $filename);
}
2032
2033
20341;
# Note: See TracBrowser for help on using the repository browser.