source: main/trunk/greenstone2/perllib/plugins/HTMLPlugin.pm@ 25673

Last change on this file since 25673 was 25673, checked in by sjm84, 12 years ago

Links that only contain # values now have a macro added to the front of them; in Greenstone 2 this macro is defined to be empty, and in Greenstone 3 it is the current page's URL (so that it works with the RESTful URL style).

  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 65.2 KB
RevLine 
[14665]1###########################################################################
2#
[15872]3# HTMLPlugin.pm -- basic html plugin
[14665]4#
5# A component of the Greenstone digital library software
6# from the New Zealand Digital Library Project at the
7# University of Waikato, New Zealand.
8#
9# Copyright (C) 1999 New Zealand Digital Library Project
10#
11# This program is free software; you can redistribute it and/or modify
12# it under the terms of the GNU General Public License as published by
13# the Free Software Foundation; either version 2 of the License, or
14# (at your option) any later version.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with this program; if not, write to the Free Software
23# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24#
25###########################################################################
26
27#
28# Note that this plugin handles frames only in a very simple way
29# i.e. each frame is treated as a separate document. This means
30# search results will contain links to individual frames rather
31# than linking to the top level frameset.
32# There may also be some problems caused by the _parent target
33# (it's removed by this plugin)
34#
35
[15872]36package HTMLPlugin;
[14665]37
[22842]38use Encode;
[23387]39use Unicode::Normalize 'normalize';
[22842]40
[15872]41use ReadTextFile;
42use HBPlugin;
[14665]43use ghtml;
44use unicode;
45use util;
46use XMLParser;
47
48use Image::Size;
[14913]49use File::Copy;
[14665]50
51sub BEGIN {
[15872]52 @HTMLPlugin::ISA = ('ReadTextFile', 'HBPlugin');
[14665]53}
54
55use strict; # every perl program should have this!
56no strict 'refs'; # make an exception so we can use variables as filehandles
57
58my $arguments =
59 [ { 'name' => "process_exp",
[15872]60 'desc' => "{BasePlugin.process_exp}",
[14665]61 'type' => "regexp",
62 'deft' => &get_default_process_exp() },
63 { 'name' => "block_exp",
[15872]64 'desc' => "{BasePlugin.block_exp}",
[14665]65 'type' => 'regexp',
66 'deft' => &get_default_block_exp() },
67 { 'name' => "nolinks",
[15872]68 'desc' => "{HTMLPlugin.nolinks}",
[14665]69 'type' => "flag" },
70 { 'name' => "keep_head",
[15872]71 'desc' => "{HTMLPlugin.keep_head}",
[14665]72 'type' => "flag" },
73 { 'name' => "no_metadata",
[15872]74 'desc' => "{HTMLPlugin.no_metadata}",
[14665]75 'type' => "flag" },
76 { 'name' => "metadata_fields",
[15872]77 'desc' => "{HTMLPlugin.metadata_fields}",
[14665]78 'type' => "string",
79 'deft' => "Title" },
[21800]80 { 'name' => "metadata_field_separator",
81 'desc' => "{HTMLPlugin.metadata_field_separator}",
82 'type' => "string",
83 'deft' => "" },
[14665]84 { 'name' => "hunt_creator_metadata",
[15872]85 'desc' => "{HTMLPlugin.hunt_creator_metadata}",
[14665]86 'type' => "flag" },
87 { 'name' => "file_is_url",
[15872]88 'desc' => "{HTMLPlugin.file_is_url}",
[14665]89 'type' => "flag" },
90 { 'name' => "assoc_files",
[15872]91 'desc' => "{HTMLPlugin.assoc_files}",
[14665]92 'type' => "regexp",
93 'deft' => &get_default_block_exp() },
94 { 'name' => "rename_assoc_files",
[15872]95 'desc' => "{HTMLPlugin.rename_assoc_files}",
[14665]96 'type' => "flag" },
97 { 'name' => "title_sub",
[15872]98 'desc' => "{HTMLPlugin.title_sub}",
[14665]99 'type' => "string",
100 'deft' => "" },
101 { 'name' => "description_tags",
[15872]102 'desc' => "{HTMLPlugin.description_tags}",
[14665]103 'type' => "flag" },
104 # retain this for backward compatibility (w3mir option was replaced by
105 # file_is_url)
106 { 'name' => "w3mir",
[15872]107# 'desc' => "{HTMLPlugin.w3mir}",
[14665]108 'type' => "flag",
109 'hiddengli' => "yes"},
110 { 'name' => "no_strip_metadata_html",
[15872]111 'desc' => "{HTMLPlugin.no_strip_metadata_html}",
[14665]112 'type' => "string",
113 'deft' => "",
114 'reqd' => "no"},
115 { 'name' => "sectionalise_using_h_tags",
[15872]116 'desc' => "{HTMLPlugin.sectionalise_using_h_tags}",
[14665]117 'type' => "flag" },
[14913]118 { 'name' => "use_realistic_book",
[15872]119 'desc' => "{HTMLPlugin.tidy_html}",
[14665]120 'type' => "flag"},
[15872]121 { 'name' => "old_style_HDL",
122 'desc' => "{HTMLPlugin.old_style_HDL}",
[20791]123 'type' => "flag"},
124 {'name' => "processing_tmp_files",
125 'desc' => "{BasePlugin.processing_tmp_files}",
126 'type' => "flag",
127 'hiddengli' => "yes"}
[14665]128 ];
129
[15872]130my $options = { 'name' => "HTMLPlugin",
131 'desc' => "{HTMLPlugin.desc}",
[14665]132 'abstract' => "no",
133 'inherits' => "yes",
134 'args' => $arguments };
135
136
137sub new {
138 my ($class) = shift (@_);
139 my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
140 push(@$pluginlist, $class);
141
[15872]142 push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
143 push(@{$hashArgOptLists->{"OptList"}},$options);
[16024]144
[14665]145
[15872]146 my $self = new ReadTextFile($pluginlist,$inputargs,$hashArgOptLists);
[14665]147
148 if ($self->{'w3mir'}) {
149 $self->{'file_is_url'} = 1;
150 }
151 $self->{'aux_files'} = {};
152 $self->{'dir_num'} = 0;
153 $self->{'file_num'} = 0;
154
155 return bless $self, $class;
156}
157
158# may want to use (?i)\.(gif|jpe?g|jpe|png|css|js(?:@.*)?)$
159 # if we have e.g. <script language="javascript" src="img/lib.js@123">
[20791]160# blocking is now done by reading through the file and recording all the
161# images and other files
[14665]162sub get_default_block_exp {
163 my $self = shift (@_);
164
[16392]165 #return q^(?i)\.(gif|jpe?g|jpe|jpg|png|css)$^;
166 return "";
[14665]167}
168
169sub get_default_process_exp {
170 my $self = shift (@_);
171
172 # the last option is an attempt to encode the concept of an html query ...
173 return q^(?i)(\.html?|\.shtml|\.shm|\.asp|\.php\d?|\.cgi|.+\?.+=.*)$^;
174}
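# Illustrative examples (not from the original source): filenames such as
# "index.html", "guide.shtml", "page.php3" or "search.cgi", and URLs carrying a
# query string like "lookup?key=value", all match the default process expression
# above, whereas a name like "notes.txt" does not.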
175
176sub store_block_files
177{
178 my $self =shift (@_);
[16392]179 my ($filename_full_path, $block_hash) = @_;
180
181 my $html_fname = $filename_full_path;
[14665]182
[23335]183 my ($language, $content_encoding) = $self->textcat_get_language_encoding ($filename_full_path);
184 $self->{'store_content_encoding'}->{$filename_full_path} = $content_encoding;
[14665]185
186 # read in file ($text will be in utf8)
[16769]187 my $raw_text = "";
[23363]188 $self->read_file_no_decoding($filename_full_path, \$raw_text);
[16769]189
190 my $textref = \$raw_text;
[14665]191 my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
192 my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
193 $$textref =~ s/$opencom(.*?)$closecom//gs;
194
[23363]195 # Convert entities to their UTF8 equivalents
196 $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
197 $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,0)/gseo; # on this occasion, want it left as utf8
198 $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;
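# (The &z...; renaming above is only a temporary guard: &lt;, &gt;, &amp;, &quot;
# and &nbsp; are hidden as &zlt; etc. so that the general entity substitution via
# &ghtml::getcharequiv leaves them alone, and then they are restored. For example,
# "&eacute;" is turned into its UTF-8 character while "&lt;" survives unchanged.)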
199
[14665]200 my $attval = "\\\"[^\\\"]+\\\"|[^\\s>]+";
201 my @img_matches = ($$textref =~ m/<img[^>]*?src\s*=\s*($attval)[^>]*>/igs);
202 my @usemap_matches = ($$textref =~ m/<img[^>]*?usemap\s*=\s*($attval)[^>]*>/igs);
203 my @link_matches = ($$textref =~ m/<link[^>]*?href\s*=\s*($attval)[^>]*>/igs);
204 my @embed_matches = ($$textref =~ m/<embed[^>]*?src\s*=\s*($attval)[^>]*>/igs);
[17127]205 my @tabbg_matches = ($$textref =~ m/<(?:body|table|tr|td)[^>]*?background\s*=\s*($attval)[^>]*>/igs);
[16638]206 my @script_matches = ($$textref =~ m/<script[^>]*?src\s*=\s*($attval)[^>]*>/igs);
[14665]207
[23387]208 if(!defined $self->{'unicode_to_original_filename'}) {
[16769]209 # maps from utf8 converted link name -> original filename referred to by (possibly URL-encoded) src url
[23387]210 $self->{'unicode_to_original_filename'} = {};
[16769]211 }
212
[23387]213 foreach my $raw_link (@img_matches, @usemap_matches, @link_matches, @embed_matches, @tabbg_matches, @script_matches) {
[14665]214
215 # remove quotes from link at start and end if necessary
[23387]216 if ($raw_link =~ m/^\"/) {
217 $raw_link =~ s/^\"//;
218 $raw_link =~ s/\"$//;
[14665]219 }
220
[23371]221 # remove any anchor names, e.g. foo.html#name becomes foo.html
222 # but watch out for any #'s that are part of entities, such as &#x3B1;
[23387]223 $raw_link =~ s/([^&])\#.*$/$1/s;
[23371]224
[16638]225 # some links may just be anchor names
[23387]226 next unless ($raw_link =~ /\S+/);
[14665]227
[23415]228 if ($raw_link !~ m@^/@ && $raw_link !~ m/^([A-Z]:?)\\/i) {
[14665]229 # Turn relative file path into full path
[16392]230 my $dirname = &File::Basename::dirname($filename_full_path);
[23387]231 $raw_link = &util::filename_cat($dirname, $raw_link);
[14665]232 }
[23387]233 $raw_link = $self->eval_dir_dots($raw_link);
[16638]234
[16769]235 # this is the actual filename on the filesystem (that the link refers to)
[23387]236 my $url_original_filename = $self->opt_url_decode($raw_link);
[16769]237
[23387]238 my ($uses_bytecodes,$exceeds_bytecodes) = &unicode::analyze_raw_string($url_original_filename);
[16769]239
[23387]240 if ($exceeds_bytecodes) {
241 # We have a link to a file name that is more complicated than a raw byte filename
242 # What we do next depends on the operating system we are on
[16769]243
[23387]244 if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
245 # Assume we're dealing with a UTF-8 encoded filename
246 $url_original_filename = encode("utf8", $url_original_filename);
247 }
248 elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
[23387]249 # HFS+ is UTF8 with decomposition
250 $url_original_filename = encode("utf8", $url_original_filename);
251 $url_original_filename = normalize('D', $url_original_filename); # Normalization Form D (decomposition)
252 }
253 elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
254 # Don't need to do anything as later code maps Windows
255 # unicode filenames to DOS short filenames when needed
256 }
257 else {
258 my $outhandle = $self->{'outhandle'};
259 print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
260 print $outhandle " in raw file system encoding of: $raw_link\n";
261 print $outhandle " Assuming filesystem is UTF-8 based.\n";
262 $url_original_filename = encode("utf8", $url_original_filename);
263 }
264 }
265
266 # Convert the (currently raw) link into its Unicode version.
267 # Store the Unicode link along with the url_original_filename
268 my $unicode_url_original_filename = "";
269 $self->decode_text($raw_link,$content_encoding,$language,\$unicode_url_original_filename);
270
271
272 $self->{'unicode_to_original_filename'}->{$unicode_url_original_filename} = $url_original_filename;
273
274
275 if ($url_original_filename ne $unicode_url_original_filename) {
[17088]276 my $outhandle = $self->{'outhandle'};
[23387]277
[17088]278 print $outhandle "URL Encoding $url_original_filename\n";
[23387]279 print $outhandle " ->$unicode_url_original_filename\n";
[17088]280
[23387]281 # Allow for possibility of raw byte version and Unicode versions of file
[23561]282 &util::block_filename($block_hash,$unicode_url_original_filename);
[23387]283 }
[23363]284
[23418]285 # $url_original_filename = &util::upgrade_if_dos_filename($url_original_filename);
[23561]286 &util::block_filename($block_hash,$url_original_filename);
[23418]287
[14665]288 }
289}
290
[16769]291# Given a filename in any encoding, will URL decode it to get back the original filename
292# in the original encoding. Because this method is intended to work out the *original*
[18320]293 # filename, it does not URL decode any filename if a file by the name of the *URL-encoded*
[16769]294# string already exists in the local folder.
[23363]295#
[16769]296sub opt_url_decode {
297 my $self = shift (@_);
[23387]298 my ($raw_link) = @_;
[16024]299
[23387]300
[16769]301 # Replace %XX's in URL with decoded value if required.
302 # Note that the filename may include the %XX in some situations
[23387]303
304## if ($raw_link =~ m/\%[A-F0-9]{2}/i) {
305
306 if (($raw_link =~ m/\%[A-F0-9]{2}/i) || ($raw_link =~ m/\&\#x[0-9A-F]+;/i) || ($raw_link =~ m/\&\#[0-9]+;/i)) {
307 if (!-e $raw_link) {
308 $raw_link = &unicode::url_decode($raw_link,1);
[16769]309 }
310 }
[23387]311
312 return $raw_link;
[16769]313}
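# Illustrative example (hypothetical filenames): a link written as "my%20photo.jpg"
# would normally be decoded to "my photo.jpg"; however, if a file literally named
# "my%20photo.jpg" exists, the link is returned untouched, since that is taken to
# be the original name on disk.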
314
[20774]315sub read_into_doc_obj
316{
317 my $self = shift (@_);
318 my ($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli) = @_;
319
[22330]320 my ($filename_full_path, $filename_no_path) = &util::get_full_filenames($base_dir, $file);
[23335]321
322 # Lookup content_encoding worked out in file_block pass for this file
323 # Store it under the local name 'content_encoding' so it's nice and
324 # easy to access
325 $self->{'content_encoding'} = $self->{'store_content_encoding'}->{$filename_full_path};
326
[20774]327 # get the input file
328 my $input_filename = $file;
329 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
330 $suffix = lc($suffix);
[22330]331 my $tidy_filename;
[20774]332 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
333 {
334 # because the document has to be sectionalized, set the description tags
335 $self->{'description_tags'} = 1;
336
337 # set the file to be tidied
338 $input_filename = &util::filename_cat($base_dir,$file) if $base_dir =~ m/\w/;
339
340 # get the tidied file
341 #my $tidy_filename = $self->tmp_tidy_file($input_filename);
[22330]342 $tidy_filename = $self->convert_tidy_or_oldHDL_file($input_filename);
[20774]343
344 # derive tmp filename from input filename
345 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($tidy_filename, "\\.[^\\.]+\$");
346
347 # set the new input file and base_dir to be from the tidied file
348 $file = "$tailname$suffix";
349 $base_dir = $dirname;
350 }
351
352 # call the parent read_into_doc_obj
353 my ($process_status,$doc_obj) = $self->SUPER::read_into_doc_obj($pluginfo, $base_dir, $file, $block_hash, $metadata, $processor, $maxdocs, $total_count, $gli);
[22330]354 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
355 {
356 # now we need to reset the filenames in the doc obj so that the converted filenames are not used
357 my $collect_file = &util::filename_within_collection($filename_full_path);
358 $doc_obj->set_source_filename ($collect_file, $self->{'file_rename_method'});
359 ## set_source_filename does not set the doc_obj source_path which is used in archives dbs for incremental
[23363]360 # build. So set it manually.
361 $doc_obj->set_source_path($filename_full_path);
[22330]362 my $collect_conv_file = &util::filename_within_collection($tidy_filename);
363 $doc_obj->set_converted_filename($collect_conv_file);
[23349]364
365 my $plugin_filename_encoding = $self->{'filename_encoding'};
[23352]366 my $filename_encoding = $self->deduce_filename_encoding($file,$metadata,$plugin_filename_encoding);
367 $self->set_Source_metadata($doc_obj, $filename_full_path, $filename_encoding);
[22330]368 }
[23335]369
370 delete $self->{'store_content_encoding'}->{$filename_full_path};
371 $self->{'content_encoding'} = undef;
372
[20774]373 return ($process_status,$doc_obj);
374}
[16769]375
[14665]376# do plugin specific processing of doc_obj
377sub process {
378 my $self = shift (@_);
379 my ($textref, $pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;
380 my $outhandle = $self->{'outhandle'};
381
[16769]382 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
[16024]383 # this makes life so much easier... perl can cope with unix-style '/'s.
[23371]384 $base_dir =~ s@(\\)+@/@g;
385 $file =~ s@(\\)+@/@g;
[14665]386 }
[23371]387
388 my $filename = &util::filename_cat($base_dir,$file);
389 my $upgraded_base_dir = &util::upgrade_if_dos_filename($base_dir);
390 my $upgraded_filename = &util::upgrade_if_dos_filename($filename);
391
392 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
393 # And again
394 $upgraded_base_dir =~ s@(\\)+@/@g;
395 $upgraded_filename =~ s@(\\)+@/@g;
396
397 # Need to make sure there is a '/' on the end of upgraded_base_dir
[23387]398 if (($upgraded_base_dir ne "") && ($upgraded_base_dir !~ m/\/$/)) {
[23371]399 $upgraded_base_dir .= "/";
400 }
401 }
402 my $upgraded_file = &util::filename_within_directory($upgraded_filename,$upgraded_base_dir);
[14665]403
404 # reset per-doc stuff...
405 $self->{'aux_files'} = {};
406 $self->{'dir_num'} = 0;
407 $self->{'file_num'} = 0;
408
409 # process an HTML file where sections are divided by headings tags (H1, H2 ...)
410 # you can also include metadata in the format (X can be any number)
411 # <hX>Title<!--gsdl-metadata
412 # <Metadata name="name1">value1</Metadata>
413 # ...
414 # <Metadata name="nameN">valueN</Metadata>
415 #--></hX>
416 if ($self->{'sectionalise_using_h_tags'}) {
417 # description_tags should always be activated because we convert headings to description tags
418 $self->{'description_tags'} = 1;
419
420 my $arrSections = [];
[23371]421 $$textref =~ s/<h([0-9]+)[^>]*>(.*?)<\/h[0-9]+>/$self->process_heading($1, $2, $arrSections, $upgraded_file)/isge;
[14665]422
423 if (scalar(@$arrSections)) {
424 my $strMetadata = $self->update_section_data($arrSections, -1);
425 if (length($strMetadata)) {
426 $strMetadata = '<!--' . $strMetadata . "\n-->\n</body>";
427 $$textref =~ s/<\/body>/$strMetadata/ig;
428 }
429 }
430 }
431
432 my $cursection = $doc_obj->get_top_section();
433
434 $self->extract_metadata ($textref, $metadata, $doc_obj, $cursection)
435 unless $self->{'no_metadata'} || $self->{'description_tags'};
436
437 # Store URL for page as metadata - this can be used for an
438 # altavista style search interface. The URL won't be valid
439 # unless the file structure contains the domain name (i.e.
440 # like when w3mir is used to download a website).
441
442 # URL metadata (even invalid ones) are used to support internal
443 # links, so even if 'file_is_url' is off, still need to store info
444
[23371]445 my ($tailname,$dirname) = &File::Basename::fileparse($upgraded_file);
[23347]446
[23335]447# my $utf8_file = $self->filename_to_utf8_metadata($file);
448# $utf8_file =~ s/&\#095;/_/g;
[23835]449# variable below used to be utf8_file
450
[23387]451 my $url_encoded_file = &unicode::raw_filename_to_url_encoded($tailname);
452 my $utf8_url_encoded_file = &unicode::raw_filename_to_utf8_url_encoded($tailname);
[23335]453
[16735]454 my $web_url = "http://";
[23387]455 my $utf8_web_url = "http://";
[16735]456 if(defined $dirname) { # local directory
[22689]457 # Check for "ftp" in the domain name of the directory
458 # structure to determine if this URL should be an ftp:// URL
459 # This check is not infallible, but better than omitting the
460 # check, which would cause all files downloaded from ftp sites
461 # via mirroring with wget to have potentially erroneous http:// URLs
462 # assigned in their metadata
463 if ($dirname =~ /^[^\/]*ftp/i)
464 {
465 $web_url = "ftp://";
[23387]466 $utf8_web_url = "ftp://";
[22689]467 }
[16836]468 $dirname = $self->eval_dir_dots($dirname);
[18626]469 $dirname .= &util::get_dirsep() if $dirname ne ""; # if there's a directory, it should end on "/"
[23387]470
471 $web_url = $web_url.$dirname.$url_encoded_file;
472 $utf8_web_url = $utf8_web_url.$dirname.$utf8_url_encoded_file;
[16735]473 } else {
[23387]474 $web_url = $web_url.$url_encoded_file;
475 $utf8_web_url = $utf8_web_url.$utf8_url_encoded_file;
[16735]476 }
[19983]477 $web_url =~ s/\\/\//g;
[23387]478 $utf8_web_url =~ s/\\/\//g;
[23371]479
480 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]481 print STDERR "*******DEBUG: upgraded_file: $upgraded_file\n";
482 print STDERR "*******DEBUG: adding URL metadata: $utf8_url_encoded_file\n";
[23371]483 }
484
485
[15872]486 $doc_obj->add_utf8_metadata($cursection, "URL", $web_url);
[23387]487 $doc_obj->add_utf8_metadata($cursection, "UTF8URL", $utf8_web_url);
[15872]488
[14665]489 if ($self->{'file_is_url'}) {
490 $doc_obj->add_metadata($cursection, "weblink", "<a href=\"$web_url\">");
491 $doc_obj->add_metadata($cursection, "webicon", "_iconworld_");
492 $doc_obj->add_metadata($cursection, "/weblink", "</a>");
493 }
494
495 if ($self->{'description_tags'}) {
496 # remove the html header - note that doing this here means any
497 # sections defined within the header will be lost (so all <Section>
498 # tags must appear within the body of the HTML)
499 my ($head_keep) = ($$textref =~ m/^(.*?)<body[^>]*>/is);
500
501 $$textref =~ s/^.*?<body[^>]*>//is;
502 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
503
504 my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
505 my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
506
507 my $lt = '(?:<|&lt;)';
508 my $gt = '(?:>|&gt;)';
509 my $quot = '(?:"|&quot;|&rdquo;|&ldquo;)';
510
511 my $dont_strip = '';
512 if ($self->{'no_strip_metadata_html'}) {
513 ($dont_strip = $self->{'no_strip_metadata_html'}) =~ s{,}{|}g;
514 }
515
516 my $found_something = 0; my $top = 1;
517 while ($$textref =~ s/^(.*?)$opencom(.*?)$closecom//s) {
518 my $text = $1;
519 my $comment = $2;
520 if (defined $text) {
521 # text before a comment - note that getting to here
522 # doesn't necessarily mean there are Section tags in
523 # the document
[23371]524 $self->process_section(\$text, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[14665]525 }
526 while ($comment =~ s/$lt(.*?)$gt//s) {
527 my $tag = $1;
528 if ($tag eq "Section") {
529 $found_something = 1;
530 $cursection = $doc_obj->insert_section($doc_obj->get_end_child($cursection)) unless $top;
531 $top = 0;
532 } elsif ($tag eq "/Section") {
533 $found_something = 1;
534 $cursection = $doc_obj->get_parent_section ($cursection);
[16769]535 } elsif ($tag =~ m/^Metadata name=$quot(.*?)$quot/s) {
[14665]536 my $metaname = $1;
[16769]537 my $accumulate = $tag =~ m/mode=${quot}accumulate${quot}/ ? 1 : 0;
[14665]538 $comment =~ s/^(.*?)$lt\/Metadata$gt//s;
539 my $metavalue = $1;
540 $metavalue =~ s/^\s+//;
541 $metavalue =~ s/\s+$//;
542 # assume that no metadata value intentionally includes
543 # carriage returns or HTML tags (if they're there they
544 # were probably introduced when converting to HTML from
545 # some other format).
546 # actually some people want to have html tags in their
547 # metadata.
548 $metavalue =~ s/[\cJ\cM]/ /sg;
549 $metavalue =~ s/<[^>]+>//sg
[16769]550 unless $dont_strip && ($dont_strip eq 'all' || $metaname =~ m/^($dont_strip)$/);
[14665]551 $metavalue =~ s/\s+/ /sg;
[22348]552 if ($metaname =~ /\./) { # has a namespace
553 $metaname = "ex.$metaname";
554 }
[14665]555 if ($accumulate) {
556 $doc_obj->add_utf8_metadata($cursection, $metaname, $metavalue);
557 } else {
558 $doc_obj->set_utf8_metadata_element($cursection, $metaname, $metavalue);
559 }
560 } elsif ($tag eq "Description" || $tag eq "/Description") {
561 # do nothing with containing Description tags
562 } else {
563 # simple HTML tag (probably created by the conversion
564 # to HTML from some other format) - we'll ignore it and
565 # hope for the best ;-)
566 }
567 }
568 }
569 if ($cursection ne "") {
[23371]570 print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains unmatched <Section></Section> tags\n";
[14665]571 }
572
573 $$textref =~ s/^.*?<body[^>]*>//is;
574 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
[16769]575 if ($$textref =~ m/\S/) {
[14665]576 if (!$found_something) {
577 if ($self->{'verbosity'} > 2) {
[23371]578 print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags so\n";
[14665]579 print $outhandle " will be processed as a single section document\n";
580 }
581
582 # go ahead and process single-section document
[23371]583 $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[14665]584
585 # if document contains no Section tags we'll go ahead
586 # and extract metadata (this won't have been done
587 # above as the -description_tags option prevents it)
588 my $complete_text = $head_keep.$doc_obj->get_text($cursection);
589 $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
590 unless $self->{'no_metadata'};
591
592 } else {
[23371]593 print $outhandle "HTMLPlugin: WARNING: $upgraded_file contains the following text outside\n";
[14665]594 print $outhandle " of the final closing </Section> tag. This text will\n";
595 print $outhandle " be ignored.";
596
597 my ($text);
598 if (length($$textref) > 30) {
599 $text = substr($$textref, 0, 30) . "...";
600 } else {
601 $text = $$textref;
602 }
603 $text =~ s/\n/ /isg;
604 print $outhandle " ($text)\n";
605 }
606 } elsif (!$found_something) {
607
608 if ($self->{'verbosity'} > 2) {
609 # may get to here if document contained no valid Section
610 # tags but did contain some comments. The text will have
611 # been processed already but we should print the warning
612 # as above and extract metadata
[23371]613 print $outhandle "HTMLPlugin: WARNING: $upgraded_file appears to contain no Section tags and\n";
[14665]614 print $outhandle " is blank or empty. Metadata will be assigned if present.\n";
615 }
616
617 my $complete_text = $head_keep.$doc_obj->get_text($cursection);
618 $self->extract_metadata (\$complete_text, $metadata, $doc_obj, $cursection)
619 unless $self->{'no_metadata'};
620 }
621
622 } else {
623
624 # remove header and footer
625 if (!$self->{'keep_head'} || $self->{'description_tags'}) {
626 $$textref =~ s/^.*?<body[^>]*>//is;
627 $$textref =~ s/(<\/body[^>]*>|<\/html[^>]*>)//isg;
628 }
629
[25555]630 $self->{'css_assoc_files'} = {};
631
[14665]632 # single section document
[23371]633 $self->process_section($textref, $upgraded_base_dir, $upgraded_file, $doc_obj, $cursection);
[25555]634
635 #my $upgraded_filename_dirname = &File::Basename::dirname($upgraded_filename);
636
637 $self->acquire_css_associated_files($doc_obj, $cursection);
638
639 $self->{'css_assoc_files'} = {};
[14665]640 }
[23335]641
[14665]642 return 1;
643}
644
645
646sub process_heading
647{
648 my ($self, $nHeadNo, $strHeadingText, $arrSections, $file) = @_;
649 $strHeadingText = '' if (!defined($strHeadingText));
650
651 my $strMetadata = $self->update_section_data($arrSections, int($nHeadNo));
652
653 my $strSecMetadata = '';
654 while ($strHeadingText =~ s/<!--gsdl-metadata(.*?)-->//is)
655 {
656 $strSecMetadata .= $1;
657 }
658
659 $strHeadingText =~ s/^\s+//g;
660 $strHeadingText =~ s/\s+$//g;
661 $strSecMetadata =~ s/^\s+//g;
662 $strSecMetadata =~ s/\s+$//g;
663
664 $strMetadata .= "\n<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">" . $strHeadingText . "</Metadata>\n";
665
666 if (length($strSecMetadata)) {
667 $strMetadata .= "\t\t" . $strSecMetadata . "\n";
668 }
669
670 $strMetadata .= "\t</Description>\n";
671
672 return "<!--" . $strMetadata . "-->";
673}
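# Illustrative sketch of what process_heading produces (hypothetical input): a
# heading such as
#   <h1>Chapter One<!--gsdl-metadata
#     <Metadata name="Author">Smith</Metadata>
#   --></h1>
# is replaced by a description-tags comment along the lines of
#   <!--
#   <Section>
#     <Description>
#       <Metadata name="Title">Chapter One</Metadata>
#       <Metadata name="Author">Smith</Metadata>
#     </Description>
#   -->
# which the description_tags handling in process() then turns into document sections.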
674
675
676sub update_section_data
677{
678 my ($self, $arrSections, $nCurTocNo) = @_;
679 my ($strBuffer, $nLast, $nSections) = ('', 0, scalar(@$arrSections));
680
681 if ($nSections == 0) {
682 push @$arrSections, $nCurTocNo;
683 return $strBuffer;
684 }
685 $nLast = $arrSections->[$nSections - 1];
686 if ($nCurTocNo > $nLast) {
687 push @$arrSections, $nCurTocNo;
688 return $strBuffer;
689 }
690 for(my $i = $nSections - 1; $i >= 0; $i--) {
691 if ($nCurTocNo <= $arrSections->[$i]) {
692 $strBuffer .= "\n</Section>";
693 pop @$arrSections;
694 }
695 }
696 push @$arrSections, $nCurTocNo;
697 return $strBuffer;
698}
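# Illustrative behaviour (hypothetical heading sequence): for headings at levels
# 1, 2, 3 followed by another level-2 heading, the final call emits two closing
# </Section> tags (ending the level-3 and the earlier level-2 sections) before the
# new level-2 section is opened, so the section nesting mirrors the heading levels.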
699
700
701# note that process_section may be called multiple times for a single
702# section (relying on the fact that add_utf8_text appends the text to any
703# that may exist already).
704sub process_section {
705 my $self = shift (@_);
706 my ($textref, $base_dir, $file, $doc_obj, $cursection) = @_;
[25555]707
708 my @styleTagsText = ($$textref =~ m/<style[^>]*>([^<]*)<\/style>/sg);
709 if(scalar(@styleTagsText) > 0)
710 {
711 my $css_filename_dirname = &File::Basename::dirname(&util::filename_cat($base_dir, $file));
712 foreach my $styleText (@styleTagsText)
713 {
714 $self->acquire_css_associated_files_from_text_block($styleText, $css_filename_dirname);
715 }
716 }
717
[14665]718 # trap links
719 if (!$self->{'nolinks'}) {
720 # usemap="./#index" not handled correctly => change to "#index"
[16769]721## $$textref =~ s/(<img[^>]*?usemap\s*=\s*[\"\']?)([^\"\'>\s]+)([\"\']?[^>]*>)/
722
[23392]723## my $opencom = '(?:<!--|&lt;!(?:&mdash;|&#151;|--))';
724## my $closecom = '(?:-->|(?:&mdash;|&#151;|--)&gt;)';
[23387]725
[16769]726 $$textref =~ s/(<img[^>]*?usemap\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
[14665]727 $self->replace_usemap_links($1, $2, $3)/isge;
728
[23463]729 $$textref =~ s/(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
730 $self->replace_href_links ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
[23387]731
[23392]732## $$textref =~ s/($opencom.*?)?+(<(?:a|area|frame|link|script)\s+[^>]*?\s*(?:href|src)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)(.*?$closecom)?+/
733# $self->replace_href_links ($1, $2, $3, $4, $5, $base_dir, $file, $doc_obj, $cursection)/isge;
[14665]734 }
735
736 # trap images
737
[15872]738 # Previously, by default, HTMLPlugin would embed <img> tags inside anchor tags
[15176]739 # i.e. <a href="image"><img src="image"></a> in order to overcome a problem that
740 # turned regular text succeeding images into links. That is, by embedding <img>s
741 # inside <a href=""></a>, the text following images was no longer misbehaving.
742 # However, there would be many occasions whereby images were not meant to link
743 # to their source images but where the images would link to another web page.
744 # To allow this, the no_image_links option was introduced: it would prevent
745 # the behaviour of embedding images into links that referenced the source images.
746
747 # Somewhere along the line, the problem of normal text turning into links when
748 # such text followed images which were not embedded in <a href=""></a> ceased
749 # to occur. This is why the following lines have been commented out (as well as
750 # two lines in replace_images). They appear to no longer apply.
751
752 # If at any time, there is a need for having images embedded in <a> anchor tags,
[15872]753 # then it might be better to turn that into an HTMLPlugin option rather than make
[15176]754 # it the default behaviour. Also, eventually, no_image_links needs to become
[15872]755 # a deprecated option for HTMLPlugin as it has now become the default behaviour.
[15176]756
757 #if(!$self->{'no_image_links'}){
[16247]758 $$textref =~ s/(<(?:img|embed|table|tr|td)[^>]*?(?:src|background)\s*=\s*)((?:[\"][^\"]+[\"])|(?:[\'][^\']+[\'])|(?:[^\s\/>]+))([^>]*>)/
[15872]759 $self->replace_images ($1, $2, $3, $base_dir, $file, $doc_obj, $cursection)/isge;
[15176]760 #}
761
[14665]762 # add text to document object
763 # turn \ into \\ so that the rest of greenstone doesn't think there
764 # is an escape code following. (Macro parsing loses them...)
765 $$textref =~ s/\\/\\\\/go;
766
767 $doc_obj->add_utf8_text($cursection, $$textref);
768}
769
770sub replace_images {
771 my $self = shift (@_);
772 my ($front, $link, $back, $base_dir,
773 $file, $doc_obj, $section) = @_;
774
775 # remove quotes from link at start and end if necessary
776 if ($link=~/^[\"\']/) {
[15838]777 $link=~s/^[\"\']//;
778 $link=~s/[\"\']$//;
[14665]779 $front.='"';
780 $back="\"$back";
781 }
[15872]782
[14665]783 $link =~ s/\n/ /g;
784
785 # Hack to overcome Windows wv 0.7.1 bug that causes embedded images to be broken
786 # If the Word file path has spaces in it, wv messes up and you end up with
787 # absolute paths for the images, and without the "file://" prefix
788 # So check for this special case and massage the data to be correct
[16769]789 if ($ENV{'GSDLOS'} =~ m/^windows/i && $self->{'plugin_type'} eq "WordPlug" && $link =~ m/^[A-Za-z]\:\\/) {
[14665]790 $link =~ s/^.*\\([^\\]+)$/$1/;
791 }
[16632]792
[14665]793 my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
794
795 my $img_file = $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section);
796
[17127]797# print STDERR "**** link = $link\n**** href = $href\n**** img_file = $img_file, rl = $rl\n";
[16632]798
[14665]799 my $anchor_name = $img_file;
800 #$anchor_name =~ s/^.*\///;
801 #$anchor_name = "<a name=\"$anchor_name\" ></a>";
802
803 my $image_link = $front . $img_file .$back;
[15176]804 return $image_link;
[14665]805
[15176]806 # The reasons for why the following two lines are no longer necessary can be
807 # found in subroutine process_section
808 #my $anchor_link = "<a href=\"$img_file\" >".$image_link."</a>";
809 #return $anchor_link;
810
[14665]811 #return $front . $img_file . $back . $anchor_name;
812}
813
814sub replace_href_links {
815 my $self = shift (@_);
[23392]816 my ($front, $link, $back, $base_dir, $file, $doc_obj, $section) = @_;
[25555]817
818 if($front =~ m/^<link / && $link =~ m/\.css"$/)
819 {
820 my $actual_link = $link;
821 $actual_link =~ s/^"(.*)"$/$1/;
822
823 my $directory = &File::Basename::dirname($file);
824
825 my $css_filename = &util::filename_cat($base_dir, $directory, $actual_link);
826 $self->retrieve_css_associated_files($css_filename);
827 }
828
[16769]829 # remove quotes from link at start and end if necessary
830 if ($link=~/^[\"\']/) {
831 $link=~s/^[\"\']//;
832 $link=~s/[\"\']$//;
833 $front.='"';
834 $back="\"$back";
835 }
836
[14665]837 # attempt to sort out targets - frames are not handled
838 # well in this plugin and some cases will screw things
839 # up - e.g. the _parent target (so we'll just remove
840 # them all ;-)
841 $front =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
842 $back =~ s/(target=\"?)_top(\"?)/$1_gsdltop_$2/is;
843 $front =~ s/target=\"?_parent\"?//is;
844 $back =~ s/target=\"?_parent\"?//is;
845
[25673]846 if($link =~ m/^\#/s)
847 {
848 return $front . "_httpsamepagelink_" . $link . $back;
849 }
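# Illustrative example: an in-page link such as href="#top" becomes
# href="_httpsamepagelink_#top". As noted in the change log above, the
# _httpsamepagelink_ macro is defined to be empty in Greenstone 2 and to be the
# current page's URL in Greenstone 3, so such anchors keep working with the
# RESTful URL style.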
850
[14665]851 $link =~ s/\n/ /g;
852
[16769]853 # Find file referred to by $link on file system
854 # This is more complicated than it sounds when character encodings
855 # are taken into account
[14665]856 my ($href, $hash_part, $rl) = $self->format_link ($link, $base_dir, $file);
[23835]857
[14665]858 # href may use '\'s where '/'s should be on Windows
859 $href =~ s/\\/\//g;
[16769]860 my ($filename) = $href =~ m/^(?:.*?):(?:\/\/)?(.*)/;
[14665]861
862 ##### leave all these links alone (they won't be picked up by intermediate
863 ##### pages). I think that's safest when dealing with frames, targets etc.
864 ##### (at least until I think of a better way to do it). Problems occur with
865 ##### mailto links from within small frames, the intermediate page is displayed
866 ##### within that frame and can't be seen. There is still potential for this to
867 ##### happen even with html pages - the solution seems to be to somehow tell
868 ##### the browser from the server side to display the page being sent (i.e.
869 ##### the intermediate page) in the top level window - I'm not sure if that's
870 ##### possible - the following line should probably be deleted if that can be done
[16769]871 return $front . $link . $back if $href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/is;
[14665]872
[16769]873 if (($rl == 0) || ($filename =~ m/$self->{'process_exp'}/) ||
874 ($href =~ m/\/$/) || ($href =~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i)) {
[23335]875
[23371]876 if ($ENV{'GSDLOS'} =~ m/^windows$/) {
[23335]877
[23371]878 # Don't do any encoding for now, as not clear what
879 # the right thing to do is to support filename
880 # encoding on Windows when they are not UTF16
881 #
[23347]882 }
[23371]883 else {
884 # => Unix-based system
[23335]885
[23371]886 # If web page didn't give encoding, then default to utf8
887 my $content_encoding= $self->{'content_encoding'} || "utf8";
[23835]888
[23371]889 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
890 print STDERR "**** Encoding with '$content_encoding', href: $href\n";
891 }
[23335]892
[23835]893 # on Darwin, the unicode filenames are stored on the file
894 # system in decomposed form, so any href link (including when
895 # URL-encoded) should refer to the decomposed name of the file
896 if ($ENV{'GSDLOS'} =~ /^darwin$/i) {
897 $href = normalize('D', $href); # Normalization Form D (decomposition)
898 }
899
[23371]900 $href = encode($content_encoding,$href);
901 }
902
[23835]903 $href = &unicode::raw_filename_to_utf8_url_encoded($href);
[23335]904 $href = &unicode::filename_to_url($href);
905
[16812]906 &ghtml::urlsafe ($href);
[23371]907
[23347]908 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]909 print STDERR "******DEBUG: href=$href\n";
[23347]910 }
[23335]911
[23347]912
[18521]913 return $front . "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part . $back;
[14665]914 } else {
[23335]915 # link is to some other type of file (e.g., an image) so we'll
[14665]916 # need to associate that file
917 return $front . $self->add_file ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) . $back;
918 }
919}
920
[25555]921sub retrieve_css_associated_files {
922 my $self = shift (@_);
923 my ($css_filename) = @_;
924
925 my $css_filename_dirname = &File::Basename::dirname($css_filename);
926
927 open (CSSFILE, $css_filename) || return;
928 sysread (CSSFILE, my $file_string, -s CSSFILE);
929
930 $self->acquire_css_associated_files_from_text_block($file_string, $css_filename_dirname) unless !defined $file_string;
931
932 close CSSFILE;
933}
934
935sub acquire_css_associated_files_from_text_block {
936 my $self = shift (@_);
937 my ($text, $css_filename_dirname) = @_;
938
939 my @image_urls = ($text =~ m/background-image:\s*url[^;]*;/sg);
940 foreach my $img_url (@image_urls)
941 {
942 $img_url =~ s/^.*url.*\((.*)\).*$/$1/;
943 $img_url =~ s/^\s*"?([^"]*)"?\s*$/$1/;
944
945 $self->{'css_assoc_files'}->{&util::filename_cat($css_filename_dirname, $img_url)} = $img_url;
946 }
947}
948
949sub acquire_css_associated_files {
950 my $self = shift(@_);
951
952 my ($doc_obj, $section) = @_;
953
954 foreach my $image_filename (keys %{$self->{'css_assoc_files'}})
955 {
956 $doc_obj->associate_file($image_filename, $self->{'css_assoc_files'}->{$image_filename}, undef, $section);
957 }
958}
959
[14665]960sub add_file {
961 my $self = shift (@_);
962 my ($href, $rl, $hash_part, $base_dir, $doc_obj, $section) = @_;
963 my ($newname);
964
965 my $filename = $href;
966 if ($base_dir eq "") {
[23387]967 if ($ENV{'GSDLOS'} =~ m/^windows$/i) {
968 # remove http://
969 $filename =~ s/^[^:]*:\/\///;
970 }
971 else {
972 # remove http:/ thereby leaving one slash at the start as
973 # part of full pathname
974 $filename =~ s/^[^:]*:\///;
975 }
[14665]976 }
977 else {
978 # remove http://
979 $filename =~ s/^[^:]*:\/\///;
980 }
981
982 $filename = &util::filename_cat($base_dir, $filename);
[23363]983
[22355]984 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'})) {
985 # we are processing a tidytmp file - want paths to be in import
986 $filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
987 }
[23335]988
989 # Replace %XX's in URL with decoded value if required. Note that the
990 # filename may include the %XX in some situations. If the *original*
991 # file's name was in URL encoding, the following method will not decode
992 # it.
[23387]993 my $unicode_filename = $filename;
994 my $opt_decode_unicode_filename = $self->opt_url_decode($unicode_filename);
[16769]995
[23387]996 # wvWare can generate <img src="StrangeNoGraphicData"> tags, but with no
997 # (it seems) accompanying file
998 if ($opt_decode_unicode_filename =~ m/StrangeNoGraphicData$/) { return ""; }
999
[23335]1000 my $content_encoding= $self->{'content_encoding'} || "utf8";
1001
[23387]1002 if ($ENV{'GSDLOS'} =~ /^(linux|solaris)$/i) {
1003 # The filenames that come through the HTML file have been decoded
1004 # into Unicode aware Perl strings. Need to convert them back
1005 # to their initial raw-byte encoding to match the file that
1006 # exists on the file system
1007 $filename = encode($content_encoding, $opt_decode_unicode_filename);
1008 }
1009 elsif ($ENV{'GSDLOS'} =~ /^darwin$/i) {
1010 # HFS+ is UTF8 with decomposition
1011 $filename = encode($content_encoding, $opt_decode_unicode_filename);
1012 $filename = normalize('D', $filename); # Normalization Form D (decomposition)
[23335]1013
[23387]1014 }
1015 elsif ($ENV{'GSDLOS'} =~ /^windows$/i) {
1016 my $long_filename = Win32::GetLongPathName($opt_decode_unicode_filename);
1017
1018 if (defined $long_filename) {
1019 my $short_filename = Win32::GetShortPathName($long_filename); # map the long name to its DOS 8.3 short form
1020 $filename = $short_filename;
1021 }
1022# else {
1023# print STDERR "***** failed to map href to real file:\n";
1024# print STDERR "****** $href -> $opt_decode_unicode_filename\n";
1025# }
1026 }
1027 else {
1028 my $outhandle = $self->{'outhandle'};
1029 print $outhandle "Warning: Unrecognized operating system ", $ENV{'GSDLOS'}, "\n";
1030 print $outhandle " in file system encoding of href: $href\n";
1031 print $outhandle " No character encoding done.\n";
1032 }
1033
1034
[16769]1035 # some special processing if the intended filename was converted to utf8, but
1036 # the actual file still needs to be renamed
[23363]1037 if (!&util::fd_exists($filename)) {
[16769]1038 # try the original filename stored in map
[23347]1039 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23363]1040 print STDERR "******!! orig filename did not exist: $filename\n";
[23347]1041 }
[23335]1042
[23387]1043## print STDERR "**** trying to look up unicode_filename: $unicode_filename\n";
[23363]1044
[23387]1045 my $original_filename = $self->{'unicode_to_original_filename'}->{$unicode_filename};
[23335]1046
[23347]1047 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23387]1048 print STDERR "****** From lookup unicode_filename, now trying for: $original_filename\n";
[23347]1049 }
[23335]1050
[16920]1051 if (defined $original_filename && -e $original_filename) {
[23347]1052 if ((defined $ENV{"DEBUG_UNICODE"}) && ($ENV{"DEBUG_UNICODE"})) {
[23363]1053 print STDERR "****** Found match!\n";
[23347]1054 }
[16769]1055 $filename = $original_filename;
[14665]1056 }
1057 }
[16769]1058
1059 my ($ext) = $filename =~ m/(\.[^\.]*)$/;
[14665]1060
1061 if ($rl == 0) {
[16769]1062 if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
[18521]1063 return "_httpextlink_&amp;rl=0&amp;el=prompt&amp;href=" . $href . $hash_part;
[14665]1064 }
1065 else {
[18521]1066 return "_httpextlink_&amp;rl=0&amp;el=direct&amp;href=" . $href . $hash_part;
[14665]1067 }
1068 }
1069
[16769]1070 if ((!defined $ext) || ($ext !~ m/$self->{'assoc_files'}/)) {
[18521]1071 return "_httpextlink_&amp;rl=" . $rl . "&amp;href=" . $href . $hash_part;
[14665]1072 }
[20778]1073 # add the original image file as a source file
[20791]1074 if (!$self->{'processing_tmp_files'} ) {
1075 $doc_obj->associate_source_file($filename);
1076 }
[14665]1077 if ($self->{'rename_assoc_files'}) {
1078 if (defined $self->{'aux_files'}->{$href}) {
1079 $newname = $self->{'aux_files'}->{$href}->{'dir_num'} . "/" .
1080 $self->{'aux_files'}->{$href}->{'file_num'} . $ext;
1081 } else {
1082 $newname = $self->{'dir_num'} . "/" . $self->{'file_num'} . $ext;
1083 $self->{'aux_files'}->{$href} = {'dir_num' => $self->{'dir_num'}, 'file_num' => $self->{'file_num'}};
1084 $self->inc_filecount ();
1085 }
1086 $doc_obj->associate_file($filename, $newname, undef, $section);
1087 return "_httpdocimg_/$newname";
1088 } else {
[23387]1089 if(&unicode::is_url_encoded($unicode_filename)) {
[16904]1090 # use the possibly-decoded filename instead to avoid double URL encoding
1091 ($newname) = $filename =~ m/([^\/\\]*)$/;
1092 } else {
[23387]1093 ($newname) = $unicode_filename =~ m/([^\/\\]*)$/;
[16904]1094 }
[16935]1095
[18320]1096 # Make sure this name uses only ASCII characters.
1097 # We use either base64 or URL encoding, as these preserve original encoding
1098 $newname = &util::rename_file($newname, $self->{'file_rename_method'});
[16632]1099
[23363]1100### print STDERR "***** associating $filename (raw-byte/utf8)-> $newname\n";
[14665]1101 $doc_obj->associate_file($filename, $newname, undef, $section);
[16632]1102
[16769]1103 # Since the generated image will be URL-encoded to avoid file-system/browser mess-ups
1104 # of filenames, URL-encode the additional percent signs of the URL-encoded filename
[16632]1105 my $newname_url = $newname;
[18404]1106 $newname_url = &unicode::filename_to_url($newname_url);
[16769]1107 return "_httpdocimg_/$newname_url";
[14665]1108 }
1109}
1110
1111
1112sub format_link {
1113 my $self = shift (@_);
1114 my ($link, $base_dir, $file) = @_;
1115
[23371]1116 # strip off hash part, e.g. #foo, but watch out for any entities, e.g. &#x3B1;
1117 my ($before_hash, $hash_part) = $link =~ m/^(.*?[^&])(\#.*)?$/;
[23463]1118
[14665]1119 $hash_part = "" if !defined $hash_part;
[16769]1120 if (!defined $before_hash || $before_hash !~ m/[\w\.\/]/) {
[23463]1121 my $outhandle = $self->{'outhandle'};
1122 print $outhandle "HTMLPlugin: ERROR - badly formatted tag ignored ($link)\n"
1123 if $self->{'verbosity'};
1124 return ($link, "", 0);
[14665]1125 }
[23463]1126
[20576]1127 if ($before_hash =~ s@^((?:http|https|ftp|file|mms)://)@@i) {
[23463]1128 my $type = $1;
[14665]1129
[16769]1130 if ($link =~ m/^(http|ftp):/i) {
[14665]1131 # Turn url (using /) into file name (possibly using \ on windows)
1132 my @http_dir_split = split('/', $before_hash);
1133 $before_hash = &util::filename_cat(@http_dir_split);
1134 }
1135
1136 $before_hash = $self->eval_dir_dots($before_hash);
[16024]1137
[14665]1138 my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
1139
1140 my $rl = 0;
1141 $rl = 1 if (-e $linkfilename);
1142
1143 # make sure there's a slash on the end if it's a directory
[16769]1144 if ($before_hash !~ m/\/$/) {
[14665]1145 $before_hash .= "/" if (-d $linkfilename);
1146 }
1147 return ($type . $before_hash, $hash_part, $rl);
[16024]1148
[16769]1149 } elsif ($link !~ m/^(mailto|news|gopher|nntp|telnet|javascript):/i && $link !~ m/^\//) {
[14665]1150
[16769]1151 if ($before_hash =~ s@^/@@ || $before_hash =~ m/\\/) {
1152
[14665]1153 # the first directory will be the domain name if file_is_url
1154 # to generate archives, otherwise we'll assume all files are
1155 # from the same site and base_dir is the root
1156
1157 if ($self->{'file_is_url'}) {
1158 my @dirs = split /[\/\\]/, $file;
1159 my $domname = shift (@dirs);
1160 $before_hash = &util::filename_cat($domname, $before_hash);
1161 $before_hash =~ s@\\@/@g; # for windows
1162 }
1163 else
1164 {
1165 # see if link shares directory with source document
1166 # => turn into relative link if this is so!
1167
[16769]1168 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
[14665]1169 # too difficult doing a pattern match with embedded '\'s...
1170 my $win_before_hash=$before_hash;
1171 $win_before_hash =~ s@(\\)+@/@g;
1172 # $base_dir is already similarly "converted" on windows.
1173 if ($win_before_hash =~ s@^$base_dir/@@o) {
[16024]1174 # if this is true, we removed a prefix
1175 $before_hash=$win_before_hash;
[14665]1176 }
1177 }
1178 else {
1179 # before_hash has lost leading slash by this point,
1180 # -> add back in prior to substitution with $base_dir
1181 $before_hash = "/$before_hash";
1182
1183 $before_hash = &util::filename_cat("",$before_hash);
1184 $before_hash =~ s@^$base_dir/@@;
1185 }
1186 }
1187 } else {
1188 # Turn relative file path into full path
1189 my $dirname = &File::Basename::dirname($file);
1190 $before_hash = &util::filename_cat($dirname, $before_hash);
[16769]1191 $before_hash = $self->eval_dir_dots($before_hash);
[14665]1192 }
1193
1194 my $linkfilename = &util::filename_cat ($base_dir, $before_hash);
[23387]1195
1196
1197# print STDERR "**** linkfilename = $linkfilename\n";
1198# if (!&util::fd_exists($linkfilename)) {
1199# print STDERR "***** Warning: Could not find $linkfilename\n";
1200# }
1201
1202
[14665]1203 # make sure there's a slash on the end if it's a directory
[16769]1204 if ($before_hash !~ m/\/$/) {
[14665]1205 $before_hash .= "/" if (-d $linkfilename);
1206 }
[23387]1207
1208# print STDERR "*** returning: $before_hash\n";
1209
[14665]1210 return ("http://" . $before_hash, $hash_part, 1);
1211 } else {
1212 # mailto, news, nntp, telnet, javascript or gopher link
1213 return ($before_hash, "", 0);
1214 }
1215}
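# Rough sketch of format_link's behaviour (illustrative, not exhaustive):
#  - an absolute link such as "http://example.com/a/b.gif" has its protocol
#    stripped and is mapped to a path under $base_dir; $rl is 1 only if that
#    local file exists
#  - a relative link such as "images/pic.gif" inside "docs/page.html" is resolved
#    against the page's directory and comes back as "http://docs/images/pic.gif"
#    with $rl set to 1
#  - mailto, news, nntp, telnet, javascript and gopher links fall through to the
#    final branch and are returned with $rl set to 0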
1216
1217sub extract_first_NNNN_characters {
1218 my $self = shift (@_);
1219 my ($textref, $doc_obj, $thissection) = @_;
1220
1221 foreach my $size (split /,/, $self->{'first'}) {
1222 my $tmptext = $$textref;
1223 # skip to the body
1224 $tmptext =~ s/.*<body[^>]*>//i;
1225 # remove javascript
1226 $tmptext =~ s@<script.*?</script>@ @sig;
1227 $tmptext =~ s/<[^>]*>/ /g;
1228 $tmptext =~ s/&nbsp;/ /g;
1229 $tmptext =~ s/^\s+//;
1230 $tmptext =~ s/\s+$//;
1231 $tmptext =~ s/\s+/ /gs;
1232 $tmptext = &unicode::substr ($tmptext, 0, $size);
1233 $tmptext =~ s/\s\S*$/&#8230;/; # adds an ellipsis (...)
1234 $doc_obj->add_utf8_metadata ($thissection, "First$size", $tmptext);
1235 }
1236}
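# Illustrative example: if $self->{'first'} is "50,100", this adds "First50" and
# "First100" metadata, each holding the opening characters of the body text with
# tags stripped and a trailing ellipsis.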
1237
1238
1239sub extract_metadata {
1240 my $self = shift (@_);
1241 my ($textref, $metadata, $doc_obj, $section) = @_;
1242 my $outhandle = $self->{'outhandle'};
1243 # if we don't want metadata, we may as well not be here ...
1244 return if (!defined $self->{'metadata_fields'});
[22348]1245
[21800]1246 my $separator = $self->{'metadata_field_separator'};
1247 if ($separator eq "") {
1248 undef $separator;
1249 }
1250
[14665]1251 # metadata fields to extract/save. 'key' is the (lowercase) name of the
1252 # html meta, 'value' is the metadata name for greenstone to use
1253 my %find_fields = ();
1254
1255 my %creator_fields = (); # short-cut for lookups
1256
1257
1258 foreach my $field (split /,/, $self->{'metadata_fields'}) {
1259 $field =~ s/^\s+//; # remove leading whitespace
1260 $field =~ s/\s+$//; # remove trailing whitespace
[22348]1261
[14665]1262 # support tag<tagname>
[20689]1263 if ($field =~ m/^(.*?)\s*<(.*?)>$/) {
[14665]1264 # "$2" is the user's preferred gs metadata name
1265 $find_fields{lc($1)}=$2; # lc = lowercase
1266 } else { # no <tagname> for mapping
1267 # "$field" is the user's preferred gs metadata name
1268 $find_fields{lc($field)}=$field; # lc = lowercase
1269 }
1270 }
1271
1272 if (defined $self->{'hunt_creator_metadata'} &&
1273 $self->{'hunt_creator_metadata'} == 1 ) {
1274 my @extra_fields =
1275 (
1276 'author',
1277 'author.email',
1278 'creator',
1279 'dc.creator',
1280 'dc.creator.corporatename',
1281 );
1282
1283 # add the creator_metadata fields to search for
1284 foreach my $field (@extra_fields) {
1285 $creator_fields{$field}=0; # add to lookup hash
1286 }
1287 }
1288
1289
1290 # find the header in the html file, which has the meta tags
1291 $$textref =~ m@<head>(.*?)</head>@si;
1292
1293 my $html_header=$1;
1294
1295 # go through every <meta... tag defined in the html and see if it is
1296 # one of the tags we want to match.
1297
1298 # special case for title - we want to remember if its been found
1299 my $found_title = 0;
1300 # this assumes that ">" won't appear. (I don't think it's allowed to...)
[16769]1301 $html_header =~ m/^/; # match the start of the string, for \G assertion
[16024]1302
[14665]1303 while ($html_header =~ m/\G.*?<meta(.*?)>/sig) {
1304 my $metatag=$1;
1305 my ($tag, $value);
1306
1307 # find the tag name
[16769]1308 $metatag =~ m/(?:name|http-equiv)\s*=\s*([\"\'])?(.*?)\1/is;
[14665]1309 $tag=$2;
1310 # in case they're not using " or ', but they should...
1311 if (! $tag) {
[16769]1312 $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
[14665]1313 $tag=$1;
1314 }
1315
1316 if (!defined $tag) {
[15872]1317 print $outhandle "HTMLPlugin: can't find NAME in \"$metatag\"\n";
[14665]1318 next;
1319 }
1320
1321 # don't need to assign this field if it was passed in from a previous
1322 # (recursive) plugin
1323 if (defined $metadata->{$tag}) {next}
1324
1325 # find the tag content
[16769]1326 $metatag =~ m/content\s*=\s*([\"\'])?(.*?)\1/is;
[14665]1327 $value=$2;
1328
[24431]1329 # The following code assigns the metaname to value if value is
1330 # empty. Why would we do this?
1331 #if (! $value) {
1332 # $metatag =~ m/(?:name|http-equiv)\s*=\s*([^\s\>]+)/is;
1333 # $value=$1;
1334 #}
1335 if (!defined $value || $value eq "") {
1336 print $outhandle "HTMLPlugin: can't find VALUE in <meta $metatag >\n" if ($self->{'verbosity'} > 2);
[14665]1337 next;
1338 }
[22348]1339
[14665]1340 # clean up and add
1341 $value =~ s/\s+/ /gs;
1342 chomp($value); # remove trailing \n, if any
1343 if (exists $creator_fields{lc($tag)}) {
1344 # map this value onto greenstone's "Creator" metadata
1345 $tag='Creator';
1346 } elsif (!exists $find_fields{lc($tag)}) {
[16024]1347 next; # don't want this tag
[14665]1348 } else {
1349 # get the user's preferred capitalisation
1350 $tag = $find_fields{lc($tag)};
1351 }
1352 if (lc($tag) eq "title") {
1353 $found_title = 1;
1354 }
[18521]1355
1356 if ($self->{'verbosity'} > 2) {
1357 print $outhandle " extracted \"$tag\" metadata \"$value\"\n";
[14665]1358 }
[18521]1359
[22348]1360 if ($tag =~ /\./) {
1361 # there is a . so has a namespace, add ex.
1362 $tag = "ex.$tag";
1363 }
[21800]1364 if (defined $separator) {
1365 my @values = split($separator, $value);
1366 foreach my $v (@values) {
1367 $doc_obj->add_utf8_metadata($section, $tag, $v) if $v =~ /\S/;
1368 }
1369 }
1370 else {
1371 $doc_obj->add_utf8_metadata($section, $tag, $value);
1372 }
[14665]1373 }
1374
1375 # TITLE: extract the document title
1376 if (exists $find_fields{'title'} && !$found_title) {
1377 # we want a title, and didn't find one in the meta tags
1378 # see if there's a <title> tag
1379 my $title;
1380 my $from = ""; # for debugging output only
[16769]1381 if ($html_header =~ m/<title[^>]*>([^<]+)<\/title[^>]*>/is) {
[14665]1382 $title = $1;
1383 $from = "<title> tags";
1384 }
1385
1386 if (!defined $title) {
1387 $from = "first 100 chars";
1388 # if no title use first 100 or so characters
1389 $title = $$textref;
1390 $title =~ s/^\xFE\xFF//; # Remove unicode byte order mark
1391 $title =~ s/^.*?<body>//si;
1392 # ignore javascript!
1393 $title =~ s@<script.*?</script>@ @sig;
1394 $title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
1395 $title =~ s/<[^>]*>/ /g; # remove all HTML tags
1396 $title = substr ($title, 0, 100);
1397 $title =~ s/\s\S*$/.../;
1398 }
1399 $title =~ s/<[^>]*>/ /g; # remove html tags
1400 $title =~ s/&nbsp;/ /g;
1401 $title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
1402 $title =~ s/\s+/ /gs; # collapse multiple spaces
1403 $title =~ s/^\s*//; # remove leading spaces
1404 $title =~ s/\s*$//; # remove trailing spaces
1405
1406 $title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
1407 $title =~ s/^\s+//s; # in case title_sub introduced any...
[23335]1408 $doc_obj->add_utf8_metadata ($section, "Title", $title);
[14665]1409 print $outhandle " extracted Title metadata \"$title\" from $from\n"
1410 if ($self->{'verbosity'} > 2);
1411 }
1412
1413 # add FileFormat metadata
1414 $doc_obj->add_metadata($section,"FileFormat", "HTML");
1415
1416 # Special, for metadata names such as tagH1 - extracts
1417 # the text between the first <H1> and </H1> tags into "H1" metadata.
1418
1419 foreach my $field (keys %find_fields) {
[16769]1420 if ($field !~ m/^tag([a-z0-9]+)$/i) {next}
[14665]1421 my $tag = $1;
1422 if ($$textref =~ m@<$tag[^>]*>(.*?)</$tag[^>]*>@g) {
1423 my $content = $1;
1424 $content =~ s/&nbsp;/ /g;
1425 $content =~ s/<[^>]*>/ /g;
1426 $content =~ s/^\s+//;
1427 $content =~ s/\s+$//;
1428 $content =~ s/\s+/ /gs;
1429 if ($content) {
1430 $tag=$find_fields{"tag$tag"}; # get the user's capitalisation
1431 $tag =~ s/^tag//i;
1432 $doc_obj->add_utf8_metadata ($section, $tag, $content);
1433 print $outhandle " extracted \"$tag\" metadata \"$content\"\n"
1434 if ($self->{'verbosity'} > 2);
1435 }
1436 }
1437 }
1438}
1439
1440
1441# evaluate any "../" to next directory up
1442# evaluate any "./" as here
1443sub eval_dir_dots {
1444 my $self = shift (@_);
1445 my ($filename) = @_;
1446 my $dirsep_os = &util::get_os_dirsep();
1447 my @dirsep = split(/$dirsep_os/,$filename);
1448
1449 my @eval_dirs = ();
1450 foreach my $d (@dirsep) {
1451 if ($d eq "..") {
1452 pop(@eval_dirs);
1453
1454 } elsif ($d eq ".") {
1455 # do nothing!
1456
1457 } else {
1458 push(@eval_dirs,$d);
1459 }
1460 }
1461
1462 # Need to fiddle with number of elements in @eval_dirs if the
1463 # first one is the empty string. This is because of a
1464 # modification to util::filename_cat that suppresses the addition
1465 # of a leading '/' character (or \ if windows) (intended to help
1466 # filename cat with relative paths) if the first entry in the
1467 # array is the empty string. Making the array start with *two*
1468 # empty strings is a way to defeat this "smart" option.
1469 #
1470 if (scalar(@eval_dirs) > 0) {
1471 if ($eval_dirs[0] eq ""){
1472 unshift(@eval_dirs,"");
1473 }
1474 }
[16836]1475
1476 my $evaluated_filename = (scalar @eval_dirs > 0) ? &util::filename_cat(@eval_dirs) : "";
1477 return $evaluated_filename;
[14665]1478}
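# Illustrative example (not part of the original code):
#   $self->eval_dir_dots("archive/../import/./docs/file.html")
# returns "import/docs/file.html" (joined with the platform's directory
# separator): each ".." pops the preceding component and each "." is dropped.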
1479
1480sub replace_usemap_links {
1481 my $self = shift (@_);
1482 my ($front, $link, $back) = @_;
1483
[16769]1484 # remove quotes from link at start and end if necessary
1485 if ($link=~/^[\"\']/) {
1486 $link=~s/^[\"\']//;
1487 $link=~s/[\"\']$//;
1488 $front.='"';
1489 $back="\"$back";
1490 }
1491
[14665]1492 $link =~ s/^\.\///;
1493 return $front . $link . $back;
1494}
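# Illustrative example (not part of the original code): if the caller passes
# the usemap value with its quotes, e.g. $link = '"./sitemap#top"', the code
# above shifts the quotes onto $front/$back and strips the leading "./",
# so the reassembled attribute becomes "sitemap#top".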
1495
1496sub inc_filecount {
1497 my $self = shift (@_);
1498
1499 if ($self->{'file_num'} == 1000) {
1500 $self->{'dir_num'} ++;
1501 $self->{'file_num'} = 0;
1502 } else {
1503 $self->{'file_num'} ++;
1504 }
1505}
1506
1507
[15872]1508# Extend read_file so that strings like &eacute; are
[14665]1509# converted to UTF8 internally.
1510#
1511# We don't convert &lt; or &gt; or &amp; or &quot; in case
1512# they interfere with the GML files
1513
1514sub read_file {
[15872]1515 my $self = shift(@_);
1516 my ($filename, $encoding, $language, $textref) = @_;
[14665]1517
[15872]1518 $self->SUPER::read_file($filename, $encoding, $language, $textref);
[14665]1519
[23363]1520 # Convert entities to their Unicode code-point equivalents
[14665]1521 $$textref =~ s/&(lt|gt|amp|quot|nbsp);/&z$1;/go;
[22951]1522 $$textref =~ s/&([^;]+);/&ghtml::getcharequiv($1,1,1)/gseo;
[14665]1523 $$textref =~ s/&z(lt|gt|amp|quot|nbsp);/&$1;/go;
[22842]1524
[14665]1525}
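# Illustrative example (not part of the original code): for a fragment such as
#   caf&eacute; &amp; bar
# the substitutions above first rename &amp; to &zamp; so it is skipped by the
# general entity pass, convert &eacute; to its literal character, and then
# restore &zamp; to &amp; -- leaving "café &amp; bar".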
1526
[20774]1527sub HB_read_html_file {
1528 my $self = shift (@_);
1529 my ($htmlfile, $text) = @_;
1530
1531 # load in the file
1532 if (!open (FILE, $htmlfile)) {
1533 print STDERR "ERROR - could not open $htmlfile\n";
1534 return;
1535 }
1536
1537 my $foundbody = 0;
1538 $self->HB_gettext (\$foundbody, $text, "FILE");
1539 close FILE;
1540
1541 # just in case there was no <body> tag
1542 if (!$foundbody) {
1543 $foundbody = 1;
1544 open (FILE, $htmlfile) || return;
1545 $self->HB_gettext (\$foundbody, $text, "FILE");
1546 close FILE;
1547 }
1548 # text is in utf8
1549}
1550
1551# converts the text to utf8, as ghtml does that for &eacute; etc.
1552sub HB_gettext {
1553 my $self = shift (@_);
1554 my ($foundbody, $text, $handle) = @_;
1555
1556 my $line = "";
1557 while (defined ($line = <$handle>)) {
1558 # look for body tag
1559 if (!$$foundbody) {
1560 if ($line =~ s/^.*<body[^>]*>//i) {
1561 $$foundbody = 1;
1562 } else {
1563 next;
1564 }
1565 }
1566
1567 # check for symbol fonts
1568 if ($line =~ m/<font [^>]*?face\s*=\s*\"?(\w+)\"?/i) {
1569 my $font = $1;
1570 print STDERR "HBPlug::HB_gettext - warning removed font $font\n"
1571 if ($font !~ m/^arial$/i);
1572 }
1573
1574 $$text .= $line;
1575 }
1576
1577 if ($self->{'input_encoding'} eq "iso_8859_1") {
1578 # convert to utf-8
1579 $$text=&unicode::unicode2utf8(&unicode::convert2unicode("iso_8859_1", $text));
1580 }
1581 # convert any alphanumeric character entities to their utf-8
1582 # equivalent for indexing purposes
1583 #&ghtml::convertcharentities ($$text);
1584
1585 $$text =~ s/\s+/ /g; # remove \n's
[22857]1586
1587 # At this point $$text is a binary byte string
1588 # => turn it into a Unicode aware string, so full
1589 # Unicode aware pattern matching can be used.
1590 # For instance: 's/\x{0101}//g' or '[[:upper:]]'
1591 #
1592
1593 $$text = decode("utf8",$$text);
[20774]1594}
1595
1596sub HB_clean_section {
1597 my $self = shift (@_);
1598 my ($section) = @_;
1599
1600 # remove tags without a starting tag from the section
1601 my ($tag, $tagstart);
1602 while ($section =~ m/<\/([^>]{1,10})>/) {
1603 $tag = $1;
1604 $tagstart = index($section, "<$tag");
1605 last if (($tagstart >= 0) && ($tagstart < index($section, "<\/$tag")));
1606 $section =~ s/<\/$tag>//;
1607 }
1608
1609 # remove extra paragraph tags
1610 while ($section =~ s/<p\b[^>]*>\s*<p\b/<p/ig) {}
1611
1612 # remove extra stuff at the end of the section
1613 while ($section =~ s/(<u>|<i>|<b>|<p\b[^>]*>|&nbsp;|\s)$//i) {}
1614
1615 # add a newline at the beginning of each paragraph
1616 $section =~ s/(.)\s*<p\b/$1\n\n<p/gi;
1617
1618 # add a newline every 80 characters at a word boundary
1619 # Note: this regular expression puts a line feed before
1620 # the last word in each section, even when it is not
1621 # needed.
1622 $section =~ s/(.{1,80})\s/$1\n/g;
1623
1624 # fix up the image links
1625 $section =~ s/<img[^>]*?src=\"?([^\">]+)\"?[^>]*>/
1626 <center><img src=\"$1\" \/><\/center><br\/>/ig;
1627 $section =~ s/&lt;&lt;I&gt;&gt;\s*([^\.]+\.(png|jpg|gif))/
1628 <center><img src=\"$1\" \/><\/center><br\/>/ig;
1629
1630 return $section;
1631}
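# Illustrative example (not part of the original code): the image fix-ups above
# turn either an ordinary image tag or an old-style escaped <<I>> marker, e.g.
#   <img src="fig1.gif">        or        &lt;&lt;I&gt;&gt; fig1.gif
# into a centred image:
#   <center><img src="fig1.gif" /></center><br/>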
1632
1633# Converts the old HDL format to the new HDL format (using the Section tag)
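# Illustrative example (not part of the original code): in the old HDL format a
# heading is marked with an escaped <<TOCn>> marker, e.g.
#   <p>&lt;&lt;TOC1&gt;&gt; Introduction
#   <p>Some body text...
# The loop below emits a commented-out <Section> skeleton whose Title metadata
# is "Introduction", followed by the cleaned section text, nesting and closing
# Sections according to the TOC level.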
1634sub convert_to_newHDLformat
1635{
1636 my $self = shift (@_);
1637 my ($file,$cnfile) = @_;
1638 my $input_filename = $file;
1639 my $tmp_filename = $cnfile;
1640
1641 # write HTML tmp file with new HDL format
1642 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1643
1644 # read in the file and do basic html cleaning (removing header etc)
1645 my $html = "";
1646 $self->HB_read_html_file ($input_filename, \$html);
1647
1648 # process the file one section at a time
1649 my $curtoclevel = 1;
1650 my $firstsection = 1;
1651 my $toclevel = 0;
1652 while (length ($html) > 0) {
1653 if ($html =~ s/^.*?(?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC(\d+)&gt;&gt;\s*(.*?)<p\b/<p/i) {
1654 $toclevel = $3;
1655 my $title = $4;
1656 my $sectiontext = "";
1657 if ($html =~ s/^(.*?)((?:<p\b[^>]*>)?((<b>|<i>|<u>|\s)*)&lt;&lt;TOC\d+&gt;&gt;)/$2/i) {
1658 $sectiontext = $1;
1659 } else {
1660 $sectiontext = $html;
1661 $html = "";
1662 }
1663
1664 # remove tags and extra spaces from the title
1665 $title =~ s/<\/?[^>]+>//g;
1666 $title =~ s/^\s+|\s+$//g;
1667
1668 # close any sections below the current level and
1669 # create a new section (special case for the firstsection)
1670 print PROD "<!--\n";
1671 while (($curtoclevel > $toclevel) ||
1672 (!$firstsection && $curtoclevel == $toclevel)) {
1673 $curtoclevel--;
1674 print PROD "</Section>\n";
1675 }
1676 if ($curtoclevel+1 < $toclevel) {
1677 print STDERR "WARNING - jump in toc levels in $input_filename " .
1678 "from $curtoclevel to $toclevel\n";
1679 }
1680 while ($curtoclevel < $toclevel) {
1681 $curtoclevel++;
1682 }
1683
1684 if ($curtoclevel == 1) {
1685 # add the header tag
1686 print PROD "-->\n";
1687 print PROD "<HTML>\n<HEAD>\n<TITLE>$title</TITLE>\n</HEAD>\n<BODY>\n";
1688 print PROD "<!--\n";
1689 }
1690
1691 print PROD "<Section>\n\t<Description>\n\t\t<Metadata name=\"Title\">$title</Metadata>\n\t</Description>\n";
1692
1693 print PROD "-->\n";
1694
1695 # clean up the section html
1696 $sectiontext = $self->HB_clean_section($sectiontext);
1697
1698 print PROD "$sectiontext\n";
1699
1700 } else {
1701 print STDERR "WARNING - leftover text\n" , $self->shorten($html),
1702 "\nin $input_filename\n";
1703 last;
1704 }
1705 $firstsection = 0;
1706 }
1707
1708 print PROD "<!--\n";
1709 while ($curtoclevel > 0) {
1710 $curtoclevel--;
1711 print PROD "</Section>\n";
1712 }
1713 print PROD "-->\n";
1714
1715 close (PROD) || die("Error Closing File: $tmp_filename $!");
1716
1717 return $tmp_filename;
1718}
1719
1720sub shorten {
1721 my $self = shift (@_);
1722 my ($text) = @_;
1723
1724 return "\"$text\"" if (length($text) < 100);
1725
1726 return "\"" . substr ($text, 0, 50) . "\" ... \"" .
1727 substr ($text, length($text)-50) . "\"";
1728}
1729
1730sub convert_tidy_or_oldHDL_file
1731{
1732 my $self = shift (@_);
1733 my ($file) = @_;
1734 my $input_filename = $file;
1735
1736 if (-d $input_filename)
1737 {
1738 return $input_filename;
1739 }
1740
1741 # get the input filename
1742 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
1743 my $base_dirname = $dirname;
1744 $suffix = lc($suffix);
1745
1746 # derive tmp filename from input filename
1747 # Remove any whitespace from the filename -- no risk of name collision, and
1748 # it makes later conversion by utilities simpler. Spaces in the path are left alone.
1749 # Dots and hyphens are stripped from the tail name as well.
1750 $tailname =~ s/\s+//g;
1751 $tailname =~ s/\.+//g;
1752 $tailname =~ s/\-+//g;
1753 # convert to utf-8 otherwise we have problems with the doc.xml file
1754 # later on
1755 &unicode::ensure_utf8(\$tailname);
1756
1757 # set up a working area under the collection's tidytmp directory
1758 my $tmp_dirname = &util::filename_cat($ENV{'GSDLCOLLECTDIR'}, "tidytmp");
1759 &util::mk_dir($tmp_dirname) if (!-e $tmp_dirname);
1760
1761 my $test_dirname = "";
1762 my $f_separator = &util::get_os_dirsep();
1763
1764 if ($dirname =~ m/import$f_separator/)
1765 {
1766 $test_dirname = $'; #'
1767
1768 #print STDERR "init $'\n";
1769
1770 while ($test_dirname =~ m/[$f_separator]/)
1771 {
1772 my $folderdirname = $`;
1773 $tmp_dirname = &util::filename_cat($tmp_dirname,$folderdirname);
1774 &util::mk_dir($tmp_dirname) if (!-e $tmp_dirname);
1775 $test_dirname = $'; #'
1776 }
1777 }
1778
1779 my $tmp_filename = &util::filename_cat($tmp_dirname, "$tailname$suffix");
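# Illustrative example (not part of the original code): for an input file such as
#   .../collect/mycol/import/books/ch1.html
# the loop above recreates the "books" sub-directory under the collection's
# tidytmp directory, so $tmp_filename becomes
#   .../collect/mycol/tidytmp/books/ch1.html
# (with whitespace, dots and hyphens already removed from the tail name).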
1780
1781 # tidy or convert the input file if it is an HTML-like file or one accepted by the process_exp
1782 if (($suffix eq ".htm") || ($suffix eq ".html") || ($suffix eq ".shtml"))
1783 {
1784 #convert the input file to a new style HDL
1785 my $hdl_output_filename = $input_filename;
1786 if ($self->{'old_style_HDL'})
1787 {
1788 $hdl_output_filename = &util::filename_cat($tmp_dirname, "$tailname$suffix");
1789 $hdl_output_filename = $self->convert_to_newHDLformat($input_filename,$hdl_output_filename);
1790 }
1791
1792 # copy any other files from the base dir into the tmp dir if they are not already there
1793 opendir(DIR,$base_dirname) or die "Can't open base directory : $base_dirname!";
1794 my @files = grep {!/^\.+$/} readdir(DIR);
1795 closedir(DIR);
1796
1797 foreach my $file (@files)
1798 {
1799 my $src_file = &util::filename_cat($base_dirname,$file);
1800 my $dest_file = &util::filename_cat($tmp_dirname,$file);
1801 if ((!-e $dest_file) && (!-d $src_file))
1802 {
1803 # just copy the original file back to the tmp directory
1804 copy($src_file,$dest_file) or die "Can't copy file $src_file to $dest_file $!";
1805 }
1806 }
1807
1808 # tidy the input file
1809 my $tidy_output_filename = $hdl_output_filename;
1810 if ($self->{'use_realistic_book'})
1811 {
1812 $tidy_output_filename = &util::filename_cat($tmp_dirname, "$tailname$suffix");
1813 $tidy_output_filename = $self->tmp_tidy_file($hdl_output_filename,$tidy_output_filename);
1814 }
1815 $tmp_filename = $tidy_output_filename;
1816 }
1817 else
1818 {
1819 if (!-e $tmp_filename)
1820 {
1821 # just copy the original file back to the tmp directory
1822 copy($input_filename,$tmp_filename) or die "Can't copy file $input_filename to $tmp_filename $!";
1823 }
1824 }
1825
1826 return $tmp_filename;
1827}
1828
1829
[20774]1830# Converts the HTML input file into a proper XML file, removing font tags and
1831# adding image size attributes to the img tags.
1832# The tidying process takes place in a collection specific 'tmp' directory so
1833# that we don't accidentally damage the input.
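# Illustrative example (not part of the original code): a fragment such as
#   <font face="Arial"><img src="pic.jpg"></font>
# comes out of the token loop below as roughly
#   <img src="pic.jpg" width="640" height="480">
# (dimensions as reported by Image::Size, attribute order not guaranteed), and
# the whole file is then run through a command along the lines of
#   tidy -q -raw -wrap 0 -asxml "<tmp file>" 2>/dev/null
# to make it XML compliant.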
1834sub tmp_tidy_file
1835{
1836 my $self = shift (@_);
1837 my ($file,$cnfile) = @_;
1838 my $input_filename = $file;
1839 my $tmp_filename = $cnfile;
1840
1841 # get the input filename
1842 my ($tailname, $dirname, $suffix) = &File::Basename::fileparse($input_filename, "\\.[^\\.]+\$");
1843
1844 require HTML::TokeParser::Simple;
1845
1846 # create HTML parser to decode the input file
1847 my $parser = HTML::TokeParser::Simple->new($input_filename);
1848
1849 # write the HTML tmp file with font tags removed and image sizes added to the img tags
1850 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1851 while (my $token = $parser->get_token())
1852 {
1853 # is it an img tag
1854 if ($token->is_start_tag('img'))
1855 {
1856 # get the attributes
1857 my $attr = $token->return_attr;
1858
1859 # get the full path to the image
1860 my $img_file = &util::filename_cat($dirname,$attr->{src});
1861
1862 # set the width and height attribute
1863 ($attr->{width}, $attr->{height}) = imgsize($img_file);
1864
1865 # recreate the tag
1866 print PROD "<img";
1867 print PROD map { qq { $_="$attr->{$_}"} } keys %$attr;
1868 print PROD ">";
1869 }
1870 # is it a font tag
1871 else
1872 {
1873 if (($token->is_start_tag('font')) || ($token->is_end_tag('font')))
1874 {
1875 # remove font tag
1876 print PROD "";
1877 }
1878 else
1879 {
1880 # print without changes
1881 print PROD $token->as_is;
1882 }
1883 }
1884 }
1885 close (PROD) || die("Error Closing File: $tmp_filename $!");
1886
1887 # run html-tidy on the tmp file to make it a proper XML file
1888
[22594]1889 my $outhandle = $self->{'outhandle'};
1890 print $outhandle "Converting HTML to be XML compliant:\n";
1891
1892 my $tidy_cmd = "tidy";
1893 $tidy_cmd .= " -q" if ($self->{'verbosity'} <= 2);
[22636]1894 $tidy_cmd .= " -raw -wrap 0 -asxml \"$tmp_filename\"";
[22594]1895 if ($self->{'verbosity'} <= 2) {
1896 if ($ENV{'GSDLOS'} =~ m/^windows/i) {
1897 $tidy_cmd .= " 2>nul";
1898 }
1899 else {
1900 $tidy_cmd .= " 2>/dev/null";
1901 }
1902 print $outhandle " => $tidy_cmd\n";
1903 }
1904
1905 my $tidyfile = `$tidy_cmd`;
1906
[20774]1907 # write result back to the tmp file
1908 open (PROD, ">$tmp_filename") || die("Error Writing to File: $tmp_filename $!");
1909 print PROD $tidyfile;
1910 close (PROD) || die("Error Closing File: $tmp_filename $!");
1911
1912 # return the output filename
1913 return $tmp_filename;
1914}
1915
[22355]1916sub associate_cover_image
1917{
1918 my $self = shift(@_);
1919 my ($doc_obj, $filename) = @_;
1920 if (($self->{'use_realistic_book'}) || ($self->{'old_style_HDL'}))
1921 {
1922 # the cover image will be in tidytmp, but we want the one from import
1923 $filename =~ s/([\\\/])tidytmp([\\\/])/$1import$2/;
1924 }
1925 $self->SUPER::associate_cover_image($doc_obj, $filename);
1926}
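# Illustrative example (not part of the original code): when realistic books or
# old-style HDL are in use, a filename such as
#   .../mycol/tidytmp/books/ch1.html
# is mapped back to
#   .../mycol/import/books/ch1.html
# before the base class is asked to associate the cover image.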
1927
1928
[14665]19291;