source: main/trunk/greenstone2/perllib/plugins/PDFPlugin.pm@ 32277

Last change on this file since 32277 was 32277, checked in by ak19, 6 years ago

First attempt at PDFv2Plugin.pm.

  • Property svn:keywords set to Author Date Id Revision
File size: 31.0 KB
[1410]1###########################################################################
2#
[15872]3# PDFPlugin.pm -- reasonably with-it pdf plugin
[1410]4# A component of the Greenstone digital library software
5# from the New Zealand Digital Library Project at the
6# University of Waikato, New Zealand.
7#
[2661]8# Copyright (C) 1999-2001 New Zealand Digital Library Project
[1410]9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License as published by
12# the Free Software Foundation; either version 2 of the License, or
13# (at your option) any later version.
14#
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18# GNU General Public License for more details.
19#
20# You should have received a copy of the GNU General Public License
21# along with this program; if not, write to the Free Software
22# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23#
24###########################################################################
[15872]25package PDFPlugin;
[1410]26
[10353]27use strict;
[22702]28no strict 'refs'; # so we can use a var for filehandles (e.g. STDERR)
[32205]29no strict 'subs'; # allow filehandles to be variables and vice versa
[1410]30
[22705]31use ReadTextFile;
32use unicode;
[32205]33use Mojo::DOM; # for HTML parsing
[22702]34
[22861]35use AutoLoadConverters;
[22864]36use ConvertBinaryFile;
[1410]37
[22861]38@PDFPlugin::ISA = ('ConvertBinaryFile', 'AutoLoadConverters', 'ReadTextFile');
[22705]39
40
[10452]41my $convert_to_list =
42 [ { 'name' => "auto",
[15872]43 'desc' => "{ConvertBinaryFile.convert_to.auto}" },
[10452]44 { 'name' => "html",
[15872]45 'desc' => "{ConvertBinaryFile.convert_to.html}" },
[10452]46 { 'name' => "text",
[15872]47 'desc' => "{ConvertBinaryFile.convert_to.text}" },
[32205]48 { 'name' => "paged_html",
49 'desc' => "{PDFPlugin.convert_to.paged_html}"},
[10452]50 { 'name' => "pagedimg_jpg",
[15872]51 'desc' => "{ConvertBinaryFile.convert_to.pagedimg_jpg}"},
[10452]52 { 'name' => "pagedimg_gif",
[15872]53 'desc' => "{ConvertBinaryFile.convert_to.pagedimg_gif}"},
[10452]54 { 'name' => "pagedimg_png",
[15872]55 'desc' => "{ConvertBinaryFile.convert_to.pagedimg_png}"},
[10452]56 ];
57
58
59my $arguments =
[10889]60 [
61 { 'name' => "convert_to",
[15872]62 'desc' => "{ConvertBinaryFile.convert_to}",
[10889]63 'type' => "enum",
64 'reqd' => "yes",
65 'list' => $convert_to_list,
66 'deft' => "html" },
67 { 'name' => "process_exp",
[31492]68 'desc' => "{BaseImporter.process_exp}",
[10889]69 'type' => "regexp",
70 'deft' => &get_default_process_exp(),
71 'reqd' => "no" },
72 { 'name' => "block_exp",
[31494]73 'desc' => "{CommonUtil.block_exp}",
[10889]74 'type' => "regexp",
75 'deft' => &get_default_block_exp() },
76 { 'name' => "metadata_fields",
[15872]77 'desc' => "{HTMLPlugin.metadata_fields}",
[10889]78 'type' => "string",
[24431]79 'deft' => "Title,Author,Subject,Keywords" },
[21800]80 { 'name' => "metadata_field_separator",
81 'desc' => "{HTMLPlugin.metadata_field_separator}",
82 'type' => "string",
83 'deft' => "" },
[10889]84 { 'name' => "noimages",
[15872]85 'desc' => "{PDFPlugin.noimages}",
[10889]86 'type' => "flag" },
87 { 'name' => "allowimagesonly",
[15872]88 'desc' => "{PDFPlugin.allowimagesonly}",
[10889]89 'type' => "flag" },
90 { 'name' => "complex",
[15872]91 'desc' => "{PDFPlugin.complex}",
[10889]92 'type' => "flag" },
93 { 'name' => "nohidden",
[15872]94 'desc' => "{PDFPlugin.nohidden}",
[10889]95 'type' => "flag" },
96 { 'name' => "zoom",
[15872]97 'desc' => "{PDFPlugin.zoom}",
[10889]98 'deft' => "2",
[32222]99# 'range' => "1,3", # actually the range is 0.5-3
100 'type' => "string" },
[10889]101 { 'name' => "use_sections",
[15872]102 'desc' => "{PDFPlugin.use_sections}",
[10889]103 'type' => "flag" },
104 { 'name' => "description_tags",
[15872]105 'desc' => "{HTMLPlugin.description_tags}",
[29101]106 'type' => "flag" },
107 { 'name' => "use_realistic_book",
[29102]108 'desc' => "{PDFPlugin.use_realistic_book}",
[29101]109 'type' => "flag"}
[10889]110 ];
[3540]111
[15872]112my $options = { 'name' => "PDFPlugin",
113 'desc' => "{PDFPlugin.desc}",
[6408]114 'abstract' => "no",
[3540]115 'inherits' => "yes",
[15114]116 'srcreplaceable' => "yes", # Source docs in PDF can be replaced with GS-generated html
[3540]117 'args' => $arguments };
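# A hypothetical usage sketch (not part of this file): in a collection's
# collect.cfg, the options declared above would typically be supplied as
#
#   plugin PDFPlugin -convert_to paged_html -use_sections
#
# with GLI exposing the same convert_to choices (html, text, paged_html
# and the pagedimg_* variants) in its plugin configuration dialog.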
118
[1410]119sub new {
[10218]120 my ($class) = shift (@_);
121 my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
122 push(@$pluginlist, $class);
[2452]123
[10218]124 push(@$inputargs,"-title_sub");
125 push(@$inputargs,'^(Page\s+\d+)?(\s*1\s+)?');
[5616]126
[15872]127 push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
128 push(@{$hashArgOptLists->{"OptList"}},$options);
[10429]129
[22861]130 my $auto_converter_self = new AutoLoadConverters($pluginlist,$inputargs,$hashArgOptLists,["PDFBoxConverter"],1);
131 my $cbf_self = new ConvertBinaryFile($pluginlist, $inputargs, $hashArgOptLists);
[31492]132 my $self = BaseImporter::merge_inheritance($auto_converter_self, $cbf_self);
[10353]133
[10580]134 if ($self->{'info_only'}) {
135 # don't worry about any options etc
136 return bless $self, $class;
137 }
[22861]138
139 $self = bless $self, $class;
[15872]140 $self->{'file_type'} = "PDF";
141
[32273]142 # PDFPlugin is deprecated and migrating users should hereafter choose between
143 # PDFv1Plugin, if they want to use the old pdftohtml tool's capabilities,
144 # and PDFv2Plugin, if they want to use pdfbox or the new xpdftools capabilities.
[32275]145 &gsprintf::gsprintf(STDERR, "{PDFPlugin.deprecated_plugin}\n");
[32273]146
[15872]147 # these are passed through to gsConvert.pl by ConvertBinaryFile.pm
[10218]148 my $zoom = $self->{"zoom"};
[32273]149 # By default, PDFPlugin assumes gsConvert.pl will run the old pdftohtml conversion tool.
150 # But if pdfbox conversion is turned on, the tool used is pdfbox (which is presently an
151 # AutoLoadConverter and therefore bypasses gsConvert.pl).
152 $self->{'convert_options'} = "-pdf_tool pdftohtml";
153 $self->{'convert_options'} .= " -pdf_zoom $zoom";
[10218]154 $self->{'convert_options'} .= " -pdf_complex" if $self->{"complex"};
155 $self->{'convert_options'} .= " -pdf_nohidden" if $self->{"nohidden"};
156 $self->{'convert_options'} .= " -pdf_ignore_images" if $self->{"noimages"};
[10452]157 $self->{'convert_options'} .= " -pdf_allow_images_only" if $self->{"allowimagesonly"};
[3720]158
[22597]159 # check convert_to
160 if ($self->{'convert_to'} eq "text" && $ENV{'GSDLOS'} =~ /^windows$/i) {
[32277]161 &gsprintf::gsprintf(STDERR, "{PDFPlugin.win_old_pdftotext_unsupported}\n");
[32273]162 $self->{'convert_to'} = "html";
[10273]163 }
[22597]164 elsif ($self->{'convert_to'} eq "auto") {
165 # default to paged_html ?? is this the best option
[32223]166 $self->{'convert_to'} = "paged_html";
[10273]167 }
[29101]168 if ($self->{'use_realistic_book'}) {
169 if ($self->{'convert_to'} ne "html") {
170 print STDERR "PDFs will be converted to HTML for realistic book functionality\n";
171 $self->{'convert_to'} = "html";
172 }
173 }
[22597]174 # set convert_to_plugin and convert_to_ext
[22702]175 $self->set_standard_convert_settings();
[18145]176
[22597]177 my $secondary_plugin_name = $self->{'convert_to_plugin'};
178 my $secondary_plugin_options = $self->{'secondary_plugin_options'};
[10273]179
[22597]180 if (!defined $secondary_plugin_options->{$secondary_plugin_name}) {
181 $secondary_plugin_options->{$secondary_plugin_name} = [];
[10724]182 }
[22597]183 my $specific_options = $secondary_plugin_options->{$secondary_plugin_name};
[10429]184
[10273]185 # following title_sub removes "Page 1" added by pdftohtml, and a leading
186 # "1", which is often the page number at the top of the page. Bad Luck
187 # if your document title actually starts with "1 " - is there a better way?
[22597]188 push(@$specific_options , "-title_sub", '^(Page\s+\d+)?(\s*1\s+)?');
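    # Illustrative example (title text assumed, not from the source): with the
    # title_sub pattern above, an extracted title such as
    #   "Page 1 1 Introduction to the collection"
    # has "Page 1" and the stray leading page number "1 " stripped off,
    # leaving "Introduction to the collection" as the Title candidate.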
[11122]189 my $associate_tail_re = $self->{'associate_tail_re'};
190 if ((defined $associate_tail_re) && ($associate_tail_re ne "")) {
[22597]191 push(@$specific_options, "-associate_tail_re", $associate_tail_re);
[11122]192 }
[22597]193 push(@$specific_options, "-file_rename_method", "none");
194
195 if ($secondary_plugin_name eq "HTMLPlugin") {
[22861]196 # pdftohtml always produces utf8 - What about pdfbox???
[24290]197 # push(@$specific_options, "-input_encoding", "utf8");
[22597]198 push(@$specific_options, "-extract_language") if $self->{'extract_language'};
199 push(@$specific_options, "-processing_tmp_files");
200 # Instruct HTMLPlug (when eventually accessed through read_into_doc_obj)
201 # to extract these metadata fields from the HEAD META fields
202 if (defined $self->{'metadata_fields'} && $self->{'metadata_fields'} =~ /\S/) {
203 push(@$specific_options,"-metadata_fields",$self->{'metadata_fields'});
204 } else {
205 push(@$specific_options,"-metadata_fields","Title,GENERATOR,date,author<Creator>");
206 }
207 if (defined $self->{'metadata_field_separator'} && $self->{'metadata_field_separator'} =~ /\S/) {
208 push(@$specific_options,"-metadata_field_separator",$self->{'metadata_field_separator'});
209 }
210 if ($self->{'use_sections'} || $self->{'description_tags'}) {
211 $self->{'description_tags'} = 1;
212 push(@$specific_options, "-description_tags");
213 }
[29101]214 if ($self->{'use_realistic_book'}) {
215 push(@$specific_options, "-use_realistic_book");
216 }
[32210]217 if($self->{'convert_to'} eq "paged_html") { # for paged html, the default should be to sectionalise the single superpage (whose divs represent the individual pages) on its headings
218 push(@$specific_options, "-sectionalise_using_h_tags");
219 }
[22597]220 }
221 elsif ($secondary_plugin_name eq "PagedImagePlugin") {
222 push(@$specific_options, "-screenviewsize", "1000");
223 push(@$specific_options, "-enable_cache");
224 push(@$specific_options, "-processing_tmp_files");
225 }
[11122]226
[10273]227 $self = bless $self, $class;
[10429]228 $self->load_secondary_plugins($class,$secondary_plugin_options,$hashArgOptLists);
[10273]229 return $self;
[1410]230}
231
232sub get_default_process_exp {
233 my $self = shift (@_);
234
235 return q^(?i)\.pdf$^;
236}
[2661]237
238# so we don't inherit HTMLPlug's block exp...
239sub get_default_block_exp {
240 return "";
241}
[22861]242
243sub init {
244 my $self = shift (@_);
245
246 # ConvertBinaryFile init
247 $self->SUPER::init(@_);
[23754]248 $self->AutoLoadConverters::init(@_);
[22861]249
250}
251
252sub begin {
253 my $self = shift (@_);
254
[23754]255 $self->AutoLoadConverters::begin(@_);
[22861]256 $self->SUPER::begin(@_);
257
258}
259
260sub deinit {
261 my $self = shift (@_);
[1410]262
[23754]263 $self->AutoLoadConverters::deinit(@_);
[22861]264 $self->SUPER::deinit(@_);
265
266}
267
[24290]268# By setting hashing to be on the ga xml, we ensure that two
269# PDF files that are identical except for their metadata
270# hash to different values. Without this, when each PDF
271# file is converted to HTML there is a chance that they
272# will both be *identical* if the conversion utility does
273# not embed the metadata in the generated HTML. This is
274# certainly the case when PDFBox is being used.
[22861]275
[24290]276# This change makes this convert_to-based plugin more
277# consistent with the original vision that the same document
278# with different metadata should
279# be seen as different.
280
281sub get_oid_hash_type {
282 my $self = shift (@_);
283 return "hash_on_ga_xml";
284}
285
286
[22861]287sub tmp_area_convert_file {
288
289 my $self = shift (@_);
290 return $self->AutoLoadConverters::tmp_area_convert_file(@_);
291
292}
293
[32206]294# Overriding to do some extra handling for paged_html output mode
295sub run_conversion_command {
296 my $self = shift (@_);
297 my ($tmp_dirname, $tmp_inputPDFname, $utf8_tailname, $lc_suffix, $tailname, $suffix) = @_;
298
299 if($self->{'convert_to'} ne "paged_html") {
300 return $self->ConvertBinaryFile::run_conversion_command(@_);
301 }
302
303 # if output mode is paged_html, we use Xpdf tools' pdftohtml and tell it
304 # to create a subdir called "pages" in the tmp area and to put its products
305 # in there. (Xpdf's pdftohtml needs to be passed a *non-existent* directory
306 # parameter, the "pages" subdir). If Xpdf's pdftohtml has successfully run,
307 # the intermediary output file tmp/<random-num>/pages/index.html should
308 # exist (besides other output products there)
309
310 # We let ConvertBinaryFile proceed normally, but on success the return value
311 # should point to the intermediary product tmpdir/pages/index.html
312 # (which is the product of xpdftohtml conversion).
313 my $output_filename = $self->ConvertBinaryFile::run_conversion_command(@_);
314 $output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "pages", "index.html");
315
316 # However, when convert_post_process() is done, it should have output the final
317 # product of the paged_html conversion: an html file of the same name and in the
318 # same tmp location as the input PDF file.
319
320 my ($name_prefix, $output_dir, $ext)
321 = &File::Basename::fileparse($tmp_inputPDFname, "\\.[^\\.]+\$");
322 $self->{'conv_filename_after_post_process'} = &FileUtils::filenameConcatenate($output_dir, $name_prefix.".html");
323# print STDERR "@@@@@ final paged html file will be: " . $self->{'conv_filename_after_post_process'} . "\n";
324
325 return $output_filename;
326}
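# For example (filenames illustrative): when converting tmp/<rand>/report.pdf
# in paged_html mode, this method returns tmp/<rand>/pages/index.html as the
# intermediary product, while convert_post_process() is expected to leave the
# final single-page result at tmp/<rand>/report.html, the path recorded in
# 'conv_filename_after_post_process' above.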
327
[10273]328sub convert_post_process
329{
[1410]330 my $self = shift (@_);
[10273]331 my ($conv_filename) = @_;
[9465]332
[7019]333 my $outhandle=$self->{'outhandle'};
334
[32206]335 if($self->{'convert_to'} eq "paged_html") {
[32205]336 # special post-processing for paged_html mode, as HTML pages generated
337 # by xpdf's pdftohtml need to be massaged into the form we want
338 $self->xpdftohtml_convert_post_process($conv_filename);
339 }
340 else { # use PDFPlugin's usual post processing
341 $self->default_convert_post_process($conv_filename);
342 }
343}
344
345# Called after gsConvert.pl has been run to convert a PDF to paged_html
346# using Xpdftools' pdftohtml
347# This method will do some cleanup of the HTML files produced after XPDF has produced
348# an HTML doc for each PDF page: it first gets rid of the default index.html.
349# Instead, it constructs a single html page containing each original HTML page's
350# <body> nested as a div, with simple section information inserted at the top
351# of each 'page' <div> and some further styling customisation. This HTML manipulation
352# is to be done with the Mojo::DOM perl package.
353# Note that since xpdf's pdftohtml would have failed if the output dir already
354# existed and for simpler naming, the output files are created in a new "pages"
355# subdirectory of the tmp location parent of $conv_filename instead
356sub xpdftohtml_convert_post_process
357{
358 my $self = shift (@_);
[32206]359 my ($pages_index_html) = @_; # = tmp/<rand>/pages/index.html for paged_html output mode
360 my $output_filename = $self->{'conv_filename_after_post_process'};
361
362 # Read in all the html files in tmp's "pages" subdir, except for index.html,
363 # and use them to create a new html file called $self->{'conv_filename_after_post_process'}
364 # which will consist of a slightly modified version of
[32205]365 # each of the other html files concatenated together.
366
367 my $outhandle=$self->{'outhandle'};
368
[32206]369 my ($tailname, $pages_subdir, $suffix)
370 = &File::Basename::fileparse($pages_index_html, "\\.[^\\.]+\$");
[32205]371
372 # Code from util::create_itemfile()
373 # Read in all the files
374 opendir(DIR, $pages_subdir) || die "can't opendir $pages_subdir: $!";
375 my @page_files = grep {-f "$pages_subdir/$_"} readdir(DIR);
376 closedir DIR;
377 # Sort files in the directory by page_num
378 # files are named index.html, page1.html, page2.html, ..., pagen.html
379 sub page_number {
380 my ($dir) = @_;
381 my ($pagenum) =($dir =~ m/^page(\d+)\.html?$/i);
382 $pagenum = 0 unless defined $pagenum; # index.html will be given pagenum=0
383 return $pagenum;
384 }
385 # sort the files in the directory in the order of page_num rather than lexically.
386 @page_files = sort { page_number($a) <=> page_number($b) } @page_files;
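    # Illustrative example (filenames assumed): a directory listing of
    #   ("index.html", "page10.html", "page2.html", "page1.html")
    # would put page10 before page2 if sorted lexically; sorting numerically
    # on page_number() instead yields
    #   ("index.html", "page1.html", "page2.html", "page10.html").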
387
388 #my $num_html_pages = (scalar(@page_files) - 1)/2; # skip index file.
389 # For every html file there's an img file, so halve the total num.
390 # What about other file types that may potentially be there too???
391 my $num_html_pages = 0;
392 foreach my $pagefile (@page_files) {
393 $num_html_pages++ if $pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i;
394 }
395
396 # Prepare to create our new html page that will contain all the individual
397 # htmls generated by xpdf's pdftohtml in sequence.
398 # First write the opening html tags out to the output file. These are the
399 # same tags and their contents, including <meta>, as is generated by
400 # Xpdf's pdftohtml for each of its individual html pages.
401 my $start_text = "<html>\n<head>\n";
[32206]402 my ($output_tailname, $tmp_subdir, $html_suffix)
403 = &File::Basename::fileparse($output_filename, "\\.[^\\.]+\$");
404 $start_text .= "<title>$output_tailname</title>\n";
[32205]405 $start_text .= "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n";
406 $start_text .= "</head>\n<body>\n\n";
[32215]407 $start_text .= "<h1>$output_tailname</h1>\n\n";
[32205]408
409 #handle content encodings the same way that default_convert_post_process does
410 # $self->utf8_write_file ($start_text, $conv_filename); # will close file after write
411 # Don't want to build a giant string in memory of all the pages concatenated
412 # and then write it out in one go. Instead, build up the final single page
413 # by writing each modified paged_html file out to it as this is processed.
414 # Copying file open/close code from CommonUtil::utf8_write_file()
415 if (!open (OUTFILE, ">:utf8", $output_filename)) {
[32273]416 gsprintf(STDERR, "PDFPlugin::xpdftohtml_convert_post_process {CommonUtil.could_not_open_for_writing} ($!)\n", $output_filename);
[32205]417 die "\n";
418 }
419 print OUTFILE $start_text;
420
421 # Get the contents of each individual HTML page generated by Xpdf, after first
422 # modifying each, and write each out into our single all-encompassing html
423 foreach my $pagefile (@page_files) {
424 if ($pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i) {
425 my $page_num = page_number($pagefile);
426 # get full path to pagefile
427 $pagefile = &FileUtils::filenameConcatenate($pages_subdir, $pagefile);
428# print STDERR "@@@ About to process html file $pagefile (num $page_num)\n";
429 my $modified_page_contents = $self->_process_paged_html_page($pagefile, $page_num, $num_html_pages);
430 print OUTFILE "$modified_page_contents\n\n";
431 }
432 }
433
434 # we've now created a single HTML file by concatenating (a modified version)
435 # of each paged html file
436 print OUTFILE "</body>\n</html>\n"; # write out closing tags
437 close OUTFILE; # done
438
439 # Get rid of all the htm(l) files incl index.html in the associated "pages"
440 # subdir, since we've now processed them all into a single html file
441 # one folder level up and we don't want HTMLPlugin to process all of them next.
442 &FileUtils::removeFilesFiltered($pages_subdir, "\.html?\$"); # no specific whitelist, but blacklist htm(l)
443
444 # now the tmp area should contain a single html file containing all the html pages'
445 # contents in sequence, and a "pages" subdir containing the screenshot images
446 # of each page.
447 # HTMLPlugin will process these further in the plugin pipeline
448}
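# Sketch of the end state (filenames illustrative): for an input
# tmp/<rand>/report.pdf the tmp area should now hold tmp/<rand>/report.html
# (the concatenated single page) plus a tmp/<rand>/pages/ subdir containing
# only the per-page screenshot images, the per-page html files having been
# removed above.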
449
450# For whatever reason, most html <tags> don't get printed out in GLI
451# So when debugging, use this function to print them out as [tags] instead.
452sub _debug_print_html
453{
454 my $self = shift (@_);
455 my ($string_or_dom) = @_;
456
457 # can't seem to determine type of string with ref/reftype
458 # https://stackoverflow.com/questions/1731333/how-do-i-tell-what-type-of-value-is-in-a-perl-variable
[32206]459 # Not needed, as $dom objects seem to get correctly stringified in string contexts
[32205]460 # $dom.to_string/$dom.stringify seem to get called, no need to call them
461 # https://stackoverflow.com/questions/5214543/what-is-stringification-in-perl
462 my $escapedTxt = $string_or_dom;
463 $escapedTxt =~ s@\<@[@sg;
464 $escapedTxt =~ s@\>@]@sg;
465
466 print STDERR "#### $escapedTxt\n";
467}
468
469# Helper function to read in each paged_html generated by Xpdf's pdftohtml
470# then modify the html suitably using the HTML parsing functions offered by
471# Mojo::DOM, then return the modified HTML content as a string
472# See https://mojolicious.org/perldoc/Mojo/DOM
473sub _process_paged_html_page
474{
475 my $self = shift (@_);
476 my ($pagefile, $page_num, $num_html_pages) = @_;
477
478 my $text = "";
479
480 # handling content encoding the same way default_convert_post_process does
481 $self->read_file ($pagefile, "utf8", "", \$text);
482
483 my $dom = Mojo::DOM->new($text);
484
485# $self->_debug_print_html($dom);
486
487 # There's a <style> element on the <html>; we need to shift it into the <div>
488 # tag that we'll be creating. We first slightly modify the <style> element and
489 # store it (the first and only style element, which sits in the <body>);
490 # we'll later insert it as a child of the all-encompassing div that we create
491 my $page_style_tag_str = $dom->at('html')->at('style')->to_string;
492 # In the style tag, convert id style references to class style references
493 my $css_class = ".p".$page_num."f";
494 $page_style_tag_str =~ s@\#f@$css_class@sg;
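    # Illustrative example (selector name assumed): for page 3, a pdftohtml
    # style rule such as "#f0 { font-size:10px; }" becomes
    # ".p3f0 { font-size:10px; }", so each page's font styles stay distinct
    # once all the pages are merged into one document.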
495 my $style_element = Mojo::DOM->new($page_style_tag_str)->at('style'); # modified
496#$self->_debug_print_html($style_element);
497
498 # need to know the image's height to set the height of the surrounding
499 # div that's to replace this page's <body>:
500 my $img_height = $dom->find('img')->[0]{height};
501
502 # 2. Adjust the img#background src attribute to point to the pages subdir for imgs
503 # 3. Set that img tag's class=background, and change its id to background+$page_num
504 my $bg_img_tag=$dom->find('img#background')->[0];
505 my $img_src_str = $bg_img_tag->{src};
506 $img_src_str = "pages/$img_src_str";
[32206]507 $bg_img_tag->attr(src => $img_src_str); # reset
[32205]508#$self->_debug_print_html($bg_img_tag);
509 # set both class and modified id attributes in one step:
510 $bg_img_tag->attr({class => "background", id => "background".$page_num});
511#$self->_debug_print_html($bg_img_tag);
512
513 # get all the <span> nested inside <div class="txt"> elements and
514 # 1. set their class attr to be "p + page_num + id-of-the-span",
515 # 2. then delete the id, because the span ids have been reused when element
516 # ids ought to be unique. Which is why we set the modified ids to be the
517 # value of the class attribute instead
518 $dom->find('div.txt span')->each(sub {
519 $_->attr(class => "p". $page_num. $_->{id});
520 delete $_->{id};
521 }); # both changes done in one find() operation
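    # Illustrative example (the id value is assumed): on page 3 a span such as
    #   <span id="f2">some text</span>
    # becomes <span class="p3f2">some text</span>; the reused id is dropped and
    # its value folded into a page-qualified class name instead.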
522#$self->_debug_print_html($dom->find('div.txt span')->last);
523
524 # Finally can create our new dom, starting with a div tag for the current page
525 # Must be: <div id="$page_num" style="position:relative; height:$img_height;"/>
[32206]526# my $new_dom = Mojo::DOM->new_tag('div', id => "page".$page_num, style => "position: relative; height: ".$img_height."px;" )
527 my $new_dom = Mojo::DOM->new_tag('div', style => "position: relative; height: ".$img_height."px;" );
[32205]528#$self->_debug_print_html($new_dom);
529 $new_dom->at('div')->append_content($style_element)->root;
530
[32206]531
532#$self->_debug_print_html($new_dom);
533 # Copy across all the old html's body tag's child nodes into the new dom's new div tag
534 $dom->at('body')->child_nodes->each(sub { $new_dom->at('div')->append_content($_)}); #$_->to_string
535#$self->_debug_print_html($new_dom);
536
537
538 # build up the outer div with the <h>tags for sectionalising
539 my $inner_div_str = $new_dom->to_string;
540
541 my $page_div = "<div id=\"page".$page_num."\">\n";
542 # Append a page range bucket heading if applicable: if more than 10 pages remain
543 # after the current one AND we're on the first page of a bucket of 10 pages.
[32205]544 # Dr Bainbridge thinks for now we need only consider PDFs where the
545 # total number of pages < 1000 and create buckets of size 10 (e.g. 1-10, ... 51-60, ...)
546 # If number of remaining pages >= 10, then create new bucket heading
547 # e.g. "Pages 30-40"
[32206]548 if(($page_num % 10) == 1 && ($num_html_pages - $page_num) > 10) {
[32205]549 # Double-digit page numbers that start with 2
550 # i.e. 21 to 29 (and 30) should be in 21 to 30 range
551 my $start_range = $page_num - ($page_num % 10) + 1;
552 my $end_range = $page_num + 10 - ($page_num % 10);
[32215]553 $page_div .= "<h2 style=\"font-size:1em;font-weight:normal;\">Pages ".$start_range . "-" . $end_range."</h2>\n";
[32205]554 }
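    # Worked example (page counts assumed): for a 45-page PDF, page 21 satisfies
    # 21 % 10 == 1 and 45 - 21 > 10, so it opens a "Pages 21-30" bucket heading;
    # pages 22-30 fall through to the per-page heading below, while page 41 gets
    # no bucket heading because only 4 pages remain after it.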
555
[32215]556 # No sectionalising for 10 pages or under. Otherwise, every page is a section too, not just buckets
557 if($num_html_pages > 10) {
558 # Whether we're starting a new bucket or not, add a simpler heading: just the pagenumber, "Page #"
559 $page_div .= "<h3 style=\"font-size:1em;font-weight:normal;\">Page ".$page_num."</h3>\n";
560 }
[32205]561
[32206]562 $page_div .= $inner_div_str;
563 $page_div .= "\n</div>";
564
[32205]565 # Finished processing a single html page of the paged_html output generated by
566 # Xpdf's pdftohtml: finished massaging that single html page into the right form
[32206]567 return $page_div;
[32205]568}
569
570# This subroutine is called to do the PDFPlugin post-processing for all cases
571# except the "paged_html" conversion mode. This is what PDFPlugin always used to do:
572sub default_convert_post_process
573{
574 my $self = shift (@_);
575 my ($conv_filename) = @_;
576 my $outhandle=$self->{'outhandle'};
577
[15963]578 #$self->{'input_encoding'} = "utf8"; # The output is always in utf8 (is it?? it is for html, but what about other types?)
579 #my ($language, $encoding) = $self->textcat_get_language_encoding ($conv_filename);
[8218]580
[10273]581 # read in file ($text will be in utf8)
582 my $text = "";
[15963]583 # encoding will be utf8 for html files - what about other types? will we do this step for them anyway?
584 $self->read_file ($conv_filename, "utf8", "", \$text);
[10273]585
[24159]586 # To support the use_sections option with PDFBox: Greenstone splits PDFs into pages for
587 # sections. The PDFPlugin code wants each new page to be prefixed with <a name=pagenum></a>,
588 # which it then splits on to generate page-based sections. However, that's not what PDFBox
589 # generates in its HTML output. Fortunately, PDFBox does have its own page-separator: it
590 # embeds each page in an extra div. The div opener is:
591 # <div style=\"page-break-before:always; page-break-after:always\">
[24476]592 # The PDFPlugin now looks for this and prefixes <a name=0></a> to each such div. (The
[24159]593 # pagenumber is fixed at 0 since I'm unable to work out how to increment the pagenum during
594 # a regex substitution even with regex extensions on.) Later, when we process each section
595 # to get the pagenum, PDFBox's output for this is pre-processed by having a loopcounter
596 # that increments the pagenum for each subsequent section.
597
598 #$pdfbox_pageheader="\<div style=\"page-break-before:always; page-break-after:always\">";
599 my $loopcounter = 0; # used later on!
600 $text =~ s@\<div style=\"page-break-before:always; page-break-after:always\">@<a name=$loopcounter></a><div style=\"page-break-before:always; page-break-after:always\">@g;
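    # Illustrative effect of the substitution above: every PDFBox page div
    #   <div style="page-break-before:always; page-break-after:always">
    # becomes
    #   <a name=0></a><div style="page-break-before:always; page-break-after:always">
    # all with name=0; the real page numbers are assigned further below, where
    # $loopcounter is incremented once per section.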
601
602
[10273]603 # Calculate number of pages based on <a ...> tags (we have a <a name=1> etc
604 # for each page). Metadata based on this calculation not set until process()
605 #
[24476]606 # Note: this is done even if we are not breaking the document into pages as it might
[10273]607 # be useful to give an indication of document length in browser through setting
608 # num_pages as metadata.
[30491]609 # Clean html of low and high surrogates D800–DFFF
[30492]610 $text =~ s@[\N{U+D800}-\N{U+DFFF}]@\ @g;
[24476]611 my @pages = ($text =~ m/\<[Aa] name=\"?\w+\"?>/ig); #<div style=\"?page-break-before:always; page-break-after:always\"?>
[10273]612 my $num_pages = scalar(@pages);
613 $self->{'num_pages'} = $num_pages;
614
[3411]615 if ($self->{'use_sections'}
616 && $self->{'converted_to'} eq "HTML") {
617
[15872]618 print $outhandle "PDFPlugin: Calculating sections...\n";
[3411]619
[3614]620 # we have "<a name=1></a>" etc for each page
[8795]621 # it may be <A name=
[10273]622 my @sections = split('<[Aa] name=', $text);
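	# Illustrative example (content assumed): text of the form
	#   <html>...<a name="1"></a>first page ...<a name="2"></a>second page ...
	# splits into the HTML header (kept as $top_section below) followed by chunks
	# beginning "1"></a>first page ... and "2"></a>second page ..., whose leftover
	# page label is stripped off again inside the per-section loop further down.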
[3411]623
[10273]624 my $top_section = "";
625
[7019]626 if (scalar (@sections) == 1) { #only one section - no split!
[15872]627 print $outhandle "PDFPlugin: warning - no sections found\n";
[7019]628 } else {
[10273]629 $top_section .= shift @sections; # keep HTML header etc as top_section
[7019]630 }
631
[3411]632 # handle first section specially for title? Or all use first 100...
633
634 my $title = $sections[0];
[8795]635 $title =~ s/^\"?\w+\"?>//; # specific for pdftohtml...
[3411]636 $title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
637 $title =~ s/<[^>]*>/ /g;
638 $title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
639 $title =~ s/^\s+//s;
640 $title =~ s/\s+$//;
641 $title =~ s/\s+/ /gs;
642 $title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
643 $title =~ s/^\s+//s; # in case title_sub introduced any...
644 $title = substr ($title, 0, 100);
645 $title =~ s/\s\S*$/.../;
646
[10273]647
[7019]648 if (scalar (@sections) == 1) { # no sections found
[10273]649 $top_section .= $sections[0];
[7019]650 @sections=();
651 } else {
[10273]652 $top_section .= "<!--<Section>\n<Metadata name=\"Title\">$title</Metadata>\n-->\n <!--</Section>-->\n";
[7019]653 }
[3411]654
655 # add metadata per section...
656 foreach my $section (@sections) {
[8795]657 # section names are not always just digits, may be like "outline"
658 $section =~ s@^\"?(\w+)\"?></a>@@; # leftover from split expression...
[3614]659
[3411]660 $title = $1; # Greenstone does magic if sections are titled digits
[24159]661
662 # A title of pagenum=0 means use_sections is being applied on output from PDFBox,
663 # which didn't originally have a <a name=incremented pagenumber></a> to split each page.
664 # Our Perl code then prefixed <a name=0></a> to it. Now need to increment the pagenum here:
665 if($loopcounter > 0 || ($title eq 0 && $loopcounter == 0)) { # implies use_sections with PDFBox
666 $title = ++$loopcounter;
667 }
668
[3411]669 if (! defined($title) ) {
670 print STDERR "no title: $section\n";
[8795]671 $title = " "; # get rid of the undefined warning in next line
[3411]672 }
[15872]673 my $newsection = "<!-- from PDFPlugin -->\n<!-- <Section>\n";
[3411]674 $newsection .= "<Metadata name=\"Title\">" . $title
[24159]675 . "</Metadata>\n--><br />\n";
[3411]676 $newsection .= $section;
677 $newsection .= "<!--</Section>-->\n";
678 $section = $newsection;
679 }
680
[10273]681 $text=join('', ($top_section, @sections));
[3411]682 }
683
[24199]684 if ($self->{'use_sections'}
685 && $self->{'converted_to'} eq "text") {
686 print STDERR "**** When converting PDF to text, cannot apply use_sections\n";
687 }
[22953]688
[24199]689
[22953]690 # The following should no longer be needed, now that strings
691 # read in are Unicode aware (in the Perl sense) rather than
692 # raw binary strings that just happen to be UTF-8 compliant
693
[8218]694 # turn any high bytes that aren't valid utf-8 into utf-8.
[22953]695## unicode::ensure_utf8(\$text);
[8218]696
[10273]697 # Write it out again!
698 $self->utf8_write_file (\$text, $conv_filename);
699}
[7287]700
701
[10273]702# do plugin specific processing of doc_obj for HTML type
703sub process {
704 my $self = shift (@_);
[15872]705 my ($pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;
[7287]706
[15963]707 my $result = $self->process_type($base_dir,$file,$doc_obj);
[10273]708
[8226]709 # fix up the extracted date metadata to be in Greenstone date format,
710 # and fix the capitalisation of 'date'
[8227]711 my $cursection = $doc_obj->get_top_section();
712 foreach my $datemeta (@{$doc_obj->get_metadata($cursection, "date")}) {
[7287]713 $doc_obj->delete_metadata($cursection, "date", $datemeta);
714
715 # We're just interested in the date bit, not the time
[8278]716 # some pdf creators (eg "Acrobat 5.0 Scan Plug-in for Windows")
717 # set a /CreationDate, and set /ModDate to 000000000. pdftohtml
718 # extracts the ModDate, so it is 0...
719 $datemeta =~ /(\d+)-(\d+)-(\d+)/;
720 my ($year, $month, $day) = ($1,$2,$3);
721 if (defined($year) && defined($month) && defined($day)) {
722 if ($year == 0) {next}
723 if ($year < 100) {$year += 1900} # just to be safe
724 if ($month =~ /^\d$/) {$month="0$month"} # single digit
725 if ($day =~ /^\d$/) {$day="0$day"} # single digit
726 my $date="$year$month$day";
727 $doc_obj->add_utf8_metadata($cursection, "Date", $date);
728 }
[7287]729 }
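    # Worked example (metadata value assumed): an extracted date such as
    # "2005-3-9 10:31:00" matches (\d+)-(\d+)-(\d+) as (2005, 3, 9), is
    # zero-padded, and is stored as the Greenstone Date metadata "20050309".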
730
[24476]731 $doc_obj->add_utf8_metadata($cursection, "NumPages", $self->{'num_pages'}) if defined $self->{'num_pages'};
[8795]732
733 if ($self->{'use_sections'} && $self->{'converted_to'} eq "HTML") {
[30742]734 # For gs2 we explicitly make it a paged document, cos greenstone won't get it
[8795]735 # right if any section has an empty title, or one with letters in it
[30742]736 if (&util::is_gs3()) {
737 # but for gs3, paged docs currently use image slider which is ugly if there are no images
738 $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Hierarchy");
739 } else {
740 $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Paged");
741 }
[8795]742 }
[10273]743
[7287]744 return $result;
[1410]745}
746
7471;