source: main/trunk/greenstone2/perllib/plugins/PDFPlugin.pm@ 32210

Last change on this file since 32210 was 32210, checked in by ak19, 6 years ago

When PDFPlugin is set to paged_html output mode, it now finally sectionalises the output HTML based on the manually added HTML headings.

  • Property svn:keywords set to Author Date Id Revision
File size: 30.4 KB
###########################################################################
#
# PDFPlugin.pm -- reasonably with-it pdf plugin
# A component of the Greenstone digital library software
# from the New Zealand Digital Library Project at the
# University of Waikato, New Zealand.
#
# Copyright (C) 1999-2001 New Zealand Digital Library Project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###########################################################################
package PDFPlugin;

use strict;
no strict 'refs'; # so we can use a var for filehandles (e.g. STDERR)
no strict 'subs'; # allow filehandles to be variables and vice versa

use ReadTextFile;
use unicode;
use Mojo::DOM; # for HTML parsing

use AutoLoadConverters;
use ConvertBinaryFile;

@PDFPlugin::ISA = ('ConvertBinaryFile', 'AutoLoadConverters', 'ReadTextFile');


my $convert_to_list =
    [ { 'name' => "auto",
        'desc' => "{ConvertBinaryFile.convert_to.auto}" },
      { 'name' => "html",
        'desc' => "{ConvertBinaryFile.convert_to.html}" },
      { 'name' => "text",
        'desc' => "{ConvertBinaryFile.convert_to.text}" },
      { 'name' => "paged_html",
        'desc' => "{PDFPlugin.convert_to.paged_html}"},
      { 'name' => "pagedimg_jpg",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimg_jpg}"},
      { 'name' => "pagedimg_gif",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimg_gif}"},
      { 'name' => "pagedimg_png",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimg_png}"},
    ];


my $arguments =
    [
     { 'name' => "convert_to",
       'desc' => "{ConvertBinaryFile.convert_to}",
       'type' => "enum",
       'reqd' => "yes",
       'list' => $convert_to_list,
       'deft' => "html" },
     { 'name' => "process_exp",
       'desc' => "{BaseImporter.process_exp}",
       'type' => "regexp",
       'deft' => &get_default_process_exp(),
       'reqd' => "no" },
     { 'name' => "block_exp",
       'desc' => "{CommonUtil.block_exp}",
       'type' => "regexp",
       'deft' => &get_default_block_exp() },
     { 'name' => "metadata_fields",
       'desc' => "{HTMLPlugin.metadata_fields}",
       'type' => "string",
       'deft' => "Title,Author,Subject,Keywords" },
     { 'name' => "metadata_field_separator",
       'desc' => "{HTMLPlugin.metadata_field_separator}",
       'type' => "string",
       'deft' => "" },
     { 'name' => "noimages",
       'desc' => "{PDFPlugin.noimages}",
       'type' => "flag" },
     { 'name' => "allowimagesonly",
       'desc' => "{PDFPlugin.allowimagesonly}",
       'type' => "flag" },
     { 'name' => "complex",
       'desc' => "{PDFPlugin.complex}",
       'type' => "flag" },
     { 'name' => "nohidden",
       'desc' => "{PDFPlugin.nohidden}",
       'type' => "flag" },
     { 'name' => "zoom",
       'desc' => "{PDFPlugin.zoom}",
       'deft' => "2",
       'range' => "1,3", # actually the range is 0.5-3
       'type' => "int" },
     { 'name' => "use_sections",
       'desc' => "{PDFPlugin.use_sections}",
       'type' => "flag" },
     { 'name' => "description_tags",
       'desc' => "{HTMLPlugin.description_tags}",
       'type' => "flag" },
     { 'name' => "use_realistic_book",
       'desc' => "{PDFPlugin.use_realistic_book}",
       'type' => "flag"}
    ];

my $options = { 'name' => "PDFPlugin",
                'desc' => "{PDFPlugin.desc}",
                'abstract' => "no",
                'inherits' => "yes",
                'srcreplaceable' => "yes", # Source docs in PDF can be replaced with GS-generated html
                'args' => $arguments };
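
# For illustration only: a collection designer might switch on some of the above
# options with a collect.cfg plugin line along these lines (the option names come
# from the $arguments list above; the chosen values are made-up examples):
#
#   plugin PDFPlugin -convert_to paged_html -zoom 2 -use_sections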

sub new {
    my ($class) = shift (@_);
    my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
    push(@$pluginlist, $class);

    push(@$inputargs,"-title_sub");
    push(@$inputargs,'^(Page\s+\d+)?(\s*1\s+)?');

    push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
    push(@{$hashArgOptLists->{"OptList"}},$options);

    my $auto_converter_self = new AutoLoadConverters($pluginlist,$inputargs,$hashArgOptLists,["PDFBoxConverter"],1);
    my $cbf_self = new ConvertBinaryFile($pluginlist, $inputargs, $hashArgOptLists);
    my $self = BaseImporter::merge_inheritance($auto_converter_self, $cbf_self);

    if ($self->{'info_only'}) {
        # don't worry about any options etc
        return bless $self, $class;
    }

    $self = bless $self, $class;
    $self->{'file_type'} = "PDF";

    # these are passed through to gsConvert.pl by ConvertBinaryFile.pm
    my $zoom = $self->{"zoom"};
    $self->{'convert_options'} = "-pdf_zoom $zoom";
    $self->{'convert_options'} .= " -pdf_complex" if $self->{"complex"};
    $self->{'convert_options'} .= " -pdf_nohidden" if $self->{"nohidden"};
    $self->{'convert_options'} .= " -pdf_ignore_images" if $self->{"noimages"};
    $self->{'convert_options'} .= " -pdf_allow_images_only" if $self->{"allowimagesonly"};

    # check convert_to
    # TODO: Start supporting PDF to txt on Windows if we're going to be using XPDF Tools (incl pdftotext) on Windows/Linux/Mac
    if ($self->{'convert_to'} eq "text" && $ENV{'GSDLOS'} =~ /^windows$/i) {
        print STDERR "Windows does not support pdf to text. PDFs will be converted to HTML instead\n";
        $self->{'convert_to'} = "html";
    }
    elsif ($self->{'convert_to'} eq "auto") {
        # choose html ?? is this the best option
        $self->{'convert_to'} = "html";
    }
    if ($self->{'use_realistic_book'}) {
        if ($self->{'convert_to'} ne "html") {
            print STDERR "PDFs will be converted to HTML for realistic book functionality\n";
            $self->{'convert_to'} = "html";
        }
    }
    # set convert_to_plugin and convert_to_ext
    $self->set_standard_convert_settings();

    my $secondary_plugin_name = $self->{'convert_to_plugin'};
    my $secondary_plugin_options = $self->{'secondary_plugin_options'};

    if (!defined $secondary_plugin_options->{$secondary_plugin_name}) {
        $secondary_plugin_options->{$secondary_plugin_name} = [];
    }
    my $specific_options = $secondary_plugin_options->{$secondary_plugin_name};

    # following title_sub removes "Page 1" added by pdftohtml, and a leading
    # "1", which is often the page number at the top of the page. Bad Luck
    # if your document title actually starts with "1 " - is there a better way?
    push(@$specific_options , "-title_sub", '^(Page\s+\d+)?(\s*1\s+)?');
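    # For illustration, that title_sub pattern would trim made-up titles such as
    #   "Page 3  1  Annual Report"  ->  "Annual Report"
    #   "1 Introduction"            ->  "Introduction"
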
    my $associate_tail_re = $self->{'associate_tail_re'};
    if ((defined $associate_tail_re) && ($associate_tail_re ne "")) {
        push(@$specific_options, "-associate_tail_re", $associate_tail_re);
    }
    push(@$specific_options, "-file_rename_method", "none");

    if ($secondary_plugin_name eq "HTMLPlugin") {
        # pdftohtml always produces utf8 - What about pdfbox???
        # push(@$specific_options, "-input_encoding", "utf8");
        push(@$specific_options, "-extract_language") if $self->{'extract_language'};
        push(@$specific_options, "-processing_tmp_files");
        # Instruct HTMLPlug (when eventually accessed through read_into_doc_obj)
        # to extract these metadata fields from the HEAD META fields
        if (defined $self->{'metadata_fields'} && $self->{'metadata_fields'} =~ /\S/) {
            push(@$specific_options,"-metadata_fields",$self->{'metadata_fields'});
        } else {
            push(@$specific_options,"-metadata_fields","Title,GENERATOR,date,author<Creator>");
        }
        if (defined $self->{'metadata_field_separator'} && $self->{'metadata_field_separator'} =~ /\S/) {
            push(@$specific_options,"-metadata_field_separator",$self->{'metadata_field_separator'});
        }
        if ($self->{'use_sections'} || $self->{'description_tags'}) {
            $self->{'description_tags'} = 1;
            push(@$specific_options, "-description_tags");
        }
        if ($self->{'use_realistic_book'}) {
            push(@$specific_options, "-use_realistic_book");
        }
        if($self->{'convert_to'} eq "paged_html") { # for paged html, the default is to sectionalise the single superpage (whose divs represent the individual pages) on the headings added into it
            push(@$specific_options, "-sectionalise_using_h_tags");
        }
    }
    elsif ($secondary_plugin_name eq "PagedImagePlugin") {
        push(@$specific_options, "-screenviewsize", "1000");
        push(@$specific_options, "-enable_cache");
        push(@$specific_options, "-processing_tmp_files");
    }

    $self = bless $self, $class;
    $self->load_secondary_plugins($class,$secondary_plugin_options,$hashArgOptLists);
    return $self;
}

sub get_default_process_exp {
    my $self = shift (@_);

    return q^(?i)\.pdf$^;
}
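
# For illustration, the default process_exp above matches filenames by their .pdf
# extension case-insensitively, e.g. the invented names report.pdf or REPORT.PDF.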

# so we don't inherit HTMLPlug's block exp...
sub get_default_block_exp {
    return "";
}

sub init {
    my $self = shift (@_);

    # ConvertBinaryFile init
    $self->SUPER::init(@_);
    $self->AutoLoadConverters::init(@_);

}

sub begin {
    my $self = shift (@_);

    $self->AutoLoadConverters::begin(@_);
    $self->SUPER::begin(@_);

}

sub deinit {
    my $self = shift (@_);

    $self->AutoLoadConverters::deinit(@_);
    $self->SUPER::deinit(@_);

}

# By setting hashing to be on the ga xml, we ensure that two
# PDF files that are identical except for their metadata
# hash to different values. Without this, when each PDF
# file is converted to HTML there is a chance that they
# will both be *identical* if the conversion utility does
# not embed the metadata in the generated HTML. This is
# certainly the case when PDFBOX is being used.

# This change makes this convert-to based plugin more
# consistent with the original vision that the same document
# with different metadata should
# be seen as different.

sub get_oid_hash_type {
    my $self = shift (@_);
    return "hash_on_ga_xml";
}


sub tmp_area_convert_file {

    my $self = shift (@_);
    return $self->AutoLoadConverters::tmp_area_convert_file(@_);

}

# Overriding to do some extra handling for paged_html output mode
sub run_conversion_command {
    my $self = shift (@_);
    my ($tmp_dirname, $tmp_inputPDFname, $utf8_tailname, $lc_suffix, $tailname, $suffix) = @_;

    if($self->{'convert_to'} ne "paged_html") {
        return $self->ConvertBinaryFile::run_conversion_command(@_);
    }

    # if output mode is paged_html, we use Xpdf tools' pdftohtml and tell it
    # to create a subdir called "pages" in the tmp area and to put its products
    # in there. (Xpdf's pdftohtml needs to be passed a *non-existent* directory
    # parameter, the "pages" subdir). If Xpdf's pdftohtml has successfully run,
    # the intermediary output file tmp/<random-num>/pages/index.html should
    # exist (besides other output products there)

    # We let ConvertBinaryFile proceed normally, but the return value should reflect
    # that on success it should expect the intermediary product tmpdir/pages/index.html
    # (which is the product of xpdftohtml conversion).
    my $output_filename = $self->ConvertBinaryFile::run_conversion_command(@_);
    $output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "pages", "index.html");

    # However, when convert_post_process() is done, it should have output the final
    # product of the paged_html conversion: an html file of the same name and in the
    # same tmp location as the input PDF file.

    my ($name_prefix, $output_dir, $ext)
        = &File::Basename::fileparse($tmp_inputPDFname, "\\.[^\\.]+\$");
    $self->{'conv_filename_after_post_process'} = &FileUtils::filenameConcatenate($output_dir, $name_prefix.".html");
#    print STDERR "@@@@@ final paged html file will be: " . $self->{'conv_filename_after_post_process'} . "\n";

    return $output_filename;
}
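
# For illustration (path and filename invented): if the PDF being converted is
# tmp/1234/mydoc.pdf, the value returned above points at the intermediary
# product tmp/1234/pages/index.html, while 'conv_filename_after_post_process'
# records tmp/1234/mydoc.html, the single file that convert_post_process()
# will eventually write out.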

sub convert_post_process
{
    my $self = shift (@_);
    my ($conv_filename) = @_;

    my $outhandle=$self->{'outhandle'};

    if($self->{'convert_to'} eq "paged_html") {
        # special post-processing for paged_html mode, as HTML pages generated
        # by xpdf's pdftohtml need to be massaged into the form we want
        $self->xpdftohtml_convert_post_process($conv_filename);
    }
    else { # use PDFPlugin's usual post processing
        $self->default_convert_post_process($conv_filename);
    }
}

# Called after gsConvert.pl has been run to convert a PDF to paged_html
# using Xpdftools' pdftohtml
# This method will do some cleanup of the HTML files produced after XPDF has produced
# an HTML doc for each PDF page: it first gets rid of the default index.html.
# It then constructs a single html page containing each original HTML page's
# <body> nested as a div, with simple section information inserted at the top
# of each 'page' <div> and some further styling customisation. This HTML manipulation
# is done with the Mojo::DOM perl package.
# Note that since xpdf's pdftohtml would have failed if the output dir already
# existed, and for simpler naming, the output files are created in a new "pages"
# subdirectory of the tmp location parent of $conv_filename instead
sub xpdftohtml_convert_post_process
{
    my $self = shift (@_);
    my ($pages_index_html) = @_; # = tmp/<rand>/pages/index.html for paged_html output mode
    my $output_filename = $self->{'conv_filename_after_post_process'};

    # Read in all the html files in tmp's "pages" subdir, except for index.html,
    # and use them to create a new html file called $self->{'conv_filename_after_post_process'}
    # which will consist of a slightly modified version of
    # each of the other html files concatenated together.

    my $outhandle=$self->{'outhandle'};

    my ($tailname, $pages_subdir, $suffix)
        = &File::Basename::fileparse($pages_index_html, "\\.[^\\.]+\$");

    # Code from util::create_itemfile()
    # Read in all the files
    opendir(DIR, $pages_subdir) || die "can't opendir $pages_subdir: $!";
    my @page_files = grep {-f "$pages_subdir/$_"} readdir(DIR);
    closedir DIR;
    # Sort files in the directory by page_num
    # files are named index.html, page1.html, page2.html, ..., pagen.html
    sub page_number {
        my ($dir) = @_;
        my ($pagenum) = ($dir =~ m/^page(\d+)\.html?$/i);
        $pagenum = 0 unless defined $pagenum; # index.html will be given pagenum=0
        return $pagenum;
    }
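    # For illustration, with the filenames produced by xpdf's pdftohtml as per the
    # comment above: page_number("page12.html") returns 12, page_number("index.html") returns 0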
    # sort the files in the directory in the order of page_num rather than lexically.
    @page_files = sort { page_number($a) <=> page_number($b) } @page_files;

    #my $num_html_pages = (scalar(@page_files) - 1)/2; # skip index file.
    # For every html file there's an img file, so halve the total num.
    # What about other file types that may potentially be there too???
    my $num_html_pages = 0;
    foreach my $pagefile (@page_files) {
        $num_html_pages++ if $pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i;
    }

    # Prepare to create our new html page that will contain all the individual
    # htmls generated by xpdf's pdftohtml in sequence.
    # First write the opening html tags out to the output file. These are the
    # same tags and their contents, including <meta>, as is generated by
    # Xpdf's pdftohtml for each of its individual html pages.
    my $start_text = "<html>\n<head>\n";
    my ($output_tailname, $tmp_subdir, $html_suffix)
        = &File::Basename::fileparse($output_filename, "\\.[^\\.]+\$");
    $start_text .= "<title>$output_tailname</title>\n";
    $start_text .= "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n";
    $start_text .= "</head>\n<body>\n\n";

    # handle content encodings the same way that default_convert_post_process does
    # $self->utf8_write_file ($start_text, $conv_filename); # will close file after write
    # Don't want to build a giant string in memory of all the pages concatenated
    # and then write it out in one go. Instead, build up the final single page
    # by writing each modified paged_html file out to it as this is processed.
    # Copying file open/close code from CommonUtil::utf8_write_file()
    if (!open (OUTFILE, ">:utf8", $output_filename)) {
        gsprintf(STDERR, "PDFPlugin::xpdftohtml_convert_post_process {ConvertToPlug.could_not_open_for_writing} ($!)\n", $output_filename);
        die "\n";
    }
    print OUTFILE $start_text;

    # Get the contents of each individual HTML page generated by Xpdf, after first
    # modifying each, and write each out into our single all-encompassing html
    foreach my $pagefile (@page_files) {
        if ($pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i) {
            my $page_num = page_number($pagefile);
            # get full path to pagefile
            $pagefile = &FileUtils::filenameConcatenate($pages_subdir, $pagefile);
#           print STDERR "@@@ About to process html file $pagefile (num $page_num)\n";
            my $modified_page_contents = $self->_process_paged_html_page($pagefile, $page_num, $num_html_pages);
            print OUTFILE "$modified_page_contents\n\n";
        }
    }

    # we've now created a single HTML file by concatenating (a modified version)
    # of each paged html file
    print OUTFILE "</body>\n</html>\n"; # write out closing tags
    close OUTFILE; # done

    # Get rid of all the htm(l) files incl index.html in the associated "pages"
    # subdir, since we've now processed them all into a single html file
    # one folder level up and we don't want HTMLPlugin to process all of them next.
    &FileUtils::removeFilesFiltered($pages_subdir, "\.html?\$"); # no specific whitelist, but blacklist htm(l)

    # now the tmp area should contain a single html file containing all the html pages'
    # contents in sequence, and a "pages" subdir containing the screenshot images
    # of each page.
    # HTMLPlugin will process these further in the plugin pipeline
}
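
# For illustration (names invented): for an input mydoc.pdf, the tmp area now
# holds a single mydoc.html built from all the per-page files, plus the "pages"
# subdir containing only the page screenshot images; the per-page pages/*.htm(l)
# files, including index.html, have been deleted above.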

# For whatever reason, most html <tags> don't get printed out in GLI
# So when debugging, use this function to print them out as [tags] instead.
sub _debug_print_html
{
    my $self = shift (@_);
    my ($string_or_dom) = @_;

    # can't seem to determine type of string with ref/reftype
    # https://stackoverflow.com/questions/1731333/how-do-i-tell-what-type-of-value-is-in-a-perl-variable
    # Not needed, as $dom objects seem to get correctly stringified in string contexts
    # $dom.to_string/$dom.stringify seem to get called, no need to call them
    # https://stackoverflow.com/questions/5214543/what-is-stringification-in-perl
    my $escapedTxt = $string_or_dom;
    $escapedTxt =~ s@\<@[@sg;
    $escapedTxt =~ s@\>@]@sg;

    print STDERR "#### $escapedTxt\n";
}

# Helper function to read in each paged_html generated by Xpdf's pdftohtml
# then modify the html suitably using the HTML parsing functions offered by
# Mojo::DOM, then return the modified HTML content as a string
# See https://mojolicious.org/perldoc/Mojo/DOM
sub _process_paged_html_page
{
    my $self = shift (@_);
    my ($pagefile, $page_num, $num_html_pages) = @_;

    my $text = "";

    # handling content encoding the same way default_convert_post_process does
    $self->read_file ($pagefile, "utf8", "", \$text);

    my $dom = Mojo::DOM->new($text);

#    $self->_debug_print_html($dom);

    # There's a <style> element in each page's html that we need to shift into the
    # <div> tag we'll be creating. First store that style element (it's the only
    # one, and it sits in the <body>) and slightly modify it; we'll later
    # insert it as a child of the all-encompassing div that we create
    my $page_style_tag_str = $dom->at('html')->at('style')->to_string;
    # In the style tag, convert id style references to class style references
    my $css_class = ".p".$page_num."f";
    $page_style_tag_str =~ s@\#f@$css_class@sg;
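    # For illustration (rule body invented): for page 3, a rule such as
    #   #f0 { font-family:sans-serif; }   becomes   .p3f0 { font-family:sans-serif; }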
    my $style_element = Mojo::DOM->new($page_style_tag_str)->at('style'); # modified
#    $self->_debug_print_html($style_element);

    # need to know the image's height to set the height of the surrounding
    # div that's to replace this page's <body>:
    my $img_height = $dom->find('img')->[0]{height};

    # 2. Adjust the img#background src attribute to point to the pages subdir for imgs
    # 3. Set that img tag's class=background, and change its id to background+$page_num
    my $bg_img_tag = $dom->find('img#background')->[0];
    my $img_src_str = $bg_img_tag->{src};
    $img_src_str = "pages/$img_src_str";
    $bg_img_tag->attr(src => $img_src_str); # reset
#    $self->_debug_print_html($bg_img_tag);
    # set both class and modified id attributes in one step:
    $bg_img_tag->attr({class => "background", id => "background".$page_num});
#    $self->_debug_print_html($bg_img_tag);

    # get all the <span> nested inside <div class="txt"> elements and
    # 1. set their class attr to be "p + page_num + id-of-the-span",
    # 2. then delete the id, because the span ids have been reused when element
    # ids ought to be unique. Which is why we set the modified ids to be the
    # value of the class attribute instead
    $dom->find('div.txt span')->each(sub {
        $_->attr(class => "p". $page_num. $_->{id});
        delete $_->{id};
    }); # both changes done in one find() operation
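    # For illustration (attribute values invented): on page 3 a span such as
    #   <span id="f0">   ends up as   <span class="p3f0">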
#    $self->_debug_print_html($dom->find('div.txt span')->last);

    # Finally can create our new dom, starting with a div tag for the current page
    # Must be: <div id="$page_num" style="position:relative; height:$img_height;"/>
#    my $new_dom = Mojo::DOM->new_tag('div', id => "page".$page_num, style => "position: relative; height: ".$img_height."px;" )
    my $new_dom = Mojo::DOM->new_tag('div', style => "position: relative; height: ".$img_height."px;" );
#    $self->_debug_print_html($new_dom);
    $new_dom->at('div')->append_content($style_element)->root;


#    $self->_debug_print_html($new_dom);
    # Copy across all the old html's body tag's child nodes into the new dom's new div tag
    $dom->at('body')->child_nodes->each(sub { $new_dom->at('div')->append_content($_)}); #$_->to_string
#    $self->_debug_print_html($new_dom);


    # build up the outer div with the <h> tags for sectionalising
    my $inner_div_str = $new_dom->to_string;

    my $page_div = "<div id=\"page".$page_num."\">\n";
    # Append a page range bucket heading if applicable: if we have more than 10 pages
    # to display in the current bucket AND we're on the first page of each bucket of 10 pages.
    # Dr Bainbridge thinks for now we need only consider PDFs where the
    # total number of pages < 1000 and create buckets of size 10 (e.g. 1-10, ... 51-60, ...)
    # If number of remaining pages >= 10, then create new bucket heading
    # e.g. "Pages 31-40"
    if (($page_num % 10) == 1 && ($num_html_pages - $page_num) > 10) {
        # Double-digit page numbers that start with 2
        # i.e. 21 to 29 (and 30) should be in 21 to 30 range
        my $start_range = $page_num - ($page_num % 10) + 1;
        my $end_range = $page_num + 10 - ($page_num % 10);
        $page_div .= "<h1 style=\"font-size:1em;font-weight:normal;\">Pages ".$start_range . "-" . $end_range."</h1>\n";
    }
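    # For illustration: with made-up values page_num=21 and num_html_pages=50,
    # 21 % 10 == 1 and 50-21 > 10, so start_range = 21-1+1 = 21 and
    # end_range = 21+10-1 = 30, giving the bucket heading "Pages 21-30".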

    # Whether we're starting a new bucket or not, add a simpler heading: just the pagenumber, "Page #"
    $page_div .= "<h2 style=\"font-size:1em;font-weight:normal;\">Page ".$page_num."</h2>\n";
    $new_dom->at('div')->append_content($new_dom->new_tag('h2', "Page ".$page_num))->root;

    $page_div .= $inner_div_str;
    $page_div .= "\n</div>";

    # Finished processing a single html page of the paged_html output generated by
    # Xpdf's pdftohtml: finished massaging that single html page into the right form
    return $page_div;
}

# This subroutine is called to do the PDFPlugin post-processing for all cases
# except the "paged_html" conversion mode. This is what PDFPlugin always used to do:
sub default_convert_post_process
{
    my $self = shift (@_);
    my ($conv_filename) = @_;
    my $outhandle=$self->{'outhandle'};

    #$self->{'input_encoding'} = "utf8"; # The output is always in utf8 (is it?? it is for html, but what about other types?)
    #my ($language, $encoding) = $self->textcat_get_language_encoding ($conv_filename);

    # read in file ($text will be in utf8)
    my $text = "";
    # encoding will be utf8 for html files - what about other types? will we do this step for them anyway?
    $self->read_file ($conv_filename, "utf8", "", \$text);

    # To support the use_sections option with PDFBox: Greenstone splits PDFs into pages for
    # sections. The PDFPlugin code wants each new page to be prefixed with <a name=pagenum></a>,
    # which it then splits on to generate page-based sections. However, that's not what PDFBox
    # generates in its HTML output. Fortunately, PDFBox does have its own page-separator: it
    # embeds each page in an extra div. The div opener is:
    # <div style=\"page-break-before:always; page-break-after:always\">
    # The PDFPlugin now looks for this and prefixes <a name=0></a> to each such div. (The
    # pagenumber is fixed at 0 since I'm unable to work out how to increment the pagenum during
    # a regex substitution even with regex extensions on.) Later, when we process each section
    # to get the pagenum, PDFBox's output for this is pre-processed by having a loopcounter
    # that increments the pagenum for each subsequent section.

    #$pdfbox_pageheader="\<div style=\"page-break-before:always; page-break-after:always\">";
    my $loopcounter = 0; # used later on!
    $text =~ s@\<div style=\"page-break-before:always; page-break-after:always\">@<a name=$loopcounter></a><div style=\"page-break-before:always; page-break-after:always\">@g;
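    # For illustration, since $loopcounter is still 0 here, each PDFBox page-break div opener
    #   <div style="page-break-before:always; page-break-after:always">
    # now reads
    #   <a name=0></a><div style="page-break-before:always; page-break-after:always">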


    # Calculate number of pages based on <a ...> tags (we have a <a name=1> etc
    # for each page). Metadata based on this calculation not set until process()
    #
    # Note: this is done even if we are not breaking the document into pages as it might
    # be useful to give an indication of document length in browser through setting
    # num_pages as metadata.
    # Clean html from low and high surrogates D800–DFFF
    $text =~ s@[\N{U+D800}-\N{U+DFFF}]@\ @g;
    my @pages = ($text =~ m/\<[Aa] name=\"?\w+\"?>/ig); #<div style=\"?page-break-before:always; page-break-after:always\"?>
    my $num_pages = scalar(@pages);
    $self->{'num_pages'} = $num_pages;

    if ($self->{'use_sections'}
        && $self->{'converted_to'} eq "HTML") {

        print $outhandle "PDFPlugin: Calculating sections...\n";

        # we have "<a name=1></a>" etc for each page
        # it may be <A name=
        my @sections = split('<[Aa] name=', $text);

        my $top_section = "";

        if (scalar (@sections) == 1) { # only one section - no split!
            print $outhandle "PDFPlugin: warning - no sections found\n";
        } else {
            $top_section .= shift @sections; # keep HTML header etc as top_section
        }

        # handle first section specially for title? Or all use first 100...

        my $title = $sections[0];
        $title =~ s/^\"?\w+\"?>//; # specific for pdftohtml...
        $title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
        $title =~ s/<[^>]*>/ /g;
        $title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
        $title =~ s/^\s+//s;
        $title =~ s/\s+$//;
        $title =~ s/\s+/ /gs;
        $title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
        $title =~ s/^\s+//s; # in case title_sub introduced any...
        $title = substr ($title, 0, 100);
        $title =~ s/\s\S*$/.../;


        if (scalar (@sections) == 1) { # no sections found
            $top_section .= $sections[0];
            @sections=();
        } else {
            $top_section .= "<!--<Section>\n<Metadata name=\"Title\">$title</Metadata>\n-->\n <!--</Section>-->\n";
        }

        # add metadata per section...
        foreach my $section (@sections) {
            # section names are not always just digits, may be like "outline"
            $section =~ s@^\"?(\w+)\"?></a>@@; # leftover from split expression...

            $title = $1; # Greenstone does magic if sections are titled digits

            # A title of pagenum=0 means use_sections is being applied on output from PDFBox,
            # which didn't originally have a <a name=incremented pagenumber></a> to split each page.
            # Our Perl code then prefixed <a name=0></a> to it. Now need to increment the pagenum here:
            if ($loopcounter > 0 || ($title eq 0 && $loopcounter == 0)) { # implies use_sections with PDFBox
                $title = ++$loopcounter;
            }

            if (! defined($title) ) {
                print STDERR "no title: $section\n";
                $title = " "; # get rid of the undefined warning in next line
            }
            my $newsection = "<!-- from PDFPlugin -->\n<!-- <Section>\n";
            $newsection .= "<Metadata name=\"Title\">" . $title
                . "</Metadata>\n--><br />\n";
            $newsection .= $section;
            $newsection .= "<!--</Section>-->\n";
            $section = $newsection;
        }

        $text = join('', ($top_section, @sections));
    }

    if ($self->{'use_sections'}
        && $self->{'converted_to'} eq "text") {
        print STDERR "**** When converting PDF to text, cannot apply use_sections\n";
    }


    # The following should no longer be needed, now that strings
    # read in are Unicode aware (in the Perl sense) rather than
    # raw binary strings that just happen to be UTF-8 compliant

    # turn any high bytes that aren't valid utf-8 into utf-8.
##    unicode::ensure_utf8(\$text);

    # Write it out again!
    $self->utf8_write_file (\$text, $conv_filename);
}



# do plugin specific processing of doc_obj for HTML type
sub process {
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;

    my $result = $self->process_type($base_dir,$file,$doc_obj);

    # fix up the extracted date metadata to be in Greenstone date format,
    # and fix the capitalisation of 'date'
    my $cursection = $doc_obj->get_top_section();
    foreach my $datemeta (@{$doc_obj->get_metadata($cursection, "date")}) {
        $doc_obj->delete_metadata($cursection, "date", $datemeta);

        # We're just interested in the date bit, not the time
        # some pdf creators (eg "Acrobat 5.0 Scan Plug-in for Windows")
        # set a /CreationDate, and set /ModDate to 000000000. pdftohtml
        # extracts the ModDate, so it is 0...
        $datemeta =~ /(\d+)-(\d+)-(\d+)/;
        my ($year, $month, $day) = ($1,$2,$3);
        if (defined($year) && defined($month) && defined($day)) {
            if ($year == 0) {next}
            if ($year < 100) {$year += 1900} # just to be safe
            if ($month =~ /^\d$/) {$month="0$month"} # single digit
            if ($day =~ /^\d$/) {$day="0$day"} # single digit
            my $date="$year$month$day";
            $doc_obj->add_utf8_metadata($cursection, "Date", $date);
        }
    }
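    # For illustration (a made-up extracted value): a "date" of "2003-4-5" becomes
    # year=2003, month="04", day="05", and is stored as Date metadata "20030405".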

    $doc_obj->add_utf8_metadata($cursection, "NumPages", $self->{'num_pages'}) if defined $self->{'num_pages'};

    if ($self->{'use_sections'} && $self->{'converted_to'} eq "HTML") {
        # For gs2 we explicitly make it a paged document, cos greenstone won't get it
        # right if any section has an empty title, or one with letters in it
        if (&util::is_gs3()) {
            # but for gs3, paged docs currently use image slider which is ugly if there are no images
            $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Hierarchy");
        } else {
            $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Paged");
        }
    }

    return $result;
}

1;