###########################################################################
#
# PDFv2Plugin.pm -- pdf plugin that uses xpdftools or, if switched on,
# pdfbox, to process PDFs.
# A component of the Greenstone digital library software
# from the New Zealand Digital Library Project at the
# University of Waikato, New Zealand.
#
# Copyright (C) 1999-2001 New Zealand Digital Library Project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###########################################################################
|
---|
package PDFv2Plugin;

use strict;
no strict 'refs'; # so we can use a var for filehandles (e.g. STDERR)
no strict 'subs'; # allow filehandles to be variables and viceversa

use ReadTextFile;
use unicode;
use Mojo::DOM; # for HTML parsing

use AutoLoadConverters;
use ConvertBinaryFile;

# Multiple inheritance: ConvertBinaryFile supplies the convert-then-import
# pipeline, AutoLoadConverters adds the optional PDFBox converter extension,
# and ReadTextFile provides text/encoding reading helpers.
@PDFv2Plugin::ISA = ('ConvertBinaryFile', 'AutoLoadConverters', 'ReadTextFile');
|
---|
| 40 |
|
---|
| 41 |
|
---|
# Enumerated values accepted by the -convert_to option. Per-entry comments
# record which conversion tool handles each mode (xpdftools vs pdfbox).
my $convert_to_list =
    [ { 'name' => "auto",
        'desc' => "{ConvertBinaryFile.convert_to.auto}" },
      { 'name' => "text", # xpdftools
        'desc' => "{ConvertBinaryFile.convert_to.text}" },
      { 'name' => "paged_text", # xpdftools
        'desc' => "{ConvertBinaryFile.convert_to.paged_text}" },

      { 'name' => "html", # pdfbox ## TODO: rename this to html_without_imgs
        'desc' => "{PDFPlugin.convert_to.html}" },
      { 'name' => "pretty_html", # xpdftools
        'desc' => "{PDFPlugin.convert_to.pretty_html}" },
      { 'name' => "paged_pretty_html", # xpdftools
        'desc' => "{PDFPlugin.convert_to.paged_pretty_html}"},

      # pdfbox
      { 'name' => "pagedimg_jpg",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimg_jpg}"},
      { 'name' => "pagedimg_png",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimg_png}"},

      { 'name' => "pagedimgtxt_jpg",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimgtxt_jpg}"},
      { 'name' => "pagedimgtxt_png",
        'desc' => "{ConvertBinaryFile.convert_to.pagedimgtxt_png}"},
      ];
|
---|
| 68 |
|
---|
| 69 |
|
---|
# Plugin argument declarations, registered with the argument/option lists
# in new(). The {Class.key} strings are dictionary keys resolved for
# display in GLI.
my $arguments =
    [
      { 'name' => "convert_to",
        'desc' => "{ConvertBinaryFile.convert_to}",
        'type' => "enum",
        'reqd' => "yes",
        'list' => $convert_to_list,
        'deft' => "pretty_html" },
      { 'name' => "process_exp",
        'desc' => "{BaseImporter.process_exp}",
        'type' => "regexp",
        'deft' => &get_default_process_exp(),
        'reqd' => "no" },
      { 'name' => "block_exp",
        'desc' => "{CommonUtil.block_exp}",
        'type' => "regexp",
        'deft' => &get_default_block_exp() },
      { 'name' => "metadata_fields",
        'desc' => "{HTMLPlugin.metadata_fields}",
        'type' => "string",
        'deft' => "Title,Author,Subject,Keywords" },
      { 'name' => "metadata_field_separator",
        'desc' => "{HTMLPlugin.metadata_field_separator}",
        'type' => "string",
        'deft' => "" },
      # The following PDFPlugin options do not apply to the tools this
      # plugin drives, and are kept here commented out for reference.
#     { 'name' => "noimages",
#       'desc' => "{PDFPlugin.noimages}",
#       'type' => "flag" },
#     { 'name' => "allowimagesonly",
#       'desc' => "{PDFPlugin.allowimagesonly}",
#       'type' => "flag" },
#     { 'name' => "complex",
#       'desc' => "{PDFPlugin.complex}",
#       'type' => "flag" },
#     { 'name' => "nohidden",
#       'desc' => "{PDFPlugin.nohidden}",
#       'type' => "flag" },
      { 'name' => "zoom",
        'desc' => "{PDFv2Plugin.zoom}",
        'deft' => "1",
        'type' => "string" }, # xpdftools' zoom takes fractions
      { 'name' => "use_sections",
        'desc' => "{PDFPlugin.use_sections}",
        'type' => "flag" },
      { 'name' => "description_tags",
        'desc' => "{HTMLPlugin.description_tags}",
        'type' => "flag" },
      { 'name' => "use_realistic_book",
        'desc' => "{PDFPlugin.use_realistic_book}",
        'type' => "flag"}
      ];
|
---|
| 121 |
|
---|
# Plugin-level option block describing this plugin to the plugin framework.
my $options = { 'name' => "PDFv2Plugin",
                'desc' => "{PDFPlugin.desc}",
                'abstract' => "no",
                'inherits' => "yes",
                'srcreplaceable' => "yes", # Source docs in PDF can be replaced with GS-generated html
                'args' => $arguments };
|
---|
| 128 |
|
---|
# Constructor. Registers this plugin's arguments/options, builds the
# AutoLoadConverters (with PDFBoxConverter as an optional extension) and
# ConvertBinaryFile parent instances and merges them, then validates the
# chosen -convert_to mode against what xpdftools can actually produce,
# and finally configures and loads the secondary plugin (HTMLPlugin or
# PagedImagePlugin) that will process the converted output.
sub new {
    my ($class) = shift (@_);
    my ($pluginlist,$inputargs,$hashArgOptLists) = @_;
    push(@$pluginlist, $class);

    push(@$inputargs,"-title_sub");
    push(@$inputargs,'^(Page\s+\d+)?(\s*1\s+)?');

    push(@{$hashArgOptLists->{"ArgList"}},@{$arguments});
    push(@{$hashArgOptLists->{"OptList"}},$options);

    my $auto_converter_self = new AutoLoadConverters($pluginlist,$inputargs,$hashArgOptLists,["PDFBoxConverter"],1);
    my $cbf_self = new ConvertBinaryFile($pluginlist, $inputargs, $hashArgOptLists);
    my $self = BaseImporter::merge_inheritance($auto_converter_self, $cbf_self);

    if ($self->{'info_only'}) {
        # don't worry about any options etc
        return bless $self, $class;
    }

    $self = bless $self, $class;
    $self->{'file_type'} = "PDF";

    # convert_options are passed through to gsConvert.pl by ConvertBinaryFile.pm

    # the most important option is the tool that's used to do the conversion
    $self->{'convert_options'} = "-pdf_tool xpdftools"; # default for PDFv2Plugin. If pdfbox_conversion is on, the pdfbox GS extension sets pdf_tool to pdfbox

    # pdf_zoom is supported by xpdftools' pdftohtml. So for pretty_html and paged_pretty_html
    my $zoom = $self->{"zoom"};
    $self->{'convert_options'} .= " -pdf_zoom $zoom";

    # check convert_to

    # Not all available conversion output options are possible with xpdftools, as some are
    # only handled by pdfbox. If a format is unavailable with xpdftools, default to pretty_html
    if ($self->{'convert_to'} =~ /^html$/) {
        &gsprintf::gsprintf(STDERR, "{PDFv2Plugin.conversion_needs_pdfbox}\n", ($self->{'convert_to'}, "pretty_html"));
        # $self->{'convert_to'} = "pretty_html";
    }
    elsif ($self->{'convert_to'} =~ /^pagedimg/) {
        &gsprintf::gsprintf(STDERR, "{PDFv2Plugin.conversion_needs_pdfbox}\n", ($self->{'convert_to'}, "paged_pretty_html"));
        # $self->{'convert_to'} = "paged_pretty_html";
    }
    elsif ($self->{'convert_to'} =~ /^paged_text$/) {
        # TODO
        print STDERR "@@@ Conversion to " . $self->{'convert_to'} . " with Xpdf Tools is not yet implemented.\n";
        #print STDERR "@@@ Converting to text instead.\n";
        #$self->{'convert_to'} = "text";
    }

    # PDFv2Plugin now supports PDF to txt conversion on Windows too:
    # using XPDF Tools (incl pdftotext) on Windows/Linux/Mac
    elsif ($self->{'convert_to'} eq "text" && $ENV{'GSDLOS'} =~ /^windows$/i) {
        &gsprintf::gsprintf(STDERR, "{PDFv2Plugin.win_pdftotext_info}\n");
    }
    elsif ($self->{'convert_to'} eq "auto") {
        # pretty_html is the best default option when using xpdftools
        $self->{'convert_to'} = "pretty_html";
    }
    if ($self->{'use_realistic_book'}) {
        if ($self->{'convert_to'} ne "html") {
            print STDERR "PDFs will be converted to HTML for realistic book functionality\n";
            $self->{'convert_to'} = "html";
        }
    }
    # set convert_to_plugin and convert_to_ext
    $self->set_standard_convert_settings();

    my $secondary_plugin_name = $self->{'convert_to_plugin'};
    my $secondary_plugin_options = $self->{'secondary_plugin_options'};

    if (!defined $secondary_plugin_options->{$secondary_plugin_name}) {
        $secondary_plugin_options->{$secondary_plugin_name} = [];
    }
    my $specific_options = $secondary_plugin_options->{$secondary_plugin_name};

    # following title_sub removes "Page 1" added by pdftohtml, and a leading
    # "1", which is often the page number at the top of the page. Bad Luck
    # if your document title actually starts with "1 " - is there a better way?
    push(@$specific_options , "-title_sub", '^(Page\s+\d+)?(\s*1\s+)?');
    my $associate_tail_re = $self->{'associate_tail_re'};
    if ((defined $associate_tail_re) && ($associate_tail_re ne "")) {
        push(@$specific_options, "-associate_tail_re", $associate_tail_re);
    }
    push(@$specific_options, "-file_rename_method", "none");

    if ($secondary_plugin_name eq "HTMLPlugin") {
        # pdftohtml always produces utf8 - What about pdfbox???
        # push(@$specific_options, "-input_encoding", "utf8");
        push(@$specific_options, "-extract_language") if $self->{'extract_language'};
        push(@$specific_options, "-processing_tmp_files");
        # Instruct HTMLPlug (when eventually accessed through read_into_doc_obj)
        # to extract these metadata fields from the HEAD META fields
        if (defined $self->{'metadata_fields'} && $self->{'metadata_fields'} =~ /\S/) {
            push(@$specific_options,"-metadata_fields",$self->{'metadata_fields'});
        } else {
            push(@$specific_options,"-metadata_fields","Title,GENERATOR,date,author<Creator>");
        }
        if (defined $self->{'metadata_field_separator'} && $self->{'metadata_field_separator'} =~ /\S/) {
            push(@$specific_options,"-metadata_field_separator",$self->{'metadata_field_separator'});
        }
        if ($self->{'use_sections'} || $self->{'description_tags'}) {
            $self->{'description_tags'} = 1;
            push(@$specific_options, "-description_tags");
        }
        if ($self->{'use_realistic_book'}) {
            push(@$specific_options, "-use_realistic_book");
        }
        if($self->{'convert_to'} eq "paged_pretty_html") { # for paged pretty html, the default should be to sectionalise
            # the single superpage, the one containing divs representing individual pages as sections, on headings
            # BUGFIX: the option flag needs its leading dash, consistent with
            # every other option pushed onto @$specific_options above
            push(@$specific_options, "-sectionalise_using_h_tags");
        }
    }
    elsif ($secondary_plugin_name eq "PagedImagePlugin") {
        push(@$specific_options, "-screenviewsize", "1000");
        push(@$specific_options, "-enable_cache");
        push(@$specific_options, "-processing_tmp_files");
    }

    $self = bless $self, $class;
    $self->load_secondary_plugins($class,$secondary_plugin_options,$hashArgOptLists);
    return $self;
}
|
---|
| 253 |
|
---|
# Default process expression: accept any filename ending in ".pdf",
# matched case-insensitively via the inline (?i) modifier.
sub get_default_process_exp {
    my $self = shift;
    return '(?i)\.pdf$';
}
|
---|
| 259 |
|
---|
# Return an empty block expression so we don't inherit HTMLPlug's
# default block expression.
sub get_default_block_exp {
    return q{};
}
|
---|
| 264 |
|
---|
# Initialise both inheritance branches: ConvertBinaryFile first (via
# SUPER), then AutoLoadConverters. NOTE(review): begin() and deinit()
# below call the AutoLoadConverters branch first — presumably the
# ordering here is deliberate; confirm before changing.
sub init {
    my $self = shift (@_);

    # ConvertBinaryFile init
    $self->SUPER::init(@_);
    $self->AutoLoadConverters::init(@_);

}
|
---|
| 273 |
|
---|
# Per-build begin hook: forward to the AutoLoadConverters branch first,
# then the ConvertBinaryFile branch (via SUPER).
sub begin {
    my $self = shift (@_);

    $self->AutoLoadConverters::begin(@_);
    $self->SUPER::begin(@_);

}
|
---|
| 281 |
|
---|
# Per-build deinit hook: tear down the AutoLoadConverters branch first,
# then the ConvertBinaryFile branch (via SUPER) — same order as begin().
sub deinit {
    my $self = shift (@_);

    $self->AutoLoadConverters::deinit(@_);
    $self->SUPER::deinit(@_);

}
|
---|
| 289 |
|
---|
# By hashing on the ga xml (rather than the raw PDF bytes) two PDF files
# that are identical except for their metadata hash to different OIDs.
# Without this, two such PDFs converted to HTML could produce *identical*
# output whenever the conversion utility does not embed the metadata in
# the generated HTML — certainly the case when PDFBox is used.
#
# This keeps this convert-to based plugin consistent with the original
# vision that the same document with different metadata should be seen
# as a different document.
sub get_oid_hash_type {
    my $self = shift;
    return "hash_on_ga_xml";
}
|
---|
| 307 |
|
---|
| 308 |
|
---|
# Delegate the tmp-area conversion to AutoLoadConverters, which decides
# whether to run xpdftools or the PDFBox converter extension and returns
# the path of the converted output file.
sub tmp_area_convert_file {

    my $self = shift (@_);
    return $self->AutoLoadConverters::tmp_area_convert_file(@_);

}
|
---|
| 315 |
|
---|
# Overriding to do some extra handling for pretty_html/paged_pretty_html output mode.
#
# For all other modes this is a straight pass-through to
# ConvertBinaryFile::run_conversion_command. For (paged_)pretty_html it
# still runs the normal conversion (for its side effects), but rewrites
# the returned path to the intermediary product that xpdf's pdftohtml
# leaves behind, and records where convert_post_process() will later
# write the final single html file.
sub run_conversion_command {
    my $self = shift (@_);
    my ($tmp_dirname, $tmp_inputPDFname, $utf8_tailname, $lc_suffix, $tailname, $suffix) = @_;

    if($self->{'convert_to'} !~ /pretty_html/) {
        # not a (paged_)pretty_html mode: default ConvertBinaryFile behaviour
        return $self->ConvertBinaryFile::run_conversion_command(@_);
    }

    # else, paged_pretty_html or pretty_html

    # if output mode is (paged_)pretty_html, we use Xpdf tools' pdftohtml and tell it
    # to create a subdir called "pages" in the tmp area to puts its products
    # in there. (Xpdf's pdftohtml needs to be passed a *non-existent* directory
    # parameter, the "pages" subdir). If Xpdf's pdftohtml has successfully run,
    # the intermediary output file tmp/<random-num>/pages/index.html should
    # exist (besides other output products there)

    # We let ConvertBinaryFile proceed normally, but the return value should reflect
    # that on success it should expect the intermediary product tmpdir/pages/index.html
    # (which is the product of xpdftohtml conversion).
    my $output_filename = $self->ConvertBinaryFile::run_conversion_command(@_);
    # NOTE: the conversion call above is kept for its side effects; its
    # return value is deliberately replaced with the pages/index.html path.
    $output_filename = &FileUtils::filenameConcatenate($tmp_dirname, "pages", "index.html");

    # However, when convert_post_process() is done, it should have output the final
    # product of the (paged_)pretty_html conversion: an html file of the same name and in the
    # same tmp location as the input PDF file.

    my ($name_prefix, $output_dir, $ext)
        = &File::Basename::fileparse($tmp_inputPDFname, "\\.[^\\.]+\$");
    $self->{'conv_filename_after_post_process'} = &FileUtils::filenameConcatenate($output_dir, $name_prefix.".html");
    # print STDERR "@@@@@ final paged html file will be: " . $self->{'conv_filename_after_post_process'} . "\n";

    return $output_filename;
}
|
---|
| 351 |
|
---|
# Dispatch post-conversion processing according to the output mode.
# (paged_)pretty_html output from xpdf's pdftohtml requires special
# massaging; every other mode gets PDFPlugin's traditional treatment.
sub convert_post_process
{
    my ($self, $conv_filename) = @_;

    my $outhandle = $self->{'outhandle'};

    unless ($self->{'convert_to'} =~ /pretty_html/) {
        # use PDFPlugin's usual post processing
        $self->default_convert_post_process($conv_filename);
        return;
    }

    # special post-processing for (paged_)pretty_html mode, as HTML pages
    # generated by xpdf's pdftohtml need to be massaged into the form we want
    $self->xpdftohtml_convert_post_process($conv_filename);
}
|
---|
| 368 |
|
---|
# Called after gsConvert.pl has been run to convert a PDF to (paged_)pretty_html
# using Xpdftools' pdftohtml.
# This method will do some cleanup of the HTML files produced after XPDF has produced
# an HTML doc for each PDF page: it first gets rid of the default index.html.
# Instead, it constructs a single html page containing each original HTML page
# <body> nested as divs instead, with simple section information inserted at the top
# of each 'page' <div> and some further styling customisation. This HTML manipulation
# is to be done with the Mojo::DOM perl package.
# Note that since xpdf's pdftohtml would have failed if the output dir already
# existed and for simpler naming, the output files are created in a new "pages"
# subdirectory of the tmp location parent of $conv_filename instead.
sub xpdftohtml_convert_post_process
{
    my $self = shift (@_);
    my ($pages_index_html) = @_; # = tmp/<rand>/pages/index.html for (paged_)pretty_html output mode
    my $output_filename = $self->{'conv_filename_after_post_process'};

    # Read in all the html files in tmp's "pages" subdir, except for index.html.
    # and use it to create a new html file called $self->{'conv_filename_after_post_process'}
    # which will consist of a slightly modified version of
    # each of the other html files concatenated together.

    my $outhandle=$self->{'outhandle'};

    # fileparse gives us the parent dir ("pages" subdir) of index.html
    my ($tailname, $pages_subdir, $suffix)
        = &File::Basename::fileparse($pages_index_html, "\\.[^\\.]+\$");

    # Code from util::create_itemfile()
    # Read in all the files
    opendir(DIR, $pages_subdir) || die "can't opendir $pages_subdir: $!";
    my @page_files = grep {-f "$pages_subdir/$_"} readdir(DIR);
    closedir DIR;
    # Sort files in the directory by page_num
    # files are named index.html, page1.html, page2.html, ..., pagen.html
    sub page_number {
        my ($dir) = @_;
        my ($pagenum) =($dir =~ m/^page(\d+)\.html?$/i);
        $pagenum = 0 unless defined $pagenum; # index.html will be given pagenum=0
        return $pagenum;
    }
    # sort the files in the directory in the order of page_num rather than lexically.
    @page_files = sort { page_number($a) <=> page_number($b) } @page_files;

    #my $num_html_pages = (scalar(@page_files) - 1)/2; # skip index file.
    # For every html file there's an img file, so halve the total num.
    # What about other file types that may potentially be there too???
    # Count actual page html files explicitly instead of assuming the ratio.
    my $num_html_pages = 0;
    foreach my $pagefile (@page_files) {
        $num_html_pages++ if $pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i;
    }

    # Prepare to create our new html page that will contain all the individual
    # htmls generated by xpdf's pdftohtml in sequence.
    # First write the opening html tags out to the output file. These are the
    # same tags and their contents, including <meta>, as is generated by
    # Xpdf's pdftohtml for each of its individual html pages.
    my $start_text = "<html>\n<head>\n";
    my ($output_tailname, $tmp_subdir, $html_suffix)
        = &File::Basename::fileparse($output_filename, "\\.[^\\.]+\$");
    $start_text .= "<title>$output_tailname</title>\n";
    $start_text .= "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n";
    $start_text .= "</head>\n<body>\n\n";

    if($self->{'convert_to'} =~ /paged_pretty_html/) { # then add the <h>tags for sectionalising
        $start_text .= "<h1>$output_tailname</h1>\n\n";
    }

    #handle content encodings the same way that default_convert_post_process does
    # $self->utf8_write_file ($start_text, $conv_filename); # will close file after write
    # Don't want to build a giant string in memory of all the pages concatenated
    # and then write it out in one go. Instead, build up the final single page
    # by writing each modified (paged_)pretty_html file out to it as this is processed.
    # Copying file open/close code from CommonUtil::utf8_write_file()
    if (!open (OUTFILE, ">:utf8", $output_filename)) {
        gsprintf(STDERR, "PDFv2Plugin::xpdftohtml_convert_post_process {CommonUtil.could_not_open_for_writing} ($!)\n", $output_filename);
        die "\n";
    }
    print OUTFILE $start_text;

    # Get the contents of each individual HTML page generated by Xpdf, after first
    # modifying each, and write each out into our single all-encompassing html
    foreach my $pagefile (@page_files) {
        if ($pagefile =~ m/\.html?$/ && $pagefile !~ /^index\.html?/i) {
            my $page_num = page_number($pagefile);
            # get full path to pagefile
            $pagefile = &FileUtils::filenameConcatenate($pages_subdir, $pagefile);
            # print STDERR "@@@ About to process html file $pagefile (num $page_num)\n";
            my $modified_page_contents = $self->_process_pretty_html_page($pagefile, $page_num, $num_html_pages);
            print OUTFILE "$modified_page_contents\n\n";
        }
    }

    # we've now created a single HTML file by concatenating (a modified version)
    # of each paged html file
    print OUTFILE "</body>\n</html>\n"; # write out closing tags
    close OUTFILE; # done

    # Get rid of all the htm(l) files incl index.html in the associated "pages"
    # subdir, since we've now processed them all into a single html file
    # one folder level up and we don't want HTMLPlugin to process all of them next.
    &FileUtils::removeFilesFiltered($pages_subdir, "\.html?\$"); # no specific whitelist, but blacklist htm(l)

    # now the tmp area should contain a single html file contain all the html pages'
    # contents in sequence, and a "pages" subdir containing the screenshot images
    # of each page.
    # HTMLPlugin will process these further in the plugin pipeline
}
|
---|
| 476 |
|
---|
# Debug helper. For whatever reason most html <tags> don't get printed
# out in GLI, so when debugging use this to print them as [tags] instead.
# Accepts either a plain string or a Mojo::DOM node — DOM objects
# stringify automatically in string context, so no explicit conversion
# call is needed.
sub _debug_print_html
{
    my ($self, $string_or_dom) = @_;

    my $escaped = "$string_or_dom";
    # map angle brackets to square brackets character-for-character
    $escaped =~ tr/<>/[]/;

    print STDERR "#### $escaped\n";
}
|
---|
| 495 |
|
---|
# Helper function for (paged_)pretty_html
# to read in each page of pretty_html generated by Xpdf's pdftohtml
# then modify the html suitably using the HTML parsing functions offered by
# Mojo::DOM, then return the modified HTML content as a string.
# For paged_pretty_html, some additional modification is done to sectionalise the final html
# See https://mojolicious.org/perldoc/Mojo/DOM
#
# Parameters:
#   $pagefile       - full path to one pageN.html produced by pdftohtml
#   $page_num       - 1-based page number parsed from the filename
#   $num_html_pages - total count of page html files (used for bucketing)
# Returns: the page rewritten as a self-contained <div id="pageN"> string.
sub _process_pretty_html_page
{
    my $self = shift (@_);
    my ($pagefile, $page_num, $num_html_pages) = @_;

    my $text = "";

    # handling content encoding the same way default_convert_post_process does
    $self->read_file ($pagefile, "utf8", "", \$text);

    my $dom = Mojo::DOM->new($text);

    # $self->_debug_print_html($dom);

    # there's a <style> element on the <html>, we need to shift it into the <div>
    # tag that we'll be creating. We'll first slightly modify the <style> element
    # store the first style element, which is the only one and in the <body>
    # we'll later insert it as child of an all-encompassing div that we'll create
    my $page_style_tag_str = $dom->at('html')->at('style')->to_string;
    # In the style tag, convert id style references to class style references
    # (assumes pdftohtml emits its selectors with a "#f" id prefix — if the
    # tool's output format changes, this substitution needs revisiting)
    my $css_class = ".p".$page_num."f";
    $page_style_tag_str =~ s@\#f@$css_class@sg;
    my $style_element = Mojo::DOM->new($page_style_tag_str)->at('style'); # modified
    #$self->_debug_print_html($style_element);

    # need to know the image's height to set the height of the surrounding
    # div that's to replace this page's <body>:
    my $img_height = $dom->find('img')->[0]{height};

    # 2. Adjust the img#background src attribute to point to the pages subdir for imgs
    # 3. Set that img tag's class=background, and change its id to background+$page_num
    my $bg_img_tag=$dom->find('img#background')->[0];
    my $img_src_str = $bg_img_tag->{src};
    $img_src_str = "pages/$img_src_str";
    $bg_img_tag->attr(src => $img_src_str); # reset
    #$self->_debug_print_html($bg_img_tag);
    # set both class and modified id attributes in one step:
    $bg_img_tag->attr({class => "background", id => "background".$page_num});
    #$self->_debug_print_html($bg_img_tag);

    # get all the <span> nested inside <div class="txt"> elements and
    # 1. set their class attr to be "p + page_num + id-of-the-span",
    # 2. then delete the id, because the span ids have been reused when element
    # ids ought to be unique. Which is why we set the modified ids to be the
    # value of the class attribute instead
    $dom->find('div.txt span')->each(sub {
        $_->attr(class => "p". $page_num. $_->{id});
        delete $_->{id};
    }); # both changes done in one find() operation
    #$self->_debug_print_html($dom->find('div.txt span')->last);

    # Finally can create our new dom, starting with a div tag for the current page
    # Must be: <div id="$page_num" style="position:relative; height:$img_height;"/>
    # my $new_dom = Mojo::DOM->new_tag('div', id => "page".$page_num, style => "position: relative; height: ".$img_height."px;" )
    my $new_dom = Mojo::DOM->new_tag('div', style => "position: relative; height: ".$img_height."px;" );
    #$self->_debug_print_html($new_dom);
    $new_dom->at('div')->append_content($style_element)->root;


    #$self->_debug_print_html($new_dom);
    # Copy across all the old html's body tag's child nodes into the new dom's new div tag
    $dom->at('body')->child_nodes->each(sub { $new_dom->at('div')->append_content($_)}); #$_->to_string
    #$self->_debug_print_html($new_dom);

    # build up the outer div
    my $inner_div_str = $new_dom->to_string;
    my $page_div = "<div id=\"page".$page_num."\">\n";

    if($self->{'convert_to'} =~ /paged_pretty_html/) { # then add the <h>tags for sectionalising

        # Append a page range bucket heading if applicable: if we have more than 10 pages
        # to display in the current bucket AND we're on the first page of each bucket of 10 pages.
        # Dr Bainbridge thinks for now we need only consider PDFs where the
        # total number of pages < 1000 and create buckets of size 10 (e.g. 1-10, ... 51-60, ...)
        # If number of remaining pages >= 10, then create new bucket heading
        # e.g. "Pages 30-40"
        # NOTE(review): the code tests "> 10", the comment says ">= 10" —
        # confirm which boundary is intended before changing either.
        if(($page_num % 10) == 1 && ($num_html_pages - $page_num) > 10) {
            # Double-digit page numbers that start with 2
            # i.e. 21 to 29 (and 30) should be in 21 to 30 range
            my $start_range = $page_num - ($page_num % 10) + 1;
            my $end_range = $page_num + 10 - ($page_num % 10);
            $page_div .= "<h2 style=\"font-size:1em;font-weight:normal;\">Pages ".$start_range . "-" . $end_range."</h2>\n";
        }

        # No sectionalising for 10 pages or under. Otherwise, every page is a section too, not just buckets
        if($num_html_pages > 10) {
            # Whether we're starting a new bucket or not, add a simpler heading: just the pagenumber, "Page #"
            $page_div .= "<h3 style=\"font-size:1em;font-weight:normal;\">Page ".$page_num."</h3>\n";
        }
    }

    $page_div .= $inner_div_str;
    $page_div .= "\n</div>";

    # Finished processing a single html page of the (paged_)pretty_html output generated by
    # Xpdf's pdftohtml: finished massaging that single html page into the right form
    return $page_div;
}
|
---|
| 600 |
|
---|
# This subroutine is called to do the PDFv2Plugin post-processing for all cases
# except the "pretty_html" or "paged_pretty_html" conversion modes.
# This is what PDFPlugin always used to do:
#
# Reads the converted file ($conv_filename) back in as UTF-8, counts the
# pages (recorded in $self->{'num_pages'} for process() to pick up later),
# optionally rewrites the HTML into Greenstone <Section> comment markers --
# one section per page -- when use_sections is on and the conversion target
# was HTML, then writes the modified text back over $conv_filename.
sub default_convert_post_process
{
    my $self = shift (@_);
    my ($conv_filename) = @_;
    my $outhandle = $self->{'outhandle'};

    #$self->{'input_encoding'} = "utf8"; # TODO: The output is always in utf8 (is it?? it is for html, but what about other types?)
    #my ($language, $encoding) = $self->textcat_get_language_encoding ($conv_filename);

    # read in file ($text will be in utf8)
    my $text = "";
    # encoding will be utf8 for html files - what about other types? will we do this step for them anyway?
    $self->read_file ($conv_filename, "utf8", "", \$text);

    # To support the use_sections option with PDFBox: Greenstone splits PDFs into pages for
    # sections. The PDFPlugin code wants each new page to be prefixed with <a name=pagenum></a>,
    # which it then splits on to generate page-based sections. However, that's not what PDFBox
    # generates in its HTML output. Fortunately, PDFBox does have its own page-separator: it
    # embeds each page in an extra div. The div opener is:
    # <div style=\"page-break-before:always; page-break-after:always\">
    # The PDFPlugin now looks for this and prefixes <a name=0></a> to each such div. (The
    # pagenumber is fixed at 0 since I'm unable to work out how to increment the pagenum during
    # a regex substitution even with regex extensions on.) Later, when we process each section
    # to get the pagenum, PDFBox's output for this is pre-processed by having a loopcounter
    # that increments the pagenum for each subsequent section.

    #$pdfbox_pageheader="\<div style=\"page-break-before:always; page-break-after:always\">";
    my $loopcounter = 0; # used later on!
    $text =~ s@\<div style=\"page-break-before:always; page-break-after:always\">@<a name=$loopcounter></a><div style=\"page-break-before:always; page-break-after:always\">@g;


    # Calculate number of pages based on <a ...> tags (we have a <a name=1> etc
    # for each page). Metadata based on this calculation not set until process()
    #
    # Note: this is done even if we are not breaking the document into pages as it might
    # be useful to give an indication of document length in browser through setting
    # num_pages as metadata.

    # Strip any lone low and high surrogate code points (U+D800 - U+DFFF), which
    # are not legal in well-formed UTF-8 text and would break later processing.
    $text =~ s@[\N{U+D800}-\N{U+DFFF}]@\ @g;
    my @pages = ($text =~ m/\<[Aa] name=\"?\w+\"?>/ig); #<div style=\"?page-break-before:always; page-break-after:always\"?>
    my $num_pages = scalar(@pages);
    $self->{'num_pages'} = $num_pages;

    if ($self->{'use_sections'}
	&& $self->{'converted_to'} eq "HTML") {

	print $outhandle "PDFv2Plugin: Calculating sections...\n";

	# we have "<a name=1></a>" etc for each page
	# it may be <A name=
	my @sections = split('<[Aa] name=', $text);

	my $top_section = "";

	if (scalar (@sections) == 1) { #only one section - no split!
	    print $outhandle "PDFv2Plugin: warning - no sections found\n";
	} else {
	    $top_section .= shift @sections; # keep HTML header etc as top_section
	}

	# handle first section specially for title? Or all use first 100...
	# Guard against an empty @sections (e.g. empty converted file) so the
	# substitutions below don't operate on undef.
	my $title = defined($sections[0]) ? $sections[0] : "";
	$title =~ s/^\"?\w+\"?>//; # specific for pdftohtml...
	$title =~ s/<\/([^>]+)><\1>//g; # (eg) </b><b> - no space
	$title =~ s/<[^>]*>/ /g;
	$title =~ s/(?:&nbsp;|\xc2\xa0)/ /g; # utf-8 for nbsp...
	$title =~ s/^\s+//s;
	$title =~ s/\s+$//;
	$title =~ s/\s+/ /gs;
	$title =~ s/^$self->{'title_sub'}// if ($self->{'title_sub'});
	$title =~ s/^\s+//s; # in case title_sub introduced any...
	$title = substr ($title, 0, 100);
	$title =~ s/\s\S*$/.../;


	if (scalar (@sections) == 1) { # no sections found
	    $top_section .= $sections[0];
	    @sections=();
	} else {
	    $top_section .= "<!--<Section>\n<Metadata name=\"Title\">$title</Metadata>\n-->\n <!--</Section>-->\n";
	}

	# add metadata per section...
	foreach my $section (@sections) {
	    # section names are not always just digits, may be like "outline"
	    # Only trust $1 when the substitution actually matched: Perl's capture
	    # variables otherwise retain their value from the previous successful
	    # match, which would silently reuse the preceding section's title and
	    # defeat the defined($title) check below.
	    if ($section =~ s@^\"?(\w+)\"?></a>@@) { # leftover from split expression...
		$title = $1; # Greenstone does magic if sections are titled digits
	    } else {
		$title = undef;
	    }

	    # A title of pagenum=0 means use_sections is being applied on output from PDFBox,
	    # which didn't originally have a <a name=incremented pagenumber></a> to split each page.
	    # Our Perl code then prefixed <a name=0></a> to it. Now need to increment the pagenum here:
	    # (defined() guard avoids an uninitialised-value warning; undef titles are handled below)
	    if($loopcounter > 0 || (defined($title) && $title eq "0" && $loopcounter == 0)) { # implies use_sections with PDFBox
		$title = ++$loopcounter;
	    }

	    if (! defined($title) ) {
		print STDERR "no title: $section\n";
		$title = " "; # get rid of the undefined warning in next line
	    }
	    my $newsection = "<!-- from PDFv2Plugin -->\n<!-- <Section>\n";
	    $newsection .= "<Metadata name=\"Title\">" . $title
		. "</Metadata>\n--><br />\n";
	    $newsection .= $section;
	    $newsection .= "<!--</Section>-->\n";
	    $section = $newsection;
	}

	$text=join('', ($top_section, @sections));
    }

    if ($self->{'use_sections'}
	&& $self->{'converted_to'} eq "text") {
	print STDERR "**** When converting PDF to text, cannot apply use_sections\n";
    }


    # The following should no longer be needed, now that strings
    # read in are Unicode aware (in the Perl sense) rather than
    # raw binary strings that just happen to be UTF-8 compliant

    # turn any high bytes that aren't valid utf-8 into utf-8.
    ## unicode::ensure_utf8(\$text);

    # Write it out again!
    $self->utf8_write_file (\$text, $conv_filename);
}
| 732 |
|
---|
| 733 |
|
---|
# do plugin specific processing of doc_obj for HTML type:
# dispatches to process_type(), normalises any extracted "date" metadata
# into Greenstone's YYYYMMDD "Date" format, records NumPages metadata
# (computed earlier in default_convert_post_process), and marks
# sectionalised HTML output as a paged/hierarchical document.
sub process {
    my $self = shift (@_);
    my ($pluginfo, $base_dir, $file, $metadata, $doc_obj, $gli) = @_;

    my $result = $self->process_type($base_dir,$file,$doc_obj);

    # fix up the extracted date metadata to be in Greenstone date format,
    # and fix the capitalisation of 'date'
    my $cursection = $doc_obj->get_top_section();
    foreach my $datemeta (@{$doc_obj->get_metadata($cursection, "date")}) {
	$doc_obj->delete_metadata($cursection, "date", $datemeta);

	# We're just interested in the date bit, not the time
	# some pdf creators (eg "Acrobat 5.0 Scan Plug-in for Windows")
	# set a /CreationDate, and set /ModDate to 000000000. pdftohtml
	# extracts the ModDate, so it is 0...
	#
	# Guard the capture variables with the match itself: $1/$2/$3 retain
	# the values of the previous successful match, so an unguarded read
	# would reuse a prior iteration's date when this $datemeta doesn't
	# contain a d-d-d pattern (the old defined() check could not catch
	# that, since stale captures are still defined).
	if ($datemeta =~ /(\d+)-(\d+)-(\d+)/) {
	    my ($year, $month, $day) = ($1,$2,$3);
	    next if ($year == 0);                     # placeholder /ModDate of all zeroes
	    $year += 1900 if ($year < 100);           # just to be safe
	    $month = "0$month" if ($month =~ /^\d$/); # zero-pad single digit
	    $day = "0$day" if ($day =~ /^\d$/);       # zero-pad single digit
	    my $date = "$year$month$day";
	    $doc_obj->add_utf8_metadata($cursection, "Date", $date);
	}
    }

    $doc_obj->add_utf8_metadata($cursection, "NumPages", $self->{'num_pages'}) if defined $self->{'num_pages'};

    if ($self->{'use_sections'} && $self->{'converted_to'} eq "HTML") {
	# For gs2 we explicitly make it a paged document, cos greenstone won't get it
	# right if any section has an empty title, or one with letters in it
	if (&util::is_gs3()) {
	    # but for gs3, paged docs currently use image slider which is ugly if there are no images
	    $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Hierarchy");
	} else {
	    $doc_obj->set_utf8_metadata_element ($cursection, "gsdlthistype", "Paged");
	}
    }

    return $result;
}
| 778 |
|
---|
| 779 | 1;
|
---|