###########################################################################
#
# lucenebuildproc.pm -- perl wrapper for building index with Lucene
# A component of the Greenstone digital library software
# from the New Zealand Digital Library Project at the
# University of Waikato, New Zealand.
#
# Copyright (C) 1999 New Zealand Digital Library Project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###########################################################################

package lucenebuildproc;

# This document processor outputs a document for Lucene to process.
# It uses the same basic XML structure set up by mgppbuilder/mgppbuildproc.

use mgppbuildproc;
use ghtml;
use strict;
no strict 'refs'; # allow filehandles to be variables and vice versa

use IncrementalBuildUtils;

sub BEGIN {
    @lucenebuildproc::ISA = ('mgppbuildproc');
}

sub new {
    my $class = shift @_;
    my $self = new mgppbuildproc (@_);

    $self->{'numincdocs'} = 0;

    return bless $self, $class;
}

sub is_incremental_capable {
    my $self = shift (@_);

    # Unlike MG and MGPP, Lucene supports incremental building
    return 1;
}

sub text {
    my $self = shift (@_);
    my ($doc_obj, $file) = @_;
    my $handle = $self->{'output_handle'};
    my $outhandle = $self->{'outhandle'};

    # only output this document if it is one to be indexed
    return if ($doc_obj->get_doc_type() ne "indexed_doc");

    my $indexed_doc = $self->is_subcollection_doc($doc_obj);

    # this is another document
    $self->{'num_docs'} += 1;

    # get the parameters for the output
    # split on : just in case there is subcoll and lang stuff
    my ($fields) = split (/:/, $self->{'index'});

    my $doc_level = $mgppbuildproc::level_map{'document'};
    my $gs2ns = 'xmlns:gs2="http://www.greenstone.org/gs2"';

    my $levels = $self->{'levels'};
    my $ldoc_level = $levels->{'document'};
    my $lsec_level = $levels->{'section'};
    my $lpar_level = $levels->{'paragraph'};

    my $docid = "";
    if ($ldoc_level) {
        if ($self->{'db_level'} eq 'document') {
            my $doc_sec_num = $self->{'num_docs'};
            $docid = "gs2:id=\"$doc_sec_num\"";
        }
        else {
            # default is section level
            my $doc_sec_num = $self->{'num_sections'} + 1;
            $docid = "gs2:id=\"$doc_sec_num\"";
        }
    }

    my $documenttag = "<$doc_level $gs2ns file=\"$file\" $docid >\n";
    my $documentendtag = "\n</$doc_level>\n";

    my ($sectiontag) = "";
    if ($lsec_level) {
        $sectiontag = $mgppbuildproc::level_map{'section'};
    }

    my ($parastarttag) = "";
    my ($paraendtag) = "";
    if ($self->{'levels'}->{'paragraph'}) {
        if ($self->{'strip_html'}) {
            $parastarttag = "<" . $mgppbuildproc::level_map{'paragraph'} . ">";
            $paraendtag = "</" . $mgppbuildproc::level_map{'paragraph'} . ">";
        }
        else {
            print $outhandle "Paragraph level cannot be used with no_strip_html! Not indexing paragraphs.\n";
        }
    }
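
    # For illustration only (a sketch, not produced verbatim by this code): with
    # document, section and paragraph levels enabled, the stream written to
    # $handle for one document looks roughly like
    #
    #   <Doc xmlns:gs2="http://www.greenstone.org/gs2" file="doc.xml" gs2:id="12" >
    #     <Sec gs2:id="345" >
    #       <Para><TI index="1">a metadata value</TI></Para>
    #       <Para><TX index="1">the section text ...</TX></Para>
    #     </Sec>
    #   </Doc>
    #
    # The actual element names come from %mgppbuildproc::level_map and from the
    # shortnames assigned below, so Doc/Sec/Para/TI/TX here are assumptions.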

    my $doc_section = 0; # just for this document
    my $text = "";
    $text .= $documenttag;

    # get the text for this document
    my $section = $doc_obj->get_top_section();
    while (defined $section) {
        # update a few statistics
        $doc_section++;
        $self->{'num_sections'}++;

        if ($sectiontag ne "") {
            my $secid = "gs2:id=\"" . $self->{'num_sections'} . "\"";
            $text .= "\n<$sectiontag $secid >\n";
        }

        # if we are doing subcollections, then some docs shouldn't be indexed,
        # but we need to put the section tag placeholders in there so the
        # sections match up with the database
        my $indexed_section = $doc_obj->get_metadata_element($section, "gsdldoctype") || "indexed_section";
        if (($indexed_doc == 0) || ($indexed_section ne "indexed_section" && $indexed_section ne "indexed_doc")) {
            $text .= "\n</$sectiontag>\n" if ($sectiontag ne "");
            $section = $doc_obj->get_next_section($section);
            next;
        }

        $self->{'num_bytes'} += $doc_obj->get_text_length ($section);
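
        # A note on the field list (inferred from the splitting logic below):
        # $fields is a ';'-separated list such as "text;dc.Title;allfields"; each
        # entry may itself be a ','-separated mix of metadata names and "text",
        # and a leading "top" restricts that entry to the first (top) section.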

        foreach my $field (split (/;/, $fields)) {
            # only deal with this field if it doesn't start with top or
            # this is the first section
            my $real_field = $field;
            next if (($real_field =~ s/^top//) && ($doc_section != 1));

            my $new_text = "";
            my $tmp_text = "";

            # If allfields is requested, add all metadata fields and text as
            # belonging to the ZZ field
            if ($real_field eq "allfields") {
                # Text first - no HTML or paragraph tags
                $new_text .= "$parastarttag\n";
                $tmp_text = $self->preprocess_text($doc_obj->get_text ($section), 1, "");
                &ghtml::htmlsafe($tmp_text);
                $new_text .= "$tmp_text$paraendtag\n";

                # Then metadata
                my $metadata = $doc_obj->get_all_metadata ($section);
                foreach my $pair (@$metadata) {
                    my ($mfield, $mvalue) = (@$pair);
                    &ghtml::htmlsafe($mvalue);
                    # check fields here, maybe others don't want them - change to use dontindex!!
                    if ($mfield ne "Identifier"
                        && $mfield !~ /^gsdl/
                        && $mfield ne "classifytype"
                        && $mfield ne "assocfilepath"
                        && defined $mvalue && $mvalue ne "") {
                        $new_text .= "$parastarttag$mvalue$paraendtag\n";
                    }
                    if (!defined $self->{'indexfields'}->{$mfield}) {
                        $self->{'indexfields'}->{$mfield} = 1;
                    }
                }
            }
            # metadata - output all metadata we know about except gsdl stuff
            elsif ($real_field eq "metadata" || $real_field eq "allfields") {
                my $shortname = "";
                my $metadata = $doc_obj->get_all_metadata ($section);
                foreach my $pair (@$metadata) {
                    my ($mfield, $mvalue) = (@$pair);
                    &ghtml::htmlsafe($mvalue);
                    # check fields here, maybe others don't want them - change to use dontindex!!
                    if ($mfield ne "Identifier"
                        && $mfield !~ /^gsdl/
                        && $mfield ne "classifytype"
                        && $mfield ne "assocfilepath"
                        && defined $mvalue && $mvalue ne "") {
                        if (defined $self->{'indexfieldmap'}->{$mfield}) {
                            $shortname = $self->{'indexfieldmap'}->{$mfield};
                        }
                        else {
                            $shortname = $self->create_shortname($mfield);
                            $self->{'indexfieldmap'}->{$mfield} = $shortname;
                            $self->{'indexfieldmap'}->{$shortname} = 1;
                        }
                        $new_text .= "$parastarttag<$shortname index=\"1\">$mvalue</$shortname>$paraendtag\n";
                        if (!defined $self->{'indexfields'}->{$mfield}) {
                            $self->{'indexfields'}->{$mfield} = 1;
                        }
                    }
                }
            }
            else {
                # individual metadata and/or text specified - could be a comma-separated list
                my $shortname = "";
                if (defined $self->{'indexfieldmap'}->{$real_field}) {
                    $shortname = $self->{'indexfieldmap'}->{$real_field};
                }
                else {
                    $shortname = $self->create_shortname($real_field);
                    $self->{'indexfieldmap'}->{$real_field} = $shortname;
                    $self->{'indexfieldmap'}->{$shortname} = 1;
                }
                my @metadata_list = ();
                foreach my $submeta (split /,/, $real_field) {
                    if ($submeta eq "text") {
                        my $section_text = $doc_obj->get_text($section);
                        if ($self->{'indexing_text'}) {
                            # tag the text with <$shortname index="1">...</$shortname>, add the
                            # paragraph tags and always strip out HTML
                            $new_text .= "$parastarttag<$shortname index=\"1\">\n";
                            if ($parastarttag ne "") {
                                $section_text = $self->preprocess_text($section_text, 1, "</$shortname>$paraendtag$parastarttag<$shortname index=\"1\">");
                            }
                            else {
                                # we don't want to individually tag each paragraph if not doing para indexing
                                $section_text = $self->preprocess_text($section_text, 1, "");
                            }
                            $new_text .= "$section_text</$shortname>$paraendtag\n";
                        }
                        else {
                            # leave the HTML in, but escape the tags, and don't add Paragraph
                            # tags - we never retrieve paras at the moment
                            $tmp_text .= $doc_obj->get_text ($section);
                            &ghtml::htmlsafe($tmp_text);
                            $new_text .= $tmp_text;
                        }
                    }
                    else {
                        my @section_metadata = @{$doc_obj->get_metadata ($section, $submeta)};
                        if ($section ne $doc_obj->get_top_section()
                            && $self->{'indexing_text'}
                            && defined ($self->{'sections_index_document_metadata'})) {
                            if ($self->{'sections_index_document_metadata'} eq "always"
                                || (scalar(@section_metadata) == 0
                                    && $self->{'sections_index_document_metadata'} eq "unless_section_metadata_exists")) {
                                push (@section_metadata, @{$doc_obj->get_metadata ($doc_obj->get_top_section(), $submeta)});
                            }
                        }
                        push (@metadata_list, @section_metadata);
                    }
                }
                foreach my $item (@metadata_list) {
                    &ghtml::htmlsafe($item);
                    $new_text .= "$parastarttag<$shortname index=\"1\">$item</$shortname>$paraendtag\n";
                }
            }

            # filter the text
            $self->filter_text ($field, $new_text);

            $self->{'num_processed_bytes'} += length ($new_text);
            $text .= "$new_text";
        } # foreach field

        $text .= "\n</$sectiontag>\n" if ($sectiontag ne "");
        $section = $doc_obj->get_next_section($section);
    } # while defined section

    print $handle "$text\n$documentendtag";
    #print STDOUT "$text\n$documentendtag";
}
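
# Note: create_shortname() and filter_text() are not defined in this file and are
# assumed to be inherited from mgppbuildproc. The long-name to short-element-name
# mapping accumulated in 'indexfieldmap' above (e.g. dc.Title to some two-letter
# code) is reused across sections and documents, and is what a builder would later
# record so that query-time field names can be translated back.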
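
# Illustration of the incremental (incinfodb) path in process() below; the values
# are made up, only the formats are taken from the code:
#   docnum   -> 1354        (numbering carries on from 'starting_num_sections' + 1)
#   contains -> ".1;".2     (one "\".N" entry per child section, ';'-separated)
#   hastxt   -> 1 or 0
# after which IncrementalBuildUtils::addDocument() is called with the collection
# name, the document object and its top section.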

# /** We make this builder pretend to be a document processor so we can get
#  *  information back from the plugins.
#  *
#  *  @param $self    A reference to this Lucene builder
#  *  @param $doc_obj A reference to a document object representing what was
#  *                  parsed by the GAPlug
#  *  @param $file    The name of the file parsed, as a string
#  *
#  *  @author John Thompson, DL Consulting Ltd
#  */
sub process() {
    my $self = shift (@_);
    my ($doc_obj, $file) = @_;

    # If this is called from any stage other than an incremental infodb, we want
    # to pass straight through to the superclass's process()
    if ($self->get_mode() eq "incinfodb") {
        print STDERR "*** Processing a document added using INCINFODB ***\n";

        my ($archivedir) = $file =~ /^(.*?)(?:\/|\\)[^\/\\]*$/;
        $archivedir = "" unless defined $archivedir;
        $archivedir =~ s/\\/\//g;
        $archivedir =~ s/^\/+//;
        $archivedir =~ s/\/+$//;

        # Number of files
        print STDERR "There are " . scalar($doc_obj->get_assoc_files()) . " associated documents...\n";

        # resolve the final filenames of the files associated with this document
        $self->assoc_files ($doc_obj, $archivedir);

        # is this a paged or a hierarchical document
        my ($thistype, $childtype) = $self->get_document_type ($doc_obj);

        # Determine the actual docnum by checking whether we've processed any
        # previous incrementally added documents. If so, carry on from there.
        # Otherwise set the counter to be the same as the number of sections
        # encountered during the previous build.
        if ($self->{'numincdocs'} == 0) {
            $self->{'numincdocs'} = $self->{'starting_num_sections'} + 1;
        }

        my $section = $doc_obj->get_top_section ();
        print STDERR "+ top section: '$section'\n";
        my $doc_OID = $doc_obj->get_OID();
        my $url = "";
        while (defined $section) {
            print STDERR "+ processing section: '$section'\n";

            # Attach all the other metadata to this document.
            # Record the fact that this document is a document (unless doctype
            # has been set to something else from within a plugin)
            my $dtype = $doc_obj->get_metadata_element ($section, "doctype");
            if (!defined $dtype || $dtype !~ /\w/) {
                $doc_obj->add_utf8_metadata($section, "doctype", "doc");
            }

            # record whether this node contains text
            if ($doc_obj->get_text_length($section) > 0) {
                $doc_obj->add_utf8_metadata($section, "hastxt", 1);
            }
            else {
                $doc_obj->add_utf8_metadata($section, "hastxt", 0);
            }

            # output archivedir if at top level
            if ($section eq $doc_obj->get_top_section()) {
                $doc_obj->add_utf8_metadata($section, "archivedir", $archivedir);
                $doc_obj->add_utf8_metadata($section, "thistype", $thistype);
            }

            # output a list of children
            my $children = $doc_obj->get_children ($section);
            if (scalar(@$children) > 0) {
                $doc_obj->add_utf8_metadata($section, "childtype", $childtype);
                my @contains = ();
                foreach my $child (@$children) {
                    if ($child =~ /^.*?\.(\d+)$/) {
                        push (@contains, "\".$1");
                    }
                    else {
                        push (@contains, "\".$child");
                    }
                }
                $doc_obj->add_utf8_metadata($section, "contains", join(";", @contains));
            }

            # output the matching doc number
            print STDERR "+ docnum=" . $self->{'numincdocs'} . "\n";
            $doc_obj->add_utf8_metadata($section, "docnum", $self->{'numincdocs'});

            $self->{'numincdocs'}++;
            $section = $doc_obj->get_next_section($section);
            # if no sections wanted, only add the document itself
            last if ($self->{'db_level'} eq "document");
        }
        print STDERR "\n*** incrementally add metadata from document at: " . $file . "\n";

        &IncrementalBuildUtils::addDocument($self->{'collection'}, $doc_obj, $doc_obj->get_top_section());
    }
    else {
        $self->mgppbuildproc::process(@_);
    }
}
# /** process() **/

# Following methods seem to be no different to those defined in basebuildproc.pm
# From inspection, it looks like these ones can be removed

sub get_num_docs {
    my $self = shift (@_);
    #rint STDERR "get_num_docs(): $self->{'num_docs'}\n";
    return $self->{'num_docs'};
}

sub get_num_sections {
    my $self = shift (@_);
    #rint STDERR "get_num_sections(): $self->{'num_sections'}\n";
    return $self->{'num_sections'};
}

# num_bytes is the actual number of bytes in the collection;
# this is normally the same as what's processed during text compression
sub get_num_bytes {
    my $self = shift (@_);
    #rint STDERR "get_num_bytes(): $self->{'num_bytes'}\n";
    return $self->{'num_bytes'};
}


# This is similar to mgppbuildproc's preprocess_text but adds extra spaces.
# Otherwise the removal of tags below might lead to Lucene turning
# "...farming</p>\n<p>EDWARD.." into "farmingedward"
# (example from demo collection b20cre)
# Many thanks to John Thompson, DL Consulting Ltd. (www.dlconsulting.com)
sub preprocess_text {
    my $self = shift (@_);
    my ($text, $strip_html, $para) = @_;

    # at this stage, we do not do paragraph tags unless we have strip_html -
    # it would result in a huge mess of non-XML
    return unless $strip_html;

    my $new_text = $text;

    # if we have <pre> tags, we can have < > inside them, need to delete
    # the <> before stripping tags
    $new_text =~ s/<pre>(.*?)<\/pre>/$self->remove_gtlt($1,$para)/gse;
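    # remove_gtlt() is assumed to be inherited from mgppbuildproc: it deletes the
    # stray '<' and '>' characters inside the <pre> block so that the generic
    # tag-stripping substitutions below do not swallow real content.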

    if ($para eq "") {
        # just remove all tags
        $new_text =~ s/<[^>]*>/ /gs;
    }
    else {
        # strip all tags except <p> tags, which get turned into $para
        $new_text =~ s/<([^>]*)>/$self->process_tags($1, $para)/gse;
    }

    # It's important that we remove named entities, because otherwise the text
    # passed to Lucene for indexing may not be valid XML (eg. if HTML-only
    # entities like &nbsp; are used)
    $new_text =~ s/&\w{1,10};//g;
    # Remove stray '&' characters, except in &#nnnn; or &#xhhhh; entities (which are valid XML)
    $new_text =~ s/&([^\#])/ $1/g;

    return $new_text;
}

1;
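
# Worked example for preprocess_text() (illustrative only; the element names are
# assumptions and process_tags()/remove_gtlt() are inherited from mgppbuildproc):
#
#   $self->preprocess_text('farming<p>EDWARD', 1, '');
#     # -> 'farming EDWARD'   (every tag is replaced by a single space)
#
#   $self->preprocess_text('farming<p>EDWARD', 1, '</Para><Para>');
#     # -> 'farming</Para><Para>EDWARD'   (assuming process_tags() turns <p>
#     #    into $para and drops other tags)
#
# Either way the two words stay separated, which is the point of the extra
# spaces mentioned in the comment above preprocess_text().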