source: trunk/gsdl/perllib/classify/phind.pm @ 1949

Last change on this file since 1949 was 1949, checked in by paynter, 23 years ago

Fixed bug that prevented tokeniser from distinguishing between languages.

  • Property svn:keywords set to Author Date Id Revision
File size: 37.9 KB
1###########################################################################
2#
3# phind.pm -- the Phind classifier
4#
5# Copyright (C) 2000 Gordon W. Paynter
6# Copyright (C) 2000 New Zealand Digital Library Project
7#
8#
9# A component of the Greenstone digital library software
10# from the New Zealand Digital Library Project at the
11# University of Waikato, New Zealand.
12#
13# This program is free software; you can redistribute it and/or modify
14# it under the terms of the GNU General Public License as published by
15# the Free Software Foundation; either version 2 of the License, or
16# (at your option) any later version.
17#
18# This program is distributed in the hope that it will be useful,
19# but WITHOUT ANY WARRANTY; without even the implied warranty of
20# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21# GNU General Public License for more details.
22#
23# You should have received a copy of the GNU General Public License
24# along with this program; if not, write to the Free Software
25# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26#
27###########################################################################
28
29# The phind classifier plugin.
30# Options are described in the print_usage function.
31# Type "classinfo.pl phind" at the command line for a summary.
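# An illustrative collect.cfg entry (hypothetical collection; values chosen
# only to show the option syntax listed in print_usage below):
#
#   classify phind -text section:Title,section:text -language en|fr -button Phrase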
32
33package phind;
34
35use BasClas;
36use util;
37use ghtml;
38use unicode;
39
40sub BEGIN {
41 @ISA = ('BasClas');
42}
43
44
45sub print_usage {
46 print STDERR "
47 usage: classify phind [options]
48
49 options:
50 -text Fields The text used to build the phrase hierarchy.
51 (default: 'section:Title,section:text')
52
53 -title Title The metadata field used to describe each document.
54 (default: 'Title')
55
56 -button Name The label for the classifier screen and button in
57 navigation bar.
58 (default: 'Phrase')
59
60 -language Regex Language or languages to use when building the hierarchy.
61 Languages are identified by two-letter language codes
62 like en (English), es (Spanish), and fr (French).
63 Language is a regular expression, so 'en|fr' (English or
64 French) and '..' (match any language) are valid.
65 (default: 'en')
66
67 -savephrases File If set, the phrase information will be stored in
68 the given file as text. It is probably a good idea
69 to use an absolute path.
70 (default: not set)
71
72 -suffixmode N The smode parameter to the phrase extraction program. A
73 value of 0 means that stopwords are ignored, and a value of 1
74 means that stopwords are used.
75 (default: 1)
76
77 -thesaurus Name Name of a thesaurus stored in phind format in the
78 collection's etc directory.
79 (default: not set)
80
81 -untidy Don't remove working files.
82
83"; }
84
85
86# Phrase delimiter symbols - these should be abstracted out someplace
87
88my $colstart = "COLLECTIONSTART";
89my $colend = "COLLECTIONEND";
90my $doclimit = "DOCUMENTLIMIT";
91my $senlimit = "SENTENCELIMIT";
92my @delimiters = ($colstart, $colend, $doclimit, $senlimit);
93
94
95# Create a new phind browser based on collect.cfg
96
97sub new {
98 my $class = shift (@_);
99 my $self = new BasClas($class, @_);
100
101 my $out = $self->{'outhandle'};
102
103
104 # Phind installation check
105 # The phind phrase browser is research software and is not installed
106 # by default. If the user attempts to use it we warn them that it's a
107 # bit dodgy, then tell them how to install it. If they can do that
108 # and get all the files in place, then we let them proceed.
109
110 print $out "Checking Phind phrase browser requirements...\n";
111
112 # Make sure we're not on Windows
113 if ($ENV{'GSDLOS'} =~ /windows/i) {
114 print STDERR "Sorry - Phind currently only works under Unix";
115 exit(1);
116 }
117
118 # Ensure the Phind generate scripts are in place
119 my $file1 = &util::filename_cat($ENV{'GSDLHOME'}, "bin", $ENV{'GSDLOS'}, "suffix");
120 my $src = &util::filename_cat($ENV{'GSDLHOME'}, "src", "phind", "generate");
121
122 if (!(-e $file1)) {
123 print STDERR "The phind \"suffix\" program is not installed. ";
124 print STDERR "To install it, change to the directory\n";
125 print STDERR " $src\n";
126 print STDERR "and type \"make install-phind\".\n\n";
127 exit(1);
128 }
129
130 # Ensure the Phind CGI script is in place
131 $file1 = &util::filename_cat($ENV{'GSDLHOME'}, "cgi-bin", "phindcgi");
132 $src = &util::filename_cat($ENV{'GSDLHOME'}, "src", "phind", "host");
133
134 if (!(-e $file1)) {
135 print STDERR "The phind CGI program is not installed. ";
136 print STDERR "To install it, change to the directory\n";
137 print STDERR " $src\n";
138 print STDERR "and type \"make install-phind\".\n\n";
139 exit(1);
140 }
141
142 # Ensure the Phind Java applet is in place
143 $src = &util::filename_cat($ENV{'GSDLHOME'}, "src", "phind", "client");
144 $file1 = &util::filename_cat($src, "Phind.class");
145
146 if (!(-e $file1)) {
147 print STDERR "The phind Java classes are not compiled. ";
148 print STDERR "To compile them, change to the directory\n";
149 print STDERR " $src\n";
150 print STDERR "and use your Java compiler to compile Phind.java.\n";
151 print STDERR "(if you have Java 1.2 installed, type \"javac Phind.java\")\n\n";
152 exit(1);
153 }
154
155 # Parse classifier arguments
156 my $builddir = "";
157 my $phinddir = "";
158 if (!parsargv::parse(\@_,
159 q^text/.*/section:Title,section:text^, \$self->{'indexes'},
160 q^title/.*/Title^, \$self->{'titlefield'},
161 q^button/.*/Phrase^, \$self->{'buttonname'},
162 q^language/.*/en^, \$language,
163 q^builddir/.*/^, \$builddir,
164 q^savephrases/\d/0^, \$self->{'savephrases'},
165 q^suffixmode/\d/1^, \$self->{'suffixmode'},
166 q^thesaurus/.*/^, \$self->{'thesaurus'},
167 q^untidy^, \$self->{'untidy'},
168 "allow_extra_options")) {
169
170 print STDERR "\nIncorrect options passed to $class, check your collect.cfg file\n";
171 &print_usage();
172 die "\n";
173 }
174
175 # classifier information
176 $self->{'collection'} = $ENV{'GSDLCOLLECTION'};
177
178 # limit languages
179 $self->{'language_exp'} = $language;
180
181 # collection directories
182 $self->{'collectiondir'} = $ENV{'GSDLCOLLECTDIR'};
183 if (!$builddir) {
184 $builddir = &util::filename_cat($ENV{'GSDLCOLLECTDIR'}, "building");
185 }
186 $self->{'builddir'} = $builddir;
187 $self->{'phinddir'} = &util::filename_cat($builddir, "phind");
188
189 return bless $self, $class;
190}
191
192
193# Initialise the phind classifier
194
195sub init {
196 my $self = shift (@_);
197
198 # ensure we have a build directory
199 my $builddir = $self->{'builddir'};
200 die unless (-e "$builddir");
201
202 # create phind directory
203 my $phinddir = $self->{'phinddir'};
204 if (-e "$phinddir") {
205 &util::rm_r("$phinddir");
206 }
207 &util::mk_dir("$phinddir");
208
209 # open filehandles for documents and text
210 my $clausefile = &util::filename_cat("$phinddir", "clauses");
211 &util::rm($clausefile) if (-e $clausefile);
212 open(TEXT, ">$clausefile") || die "Cannot open $clausefile: $!";
213 $self->{'txthandle'} = TEXT;
214
215 my $docfile = &util::filename_cat("$phinddir", "docs.txt");
216 &util::rm($docfile) if (-e $docfile);
217 open(DOCS, ">$docfile") || die "Cannot open $docfile: $!";
218 $self->{'dochandle'} = DOCS;
219
220}
221
222
223# Classify each document.
224#
225# Each document is passed here in turn. The classifier extracts the
226# text of each and stores it in the clauses file. Document details are
227# stored in the docs.txt file.
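# For orientation (format taken from the print statements in classify below):
# each line of docs.txt is the literal tag "<Document>" followed by a tab, the
# document OID, another tab, and the document title; the clauses file receives
# a DOCUMENTLIMIT marker line followed by the document's text in clause
# format, one clause per line.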
228
229sub classify {
230 my $self = shift (@_);
231 my ($doc_obj) = @_;
232
233 my $verbosity = $self->{'verbosity'};
234 my $top_section = $doc_obj->get_top_section();
235
236 my $titlefield = $self->{'titlefield'};
237
238 my $title = $doc_obj->get_metadata_element ($top_section, $titlefield);
239 print "process: $title\n" if ($verbosity > 2);
240
241 # Only consider the file if it is in the correct language
242 my $doclanguage = $doc_obj->get_metadata_element ($top_section, "Language");
243 my $phrlanguage = $self->{'language_exp'};
244 return if ($doclanguage && ($doclanguage !~ /$phrlanguage/i));
245
246 # record this file
247 $self->{'document_count'}++;
248 print "file ", $self->{'document_count'}, ": $title\n" if ($self->{'verbosity'});
249
250
251 # Store document details
252 my $OID = $doc_obj->get_OID();
253 $OID = "NULL" unless defined $OID;
254 my $dochandle = $self->{'dochandle'};
255 print $dochandle "<Document>\t$OID\t$title\n";
256
257 # Store the text occurring in this object
258
259 # output the document delimiter
260 my $txthandle = $self->{'txthandle'};
261 print $txthandle "$doclimit\n";
262
263 # iterate over the required indexes and store their text
264 my $indexes = $self->{'indexes'};
265 my $text = "";
266 my ($part, $level, $field, $section, $data, $dataref);
267
268 foreach $part (split(/,/, $indexes)) {
269
270 # Each field has a level and a data element (e.g. document:Title)
271 ($level, $field) = split(/:/, $part);
272 die unless ($level && $field);
273
274 # Extract the text from every section
275 # (In phind, document:text and section:text are equivalent)
276 if ($field eq "text") {
277 $data = "";
278 $section = $doc_obj->get_top_section();
279 while (defined($section)) {
280 $data .= $doc_obj->get_text($section) . "\n";
281 $section = $doc_obj->get_next_section($section);
282 }
283 $text .= convert_gml_to_tokens($phrlanguage, $data) . "\n";
284 }
285
286 # Extract a metadata field from a document
287 # (If there is more than one element of the given type, get them all.)
288 elsif ($level eq "document") {
289 $dataref = $doc_obj->get_metadata($doc_obj->get_top_section(), $field);
290 foreach $data (@$dataref) {
291 $text .= convert_gml_to_tokens($phrlanguage, $data) . "\n";
292 }
293 }
294
295 # Extract metadata from every section in a document
296 elsif ($level eq "section") {
297 $data = "";
298 $section = $doc_obj->get_top_section();
299 while (defined($section)) {
300 $dataref = $doc_obj->get_metadata($section, $field);
301 $data .= join("\n", @$dataref) . "\n";
302 $section = $doc_obj->get_next_section($section);
303 }
304 $text .= convert_gml_to_tokens($phrlanguage, $data) . "\n";
305 }
306
307 # Some sort of specification which I don't understand
308 else {
309 die "Unknown level ($level) in phind index ($part)\n";
310 }
311
312 }
313
314 # output the text
315 $text =~ tr/\n//s;
316 print $txthandle "$text";
317}
318
319
320# Construct the classifier from the information already gathered
321#
322# When get_classify_info is called, the clauses and docs.txt files have
323# already been constructed in the phind directory. This function will
324# translate them into compressed, indexed MGPP files that can be read by
325# the phindcgi script. It will also register our classifier so that it
326# shows up in the navigation bar.
327
328sub get_classify_info {
329 my $self = shift (@_);
330
331 my $verbosity = $self->{'verbosity'};
332 my $out = $self->{'outhandle'};
333 my $phinddir = $self->{'phinddir'};
334
335 if ($verbosity) {
336 print $out "\n*** phind.pm generating indexes for ", $self->{'indexes'}, "\n";
337 }
338
339 # Construct phind indexes
340 my $suffixmode = $self->{'suffixmode'};
341 my ($command, $status);
342
343 # Generate the vocabulary, symbol statistics, and numbers file
344 # from the clauses file
345 print $out "\nExtracting vocabulary and statistics\n" if $verbosity;
346 &extract_vocabulary($self);
347
348 # Use the suffix program to generate the phind/phrases file
349 print $out "\nExtracting phrases from processed text (with suffix)\n" if $verbosity;
350 &execute("suffix $phinddir $suffixmode $verbosity", $verbosity, $out);
351
352 # Create the phrase file and put phrase numbers in phind/phrases
353 print $out "\nSorting and Renumbering phrases for input to mgpp\n" if $verbosity;
354 &renumber_phrases($self);
355
356 # Create the mg phrase database
357 my $mgpp = &util::filename_cat($ENV{'GSDLHOME'}, "src", "mgpp");
358 my $mg_passes = &util::filename_cat($mgpp, "text", "mg_passes");
359 my $mg_compression_dict = &util::filename_cat($mgpp, "text", "mg_compression_dict");
360
361 my $mg_perf_hash_build = &util::filename_cat($mgpp, "text", "mg_perf_hash_build");
362 my $mg_weights_build = &util::filename_cat($mgpp, "text", "mg_weights_build");
363 my $mg_invf_dict = &util::filename_cat($mgpp, "text", "mg_invf_dict");
364 my $mg_stem_idx = &util::filename_cat($mgpp, "text", "mg_stem_idx");
365
366 print $out "\nCreating phrase databases\n";
367 my $mg_input = &util::filename_cat($phinddir, "pdata.txt");
368 my $mg_stem = "pdata";
369
370 &execute("$mg_passes -d $phinddir -f $mg_stem -T1 $mg_input", $verbosity, $out);
371 &execute("$mg_compression_dict -d $phinddir -f $mg_stem", $verbosity, $out);
372 &execute("$mg_passes -d $phinddir -f $mg_stem -T2 $mg_input", $verbosity, $out);
373
374 # create the mg index of words
375 print $out "\nCreating word-level search indexes\n";
376 $mg_input = &util::filename_cat($phinddir, "pword.txt");
377 $mg_stem = "pword";
378
379 &execute("$mg_passes -d $phinddir -f $mg_stem -T1 -I1 $mg_input", $verbosity, $out);
380 &execute("$mg_compression_dict -d $phinddir -f $mg_stem", $verbosity, $out);
381 &execute("$mg_perf_hash_build -d $phinddir -f $mg_stem", $verbosity, $out);
382 &execute("$mg_passes -d $phinddir -f $mg_stem -T2 -I2 $mg_input", $verbosity, $out);
383 &execute("$mg_weights_build -d $phinddir -f $mg_stem", $verbosity, $out);
384 &execute("$mg_invf_dict -d $phinddir -f $mg_stem", $verbosity, $out);
385
386 &execute("$mg_stem_idx -d $phinddir -f $mg_stem -s 1", $verbosity, $out);
387 &execute("$mg_stem_idx -d $phinddir -f $mg_stem -s 2", $verbosity, $out);
388 &execute("$mg_stem_idx -d $phinddir -f $mg_stem -s 3", $verbosity, $out);
389
390 # create the mg document information database
391 print $out "\nCreating document information databases\n";
392 $mg_input = &util::filename_cat($phinddir, "docs.txt");
393 $mg_stem = "docs";
394
395 &execute("$mg_passes -d $phinddir -f $mg_stem -T1 $mg_input", $verbosity, $out);
396 &execute("$mg_compression_dict -d $phinddir -f $mg_stem", $verbosity, $out);
397 &execute("$mg_passes -d $phinddir -f $mg_stem -T2 $mg_input", $verbosity, $out);
398
399
400 # Tidy up stray files
401 if (!$self->{'untidy'}) {
402 print $out "\nCleaning up\n" if ($verbosity > 2);
403 &util::rm("$phinddir/clauses", "$phinddir/clauses.numbers",
404 "$phinddir/clauses.vocab", "$phinddir/clauses.stats",
405 "$phinddir/phrases", "$phinddir/phrases.3", "$phinddir/docs.txt",
406 "$phinddir/pdata.txt", "$phinddir/pword.txt");
407 my $outfile = 1;
408 while (-e "$phinddir/outPhrase.$outfile") {
409 &util::rm("$phinddir/outPhrase.$outfile");
410 $outfile++;
411 }
412 }
413
414
415 # Insert the classifier into the collection's classification hierarchy (navigation bar)
416 my $collection = $self->{'collection'};
417 my $url = "library?a=p&p=phind&c=$collection";
418
419 my %classifyinfo = ('thistype'=>'Invisible',
420 'childtype'=>'Phind',
421 'Title'=>$self->{'buttonname'},
422 'contains'=>[]);
423
424 push (@{$classifyinfo{'contains'}}, {'OID'=>$url});
425 return \%classifyinfo;
426}
427
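# Note on the structure returned above: the classifier node itself is
# 'Invisible' and has a single child whose OID is the phind CGI URL
# (for an illustrative collection called "demo" this would be
# library?a=p&p=phind&c=demo); that entry is what makes the Phrase button
# appear in the navigation bar, as described above get_classify_info.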
428
429
430sub convert_gml_to_tokens {
431
432 my ($language_exp, $text) = @_;
433
434 if ($language_exp =~ /en/) {
435 return &convert_gml_to_tokens_EN($text);
436 }
437
438 # FIRST, remove GML tags
439 $_ = $text;
440
441 # Replace all whitespace with a simple space
442 s/\s+/ /gso;
443
444 # Remove everything that is in a tag
445 s/\s*<p>\s*/ PARAGRAPHBREAK /isgo;
446 s/\s*<br>\s*/ LINEBREAK /isgo;
447 s/<[^>]*>/ /sgo;
448
449 # Now we have the text, but it may contain HTML
450 # entities coded as &gt; etc. Decode these and strip any tags they reveal.
451 s/&lt;/</sgo;
452 s/&gt;/>/sgo;
453
454 s/\s+/ /sgo;
455 s/\s*<p>\s*/ PARAGRAPHBREAK /isgo;
456 s/\s*<br>\s*/ LINEBREAK /isgo;
457 s/<[^>]*>/ /sgo;
458
459 # decode &amp; and other miscellaneous entities
460 s/&amp;/&/sgo;
461 s/&lt;/</sgo;
462 s/&gt;/>/sgo;
463 s/&amp;/&/sgo;
464
465 # replace <p> and <br> placeholders with newlines
466 s/PARAGRAPHBREAK/\n/sgo;
467 s/LINEBREAK/\n/sgo;
468
469
470 s/&([^;]+);/&unicode::ascii2utf8(\&ghtml::getcharequiv($1,0))/gse;
471
472
473 # Convert the remaining text to "clause format".
474
475 # This means removing all excess punctuation and garbage text,
476 # normalising valid punctuation to fullstops and commas,
477 # then putting one clause on each line.
478
479 # Insert newline when the end of a sentence is detected
480 # (delimiter is: "[\.\?\!]\s")
481 s/\s*[\.\?\!]\s+/\n/go;
482
483 # split numbers after four digits
484 s/(\d\d\d\d)/$1 /go;
485
486 # remove extra whitespace
487 s/ +/ /sgo;
488 s/^\s+//mgo;
489 s/\s*$/\n/mgo;
490
491 # remove lines that contain one word or less
492 s/^\S*$//mgo;
493 s/^\s*$//mgo;
494 tr/\n//s;
495
496 return $_;
497}
498
499# A version of convert_gml_to_tokens that is fine-tuned to the English language.
500
501sub convert_gml_to_tokens_EN {
502 $_ = shift @_;
503
504 # FIRST, remove GML tags
505
506 # Replace all whitespace with a simple space
507 s/\s+/ /gs;
508
509 # Remove everything that is in a tag
510 s/\s*<p>\s*/ PARAGRAPHBREAK /isg;
511 s/\s*<br>\s*/ LINEBREAK /isg;
512 s/<[^>]*>/ /sg;
513
514 # Now we have the text, but it may contain HTML
515 # entities coded as &gt; etc. Decode these and strip any tags they reveal.
516 s/&lt;/</sg;
517 s/&gt;/>/sg;
518
519 s/\s+/ /sg;
520 s/\s*<p>\s*/ PARAGRAPHBREAK /isg;
521 s/\s*<br>\s*/ LINEBREAK /isg;
522 s/<[^>]*>/ /sg;
523
524 # decode &amp; and other miscellaneous entities
525 s/&amp;/&/sg;
526 s/&lt;/</sg;
527 s/&gt;/>/sg;
528 s/&amp;/&/sg;
529
530 # replace <p> and <br> placeholders with newlines
531 s/PARAGRAPHBREAK/\n/sg;
532 s/LINEBREAK/\n/sg;
533
534
535 # Exceptional punctuation
536 #
537 # We make special cases of some punctuation
538
539 # remove any apostrophe that indicates omitted letters
540 s/(\w+)\'(\w*\s)/ $1$2 /g;
541
542 # remove period that appears in a person's initials
543 s/\s([A-Z])\./ $1 /g;
544
545 # replace hyphens in hyphenated words and names with a space
546 s/([A-Za-z])-\s*([A-Za-z])/$1 $2/g;
547
548 # Convert the remaining text to "clause format".
549 # This means removing all excess punctuation and garbage text,
550 # normalising valid punctuation to fullstops and commas,
551 # then putting one clause on each line.
552
553 # Insert newline when the end of a sentence is detected
554 # (delimiter is: "[\.\?\!]\s")
555 s/\s*[\.\?\!]\s+/\n/g;
556
557 # split numbers after four digits
558 s/(\d\d\d\d)/$1 /g;
559
560 # split words after 32 characters
561
562 # squash repeated punctuation
563 tr/A-Za-z0-9 //cs;
564
565 # save email addresses
566 # s/\w+@\w+\.[\w\.]+/EMAIL/g;
567
568 # normalise clause breaks (mostly punctuation symbols) to commas
569 s/[^A-Za-z0-9 \n]+/ , /g;
570
571 # Remove repeated commas, and replace with newline
572 s/\s*,[, ]+/\n/g;
573
574 # remove extra whitespace
575 s/ +/ /sg;
576 s/^\s+//mg;
577 s/\s*$/\n/mg;
578
579 # remove lines that contain one word or less
580 s/^\w*$//mg;
581 s/^\s*$//mg;
582 tr/\n//s;
583
584 return $_;
585
586}
587
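# Illustrative example (hand-worked, not captured from a real run): a fragment
# like
#   <p>The cat sat on the mat. The dog barked!</p>
# comes out of convert_gml_to_tokens_EN in clause format, roughly
#   The cat sat on the mat
#   The dog barked
# i.e. markup and sentence punctuation are gone and each clause sits on its
# own line, ready for the suffix phrase-extraction program.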
588
589
590# Execute a system command
591
592sub execute {
593 my ($command, $verbosity, $outhandle) = @_;
594 print $outhandle "Executing: $command\n" if ($verbosity > 2);
595 my $status = system($command);
596 if ($status != 0) {
597 print STDERR "phind - Error executing $command: $!\n";
598 exit($status);
599 }
600}
601
602
603# Generate the vocabulary, symbol statistics, and numbers file from the
604# clauses file. This is legacy code, so is a bit messy and probably won't
605# run under Windows.
606
607sub extract_vocabulary {
608 my ($self) = @_;
609
610 my $verbosity = $self->{'verbosity'};
611 my $out = $self->{'outhandle'};
612
613 my $collectiondir = $self->{'collectiondir'};
614 my $phinddir = $self->{'phinddir'};
615
616 my $language_exp = $self->{'language_exp'};
617
618 my ($w, $l, $line, $word);
619
620 my ($first_delimiter, $last_delimiter,
621 $first_stopword, $last_stopword,
622 $first_extractword, $last_extractword,
623 $first_contentword, $last_contentword,
624 $phrasedelimiter);
625
626 my $thesaurus = $self->{'thesaurus'};
627 my ($thesaurus_links, $thesaurus_terms,
628 %thesaurus, $first_thesaurusword, $last_thesaurusword);
629
630 my %symbol;
631 my (%freq);
632
633 print $out "Calculating vocabulary\n" if ($verbosity > 1);
634
635 # Read and store the stopwords
636 my $stopdir = &util::filename_cat($ENV{'GSDLHOME'}, "etc", "stopwords");
637 my $stopword_files = ();
638 my ($language, $language_dir, $file, $file_name);
639 my %stopwords;
640
641 # Examine each directory in the stopword directory
642 opendir(STOPDIR, $stopdir);
643 foreach $language (readdir STOPDIR) {
644
645 # Ignore entries that do not match the classifier's language
646 next unless ($language =~ /$language_exp/);
647 $language_dir = &util::filename_cat($stopdir, $language);
648 next unless (-d "$language_dir");
649
650 opendir(LANGDIR, $language_dir);
651 foreach $file (readdir LANGDIR) {
652
653 # Ignore entries that are not stopword files
654 next unless ($file =~ /sw$/);
655 $file_name = &util::filename_cat($language_dir, $file);
656 next unless (-f "$file_name");
657
658 # Read the stopwords
659 open(STOPFILE, "<$file_name");
660 while (<STOPFILE>) {
661 s/^\s+//;
662 s/\s.*//;
663 $word = $_;
664 $l = lc($word);
665 $stopwords{$l} = $word;
666 }
667 close STOPFILE;
668
669 }
670 }
671
672 # Read thesaurus information
673 if ($thesaurus) {
674
675 # link file exists
676 $thesaurus_links = &util::filename_cat($collectiondir, "etc", "$thesaurus.lnk");
677 die "Cannot find thesaurus link file" unless (-e "$thesaurus_links");
678
679 # ensure term file exists in the correct language
680 if ($language_exp =~ /^([a-z][a-z])/) {
681 $language = $1;
682 } else {
683 $language = 'en';
684 }
685 $thesaurus_terms = &util::filename_cat($collectiondir, "etc", "$thesaurus.$language");
686 die "Cannot find thesaurus term file" unless (-e "$thesaurus_terms");
687
688
689 # Read the thesaurus terms
690 open(TH, "<$thesaurus_terms");
691 while(<TH>) {
692 s/^\d+ //;
693 s/\(.*\)//;
694 foreach $w (split(/\s+/, $_)) {
695 $thesaurus{lc($w)} = $w;
696 }
697 }
698 close TH;
699 }
700
701 # Read words in the text and count occurrences
702 open(TXT, "<$phinddir/clauses");
703 my @words;
704
705 while(<TXT>) {
706 $line = $_;
707 next unless ($line =~ /./);
708
709 @words = split(/\s+/, $line);
710 foreach $w (@words) {
711 $l = lc($w);
712 $w = $l if ((defined $stopwords{$l}) || (defined $thesaurus{$l}));
713 $freq{$w}++;
714 }
715 $freq{$senlimit}++;
716 }
717
718 # Calculate the "best" form of each word
719 my (%bestform, %totalfreq, %bestfreq);
720
721 foreach $w (sort (keys %freq)) {
722 $l = lc($w);
723
724 # totalfreq is the number of times a term appears in any form
725 $totalfreq{$l} += $freq{$w};
726
727 if (defined $stopwords{$l}) {
728 $bestform{$l} = $stopwords{$l};
729
730 } elsif (defined $thesaurus{$l}) {
731 $bestform{$l} = $thesaurus{$l};
732
733 } elsif (!$bestform{$l} || ($freq{$w} > $bestfreq{$l})) {
734 $bestfreq{$l} = $freq{$w};
735 $bestform{$l} = $w;
736 }
737 }
738
739 undef %freq;
740 undef %bestfreq;
741
742
743 # Assign symbol numbers to tokens
744 my $nextsymbol = 1;
745 my (@vocab);
746
747 # Delimiters
748 $first_delimiter = 1;
749
750 foreach $word (@delimiters) {
751
752 $word = lc($word);
753 $bestform{$word} = uc($word);
754 $vocab[$nextsymbol] = $word;
755 $symbol{$word} = $nextsymbol;
756 $nextsymbol++;
757 }
758 $last_delimiter = $nextsymbol - 1;
759
760 # Stopwords
761 $first_stopword = $nextsymbol;
762
763 foreach my $word (sort keys %stopwords) {
764
765 # don't include a stopword unless it occurs in the text
766 $word = lc($word);
767 next unless ($totalfreq{$word});
768 next if ($symbol{$word});
769
770 $vocab[$nextsymbol] = $word;
771 $symbol{$word} = $nextsymbol;
772 $nextsymbol++;
773 }
774 $last_stopword = $nextsymbol - 1;
775 $first_contentword = $nextsymbol;
776
777 # Thesaurus terms
778 if ($thesaurus) {
779 $first_thesaurusword = $nextsymbol;
780
781 foreach my $word (sort keys %thesaurus) {
782
783 $word = lc($word);
784 next if ($symbol{$word});
785 $bestform{$word} = $thesaurus{$word};
786
787 $vocab[$nextsymbol] = $word;
788 $symbol{$word} = $nextsymbol;
789 $nextsymbol++;
790
791 }
792 $last_thesaurusword = $nextsymbol - 1;
793 }
794
795 # Other content words
796 $first_extractword = $nextsymbol;
797
798 foreach my $word (sort (keys %bestform)) {
799
800 next if ($symbol{$word});
801
802 $vocab[$nextsymbol] = $word;
803 $symbol{$word} = $nextsymbol;
804 $nextsymbol++;
805 }
806 $last_extractword = $nextsymbol - 1;
807 $last_contentword = $nextsymbol - 1;
808
809
810 # Output the words
811 print $out "Saving vocabulary in $phinddir/clauses.vocab\n" if ($verbosity > 1);
812 open(VOC, ">$phinddir/clauses.vocab");
813
814 for (my $i = 1; $i < $nextsymbol; $i++) {
815 $w = $vocab[$i];
816
817 print VOC "$bestform{$w}\n";
818 $totalfreq{$w} = 0 unless ($totalfreq{$w});
819 }
820 close VOC;
821
822
823 # Create statistics file
824 # Output statistics about the vocabulary
825 print $out "Saving statistics in $phinddir/clauses.stats\n" if ($verbosity > 1);
826 &util::rm("$phinddir/clauses.stats") if (-e "$phinddir/clauses.stats");
827
828 open(STAT, ">$phinddir/clauses.stats")
829 || die "Cannot open $phinddir/clauses.stats: $!";
830
831 print STAT "first_delimiter $first_delimiter\n";
832 print STAT "last_delimiter $last_delimiter\n";
833 print STAT "first_stopword $first_stopword\n";
834 print STAT "last_stopword $last_stopword\n";
835 if ($thesaurus) {
836 print STAT "first_thesaurusword $first_thesaurusword\n";
837 print STAT "last_thesaurusword $last_thesaurusword\n";
838 }
839 print STAT "first_extractword $first_extractword\n";
840 print STAT "last_extractword $last_extractword\n";
841 print STAT "first_contentword $first_contentword\n";
842 print STAT "last_contentword $last_contentword\n";
843 print STAT "first_symbol $first_delimiter\n";
844 print STAT "last_symbol $last_contentword\n";
845 print STAT "first_word $first_stopword\n";
846 print STAT "last_word $last_contentword\n";
847 close STAT;
848
849 undef @vocab;
850
851
852 # Create numbers file
853 # Save text as symbol numbers
854 print $out "Saving text as numbers in $phinddir/clauses.numbers\n" if ($verbosity > 1);
855
856 open(TXT, "<$phinddir/clauses");
857 open(NUM, ">$phinddir/clauses.numbers");
858
859 $phrasedelimiter = $symbol{lc($senlimit)};
860 print NUM "$symbol{lc($colstart)}\n";
861
862 # set up the special symbols that delimit documents and sentences
863 while(<TXT>) {
864
865 # split sentence into a list of tokens
866 $line = $_;
867 next unless ($line =~ /./);
868 @words = split(/\s+/, $line);
869
870 # output one token at a time
871 foreach $word (@words) {
872 $word = lc($word);
873 print NUM "$symbol{$word}\n";
874 }
875
876 # output phrase delimiter
877 print NUM "$phrasedelimiter\n";
878 }
879
880 print NUM "$symbol{lc($colend)}\n";
881 close NUM;
882
883 # Save thesaurus data in one convenient file
884 if ($thesaurus) {
885
886 my $thesaurusfile = &util::filename_cat($phinddir, "$thesaurus.numbers");
887
888
889 print $out "Saving thesaurus as numbers in $thesaurusfile\n"
890 if ($verbosity > 1);
891
892 # Read the thesaurus terms
893 my ($num, $text, %thes_symbols);
894
895 open(TH, "<$thesaurus_terms");
896 while(<TH>) {
897 chomp;
898 @words = split(/\s+/, $_);
899 $num = shift @words;
900 $text = "";
901
902 # translate words into symbol numbers
903 foreach $word (@words) {
904 $word = lc($word);
905 if ($symbol{$word}) {
906 $text .= "s$symbol{$word} ";
907 } elsif ($verbosity) {
908 print $out "phind: No thesaurus symbol, ignoring \"$word\"\n";
909 }
910 }
911 $text =~ s/ $//;
912 $thes_symbols{$num} = $text;
913 }
914 close TH;
915
916 # Read the thesaurus links and write the corresponding data
917 open(TH, "<$thesaurus_links");
918 open(THOUT, ">$thesaurusfile");
919
920 while(<TH>) {
921 chomp;
922 ($num, $text) = split(/:/, $_);
923
924 if (defined($thes_symbols{$num})) {
925 print THOUT "$num:$thes_symbols{$num}:$text\n";
926 } else {
927 print THOUT "$num:untranslated:$text\n";
928 }
929 }
930 close TH;
931 close THOUT;
932 }
933
934
935
936
937}
938
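# Recap of extract_vocabulary's output, drawn from the code above and noted
# here for orientation: clauses.vocab holds one "best form" per line in
# symbol-number order; clauses.stats records the symbol-number ranges printed
# above (first_delimiter, last_contentword, and so on); clauses.numbers is the
# clause text re-expressed as one symbol number per line, bracketed by the
# COLLECTIONSTART and COLLECTIONEND symbols, with the sentence-delimiter
# symbol after each clause.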
939
940# renumber_phrases
941#
942# Prepare the phrases file to be input to mgpp. The biggest problem is
943# reconciling the phrase identifiers used by the suffix program (which
944# we'll call suffix-id numbers) with the numbers used in the thesaurus
945# (thesaurus-id) to create a common set of phind-id numbers (phind-id).
946# Phind-id numbers must be sorted by frequency of occurrence.
947#
948# Start creating a set of phind-id numbers from the sorted suffix-id
949# numbers and (if required) the thesaurus-id numbers. Then add any other
950# phrases occurring in the thesaurus.
951#
952# The last thing we have to do is restore the vocabulary information to the
953# phrase file so that the phrases are stored as words, not as symbol
954# numbers.
955
956# The original phrases file looks something like this:
957# 159396-1:s5175:4:1:116149-2:3:d2240,2;d2253;d2254
958# 159409-1:s5263:6:1:159410-2:6:d2122;d2128;d2129;d2130;d2215;d2380
959# 159415-1:s5267:9:1:159418-2:8:d3,2;d632;d633;d668;d1934;d2010;d2281;d2374
960# 159426-1:s5273:5:2:159429-2,115168-17:5:d252;d815;d938;d939;d2361
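#
# Reading the first example line against the parsing code in
# renumber_suffix_data below, the fields are: suffix-id (159396-1), phrase
# symbols (s5175), total frequency (4), number of expansions (1), the
# expansions themselves (116149-2), number of documents (3), and the document
# list (d2240 with frequency 2, then d2253 and d2254 with frequency 1).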
961
962
963sub renumber_phrases {
964 my ($self) = @_;
965
966 renumber_suffix_data($self);
967 renumber_thesaurus_data($self);
968 restore_vocabulary_data($self);
969
970}
971
972
973
974# renumber_suffix_data
975#
976# Translate phrases file to phrases.2 using phind keys instead
977# of suffix keys and sorting the expansion data.
978
979sub renumber_suffix_data {
980 my ($self) = @_;
981
982 my $verbosity = $self->{'verbosity'};
983 my $out = $self->{'outhandle'};
984 print $out "Translate phrases: suffix-ids become phind-ids\n"
985 if ($verbosity);
986
987 my $phinddir = $self->{'phinddir'};
988 my $infile = &util::filename_cat($phinddir, 'phrases');
989 my $outfile = &util::filename_cat($phinddir, 'phrases.2');
990
991 # Read the phrase file. Calculate initial set of phind-id
992 # numbers and store (suffixid -> frequency) relation.
993
994 my %suffixtophind;
995 my @totalfrequency;
996 my (@fields, $suffixid);
997 my $nextphind = 1;
998
999 open(IN, "<$infile");
1000 while(<IN>) {
1001
1002 chomp;
1003 @fields = split(/:/, $_);
1004
1005 # get next suffixid and phindid
1006 $suffixid = shift @fields;
1007 $suffixtophind{$suffixid} = $nextphind;
1008
1009 # store total frequency
1010 shift @fields;
1011 $totalfrequency[$nextphind] = shift @fields;
1012
1013 $nextphind++;
1014 }
1015 close IN;
1016
1017
1018 # Translate phrases file to phrases.2. Use phind keys (not suffix
1019 # keys), sort expansion and document occurrence data in order of
1020 # descending frequency.
1021 open(IN, "<$infile");
1022 open(OUT, ">$outfile");
1023
1024 my ($phindid, $text, $tf, $countexp, $expansions, $countdocs, $documents);
1025 my (@documents, @newexp, $k, $n);
1026 my $linenumber = 0;
1027
1028 while(<IN>) {
1029
1030 # read the line
1031 chomp;
1032 @fields = split(/:/, $_);
1033
1034 # get a phrase number for this line
1035 $suffixid = shift @fields;
1036 die unless (defined($suffixtophind{$suffixid}));
1037 $phindid = $suffixtophind{$suffixid};
1038
1039 # get the symbols in the phrase
1040 $text = shift @fields;
1041
1042 # output status information
1043 $linenumber++;
1044 if ($verbosity > 2) {
1045 if ($linenumber % 1000 == 0) {
1046 print $out "line $linenumber:\t$phindid\t$suffixid\t($text)\n";
1047 }
1048 print $out "$linenumber: $phindid\t$suffixid\t($text)\n" if ($verbosity > 3);
1049 }
1050
1051 # get the phrase frequency
1052 $tf = shift @fields;
1053
1054 # get the number of expansions
1055 $countexp = shift @fields;
1056
1057 # get the expansions, convert them into phind-id numbers, and sort them
1058 $expansions = shift @fields;
1059 @newexp = ();
1060 foreach $k (split(/,/, $expansions)) {
1061 die "ERROR - no phindid for: $k" unless (defined($suffixtophind{$k}));
1062 $n = $suffixtophind{$k};
1063 push @newexp, $n;
1064 }
1065 @newexp = sort {$totalfrequency[$b] <=> $totalfrequency[$a]} @newexp;
1066
1067 # get the number of documents
1068 $countdocs = shift @fields;
1069
1070 # get the documents and sort them
1071 $documents = shift @fields;
1072 $documents =~ s/d//g;
1073 @documents = split(/;/, $documents);
1074 @documents = sort by_doc_frequency @documents;
1075
1076 # output the phrase data
1077 print OUT "$phindid:$text:$tf:$countexp:$countdocs:";
1078 print OUT join(",", @newexp), ",:", join(";", @documents), ";\n";
1079
1080 }
1081
1082 close IN;
1083 close OUT;
1084}
1085
1086
1087# renumber_thesaurus_data
1088#
1089# Translate phrases.2 to phrases.3, adding thesaurus data if available.
1090
1091sub renumber_thesaurus_data {
1092 my ($self) = @_;
1093
1094 my $out = $self->{'outhandle'};
1095 my $verbosity = $self->{'verbosity'};
1096 my $thesaurus = $self->{'thesaurus'};
1097
1098 my $phinddir = $self->{'phinddir'};
1099 my $infile = &util::filename_cat($phinddir, "phrases.2");
1100 my $outfile = &util::filename_cat($phinddir, "phrases.3");
1101
1102
1103 # If no thesaurus is defined, simply move the phrases file.
1104 if (!$thesaurus) {
1105 print $out "Translate phrases.2: no thesaurus data\n"
1106 if ($verbosity);
1107 &util::mv($infile, $outfile);
1108 return;
1109 }
1110
1111 print $out "Translate phrases.2: add thesaurus data\n"
1112 if ($verbosity);
1113
1114 # 1.
1115 # Read thesaurus file and store (symbols->thesaurusid) mapping
1116 my $thesaurusfile = &util::filename_cat($phinddir, "$thesaurus.numbers");
1117 my %symbolstothesid;
1118 my (@fields, $thesid, $symbols);
1119
1120 open(TH, "<$thesaurusfile");
1121
1122 while (<TH>) {
1123
1124 chomp;
1125 @fields = split(/:/, $_);
1126
1127 # get id and text
1128 $thesid = shift @fields;
1129 $symbols = shift @fields;
1130 $symbolstothesid{$symbols} = $thesid;
1131 }
1132 close TH;
1133
1134 # 2.
1135 # Read phrases file to find thesaurus entries that already
1136 # have a phindid. Store their phind-ids for later translation.
1137 my %thesaurustophindid;
1138 my ($phindid);
1139
1140 open(IN, "<$infile");
1141
1142 while(<IN>) {
1143
1144 chomp;
1145 @fields = split(/:/, $_);
1146
1147 # phindid and symbols for this line
1148 $phindid = shift @fields;
1149 $symbols = shift @fields;
1150
1151 # do we have a thesaurus id corresponding to this phrase?
1152 if (defined($symbolstothesid{$symbols})) {
1153 $thesid = $symbolstothesid{$symbols};
1154 $thesaurustophindid{$thesid} = $phindid;
1155 }
1156 }
1157 close IN;
1158
1159 undef %symbolstothesid;
1160
1161 # 3.
1162 # Create phind-id numbers for remaining thesaurus entries
1163 my $nextphindid = $phindid + 1;
1164
1165 open(TH, "<$thesaurusfile");
1166 while(<TH>) {
1167
1168 chomp;
1169 @fields = split(/:/, $_);
1170
1171 # read thesaurus-id and ensure it has a corresponding phind-id
1172 $thesid = shift @fields;
1173 if (!defined($thesaurustophindid{$thesid})) {
1174 $thesaurustophindid{$thesid} = $nextphindid;
1175 $nextphindid++;
1176 }
1177 }
1178 close TH;
1179
1180 # 4.
1181 # Translate thesaurus file, replacing thesaurus-id numbers with
1182 # phind-id numbers.
1183 my $newthesaurusfile = &util::filename_cat($phinddir, "$thesaurus.phindid");
1184 my ($relations, $newrelation, $linkcounter, $linktext, $linktype, @linkdata, $link);
1185
1186 open(TH, "<$thesaurusfile");
1187 open(TO, ">$newthesaurusfile");
1188 while(<TH>) {
1189
1190 chomp;
1191 @fields = split(/:/, $_);
1192
1193 # phindid and symbols for this line
1194 ($thesid, $symbols, $relations) = @fields;
1195
1196 die unless ($thesid && $symbols);
1197 die unless $thesaurustophindid{$thesid};
1198 $phindid = $thesaurustophindid{$thesid};
1199
1200 # convert each part of the relation string to use phind-id numbers
1201 $newrelation = "";
1202 $linkcounter = 0;
1203 foreach $linktext (split(/;/, $relations)) {
1204 @linkdata = split(/,/, $linktext);
1205
1206 # remember the linktype (e.g. BT, NT)
1207 $linktype = shift @linkdata;
1208 $newrelation .= "$linktype,";
1209
1210 # convert the link target identfiers
1211 foreach $link (@linkdata) {
1212 die unless (defined($thesaurustophindid{$link}));
1213 $newrelation .= "$thesaurustophindid{$link},";
1214 $linkcounter++;
1215 }
1216 $newrelation =~ s/\,$//;
1217 $newrelation .= ";";
1218 }
1219 $newrelation .= ":";
1220
1221 print TO "$phindid:$symbols:$linkcounter:$newrelation\n";
1222 }
1223 close TH;
1224 close TO;
1225
1226 undef %thesaurustophindid;
1227
1228 # 5.
1229 # Read thesaurus data (in phind-id format) into memory
1230 my %thesaurusdata;
1231
1232 open(TH, "<$newthesaurusfile");
1233 while(<TH>) {
1234 chomp;
1235 ($phindid, $symbols, $linkcounter, $relations) = split(/:/, $_);
1236 die unless ($phindid && $symbols);
1237 $thesaurusdata{$phindid} = "$symbols:$linkcounter:$relations";
1238 }
1239
1240 # 6.
1241 # Add thesaurus data to phrases file
1242 my ($text, $tf, $countexp, $expansions, $countdocs, $documents);
1243 my (@documents, @newexp, $k, $n);
1244 my $linenumber = 0;
1245
1246 open(IN, "<$infile");
1247 open(OUT, ">$outfile");
1248
1249 # Update existing phrases
1250 while(<IN>) {
1251
1252 chomp;
1253 @fields = split(/:/, $_);
1254
1255 # get data for this line
1256 $phindid = shift @fields;
1257
1258 # output the phrase data, with thesaurus information
1259 print OUT "$phindid:", join(":", @fields);
1260
1261 # add thesaurus data
1262 if (defined($thesaurusdata{$phindid})) {
1263 @fields = split(/:/, $thesaurusdata{$phindid});
1264 shift @fields;
1265 $linkcounter = shift @fields;
1266 $relations = shift @fields;
1267
1268 print OUT ":$linkcounter:$relations";
1269 $thesaurusdata{$phindid} = "";
1270 }
1271 print OUT "\n";
1272 }
1273 close IN;
1274
1275 # Add phrases that aren't already in the file
1276 foreach $phindid (sort numerically keys %thesaurusdata) {
1277 next unless ($thesaurusdata{$phindid});
1278
1279 @fields = split(/:/, $thesaurusdata{$phindid});
1280 $symbols = shift @fields;
1281 $linkcounter = shift @fields;
1282 $relations = shift @fields;
1283
1284 print OUT "$phindid:$symbols:0:0:0:::$linkcounter:$relations\n";
1285 }
1286 close OUT;
1287
1288}
1289
1290# restore_vocabulary_data
1291#
1292# Read phrases.3 and restore vocabulary information. Then write
1293# this data to the MGPP input files (pword.txt and pdata.txt) and
1294# (if requested) to the saved phrases file.
1295
1296sub restore_vocabulary_data {
1297 my ($self) = @_;
1298
1299 my $out = $self->{'outhandle'};
1300 my $verbosity = $self->{'verbosity'};
1301 print $out "Translate phrases.3: restore vocabulary\n" if ($verbosity);
1302
1303 my $phinddir = $self->{'phinddir'};
1304 my $infile = &util::filename_cat($phinddir, 'phrases.3');
1305 my $vocabfile = &util::filename_cat($phinddir, 'clauses.vocab');
1306 my $datafile = &util::filename_cat($phinddir, 'pdata.txt');
1307 my $wordfile = &util::filename_cat($phinddir, 'pword.txt');
1308
1309 my $savephrases = $self->{'savephrases'};
1310
1311 # 1.
1312 # Read the vocabulary file
1313 open(V, "<$vocabfile")
1314 || die "Cannot open $vocabfile: $!";
1315 my @symbol;
1316 my $i = 1;
1317 while(<V>) {
1318 chomp;
1319 $symbol[$i++] = $_;
1320 }
1321
1322
1323 # 2.
1324 # Translate phrases.3 to MGPP input files
1325 my ($key, $text, $word, $line);
1326 my @fields;
1327 my $linenumber = 0;
1328
1329 open(IN, "<$infile");
1330 open(DATA, ">$datafile");
1331 open(WORD, ">$wordfile");
1332
1333 # Save the phrases in a separate text file
1334 if ($savephrases) {
1335 print $out "Saving phrases in $savephrases\n" if ($verbosity);
1336 open(SAVE, ">$savephrases");
1337 }
1338
1339 while(<IN>) {
1340
1341 # read the line
1342 chomp;
1343 $line = $_;
1344 @fields = split(/:/, $line);
1345
1346 # get a phrase number for this line
1347 $key = shift @fields;
1348
1349 # restore the text of the phrase
1350 $text = shift @fields;
1351 $text =~ s/s(\d+)/$symbol[$1]/g;
1352 if ($text =~ / /) {
1353 $word = "";
1354 } elsif ($text ne 'untranslated') {
1355 $word = $text;
1356 }
1357
1358 # output the phrase data
1359 print DATA "<Document>";
1360 print DATA "$key:$text:", join(":", @fields), ":\n";
1361
1362 # output the word index search data
1363 print WORD "<Document>$word\n";
1364
1365 # output the phrases to a text file
1366 if ($savephrases) {
1367 print SAVE $fields[0], "\t", $fields[2], "\t", "$text\n";
1368 }
1369 }
1370 close IN;
1371 close WORD;
1372 close DATA;
1373 close SAVE if ($savephrases);
1374
1375}
1376
1377
1378
1379# sort routines used to renumber phrases
1380
1381sub numerically { $a <=> $b }
1382
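# by_doc_frequency sorts document entries into descending order of
# within-document frequency. By the time it is called the "d" prefixes have
# already been stripped, so an entry looks like "2240,2" (document 2240,
# frequency 2) or simply "2253" (frequency defaults to 1).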
1383sub by_doc_frequency {
1384 my $fa = 1;
1385 if ($a =~ /,/) {
1386 $fa = $a;
1387 $fa =~ s/\d+,//;
1388 }
1389 my $fb = 1;
1390 if ($b =~ /,/) {
1391 $fb = $b;
1392 $fb =~ s/\d+,//;
1393 }
1394
1395 return ($fb <=> $fa);
1396}
1397
13981;