source: trunk/indexers/lucene-gs/src/org/greenstone/LuceneWrapper/GS2LuceneQuery.java@ 13909

Last change on this file since 13909 was 13909, checked in by kjdon, 17 years ago

check that searcher is not null before closing it, in cleanUp()

  • Property svn:keywords set to Author Date Id Revision
File size: 19.4 KB
/**********************************************************************
 *
 * GS2LuceneQuery.java
 *
 * Copyright 2004 The New Zealand Digital Library Project
 *
 * A component of the Greenstone digital library software
 * from the New Zealand Digital Library Project at the
 * University of Waikato, New Zealand.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *********************************************************************/
package org.greenstone.LuceneWrapper;


import java.io.*;
import java.util.*;
import java.util.regex.*;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanQuery.TooManyClauses;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Hit;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RangeFilter;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopFieldDocs;


public class GS2LuceneQuery
{

    static private String TEXTFIELD = "TX";

    // Use the standard set of English stop words by default
    static private String[] stop_words = StandardAnalyzer.STOP_WORDS;

    private String full_indexdir = "";
    private String default_conjunction_operator = "OR";
    private String fuzziness = null;
    private String sort_field = null;
    private Sort sorter = new Sort();
    private String filter_string = null;
    private Filter filter = null;
    private int start_results = 1;
    private int end_results = Integer.MAX_VALUE;

    private QueryParser query_parser = null;
    private QueryParser query_parser_no_stop_words = null;
    private Searcher searcher = null;
    private IndexReader reader = null;

    public GS2LuceneQuery() {

        // Create one query parser with the standard set of stop words, and one with none

        query_parser = new QueryParser(TEXTFIELD, new StandardAnalyzer(stop_words));
        query_parser_no_stop_words = new QueryParser(TEXTFIELD, new StandardAnalyzer(new String[] { }));
    }


    public boolean initialise() {

        if (full_indexdir == null || full_indexdir.length() == 0) {
            System.out.println("Index directory is not indicated ");
            return false;
        }
        try {
            searcher = new IndexSearcher(full_indexdir);
            reader = ((IndexSearcher) searcher).getIndexReader();

        }
        catch (IOException exception) {
            exception.printStackTrace();
            return false;
        }
        return true;

    }

    public LuceneQueryResult runQuery(String query_string) {

        if (query_string == null || query_string.equals("")) {
            System.out.println("The query word is not indicated ");
            return null;
        }

        LuceneQueryResult lucene_query_result = new LuceneQueryResult();
        lucene_query_result.clear();

        try {
            Query query_including_stop_words = query_parser_no_stop_words.parse(query_string);
            query_including_stop_words = query_including_stop_words.rewrite(reader);

            Query query = parseQuery(reader, query_parser, query_string, fuzziness);
            query = query.rewrite(reader);

            // Get the list of expanded query terms and their frequencies
            // num docs matching, and total frequency
            HashSet terms = new HashSet();
            query.extractTerms(terms);

            Iterator iter = terms.iterator();
            while (iter.hasNext()) {

                Term term = (Term) iter.next();

                // Get the term frequency over all the documents
                // (the TermDocs enumeration must be advanced with next() before freq() is valid)
                TermDocs term_docs = reader.termDocs(term);
                int term_freq = 0;
                int match_docs = 0;
                while (term_docs.next()) {
                    term_freq += term_docs.freq();
                    if (term_docs.freq() != 0) {
                        match_docs++;
                    }
                }

                // Create a term
                lucene_query_result.addTerm(term.text(), term.field(), match_docs, term_freq);
            }

            // Get the list of stop words removed from the query
            HashSet terms_including_stop_words = new HashSet();
            query_including_stop_words.extractTerms(terms_including_stop_words);
            Iterator terms_including_stop_words_iter = terms_including_stop_words.iterator();
            while (terms_including_stop_words_iter.hasNext()) {
                Term term = (Term) terms_including_stop_words_iter.next();
                if (!terms.contains(term)) {
                    lucene_query_result.addStopWord(term.text());
                }
            }

            // do the query
            // Simple case for getting all the matching documents
            if (end_results == Integer.MAX_VALUE) {
                // Perform the query (filter and sorter may be null)
                Hits hits = searcher.search(query, filter, sorter);
                lucene_query_result.setTotalDocs(hits.length());

                // Output the matching documents
                lucene_query_result.setStartResults(start_results);
                lucene_query_result.setEndResults(hits.length());

                for (int i = start_results; i <= hits.length(); i++) {
                    Document doc = hits.doc(i - 1);
                    lucene_query_result.addDoc(Long.parseLong(doc.get("nodeID").trim()), hits.score(i - 1));
                }
            }

            // Slightly more complicated case for returning a subset of the matching documents
            else {
                // Perform the query (filter may be null)
                TopFieldDocs hits = searcher.search(query, filter, end_results, sorter);
                lucene_query_result.setTotalDocs(hits.totalHits);

                lucene_query_result.setStartResults(start_results);
                lucene_query_result.setEndResults(end_results < hits.scoreDocs.length ? end_results : hits.scoreDocs.length);

                // Output the matching documents
                for (int i = start_results; (i <= hits.scoreDocs.length && i <= end_results); i++) {
                    Document doc = reader.document(hits.scoreDocs[i - 1].doc);
                    lucene_query_result.addDoc(Long.parseLong(doc.get("nodeID").trim()), hits.scoreDocs[i - 1].score);
                }
            }
        }

        catch (ParseException parse_exception) {
            lucene_query_result.setError(LuceneQueryResult.PARSE_ERROR);
        }
        catch (TooManyClauses too_many_clauses_exception) {
            lucene_query_result.setError(LuceneQueryResult.TOO_MANY_CLAUSES_ERROR);
        }
        catch (IOException exception) {
            lucene_query_result.setError(LuceneQueryResult.IO_ERROR);
            exception.printStackTrace();
        }
        catch (Exception exception) {
            lucene_query_result.setError(LuceneQueryResult.OTHER_ERROR);
            exception.printStackTrace();
        }
        return lucene_query_result;
    }

    public void setDefaultConjunctionOperator(String default_conjunction_operator) {
        this.default_conjunction_operator = default_conjunction_operator.toUpperCase();
        if (this.default_conjunction_operator.equals("AND")) {
            query_parser.setDefaultOperator(QueryParser.AND_OPERATOR);
            query_parser_no_stop_words.setDefaultOperator(QueryParser.AND_OPERATOR);
        } else { // default is OR
            query_parser.setDefaultOperator(QueryParser.OR_OPERATOR);
            query_parser_no_stop_words.setDefaultOperator(QueryParser.OR_OPERATOR);
        }
    }

    public String getDefaultConjunctionOperator() {
        return this.default_conjunction_operator;
    }

    public void setEndResults(int end_results) {
        this.end_results = end_results;
    }
    public int getEndResults() {
        return this.end_results;
    }

    public void setFilterString(String filter_string) {
        this.filter_string = filter_string;
        this.filter = parseFilterString(filter_string);
    }
    public String getFilterString() {
        return this.filter_string;
    }

    public Filter getFilter() {
        return this.filter;
    }

    public void setIndexDir(String full_indexdir) {
        this.full_indexdir = full_indexdir;
    }

    public void setFuzziness(String fuzziness) {
        this.fuzziness = fuzziness;
    }
    public String getFuzziness() {
        return this.fuzziness;
    }

    public void setSortField(String sort_field) {
        this.sort_field = sort_field;
        if (sort_field == null) {
            this.sorter = new Sort();
        } else {
            this.sorter = new Sort(sort_field);
        }
    }
    public String getSortField() {
        return this.sort_field;
    }

    public void setStartResults(int start_results) {
        if (start_results < 1) {
            start_results = 1;
        }
        this.start_results = start_results;
    }
    public int getStartResults() {
        return this.start_results;
    }

    public void cleanUp() {
        try {
            if (searcher != null) {
                searcher.close();
            }
        } catch (IOException exception) {
            exception.printStackTrace();
        }
    }

    private Query parseQuery(IndexReader reader, QueryParser query_parser, String query_string, String fuzziness)
        throws java.io.IOException, org.apache.lucene.queryParser.ParseException
    {
        // Split query string into the search terms and the filter terms
        // * The first +(...) term contains the search terms so count
        //   up '(' and stop when we finish matching ')'
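        // A worked example (the query below is purely illustrative, not taken from a real
        // collection): for an input such as
        //     +(TX:(snail farming)) +CR:[19900101 TO 20001231]
        // the scan stops just after the ')' that closes the first bracketed group, so
        // query_prefix becomes "+(TX:(snail farming))" and query_suffix the remainder.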
        int offset = 0;
        int paren_count = 0;
        boolean seen_paren = false;
        while (offset < query_string.length() && (!seen_paren || paren_count > 0)) {
            if (query_string.charAt(offset) == '(') {
                paren_count++;
                seen_paren = true;
            }
            if (query_string.charAt(offset) == ')') {
                paren_count--;
            }
            offset++;
        }
        String query_prefix = query_string.substring(0, offset);
        String query_suffix = query_string.substring(offset);

        ///ystem.err.println("Prefix: " + query_prefix);
        ///ystem.err.println("Suffix: " + query_suffix);

        Query query = query_parser.parse(query_prefix);
        query = query.rewrite(reader);

        // If this is a fuzzy search, then we need to add the fuzzy
        // flag to each of the query terms
        if (fuzziness != null && query.toString().length() > 0) {

            // Revert the query to a string
            System.err.println("Rewritten query: " + query.toString());
            // Search through the string for TX:<term> query terms
            // and append the ~ operator. Note that this search will
            // not change phrase searches (TX:"<term> <term>") as
            // fuzzy searching is not possible for these entries.
            // Yahoo! Time for a state machine!
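            // (In Lucene query syntax a fuzzy match is requested by appending '~' to a single
            // term, optionally followed by a minimum similarity between 0 and 1; for example
            // TX:farming~0.7, where the term is illustrative only. That is exactly what the
            // state machine below inserts for every TX:<term> it finds.)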
            StringBuffer mutable_query_string = new StringBuffer(query.toString());
            int o = 0; // Offset
            // 0 = BASE, 1 = SEEN_T, 2 = SEEN_TX, 3 = SEEN_TX:
            int s = 0; // State
            while (o < mutable_query_string.length()) {
                char c = mutable_query_string.charAt(o);
                if (s == 0 && c == TEXTFIELD.charAt(0)) {
                    ///ystem.err.println("Found T!");
                    s = 1;
                }
                else if (s == 1) {
                    if (c == TEXTFIELD.charAt(1)) {
                        ///ystem.err.println("Found X!");
                        s = 2;
                    }
                    else {
                        s = 0; // Reset
                    }
                }
                else if (s == 2) {
                    if (c == ':') {
                        ///ystem.err.println("Found TX:!");
                        s = 3;
                    }
                    else {
                        s = 0; // Reset
                    }
                }
                else if (s == 3) {
                    // Don't process phrases
                    if (c == '"') {
                        ///ystem.err.println("Stupid phrase...");
                        s = 0; // Reset
                    }
                    // Found the end of the term (whitespace, or the closing
                    // parenthesis of the enclosing group)... add the
                    // fuzzy search indicator
                    else if (Character.isWhitespace(c) || c == ')') {
                        ///ystem.err.println("Yahoo! Found fuzzy term.");
                        mutable_query_string.insert(o, '~' + fuzziness);
                        o++;
                        s = 0; // Reset
                    }
                }
                o++;
            }
            // If we were in the state of looking for the end of a
            // term - then we just found it!
            if (s == 3) {

                mutable_query_string.append('~' + fuzziness);
            }
            // Reparse the query
            ///ystem.err.println("Fuzzy query: " + mutable_query_string.toString() + query_suffix);
            query = query_parser.parse(mutable_query_string.toString() + query_suffix);
        }
        else {
            query = query_parser.parse(query_prefix + query_suffix);
        }

        return query;
    }

    private Filter parseFilterString(String filter_string)
    {
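        // The only filter syntax handled here is a single numeric range clause of the form
        // "+FIELD:[lower TO upper]" (square brackets for inclusive bounds, curly braces for
        // exclusive ones), for example "+YE:[1990 TO 2000]" where the field name is
        // illustrative only. Anything else falls through to the error branch below.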
        Filter result = null;
        Pattern pattern = Pattern.compile("\\s*\\+(\\w+)\\:([\\{\\[])(\\d+)\\s+TO\\s+(\\d+)([\\}\\]])\\s*");
        Matcher matcher = pattern.matcher(filter_string);
        if (matcher.matches()) {
            String field_name = matcher.group(1);
            boolean include_lower = matcher.group(2).equals("[");
            String lower_term = matcher.group(3);
            String upper_term = matcher.group(4);
            boolean include_upper = matcher.group(5).equals("]");
            result = new RangeFilter(field_name, lower_term, upper_term, include_lower, include_upper);
        }
        else {
            System.err.println("Error: Could not understand filter string \"" + filter_string + "\"");
        }
        return result;
    }


    /** command line program and auxiliary methods */

    // Fairly self-explanatory I should hope
    static private boolean query_result_caching_enabled = false;

    static public void main (String args[])
    {
        if (args.length == 0) {
            System.out.println("Usage: GS2LuceneQuery <index directory> [-fuzziness value] [-filter filter_string] [-sort sort_field] [-dco AND|OR] [-startresults number -endresults number] [query]");
            return;
        }
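
        // Example invocation (the index path and query below are illustrative only):
        //   java org.greenstone.LuceneWrapper.GS2LuceneQuery /path/to/collection/index/didx \
        //       -dco AND -startresults 1 -endresults 20 "snail farming"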

        try {
            String index_directory = args[0];

            GS2LuceneQuery queryer = new GS2LuceneQuery();
            queryer.setIndexDir(index_directory);

            // Prepare the index cache directory, if query result caching is enabled
            if (query_result_caching_enabled) {
                // Make the index cache directory if it doesn't already exist
                File index_cache_directory = new File(index_directory, "cache");
                if (!index_cache_directory.exists()) {
                    index_cache_directory.mkdir();
                }

                // Disable caching if the index cache directory isn't available
                if (!index_cache_directory.exists() || !index_cache_directory.isDirectory()) {
                    query_result_caching_enabled = false;
                }
            }

            String query_string = null;

            // Parse the command-line arguments
            for (int i = 1; i < args.length; i++) {
                if (args[i].equals("-sort")) {
                    i++;
                    queryer.setSortField(args[i]);
                }
                else if (args[i].equals("-filter")) {
                    i++;
                    queryer.setFilterString(args[i]);
                }
                else if (args[i].equals("-dco")) {
                    i++;
                    queryer.setDefaultConjunctionOperator(args[i]);
                }
                else if (args[i].equals("-fuzziness")) {
                    i++;
                    queryer.setFuzziness(args[i]);
                }
                else if (args[i].equals("-startresults")) {
                    i++;
                    if (args[i].matches("\\d+")) {
                        queryer.setStartResults(Integer.parseInt(args[i]));
                    }
                }
                else if (args[i].equals("-endresults")) {
                    i++;
                    if (args[i].matches("\\d+")) {
                        queryer.setEndResults(Integer.parseInt(args[i]));
                    }
                }
                else {
                    query_string = args[i];
                }
            }

            if (!queryer.initialise()) {
                return;
            }

            // The query string has been specified as a command-line argument
            if (query_string != null) {
                runQueryCaching(index_directory, queryer, query_string);
            }

            // Read queries from STDIN
            else {
                BufferedReader in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
                while (true) {
                    // Read the query from STDIN
                    query_string = in.readLine();
                    if (query_string == null || query_string.length() == 0) {
                        break;
                    }
                    runQueryCaching(index_directory, queryer, query_string);

                }
            }
            queryer.cleanUp();
        }
        catch (IOException exception) {
            exception.printStackTrace();
        }
    }

    private static void runQueryCaching(String index_directory, GS2LuceneQuery queryer, String query_string)
        throws IOException
    {
        StringBuffer query_results_xml = new StringBuffer();

        // Check if this query result has been cached from a previous search (if it's enabled)
        File query_result_cache_file = null;
        if (query_result_caching_enabled) {
            // Generate the cache file name from the query options
            String query_result_cache_file_name = query_string + "-";
            String fuzziness = queryer.getFuzziness();
            query_result_cache_file_name += ((fuzziness != null) ? fuzziness : "") + "-";
            String filter_string = queryer.getFilterString();
            query_result_cache_file_name += ((filter_string != null) ? filter_string : "") + "-";
            String sort_string = queryer.getSortField();
            query_result_cache_file_name += ((sort_string != null) ? sort_string : "") + "-";
            String default_conjunction_operator = queryer.getDefaultConjunctionOperator();
            query_result_cache_file_name += default_conjunction_operator + "-";
            int start_results = queryer.getStartResults();
            int end_results = queryer.getEndResults();
            query_result_cache_file_name += start_results + "-" + end_results;
            query_result_cache_file_name = fileSafe(query_result_cache_file_name);
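
            // For instance (illustrative query only), "snail farming" with no fuzziness, filter
            // or sort field, the default OR operator and results 1-20 yields the cache file name
            // "snail%32farming----OR-1-20" once fileSafe() has encoded the space.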

            // If the query result cache file exists, just return its contents and we're done
            File index_cache_directory = new File(index_directory, "cache");
            query_result_cache_file = new File(index_cache_directory, query_result_cache_file_name);
            if (query_result_cache_file.exists() && query_result_cache_file.isFile()) {
                FileInputStream fis = new FileInputStream(query_result_cache_file);
                InputStreamReader isr = new InputStreamReader(fis, "UTF-8");
                BufferedReader buffered_reader = new BufferedReader(isr);
                String line = "";
                while ((line = buffered_reader.readLine()) != null) {
                    query_results_xml.append(line + "\n");
                }
                String query_results_xml_string = query_results_xml.toString();
                query_results_xml_string = query_results_xml_string.replaceFirst("cached=\"false\"", "cached=\"true\"");
                System.out.print(query_results_xml_string);
                return;
            }
        }

        // not cached
        query_results_xml.append("<ResultSet cached=\"false\">\n");
        query_results_xml.append("<QueryString>" + LuceneQueryResult.xmlSafe(query_string) + "</QueryString>\n");
        Filter filter = queryer.getFilter();
        if (filter != null) {
            query_results_xml.append("<FilterString>" + filter.toString() + "</FilterString>\n");
        }

        LuceneQueryResult query_result = queryer.runQuery(query_string);
        if (query_result == null) {
            System.err.println("Couldn't run the query");
            return;
        }

        if (query_result.getError() != LuceneQueryResult.NO_ERROR) {
            query_results_xml.append("<Error type=\"" + query_result.getErrorString() + "\" />\n");
        } else {
            query_results_xml.append(query_result.getXMLString());
        }
        query_results_xml.append("</ResultSet>\n");

        System.out.print(query_results_xml);

        // Cache this query result, if desired
        if (query_result_caching_enabled) {
            FileWriter query_result_cache_file_writer = new FileWriter(query_result_cache_file);
            query_result_cache_file_writer.write(query_results_xml.toString());
            query_result_cache_file_writer.close();
        }
    }

    private static String fileSafe(String text)
    {
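        // Keep ASCII letters, digits and '-' as-is; every other character is replaced with '%'
        // followed by its decimal code point (e.g. a space becomes "%32"), so the generated
        // cache file name is safe to use on the filesystem.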
        StringBuffer file_safe_text = new StringBuffer();
        for (int i = 0; i < text.length(); i++) {
            char character = text.charAt(i);
            if ((character >= 'A' && character <= 'Z') || (character >= 'a' && character <= 'z') || (character >= '0' && character <= '9') || character == '-') {
                file_safe_text.append(character);
            }
            else {
                file_safe_text.append('%');
                file_safe_text.append((int) character);
            }
        }
        return file_safe_text.toString();
    }


}