source: gs3-extensions/solr/trunk/src/src/java/org/greenstone/gsdl3/util/SolrQueryWrapper.java @ 32105

Last change on this file since 32105 was 32105, checked in by Georgiy Litvinov, 6 years ago

Added docFilter query param for limiting results to a document's sections.

  • Property svn:executable set to *
File size: 20.5 KB
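
For context, here is a minimal, hypothetical usage sketch of the docFilter parameter described in the change above; it is not part of the file below. The Solr URL, core name, field name and document hash are illustrative assumptions. The wrapper itself simply turns the supplied hash into a Solr filter query of the form docOID:<hash>*, so only that document's sections are returned.

// Hypothetical usage sketch for the docFilter parameter added in this revision.
// The Solr URL, core name, field name and document hash below are placeholders.
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.greenstone.LuceneWrapper4.SharedSoleneQueryResult;
import org.greenstone.gsdl3.util.SolrQueryWrapper;

public class DocFilterExample
{
    public static void main(String[] args)
    {
        SolrQueryWrapper wrapper = new SolrQueryWrapper();
        wrapper.setSolrCore(new HttpSolrServer("http://localhost:8983/solr/localsite-demo-didx"));
        wrapper.setHighlightField("TX");                      // assumed full-text field
        wrapper.setDocFilter("HASH0158f56086a4e0063cfc493b"); // restrict hits to this document's sections
        SharedSoleneQueryResult result = wrapper.runQuery("TX:farming");
        // Internally runQuery() adds fq=docOID:HASH0158f56086a4e0063cfc493b* to the Solr request,
        // so only sections whose docOID starts with the given hash are returned.
        if (result != null) {
            System.out.println("Query completed; results are limited to the filtered document.");
        }
    }
}
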
/**********************************************************************
 *
 * SolrQueryWrapper.java
 *
 * Copyright 2004 The New Zealand Digital Library Project
 *
 * A component of the Greenstone digital library software
 * from the New Zealand Digital Library Project at the
 * University of Waikato, New Zealand.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *********************************************************************/
package org.greenstone.gsdl3.util;

import java.lang.reflect.Type;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.HashSet;
import java.util.regex.Pattern;
import java.util.regex.Matcher;

import org.apache.log4j.Logger;
import org.apache.solr.client.solrj.SolrQuery; // subclass of ModifiableSolrParams
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.TermsResponse;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.greenstone.LuceneWrapper4.SharedSoleneQuery;
import org.greenstone.LuceneWrapper4.SharedSoleneQueryResult;
import org.apache.lucene.search.Query; // Query, TermQuery, BooleanQuery, BooleanClause and more
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.request.LocalSolrQueryRequest;

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

public class SolrQueryWrapper extends SharedSoleneQuery
{
    public static String SORT_ASCENDING = "asc";
    public static String SORT_DESCENDING = "desc";
    public static String SORT_BY_RANK = "score";
    public static String SORT_BY_INDEX_ORDER = "_docid_";

    static Logger logger = Logger.getLogger(org.greenstone.gsdl3.util.SolrQueryWrapper.class.getName());
    protected int max_docs = 100;
    protected String sort_order = SORT_DESCENDING;
    // Filter results by document hash, to limit results to sections of that document.
    protected String docFilter = null;
    protected String sort_field = SORT_BY_RANK; // don't want null default for solr
    protected ArrayList<String> _facets = new ArrayList<String>();
    protected ArrayList<String> _facetQueries = new ArrayList<String>();
    SolrServer solr_core = null;

    protected String highlight_field = null;

    String collection_core_name_prefix = null;

    public SolrQueryWrapper()
    {
        super();
        start_results = 0;
    }

    public void setMaxDocs(int max_docs)
    {
        this.max_docs = max_docs;
    }

    public void setSolrCore(SolrServer solr_core)
    {
        this.solr_core = solr_core;
    }

    public void setCollectionCoreNamePrefix(String colCoreNamePrefix) {
        this.collection_core_name_prefix = colCoreNamePrefix;
    }

    // make sure it's not null
    public void setSortField(String sort_field) {
        if (sort_field != null) {
            this.sort_field = sort_field;
        }
    }
    public void setHighlightField(String hl_field)
    {
        this.highlight_field = hl_field;
    }
    public void setSortOrder(String order)
    {
        this.sort_order = order;
    }
    public void setDocFilter(String docFilter)
    {
        this.docFilter = docFilter;
    }
    public void addFacet(String facet)
    {
        if (!_facets.contains(facet))
        {
            _facets.add(facet);
        }
    }

    public void clearFacets()
    {
        _facets.clear();
    }

    public void addFacetQuery(String facetQuery)
    {
        if (!_facetQueries.contains(facetQuery))
        {
            _facetQueries.add(facetQuery);
        }
    }

    public void clearFacetQueries()
    {
        _facetQueries.clear();
    }

    public boolean initialise()
    {
        if (solr_core == null)
        {
            utf8out.println("Solr Core not loaded in ");
            utf8out.flush();
            return false;
        }
        return true;
    }


    /**
     * UNUSED.
     * Back when we used the EmbeddedSolrServer, this getTerms method would expand the terms of a query.
     * Because of Solr/Lucene Index locking exceptions, we switched over to the HttpSolrServer instead
     * of the Embedded kind.
     *
     * The functionality of getTerms has been moved to
     * ../solrserver/Greenstone3SearchHandler.java, which will sit on the solrserver side (inside
     * tomcat's solr webapp).
     *
     * Extracts the query terms from the query string. The query string can be a boolean
     * combination of the various search fields with their search terms or phrases
     */
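    // Added illustrative note (not in the original source; field names are placeholders):
    // for a query string such as TI:"farming" AND TX:cow, this method returns the terms
    // TI:farming and TX:cow. Wildcard clauses like ZZ:econom* are rewritten against the index
    // first, but are not expanded into the individual terms they match (see the comments
    // inside the try block below).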
    public Term[] getTerms(SolrQuery solrQuery, String query_string)
    {
        Term terms[] = null;

        if(solr_core instanceof EmbeddedSolrServer) {
            EmbeddedSolrServer solrServer = (EmbeddedSolrServer)solr_core;

            CoreContainer coreContainer = solrServer.getCoreContainer();

            Collection<SolrCore> solrCores = coreContainer.getCores();
            if(!solrCores.isEmpty()) {
                Iterator<SolrCore> coreIterator = solrCores.iterator();

                // Just use the first core that matches the collection name, since the term
                // frequency of any term is the same regardless of whether it's the didx or sidx core
                boolean foundCore = false;
                while(coreIterator.hasNext() && !foundCore) {
                    SolrCore solrCore = coreIterator.next();
                    if(this.collection_core_name_prefix != null) {
                        if(!solrCore.getName().startsWith(this.collection_core_name_prefix)) {
                            //logger.error("### Skipping core not of this collection: " + solrCore.getName());
                            continue;
                        }
                    } else {
                        logger.error("### Collection_core_name_prefix not set. Won't try to find terms");
                        break;
                    }

                    //logger.error("### Found core " + solrCore.getName() + " of this collection " + this.collection_core_name_prefix);
                    foundCore = true;

                    LocalSolrQueryRequest solrQueryRequest = new LocalSolrQueryRequest(solrCore, solrQuery);
                    Query parsedQuery = null;

                    try {

                        // get the qparser, default is LuceneQParserPlugin, which is called "lucene" see http://wiki.apache.org/solr/QueryParser
                        QParser qParser = QParser.getParser(query_string, "lucene", solrQueryRequest);
                        parsedQuery = qParser.getQuery();

                        // For PrefixQuery or WildCardQuery (a subclass of AutomatonQuery, incl RegexpQ),
                        // like ZZ:econom* and ZZ:*date/regex queries, Query.extractTerms() throws an Exception
                        // because it has not done the Query.rewrite() step yet. So do that manually for them.
                        // This still doesn't provide us with the terms that econom* or *date break down into.

                        //if(parsedQuery instanceof PrefixQuery || parsedQuery instanceof AutomatonQuery) {
                        // Should we just check superclass MultiTermQuery?
                        // Can be a BooleanQuery containing PrefixQuery/WildCardQuery among its clauses, so
                        // just test for * in the query_string to determine if we need to do a rewrite() or not
                        if(query_string.contains("*")) {
                            SolrIndexSearcher searcher = solrQueryRequest.getSearcher();
                            IndexReader indexReader = searcher.getIndexReader(); // returns a DirectoryReader
                            parsedQuery = parsedQuery.rewrite(indexReader); // gets rewritten to ConstantScoreQuery
                        }

                        //System.err.println("#### Query type was: " + parsedQuery.getClass());
                        //logger.error("#### Query type was: " + parsedQuery.getClass());

                        // extract the terms
                        Set<Term> extractedQueryTerms = new HashSet<Term>();
                        parsedQuery.extractTerms(extractedQueryTerms);

                        terms = new Term[extractedQueryTerms.size()];

                        Iterator<Term> termsIterator = extractedQueryTerms.iterator();
                        for(int i = 0; termsIterator.hasNext(); i++) {
                            Term term = termsIterator.next();
                            ///System.err.println("#### Found query term: " + term);
                            ///logger.error("#### Found query term: " + term);

                            terms[i] = term; //(term.field(), term.text());
                        }

                    } catch(Exception queryParseException) {
                        queryParseException.printStackTrace();
                        System.err.println("Exception when parsing query: " + queryParseException.getMessage());
                        System.err.println("#### Query type was: " + parsedQuery.getClass());
                        logger.error("#### Query type was: " + parsedQuery.getClass());
                    }
                    // http://lucene.apache.org/solr/4_7_2/solr-core/org/apache/solr/request/SolrQueryRequestBase.html#close%28%29
                    // close() must be called when the object is no longer in use. Frees resources associated with this request
                    solrQueryRequest.close();
                }

            } else {
                System.err.println("#### CoreContainer is empty");
                logger.error("#### CoreContainer is empty");
            }
        } else {
            System.err.println("#### Not an EmbeddedSolrServer. SolrQueryWrapper.getTerms() not yet implemented for " + solr_core.getClass());
            logger.error("#### Not an EmbeddedSolrServer. SolrQueryWrapper.getTerms() not yet implemented for " + solr_core.getClass());
        }


        return terms;
    }

    public SharedSoleneQueryResult runQuery(String query_string)
    {
        if (query_string == null || query_string.equals(""))
        {
            utf8out.println("The query word is not indicated ");
            utf8out.flush();
            return null;
        }

        SolrQueryResult solr_query_result = new SolrQueryResult();
        solr_query_result.clear();

        if (_facetQueries.size() > 0)
        {
            HashMap<String, ArrayList<String>> grouping = new HashMap<String, ArrayList<String>>();
            for (String currentQuery : _facetQueries)
            {
                // Facet queries are stored in JSON, so we have to decode them
                Gson gson = new Gson();
                Type type = new TypeToken<List<String>>()
                {
                }.getType();
                List<String> queryElems = gson.fromJson(currentQuery, type);

                // Group each query segment by the index it uses
                for (String currentQueryElement : queryElems)
                {
                    String decodedQueryElement = null;
                    try
                    {
                        decodedQueryElement = URLDecoder.decode(currentQueryElement, "UTF-8");
                    }
                    catch (Exception ex)
                    {
                        continue;
                    }

                    int colonIndex = currentQueryElement.indexOf(":");
                    String indexShortName = currentQueryElement.substring(0, colonIndex);

                    if (grouping.get(indexShortName) == null)
                    {
                        grouping.put(indexShortName, new ArrayList<String>());
                    }
                    grouping.get(indexShortName).add(decodedQueryElement);
                }
            }

            // Construct the facet query string to add to the regular query string
            StringBuilder facetQueryString = new StringBuilder();
            int keysetCounter = 0;
            for (String key : grouping.keySet())
            {
                StringBuilder currentFacetString = new StringBuilder("(");
                int groupCounter = 0;
                for (String queryElem : grouping.get(key))
                {
                    currentFacetString.append(queryElem);

                    groupCounter++;
                    if (groupCounter < grouping.get(key).size())
                    {
                        currentFacetString.append(" OR ");
                    }
                }
                currentFacetString.append(")");

                facetQueryString.append(currentFacetString);

                keysetCounter++;
                if (keysetCounter < grouping.keySet().size())
                {
                    facetQueryString.append(" AND ");
                }
            }

            if (facetQueryString.length() > 0)
            {
                query_string += " AND " + facetQueryString;
            }
        }
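        // Added illustrative note (not in the original source; field names are placeholders):
        // two facet queries such as ["CL:%22Animals%22","CL:%22Plants%22"] and ["TI:%22farming%22"]
        // decode and group into {CL -> [CL:"Animals", CL:"Plants"], TI -> [TI:"farming"]},
        // so the block above appends something like
        //   AND (CL:"Animals" OR CL:"Plants") AND (TI:"farming")
        // to query_string (OR within a group, AND across groups).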


        SolrQuery solrQuery = new SolrQuery(query_string);
        solrQuery.addSort(this.sort_field, SolrQuery.ORDER.valueOf(this.sort_order)); // sort param, like "score desc" or "byORG asc"
        solrQuery.setStart(start_results); // which result to start from
        solrQuery.setRows(end_results - start_results); // how many results per "page"

        // http://lucene.472066.n3.nabble.com/get-term-frequency-just-only-keywords-search-td4084510.html
        // WORKS (search didx core):
        //TI:farming
        //docOID,score,termfreq(TI,'farming'),totaltermfreq(TI,'farming')


        // which fields to return for each document, we'll add the request for totaltermfreq later
        // fl=docOID score termfreq(TI,'farming') totaltermfreq(TI,'farming')
        solrQuery.setFields("docOID", "score"); //solrParams.set("fl", "docOID score totaltermfreq(field,'queryterm')");

        // Turn on highlighting
        solrQuery.setHighlight(true);
        // Return 3 snippets for each document
        solrQuery.setParam("hl.snippets", "3");
        solrQuery.setParam("hl.useFastVectorHighlighter", "true");
        solrQuery.setParam("hl.fl", highlight_field);
        solrQuery.setParam("hl.tag.pre", "&lt;span class=\"snippetText\"&gt;");
        solrQuery.setParam("hl.tag.post", "&lt;/span&gt;");

        if (docFilter != null) {
            solrQuery.setParam("fq", "docOID:" + docFilter + "*");
        }
        //solrQuery.setTerms(true); // turn on the termsComponent
        //solrQuery.set("terms.fl", "ZZ"); // which field to get the terms from. ModifiableSolrParams method

        // http://wiki.apache.org/solr/TermVectorComponent and https://cwiki.apache.org/confluence/display/solr/The+Term+Vector+Component
        // http://lucene.472066.n3.nabble.com/get-term-frequency-just-only-keywords-search-td4084510.html
        // http://stackoverflow.com/questions/13031534/word-frequency-in-solr
        // http://wiki.apache.org/solr/FunctionQuery#tf and #termfreq and #totaltermfreq
        // https://wiki.apache.org/solr/TermsComponent

        //solrParams.set("tv.tf", true); // turn on the terms vector Component
        //solrParams.set("tv.fl", "ZZ"); // which field to get the terms from /// ZZ


        if (_facets.size() > 0)
        {
            // enable facet counts in the query response
            solrQuery.setFacet(true); //solrParams.set("facet", "true");
            for (int i = 0; i < _facets.size(); i++)
            {
                // add this field as a facet
                solrQuery.addFacetField(_facets.get(i)); // solrParams.add("facet.field", _facets.get(i));
            }
        }

        // the solrserver will now
        // get the individual terms that make up the query, then request solr to return the totaltermfreq for each term

        // do the query
        try
        {
            QueryResponse solrResponse = solr_core.query(solrQuery); //solr_core.query(solrParams);
            SolrDocumentList hits = solrResponse.getResults();
            Map<String, Map<String, List<String>>> hlResponse = solrResponse.getHighlighting();
            solr_query_result.setHighlightResults(hlResponse);
            //TermsResponse termResponse = solrResponse.getTermsResponse(); // null unless termvectors=true in schema.xml

            if (hits != null)
            {
                logger.info("*** hits size = " + hits.size());
                logger.info("*** num docs found = " + hits.getNumFound());

                logger.info("*** start results = " + start_results);
                logger.info("*** end results = " + end_results);
                logger.info("*** max docs = " + max_docs);

                // numDocsFound is the total number of matching docs in the collection
                // as opposed to the number of documents returned in the hits list

                solr_query_result.setTotalDocs((int) hits.getNumFound());

                solr_query_result.setStartResults(start_results);
                solr_query_result.setEndResults(start_results + hits.size());

                // get the first field we're searching in, this will be the fallback field
                int sepIndex = query_string.indexOf(":");
                String defaultField = query_string.substring(0, sepIndex);
                //String query = query_string.substring(sepIndex + 2, query_string.length() - 1); // Replaced by call to getTerms()

                //solr_query_result.addTerm(query, field, (int) hits.getNumFound(), -1);

                // Output the matching documents
                for (int i = 0; i < hits.size(); i++)
                {
                    SolrDocument doc = hits.get(i);

                    // Need to think about how to support document term frequency. Make zero for now
                    int doc_term_freq = 0;
                    String docOID = (String) doc.get("docOID");
                    Float score = (Float) doc.get("score");

                    logger.info("**** docOID = " + docOID);
                    logger.info("**** score = " + score);


                    // solr returns each term's totaltermfreq, ttf, at the document level, even though
                    // the ttf is the same for each document. So extract this information just for the first document
                    // https://wiki.apache.org/solr/FunctionQuery#docfreq

                    if(i == 0) { // first document, all others repeat the same termfreq data
                        boolean foundTermInfo = false;

                        Collection<String> fieldNames = doc.getFieldNames();
                        for(Iterator<String> it = fieldNames.iterator(); it.hasNext(); ) {
                            String fieldName = it.next(); // e.g. looking for totaltermfreq(ZZ,'economically')
                            //logger.info("@@@@ found fieldName " + fieldName);


                            if(fieldName.startsWith("totaltermfreq")) {
                                //|| fieldName.startsWith("termfreq")) {

                                foundTermInfo = true;

                                // e.g. totaltermfreq(TI,'farming')
                                // e.g. termfreq(TI,'farming')
                                Pattern pattern = Pattern.compile("(.*?termfreq)\\((.*?),'(.*?)'\\)");
                                Matcher matcher = pattern.matcher(fieldName);
                                String metaField, indexField, queryTerm;
                                while (matcher.find()) {
                                    metaField = matcher.group(1); // termfreq or totaltermfreq
                                    indexField = matcher.group(2); // ZZ, TI
                                    queryTerm = matcher.group(3);

                                    //logger.info("\t@@@@ found field " + indexField);
                                    //logger.info("\t@@@@ queryTerm " + queryTerm);

                                    // Finally, can ask for the totaltermfreq value for this
                                    // searchterm in its indexed field:
                                    // e.g. totaltermfreq(TI,'farming'), e.g. termfreq(TI,'farming')
                                    Long totaltermfreq = (Long)doc.get("totaltermfreq("+indexField+",'"+queryTerm+"')");

                                    Integer termfreq = (Integer)doc.get("termfreq("+indexField+",'"+queryTerm+"')");

                                    //System.err.println("**** ttf = " + totaltermfreq);
                                    //System.err.println("**** tf = " + termfreq);
                                    //logger.info("**** ttf = " + totaltermfreq);
                                    //logger.info("**** tf = " + termfreq);
                                    solr_query_result.addTerm(queryTerm, indexField, (int) hits.getNumFound(), totaltermfreq.intValue()); // long totaltermfreq to int
                                }
                            }
                        }
                        if(!foundTermInfo) { // no terms extracted from query_string
                            solr_query_result.addTerm(query_string, defaultField, (int) hits.getNumFound(), -1); // no terms
                        }
                    }

                    solr_query_result.addDoc(docOID, score.floatValue(), doc_term_freq); // doc_termfreq for which term????
                }
            }
            else
            {
                solr_query_result.setTotalDocs(0);

                solr_query_result.setStartResults(0);
                solr_query_result.setEndResults(0);
            }

            solr_query_result.setFacetResults(solrResponse.getFacetFields());
        }
        catch (SolrServerException server_exception)
        {
            server_exception.printStackTrace();
            solr_query_result.setError(SolrQueryResult.SERVER_ERROR);
        }

        return solr_query_result;
    }
    // Highlighting query. Returns the full highlighted text for a document.
    public String runHighlightingQuery(String query, String hldocOID)
    {

        SolrQueryResult solr_query_result = new SolrQueryResult();
        solr_query_result.clear();


        /* Create Query */

        SolrQuery solrQuery = new SolrQuery(query);

        /* Set Query Parameters */

        // Turn on highlighting
        solrQuery.setHighlight(true);
        // Extract default field from query

        // Set field for highlighting
        solrQuery.setParam("hl.fl", highlight_field);

        // Get the whole highlighted field
        solrQuery.setHighlightFragsize(0);

        // Return only the required document, selected by docOID
        solrQuery.setFilterQueries("docOID:" + hldocOID);

        solrQuery.setHighlightSimplePre("<span class=\"termHighlight\">");
        solrQuery.setHighlightSimplePost("</span>");

        // Prepare results
        String text = null;
        // do the query
        try
        {
            QueryResponse solrResponse = solr_core.query(solrQuery); //solr_core.query(solrParams);
            // Get highlighting results
            Map<String, Map<String, List<String>>> highlightingResults = solrResponse.getHighlighting();
            // Check for existing highlighting results
            if (highlightingResults != null && highlightingResults.get(hldocOID) != null && highlightingResults.get(hldocOID).get(highlight_field) != null)
            {
                // Get the highlighted document text
                text = highlightingResults.get(hldocOID).get(highlight_field).get(0);
            }
        }
        catch (SolrServerException server_exception)
        {
            server_exception.printStackTrace();

        }
        return text;
    }
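    // Added illustrative usage note (not in the original source; field name and section OID are placeholders):
    //   String html = wrapper.runHighlightingQuery("TX:farming", "HASH0158f56086a4e0063cfc493b.1");
    // returns the full stored text of that section with each match wrapped in
    // <span class="termHighlight">...</span>, or null if Solr returned no highlighting for it.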

    // start results always from 0
    public void setStartResults(int start_results)
    {
        if (start_results < 0)
        {
            start_results = 0;
        }
        this.start_results = start_results;
    }

    public void cleanUp()
    {
        super.cleanUp();
    }

}