source: gs3-extensions/solr/trunk/src/src/java/org/greenstone/gsdl3/util/SolrQueryWrapper.java

Last change on this file was 38108, checked in by kjdon, 8 months ago

for now, lets sort the facets by count

  • Property svn:executable set to *
File size: 21.8 KB
Line 
1/**********************************************************************
2 *
3 * SolrQueryWrapper.java
4 *
5 * Copyright 2004 The New Zealand Digital Library Project
6 *
7 * A component of the Greenstone digital library software
8 * from the New Zealand Digital Library Project at the
9 * University of Waikato, New Zealand.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *********************************************************************/
26package org.greenstone.gsdl3.util;
27
28import java.lang.reflect.Type;
29import java.net.URLDecoder;
30import java.util.ArrayList;
31import java.util.Collection;
32import java.util.HashMap;
33import java.util.Iterator;
34import java.util.List;
35import java.util.Map;
36import java.util.Set;
37import java.util.HashSet;
38import java.util.regex.Pattern;
39import java.util.regex.Matcher;
40
41import org.apache.log4j.Logger;
42import org.apache.solr.client.solrj.SolrQuery; // subclass of ModifiableSolrParams
43import org.apache.solr.client.solrj.SolrServer;
44import org.apache.solr.client.solrj.SolrServerException;
45import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
46import org.apache.solr.client.solrj.response.QueryResponse;
47import org.apache.solr.client.solrj.response.TermsResponse;
48import org.apache.solr.core.CoreContainer;
49import org.apache.solr.core.SolrCore;
50import org.apache.solr.common.SolrDocument;
51import org.apache.solr.common.SolrDocumentList;
52import org.apache.solr.common.params.ModifiableSolrParams;
53import org.apache.solr.common.params.FacetParams;
54import org.greenstone.LuceneWrapper4.SharedSoleneQuery;
55import org.greenstone.LuceneWrapper4.SharedSoleneQueryResult;
56import org.apache.lucene.search.Query; // Query, TermQuery, BooleanQuery, BooleanClause and more
57import org.apache.lucene.index.IndexReader;
58import org.apache.lucene.index.Term;
59import org.apache.solr.search.QParser;
60import org.apache.solr.search.SolrIndexSearcher;
61import org.apache.solr.request.LocalSolrQueryRequest;
62
63import com.google.gson.Gson;
64import com.google.gson.reflect.TypeToken;
65
66public class SolrQueryWrapper extends SharedSoleneQuery
67{
68 public static String SORT_ASCENDING = "asc";
69 public static String SORT_DESCENDING = "desc";
70 public static String SORT_BY_RANK = "score";
71 public static String SORT_BY_INDEX_ORDER = "_docid_";
72
73 static Logger logger = Logger.getLogger(org.greenstone.gsdl3.util.SolrQueryWrapper.class.getName());
74 protected int max_docs = 100;
75 protected String sort_order = SORT_DESCENDING;
76 //Filter results by document hash. To get results from limited document sections.
77 protected String docFilter = null;
78 protected String sort_field = SORT_BY_RANK; // don't want null default for solr
79 protected ArrayList<String> _facets = new ArrayList<String>();
80 protected ArrayList<String> _facetQueries = new ArrayList<String>();
81 SolrServer solr_core = null;
82
83 protected String highlight_field = null;
84
85 String collection_core_name_prefix = null;
86
/**
 * Creates a query wrapper whose result window starts at the first hit.
 */
public SolrQueryWrapper()
{
	super();
	// default to returning results from the very first match
	start_results = 0;
}
92
93 public void setMaxDocs(int max_docs)
94 {
95 this.max_docs = max_docs;
96 }
97
98 public void setSolrCore(SolrServer solr_core)
99 {
100 this.solr_core = solr_core;
101 }
102
103 public void setCollectionCoreNamePrefix(String colCoreNamePrefix) {
104 this.collection_core_name_prefix = colCoreNamePrefix;
105 }
106
107 // make sure its not null.
108 public void setSortField(String sort_field) {
109 if (sort_field != null) {
110 this.sort_field = sort_field;
111 }
112 }
113 public void setHighlightField(String hl_field)
114 {
115 this.highlight_field = hl_field;
116 }
117 public void setSortOrder(String order)
118 {
119 this.sort_order = order;
120 }
121 public void setDocFilter(String docFilter)
122 {
123 this.docFilter = docFilter;
124 }
125 public void addFacet(String facet)
126 {
127 if (!_facets.contains(facet))
128 {
129 _facets.add(facet);
130 }
131 }
132
133 public void clearFacets()
134 {
135 _facets.clear();
136 }
137
138 public void addFacetQuery(String facetQuery)
139 {
140 if (!_facetQueries.contains(facetQuery))
141 {
142 _facetQueries.add(facetQuery);
143 }
144 }
145
146 public void clearFacetQueries()
147 {
148 _facetQueries.clear();
149 }
150
151 public boolean initialise()
152 {
153 if (solr_core == null)
154 {
155 utf8out.println("Solr Core not loaded in ");
156 utf8out.flush();
157 return false;
158 }
159 return true;
160 }
161
162
/**
 * UNUSED.
 * Back when we used the EmbeddedSolrServer, this getTerms method would expand the terms of a query.
 * Because of Solr/Lucene Index locking exceptions, we switched over to the HttpSolrServer instead
 * of the Embedded kind.
 *
 * The functionality of getTerms has been moved to
 * ../solrserver/Greenstone3SearchHandler.java, which will sit on the solrserver side (inside
 * tomcat's solr webapp).
 *
 * Extracts the query terms from the query string. The query string can be a boolean
 * combination of the various search fields with their search terms or phrases.
 *
 * @param solrQuery    the solrj query object, used to build a local request against a core
 * @param query_string the raw query, e.g. "TI:farming" or a boolean combination of fields
 * @return the array of Terms the parsed query breaks down into, or null if no suitable
 *         core was found, the server is not embedded, or parsing failed
 */
public Term[] getTerms(SolrQuery solrQuery, String query_string)
{
	Term terms[] = null;

	// Term extraction needs direct core access, which only the embedded server provides.
	if(solr_core instanceof EmbeddedSolrServer) {
		EmbeddedSolrServer solrServer = (EmbeddedSolrServer)solr_core;

		CoreContainer coreContainer = solrServer.getCoreContainer();

		Collection<SolrCore> solrCores = coreContainer.getCores();
		if(!solrCores.isEmpty()) {
			Iterator<SolrCore> coreIterator = solrCores.iterator();

			// Just use the first core that matches the collection name, since the term
			// frequency of any term is the same regardless of whether its didx or sidx core
			boolean foundCore = false;
			while(coreIterator.hasNext() && !foundCore) {
				SolrCore solrCore = coreIterator.next();
				if(this.collection_core_name_prefix != null) {
					if(!solrCore.getName().startsWith(this.collection_core_name_prefix)) {
						//logger.error("### Skipping core not of this collection: " + solrCore.getName());
						continue; // core belongs to another collection
					}
				} else {
					// without a prefix we cannot tell which core is ours, so give up
					logger.error("### Collection_core_name_prefix not set. Won't try to find terms");
					break;
				}

				//logger.error("### Found core " + solrCore.getName() + " of this collection " + this.collection_core_name_prefix);
				foundCore = true;

				LocalSolrQueryRequest solrQueryRequest = new LocalSolrQueryRequest(solrCore, solrQuery);
				Query parsedQuery = null;

				try {

					// get the qparser, default is LuceneQParserPlugin, which is called "lucene" see http://wiki.apache.org/solr/QueryParser
					QParser qParser = QParser.getParser(query_string, "lucene", solrQueryRequest);
					parsedQuery = qParser.getQuery();

					// For PrefixQuery or WildCardQuery (a subclass of AutomatonQuery, incl RegexpQ),
					// like ZZ:econom* and ZZ:*date/regex queries, Query.extractTerms() throws an Exception
					// because it has not done the Query.rewrite() step yet. So do that manually for them.
					// This still doesn't provide us with the terms that econom* or *date break down into.

					//if(parsedQuery instanceof PrefixQuery || parsedQuery instanceof AutomatonQuery) {
					// Should we just check superclass MultiTermQuery?
					// Can be a BooleanQuery containing PrefixQuery/WildCardQuery among its clauses, so
					// just test for * in the query_string to determine if we need to do a rewrite() or not
					if(query_string.contains("*")) {
						SolrIndexSearcher searcher = solrQueryRequest.getSearcher();
						IndexReader indexReader = searcher.getIndexReader(); // returns a DirectoryReader
						parsedQuery = parsedQuery.rewrite(indexReader); // gets rewritten to ConstantScoreQuery
					}

					//System.err.println("#### Query type was: " + parsedQuery.getClass());
					//logger.error("#### Query type was: " + parsedQuery.getClass());

					// extract the terms
					Set<Term> extractedQueryTerms = new HashSet<Term>();
					parsedQuery.extractTerms(extractedQueryTerms);

					terms = new Term[extractedQueryTerms.size()];

					// copy the (unordered) set of terms into the result array
					Iterator<Term> termsIterator = extractedQueryTerms.iterator();
					for(int i = 0; termsIterator.hasNext(); i++) {
						Term term = termsIterator.next();
						///System.err.println("#### Found query term: " + term);
						///logger.error("#### Found query term: " + term);

						terms[i] = term; //(term.field(), term.text());
					}

				} catch(Exception queryParseException) {
					queryParseException.printStackTrace();
					System.err.println("Exception when parsing query: " + queryParseException.getMessage());
					System.err.println("#### Query type was: " + parsedQuery.getClass());
					logger.error("#### Query type was: " + parsedQuery.getClass());
				}
				// http://lucene.apache.org/solr/4_7_2/solr-core/org/apache/solr/request/SolrQueryRequestBase.html#close%28%29
				// close() must be called when the object is no longer in use. Frees resources associated with this request
				solrQueryRequest.close();
			}

		} else {
			System.err.println("#### CoreContainer is empty");
			logger.error("#### CoreContainer is empty");
		}
	} else {
		System.err.println("#### Not an EmbeddedSolrServer. SolrQueryWrapper.getTerms() not yet implemented for " + solr_core.getClass());
		logger.error("#### Not an EmbeddedSolrServer. SolrQueryWrapper.getTerms() not yet implemented for " + solr_core.getClass());
	}


	return terms;
}
272
273 public SharedSoleneQueryResult runQuery(String query_string)
274 {
275 if (query_string == null || query_string.equals(""))
276 {
277 utf8out.println("The query word is not indicated ");
278 utf8out.flush();
279 return null;
280 }
281
282 SolrQueryResult solr_query_result = new SolrQueryResult();
283 solr_query_result.clear();
284
285 if (_facetQueries.size() > 0)
286 {
287 HashMap<String, ArrayList<String>> grouping = new HashMap<String, ArrayList<String>>();
288 for (String currentQuery : _facetQueries)
289 {
290 //Facet queries are stored in JSON, so we have to decode it
291 Gson gson = new Gson();
292 Type type = new TypeToken<List<String>>()
293 {
294 }.getType();
295 List<String> queryElems = gson.fromJson(currentQuery, type);
296
297 //Group each query segment by the index it uses
298 for (String currentQueryElement : queryElems)
299 {
300 //logger.info("@@@@ currentQueryElement " + currentQueryElement);
301
302 String decodedQueryElement = null;
303 try
304 {
305 decodedQueryElement = URLDecoder.decode(currentQueryElement, "UTF-8");
306 }
307 catch (Exception ex)
308 {
309 continue;
310 }
311
312 int colonIndex = currentQueryElement.indexOf(":");
313 String indexShortName = currentQueryElement.substring(0, colonIndex);
314
315 if (grouping.get(indexShortName) == null)
316 {
317 grouping.put(indexShortName, new ArrayList<String>());
318 }
319 grouping.get(indexShortName).add(decodedQueryElement);
320 }
321 }
322
323 //Construct the facet query string to add to the regular query string
324 StringBuilder facetQueryString = new StringBuilder();
325 int keysetCounter = 0;
326 for (String key : grouping.keySet())
327 {
328 StringBuilder currentFacetString = new StringBuilder("(");
329 int groupCounter = 0;
330 for (String queryElem : grouping.get(key))
331 {
332 currentFacetString.append(queryElem);
333
334 groupCounter++;
335 if (groupCounter < grouping.get(key).size())
336 {
337 currentFacetString.append(" OR ");
338 }
339 }
340 currentFacetString.append(")");
341
342 facetQueryString.append(currentFacetString);
343
344 keysetCounter++;
345 if (keysetCounter < grouping.keySet().size())
346 {
347 facetQueryString.append(" AND ");
348 }
349 }
350
351 if (facetQueryString.length() > 0)
352 {
353 query_string += " AND " + facetQueryString;
354 }
355 }
356
357
358 SolrQuery solrQuery = new SolrQuery(query_string);
359 solrQuery.addSort(this.sort_field, SolrQuery.ORDER.valueOf(this.sort_order)); // sort param, like "score desc" or "byORG asc"
360 solrQuery.setStart(start_results); // which result to start from
361 solrQuery.setRows(end_results - start_results); // how many results per "page"
362
363 // http://lucene.472066.n3.nabble.com/get-term-frequency-just-only-keywords-search-td4084510.html
364 // WORKS (search didx core):
365 //TI:farming
366 //docOID,score,termfreq(TI,'farming'),totaltermfreq(TI,'farming')
367
368
369 // which fields to return for each document, we'll add the request for totaltermfreq later
370 // fl=docOID score termfreq(TI,'farming') totaltermfreq(TI,'farming')
371 solrQuery.setFields("docOID", "score"); //solrParams.set("fl", "docOID score totaltermfreq(field,'queryterm')");
372
373 //Turn on highlighting
374 solrQuery.setHighlight(true);
375 //Return 3 snippets for each document
376 solrQuery.setParam("hl.snippets", "3");
377 solrQuery.setParam("hl.useFastVectorHighlighter", "true");
378 solrQuery.setParam("hl.fl", highlight_field);
379 solrQuery.setParam("hl.tag.pre", "&lt;span class=\"snippetText\"&gt;" );
380 solrQuery.setParam("hl.tag.post","&lt;/span&gt;" );
381
382 // set the default conjunction op
383 solrQuery.setParam("q.op", this.default_conjunction_operator);
384 if (docFilter != null) {
385 solrQuery.setParam("fq", "docOID:" + docFilter + "*");
386 }
387 //solrQuery.setTerms(true); // turn on the termsComponent
388 //solrQuery.set("terms.fl", "ZZ"); // which field to get the terms from. ModifiableSolrParams method
389
390 // http://wiki.apache.org/solr/TermVectorComponent and https://cwiki.apache.org/confluence/display/solr/The+Term+Vector+Component
391 // http://lucene.472066.n3.nabble.com/get-term-frequency-just-only-keywords-search-td4084510.html
392 // http://stackoverflow.com/questions/13031534/word-frequency-in-solr
393 // http://wiki.apache.org/solr/FunctionQuery#tf and #termfreq and #totaltermfreq
394 // https://wiki.apache.org/solr/TermsComponent
395
396 //solrParams.set("tv.tf", true);// turn on the terms vector Component
397 //solrParams.set("tv.fl", "ZZ");// which field to get the terms from /// ZZ
398
399
400 if (_facets.size() > 0)
401 {
402 // enable facet counts in the query response
403 solrQuery.setFacet(true); //solrParams.set("facet", "true");
404 for (int i = 0; i < _facets.size(); i++)
405 {
406 // add this field as a facet
407 solrQuery.addFacetField(_facets.get(i)); // solrParams.add("facet.field", _facets.get(i));
408 }
409 //for(int i = 0; i < _facetQueries.size(); i++) {
410 // logger.info("@@@@ facet query i: " + _facetQueries.get(i));
411 //}
412
413 // sort the facets by count
414 solrQuery.setFacetSort(FacetParams.FACET_SORT_COUNT);
415
416 }
417
418
419 // Some debugging
420 logger.info("@@@@ solrQuery: " + solrQuery);
421 try {
422 // https://stackoverflow.com/questions/2632175/decoding-uri-query-string-in-java
423 String displayQueryString = URLDecoder.decode(solrQuery.toString().replace("+", " "), "UTF-8");
424 logger.info("@@@@ solrQuery URL decoded: " + displayQueryString);
425 } catch(Exception uee) { // UnsupportedEncodingException
426 logger.info("Got debug exception " + uee.getMessage());
427 }
428
429
430 // the solrserver will now
431 // get the individual terms that make up the query, then request solr to return the totaltermfreq for each term
432
433 // do the query
434 try
435 {
436 QueryResponse solrResponse = solr_core.query(solrQuery); //solr_core.query(solrParams);
437 SolrDocumentList hits = solrResponse.getResults();
438 Map<String, Map<String, List<String>>> hlResponse = solrResponse.getHighlighting();
439 solr_query_result.setHighlightResults(hlResponse);
440 //TermsResponse termResponse = solrResponse.getTermsResponse(); // null unless termvectors=true in schema.xml
441
442 if (hits != null)
443 {
444 logger.info("*** hits size = " + hits.size()+
445 ", num docs found = " + hits.getNumFound() +
446 ", start results = " + start_results +
447 ", end results = " + end_results+
448 ", max docs = " + max_docs);
449
450 // numDocsFound is the total number of matching docs in the collection
451 // as opposed to the number of documents returned in the hits list
452
453 solr_query_result.setTotalDocs((int) hits.getNumFound());
454
455 solr_query_result.setStartResults(start_results);
456 solr_query_result.setEndResults(start_results + hits.size());
457
458 // get the first field we're searching in, this will be the fallback field
459 int sepIndex = query_string.indexOf(":");
460 String defaultField = query_string.substring(0, sepIndex);
461 //String query = query_string.substring(sepIndex + 2, query_string.length() - 1); // Replaced by call to getTerms()
462
463 //solr_query_result.addTerm(query, field, (int) hits.getNumFound(), -1);
464
465 // Output the matching documents
466 for (int i = 0; i < hits.size(); i++)
467 {
468 SolrDocument doc = hits.get(i);
469
470 // Need to think about how to support document term frequency. Make zero for now
471 int doc_term_freq = 0;
472 String docOID = (String) doc.get("docOID");
473 Float score = (Float) doc.get("score");
474
475 //logger.info("**** docOID = " + docOID);
476 //logger.info("**** score = " + score);
477
478
479 // solr returns each term's totaltermfreq, ttf, at the document level, even though
480 // the ttf is the same for each document. So extract this information just for the first document
481 // https://wiki.apache.org/solr/FunctionQuery#docfreq
482
483 if(i == 0) { // first document, all others repeat the same termfreq data
484 boolean foundTermInfo = false;
485
486 Collection<String> fieldNames = doc.getFieldNames();
487 for(Iterator<String> it = fieldNames.iterator(); it.hasNext(); ) {
488 String fieldName = it.next(); // e.g. looking for totaltermfreq(ZZ,'economically')
489 //logger.info("@@@@ found fieldName " + fieldName);
490
491
492 if(fieldName.startsWith("totaltermfreq")) {
493 //|| fieldName.startsWith("termfreq")) {
494
495 foundTermInfo = true;
496
497 // e.g. totaltermfreq(TI,'farming')
498 // e.g. termfreq(TI,'farming')
499 Pattern pattern = Pattern.compile("(.*?termfreq)\\((.*?),'(.*?)'\\)");
500 Matcher matcher = pattern.matcher(fieldName);
501 String metaField, indexField, queryTerm;
502 while (matcher.find()) {
503 metaField = matcher.group(1); // termfreq or totaltermfreq
504 indexField = matcher.group(2); //ZZ, TI
505 queryTerm = matcher.group(3);
506
507 //logger.info("\t@@@@ found field " + indexField);
508 //logger.info("\t@@@@ queryTerm " + queryTerm);
509
510 // Finally, can ask for the totaltermfreq value for this
511 // searchterm in its indexed field:
512 // e.g. totaltermfreq(TI,'farming'), e.g. termfreq(TI,'farming')
513 Long totaltermfreq = (Long)doc.get("totaltermfreq("+indexField+",'"+queryTerm+"')");
514
515 Integer termfreq = (Integer)doc.get("termfreq("+indexField+",'"+queryTerm+"')");
516
517 //System.err.println("**** ttf = " + totaltermfreq);
518 //System.err.println("**** tf = " + termfreq);
519 //logger.info("**** ttf = " + totaltermfreq);
520 //logger.info("**** tf = " + termfreq);
521 solr_query_result.addTerm(queryTerm, indexField, (int) hits.getNumFound(), totaltermfreq.intValue()); // long totaltermfreq to int
522 }
523 }
524 }
525 if(!foundTermInfo) { // no terms extracted from query_string
526 solr_query_result.addTerm(query_string, defaultField, (int) hits.getNumFound(), -1); // no terms
527 }
528 }
529
530 solr_query_result.addDoc(docOID, score.floatValue(), doc_term_freq); // doc_termfreq for which term????
531 }
532 }
533 else
534 {
535 solr_query_result.setTotalDocs(0);
536
537 solr_query_result.setStartResults(0);
538 solr_query_result.setEndResults(0);
539 }
540
541 solr_query_result.setFacetResults(solrResponse.getFacetFields());
542 }
543 catch (SolrServerException server_exception)
544 {
545 server_exception.printStackTrace();
546 solr_query_result.setError(SolrQueryResult.SERVER_ERROR);
547 }
548
549 return solr_query_result;
550 }
551// Highlighting query. Returns full highlighted text for document
552 public String runHighlightingQuery(String query,String hldocOID)
553 {
554 SolrQueryResult solr_query_result = new SolrQueryResult();
555 solr_query_result.clear();
556
557
558 /* Create Query*/
559
560 SolrQuery solrQuery = new SolrQuery(query);
561
562 /* Set Query Parameters*/
563
564 //Turn on highlighting
565 solrQuery.setHighlight(true);
566 //Extract default field from query
567
568 //Set field for highlighting
569 solrQuery.setParam("hl.fl", highlight_field);
570
571 // this option only available for the OriginalHighlighter (hl.method=original, the default)
572 // if we are doing document level search, we only want to highlight the first section,
573 // (TX element) if applicable. Otherwise get a middle section displayed at the start of
574 // a document, outside the toc.
575 // if we are doing section level search, there will only be one TX element
576 solrQuery.setParam("hl.maxMultiValuedToExamine", "1");
577
578 //Get whole highlighted field
579 solrQuery.setHighlightFragsize(0);
580
581 //Return only required document by docOID
582 solrQuery.setFilterQueries("docOID:"+ hldocOID);
583
584 solrQuery.setHighlightSimplePre("<span class=\"termHighlight\">");
585 solrQuery.setHighlightSimplePost("</span>");
586
587 //Prepare results
588 String text = null;
589 // do the query
590 try
591 {
592 QueryResponse solrResponse = solr_core.query(solrQuery); //solr_core.query(solrParams);
593 //Get highliting results
594 Map<String,Map<String,List<String>>> highlightingResults = solrResponse.getHighlighting();
595 // Check for existing highlighting results
596 if (highlightingResults != null && highlightingResults.get(hldocOID) != null && highlightingResults.get(hldocOID).get(highlight_field) != null)
597 {
598 //Get highlited document text
599 text = highlightingResults.get(hldocOID).get(highlight_field).get(0);
600 }
601 }
602 catch (SolrServerException server_exception)
603 {
604 server_exception.printStackTrace();
605
606 }
607 return text;
608 }
609
610 // start results always from 0
611 public void setStartResults(int start_results)
612 {
613 if (start_results < 0)
614 {
615 start_results = 0;
616 }
617 this.start_results = start_results;
618 }
619
620 public void cleanUp()
621 {
622 super.cleanUp();
623 }
624
625}
Note: See TracBrowser for help on using the repository browser.