source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/PerVolumeUtil.java@32106

Last change on this file since 32106 was 32106, checked in by davidb, 6 years ago

Rekindle ability to process a json-filelist.txt using Spark

  • Property svn:executable set to *
File size: 9.4 KB
package org.hathitrust.extractedfeatures;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;

import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.util.DoubleAccumulator;
import org.json.JSONArray;
import org.json.JSONObject;

/*
class PagedJSON implements Function<String, Boolean> {

    private static final long serialVersionUID = 1L;

    public Boolean call(String s) { return s.contains("a"); }
}
 */

//public class PerVolumeJSON implements VoidFunction<String>
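/**
 * Per-volume ingest utility for the HathiTrust Extracted Features Solr
 * pipeline: parses a single volume's EF JSON record and posts the resulting
 * Solr documents (one top-level metadata doc, then one doc per page) to a
 * randomly chosen endpoint from the configured list.
 *
 * Usage sketch (hypothetical driver code, not from this repository):
 *
 *   PerVolumeUtil per_vol = new PerVolumeUtil(input_dir, whitelist_filename,
 *       langmap_directory, solr_endpoints, output_dir, verbosity,
 *       icu_tokenize, strict_file_io);
 *   int num_pages = per_vol.call(new Text(ef_json_string));
 */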
public class PerVolumeUtil
{
    //private static final long serialVersionUID = 1L;
    protected String _input_dir;
    protected String _whitelist_filename;
    protected String _langmap_directory;

    protected final ArrayList<String> _solr_endpoints;
    protected final int _solr_endpoints_len;

    protected String _output_dir;

    protected int _verbosity;

    protected WhitelistBloomFilter _whitelist_bloomfilter;
    protected UniversalPOSLangMap _universal_langmap;

    boolean _icu_tokenize;
    boolean _strict_file_io;

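    /**
     * Records the ingest configuration. The whitelist Bloom filter and the
     * universal POS language map are deliberately left null here; they are
     * built lazily on first use in call().
     */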
    public PerVolumeUtil(String input_dir, String whitelist_filename, String langmap_directory,
                         ArrayList<String> solr_endpoints, String output_dir, int verbosity,
                         boolean icu_tokenize, boolean strict_file_io)
    {
        System.out.println("*** PerVolumeUtil Constructor, langmap_directory = " + langmap_directory);

        _input_dir = input_dir;
        _whitelist_filename = whitelist_filename;
        _langmap_directory = langmap_directory;

        _solr_endpoints = solr_endpoints;
        _solr_endpoints_len = solr_endpoints.size();

        //_solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;

        _icu_tokenize = icu_tokenize;
        _strict_file_io = strict_file_io;

        _whitelist_bloomfilter = null;
        _universal_langmap = null;
    }

    public String getInputDir()
    {
        return _input_dir;
    }

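    /**
     * Processes one volume's Extracted Features JSON record: posts a
     * top-level metadata Solr doc, then one Solr doc per page.
     *
     * @param json_text the volume's EF JSON record, as a Hadoop Text value
     * @return the number of pages in the volume's "pages" array (0 if the
     *         record could not be processed and strict file I/O is off)
     */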
    public Integer call(Text json_text) throws IOException
    {
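        // Lazily build the shared whitelist Bloom filter and POS language map
        // on first call, synchronizing on the configuration fields so they are
        // only constructed once per instance.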
        if (_whitelist_filename != null) {
            synchronized (_whitelist_filename) {
                if (_whitelist_bloomfilter == null) {
                    _whitelist_bloomfilter = new WhitelistBloomFilter(_whitelist_filename, true);
                }
            }
        }

        if (_langmap_directory != null) {
            synchronized (_langmap_directory) {
                if (_universal_langmap == null) {
                    _universal_langmap = new UniversalPOSLangMap(_langmap_directory);
                }
            }
        }

        int ef_num_pages = 0;

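        // Pick one of the configured Solr endpoints at random, spreading the
        // posting load across the cluster.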
        String solr_url = null;
        if (_solr_endpoints_len > 0) {
            int random_choice = (int)(_solr_endpoints_len * Math.random());
            solr_url = _solr_endpoints.get(random_choice);
        }

        try {
            JSONObject extracted_feature_record = new JSONObject(json_text.toString());

            if (extracted_feature_record != null) {
                String volume_id = extracted_feature_record.getString("id");

                JSONObject ef_metadata = extracted_feature_record.getJSONObject("metadata");
                //String title = ef_metadata.getString("title");

                //
                // Top-level metadata Solr doc
                //
                JSONObject solr_add_metadata_doc_json = SolrDocJSON.generateToplevelMetadataSolrDocJSON(volume_id, ef_metadata);
                if (solr_add_metadata_doc_json != null) {

                    if (_verbosity >= 2) {
                        System.out.println("==================");
                        System.out.println("Metadata JSON: " + solr_add_metadata_doc_json.toString());
                        System.out.println("==================");
                    }

                    if (solr_url != null) {
                        if (_verbosity >= 2) {
                            System.out.println("==================");
                            System.out.println("Posting to: " + solr_url);
                            System.out.println("==================");
                        }
                        SolrDocJSON.postSolrDoc(solr_url, solr_add_metadata_doc_json, volume_id, "top-level-metadata");
                    }
                }

                //
                // Now move on to POS extracted features per-page
                //
                boolean index_pages = true;
                if (index_pages) {

                    JSONObject ef_features = extracted_feature_record.getJSONObject("features");

                    int ef_page_count = ef_features.getInt("pageCount");

                    if (_verbosity >= 1) {
                        System.out.println("Processing: " + volume_id);
                        System.out.println(" pageCount = " + ef_page_count);
                    }

                    JSONArray ef_pages = ef_features.getJSONArray("pages");
                    ef_num_pages = ef_pages.length();

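                    // Page IDs take the form <volume_id>.page-NNNNNN (zero-padded
                    // to six digits), matching the naming of the per-page Solr docs.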
                    for (int i = 0; i < ef_page_count; i++) {
                        String formatted_i = String.format("page-%06d", i);
                        String page_id = volume_id + "." + formatted_i;

                        if (_verbosity >= 2) {
                            System.out.println(" Page: " + page_id);
                        }

                        JSONObject ef_page = ef_pages.getJSONObject(i);

                        if (ef_page != null) {
                            // Convert to Solr add form
                            JSONObject solr_add_doc_json
                                = SolrDocJSON.generateSolrDocJSON(volume_id, page_id,
                                                                  ef_metadata, ef_page,
                                                                  _whitelist_bloomfilter, _universal_langmap, _icu_tokenize);

                            if ((_verbosity >= 2) && (i == 20)) {
                                System.out.println("==================");
                                System.out.println("Sample output Solr add JSON [page 20]: " + solr_add_doc_json.toString());
                                System.out.println("==================");
                            }

                            if (solr_url != null) {
                                SolrDocJSON.postSolrDoc(solr_url, solr_add_doc_json,
                                                        volume_id, page_id);
                            }
                        }
                        else {
                            System.err.println("Skipping: " + page_id);
                        }
                    }
                }
                else {
                    System.err.println("Skipping per-page POS text indexing");
                }
            }
        }
        catch (Exception e) {
            if (_strict_file_io) {
                throw e;
            }
            else {
                e.printStackTrace();
            }
        }

        return ef_num_pages;
    }
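
    // Earlier variant of call(), driven by a JSON filename read from the
    // input directory rather than a Text record supplied by Spark; kept
    // commented out for reference.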
    /*
    //public void call(String json_file_in) throws IOException
    public Integer call(String json_file_in) throws IOException
    {
        if ((_whitelist_filename != null) && (_whitelist_bloomfilter == null)) {
            _whitelist_bloomfilter = new WhitelistBloomFilter(_whitelist_filename, true);
        }

        int ef_num_pages = 0;

        ArrayList<String> ids = new ArrayList<String>(); // want it to be non-null so can return valid iterator

        String full_json_file_in = _input_dir + "/" + json_file_in;
        JSONObject extracted_feature_record = JSONClusterFileIO.readJSONFile(full_json_file_in);

        if (extracted_feature_record != null) {
            String volume_id = extracted_feature_record.getString("id");

            //JSONObject ef_metadata = extracted_feature_record.getJSONObject("metadata");
            //String title = ef_metadata.getString("title");

            JSONObject ef_features = extracted_feature_record.getJSONObject("features");

            int ef_page_count = ef_features.getInt("pageCount");

            if (_verbosity >= 1) {
                System.out.println("Processing: " + json_file_in);
                System.out.println(" pageCount = " + ef_page_count);
            }

            JSONArray ef_pages = ef_features.getJSONArray("pages");
            ef_num_pages = ef_pages.length();

            // Make directory for page-level JSON output
            String json_dir = ClusterFileIO.removeSuffix(json_file_in, ".json.bz2");
            String page_json_dir = json_dir + "/pages";

            if (_output_dir != null) {
                ClusterFileIO.createDirectoryAll(_output_dir + "/" + page_json_dir);
            }

            ids = new ArrayList<String>(ef_num_pages);
            for (int i = 0; i < ef_page_count; i++) {
                String formatted_i = String.format("page-%06d", i);
                String page_id = volume_id + "." + formatted_i;

                if (_verbosity >= 2) {
                    System.out.println(" Page: " + page_id);
                }

                String output_json_bz2 = page_json_dir + "/" + formatted_i + ".json.bz2";
                ids.add(page_id);

                if (_verbosity >= 2) {
                    if (i == 0) {
                        System.out.println("Sample output JSON page file [i=0]: " + output_json_bz2);
                    }
                }
                JSONObject ef_page = ef_pages.getJSONObject(i);

                if (ef_page != null) {
                    // Convert to Solr add form
                    JSONObject solr_add_doc_json
                        = SolrDocJSON.generateSolrDocJSON(volume_id, page_id, ef_page, _whitelist_bloomfilter, _icu_tokenize);

                    if ((_verbosity >= 2) && (i == 20)) {
                        System.out.println("==================");
                        System.out.println("Sample output Solr add JSON [page 20]: " + solr_add_doc_json.toString());
                        System.out.println("==================");
                    }

                    if (_solr_url != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Posting to: " + _solr_url);
                            System.out.println("==================");
                        }
                        SolrDocJSON.postSolrDoc(_solr_url, solr_add_doc_json);
                    }

                    if (_output_dir != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Saving to: " + _output_dir);
                            System.out.println("==================");
                        }
                        SolrDocJSON.saveSolrDoc(solr_add_doc_json, _output_dir + "/" + output_json_bz2);
                    }
                }
                else {
                    System.err.println("Skipping: " + page_id);
                }
            }
        }
        else {
            // File did not exist, or could not be parsed
            String mess = "Failed to read in bzipped JSON file '" + full_json_file_in + "'";
            if (_strict_file_io) {
                throw new IOException(mess);
            }
            else {
                System.err.println("Warning: " + mess);
                System.out.println("Warning: " + mess);
            }
        }

        return ef_num_pages;
    }
    */
}