source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/PerVolumeJSON.java@31675

Last change on this file since 31675 was 31675, checked in by davidb, 7 years ago

More careful set of metadata fields indexed

  • Property svn:executable set to *
File size: 9.2 KB
package org.hathitrust.extractedfeatures;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;

import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.util.DoubleAccumulator;
import org.json.JSONArray;
import org.json.JSONObject;
/*
class PagedJSON implements Function<String, Boolean> {

    private static final long serialVersionUID = 1L;

    public Boolean call(String s) { return s.contains("a"); }
}
*/


//public class PerVolumeJSON implements VoidFunction<String>
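/**
 * Per-volume Spark function for Solr ingest of HathiTrust Extracted Features (EF) data.
 * Each call() receives one volume's EF JSON record as a Hadoop Text value, builds a
 * top-level metadata Solr document plus one Solr document per page, posts them to a
 * randomly chosen Solr endpoint (when endpoints are configured), and returns the
 * number of pages in the volume.
 */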
public class PerVolumeJSON implements Function<Text,Integer>
{
    private static final long serialVersionUID = 1L;
    protected String _input_dir;
    protected String _whitelist_filename;
    protected String _langmap_directory;

    protected final ArrayList<String> _solr_endpoints;
    protected final int _solr_endpoints_len;

    //protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    protected WhitelistBloomFilter _whitelist_bloomfilter;
    protected UniversalPOSLangMap _universal_langmap;

    boolean _icu_tokenize;
    boolean _strict_file_io;

    public PerVolumeJSON(String input_dir, String whitelist_filename, String langmap_directory,
                         ArrayList<String> solr_endpoints, String output_dir, int verbosity,
                         boolean icu_tokenize, boolean strict_file_io)
    {
        System.out.println("*** PerVolumeJSON Constructor, langmap_directory = " + langmap_directory);

        _input_dir = input_dir;
        _whitelist_filename = whitelist_filename;
        _langmap_directory = langmap_directory;

        _solr_endpoints = solr_endpoints;
        _solr_endpoints_len = solr_endpoints.size();

        //_solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;

        _icu_tokenize = icu_tokenize;
        _strict_file_io = strict_file_io;

        _whitelist_bloomfilter = null;
        _universal_langmap = null;
    }

    public Integer call(Text json_text) throws IOException
    {
        // Lazily initialise the whitelist Bloom filter and POS language map on first use
        if (_whitelist_filename != null) {
            synchronized (_whitelist_filename) {
                if (_whitelist_bloomfilter == null) {
                    _whitelist_bloomfilter = new WhitelistBloomFilter(_whitelist_filename, true);
                }
            }
        }

        if (_langmap_directory != null) {
            synchronized (_langmap_directory) {
                if (_universal_langmap == null) {
                    _universal_langmap = new UniversalPOSLangMap(_langmap_directory);
                }
            }
        }

        int ef_num_pages = 0;

        // Choose one of the configured Solr endpoints at random, so posts are spread across them
        String solr_url = null;
        if (_solr_endpoints_len > 0) {
            int random_choice = (int)(_solr_endpoints_len * Math.random());
            solr_url = _solr_endpoints.get(random_choice);
        }

        try {
            JSONObject extracted_feature_record = new JSONObject(json_text.toString());

            if (extracted_feature_record != null) {
                String volume_id = extracted_feature_record.getString("id");

                JSONObject ef_metadata = extracted_feature_record.getJSONObject("metadata");
                //String title = ef_metadata.getString("title");

                //
                // Top-level metadata Solr doc
                //
                JSONObject solr_add_metadata_doc_json = SolrDocJSON.generateToplevelMetadataSolrDocJSON(volume_id, ef_metadata);
                if (solr_add_metadata_doc_json != null) {

                    if (_verbosity >= 2) {
                        System.out.println("==================");
                        System.out.println("Metadata JSON: " + solr_add_metadata_doc_json.toString());
                        System.out.println("==================");
                    }

                    if (solr_url != null) {
                        if (_verbosity >= 2) {
                            System.out.println("==================");
                            System.out.println("Posting to: " + solr_url);
                            System.out.println("==================");
                        }
                        SolrDocJSON.postSolrDoc(solr_url, solr_add_metadata_doc_json, volume_id, "top-level-metadata");
                    }
                }

                //
                // Now move on to POS extracted features per-page
                //
                boolean index_pages = true;
                if (index_pages) {

                    JSONObject ef_features = extracted_feature_record.getJSONObject("features");

                    int ef_page_count = ef_features.getInt("pageCount");

                    if (_verbosity >= 1) {
                        System.out.println("Processing: " + volume_id);
                        System.out.println("  pageCount = " + ef_page_count);
                    }

                    JSONArray ef_pages = ef_features.getJSONArray("pages");
                    ef_num_pages = ef_pages.length();

                    for (int i = 0; i < ef_page_count; i++) {
                        String formatted_i = String.format("page-%06d", i);
                        String page_id = volume_id + "." + formatted_i;

                        if (_verbosity >= 2) {
                            System.out.println("  Page: " + page_id);
                        }

                        JSONObject ef_page = ef_pages.getJSONObject(i);

                        if (ef_page != null) {
                            // Convert to Solr add form
                            JSONObject solr_add_doc_json
                                = SolrDocJSON.generateSolrDocJSON(volume_id, page_id, ef_page, _whitelist_bloomfilter, _universal_langmap, _icu_tokenize);

                            if ((_verbosity >= 2) && (i == 20)) {
                                System.out.println("==================");
                                System.out.println("Sample output Solr add JSON [page 20]: " + solr_add_doc_json.toString());
                                System.out.println("==================");
                            }

                            if (solr_url != null) {
                                SolrDocJSON.postSolrDoc(solr_url, solr_add_doc_json, volume_id, page_id);
                            }
                        }
                        else {
                            System.err.println("Skipping: " + page_id);
                        }
                    }
                }
            }
        }
        catch (Exception e) {
            if (_strict_file_io) {
                throw e;
            }
            else {
                e.printStackTrace();
            }
        }

        return ef_num_pages;
    }

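    // A previous, file-based variant of call(), kept commented out for reference: it read the
    // volume JSON from a bzipped file under _input_dir, posted per-page Solr docs to the
    // (now commented-out) single _solr_url, and could also save them under _output_dir.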
    /*
    //public void call(String json_file_in) throws IOException
    public Integer call(String json_file_in) throws IOException
    {
        if ((_whitelist_filename != null) && (_whitelist_bloomfilter == null)) {
            _whitelist_bloomfilter = new WhitelistBloomFilter(_whitelist_filename, true);
        }

        int ef_num_pages = 0;

        ArrayList<String> ids = new ArrayList<String>(); // want it to be non-null so can return valid iterator

        String full_json_file_in = _input_dir + "/" + json_file_in;
        JSONObject extracted_feature_record = JSONClusterFileIO.readJSONFile(full_json_file_in);

        if (extracted_feature_record != null) {
            String volume_id = extracted_feature_record.getString("id");

            //JSONObject ef_metadata = extracted_feature_record.getJSONObject("metadata");
            //String title = ef_metadata.getString("title");

            JSONObject ef_features = extracted_feature_record.getJSONObject("features");

            int ef_page_count = ef_features.getInt("pageCount");

            if (_verbosity >= 1) {
                System.out.println("Processing: " + json_file_in);
                System.out.println("  pageCount = " + ef_page_count);
            }

            JSONArray ef_pages = ef_features.getJSONArray("pages");
            ef_num_pages = ef_pages.length();

            // Make directory for page-level JSON output
            String json_dir = ClusterFileIO.removeSuffix(json_file_in, ".json.bz2");
            String page_json_dir = json_dir + "/pages";

            if (_output_dir != null) {
                ClusterFileIO.createDirectoryAll(_output_dir + "/" + page_json_dir);
            }

            ids = new ArrayList<String>(ef_num_pages);
            for (int i = 0; i < ef_page_count; i++) {
                String formatted_i = String.format("page-%06d", i);
                String page_id = volume_id + "." + formatted_i;

                if (_verbosity >= 2) {
                    System.out.println("  Page: " + page_id);
                }

                String output_json_bz2 = page_json_dir + "/" + formatted_i + ".json.bz2";
                ids.add(page_id);

                if (_verbosity >= 2) {
                    if (i == 0) {
                        System.out.println("Sample output JSON page file [i=0]: " + output_json_bz2);
                    }
                }
                JSONObject ef_page = ef_pages.getJSONObject(i);

                if (ef_page != null) {
                    // Convert to Solr add form
                    JSONObject solr_add_doc_json
                        = SolrDocJSON.generateSolrDocJSON(volume_id, page_id, ef_page, _whitelist_bloomfilter, _icu_tokenize);

                    if ((_verbosity >= 2) && (i == 20)) {
                        System.out.println("==================");
                        System.out.println("Sample output Solr add JSON [page 20]: " + solr_add_doc_json.toString());
                        System.out.println("==================");
                    }

                    if (_solr_url != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Posting to: " + _solr_url);
                            System.out.println("==================");
                        }
                        SolrDocJSON.postSolrDoc(_solr_url, solr_add_doc_json);
                    }

                    if (_output_dir != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Saving to: " + _output_dir);
                            System.out.println("==================");
                        }
                        SolrDocJSON.saveSolrDoc(solr_add_doc_json, _output_dir + "/" + output_json_bz2);
                    }
                }
                else {
                    System.err.println("Skipping: " + page_id);
                }
            }
        }
        else {
            // File did not exist, or could not be parsed
            String mess = "Failed to read in bzipped JSON file '" + full_json_file_in + "'";
            if (_strict_file_io) {
                throw new IOException(mess);
            }
            else {
                System.err.println("Warning: " + mess);
                System.out.println("Warning: " + mess);
            }
        }

        return ef_num_pages;
    }
    */
}
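
For orientation, below is a minimal driver sketch showing one way a function like this might be applied from a Spark job: per-volume EF JSON records are read as Text values from a SequenceFile, PerVolumeJSON is mapped over them, and the page counts it returns are summed. The driver class name, input path, Solr endpoint URL, and constructor argument values are illustrative assumptions, not something defined in this repository file.

// Illustrative driver sketch (assumed class name, paths and endpoint; not part of PerVolumeJSON.java)
package org.hathitrust.extractedfeatures;

import java.util.ArrayList;

import org.apache.hadoop.io.Text;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

public class PerVolumeJSONDriverSketch
{
    public static void main(String[] args)
    {
        SparkConf conf = new SparkConf().setAppName("HTRC EF Solr ingest (sketch)");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Assumed Solr update endpoint(s); PerVolumeJSON picks one at random per volume
        ArrayList<String> solr_endpoints = new ArrayList<String>();
        solr_endpoints.add("http://localhost:8983/solr/htrc-ef/update");

        // Assumed SequenceFile of per-volume EF JSON records; keys are ignored, values hold the JSON text
        JavaRDD<Text> volume_json
            = jsc.sequenceFile("hdfs:///user/htrc/ef-json.seq", Text.class, Text.class).values();

        // No whitelist, language map or output directory; verbosity 1, no ICU tokenizing, non-strict file IO
        PerVolumeJSON per_vol_json
            = new PerVolumeJSON("hdfs:///user/htrc/ef-json", null, null,
                                solr_endpoints, null, 1, false, false);

        // Apply the per-volume function and total up the page counts it returns
        int total_pages = volume_json.map(per_vol_json)
            .reduce(new Function2<Integer, Integer, Integer>() {
                private static final long serialVersionUID = 1L;
                public Integer call(Integer a, Integer b) { return a + b; }
            });

        System.out.println("Total pages processed: " + total_pages);

        jsc.stop();
    }
}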