source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/PerVolumeJSON.java@31266

Last change on this file since 31266 was 31266, checked in by davidb, 7 years ago

Rekindling of the per-volume approach. Also some tweaking of verbosity debug printing in the per-page code.

package org.hathitrust.extractedfeatures;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;

import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.util.DoubleAccumulator;
import org.json.JSONArray;
import org.json.JSONObject;

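/**
 * Per-volume Spark processing of HathiTrust Extracted Features files.
 *
 * Each call() takes the (relative) filename of one bzipped volume JSON
 * file, converts every page in it to a Solr "add" document, optionally
 * POSTs each document to Solr and/or saves it under an output directory,
 * and returns the generated page IDs.
 */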
public class PerVolumeJSON implements FlatMapFunction<String,String>
{
    private static final long serialVersionUID = 1L;

    // Location of the input volume JSON files and (optional) vocabulary whitelist
    protected String _input_dir;
    protected String _whitelist_filename;

    // Where to send/store the generated Solr documents (either may be null)
    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    // Built lazily from _whitelist_filename on first call()
    protected WhitelistBloomFilter _whitelist_bloomfilter;

    // Driver-side accumulator used to report progress as volumes complete
    protected DoubleAccumulator _progress_accum;
    protected double _progress_step;

    boolean _icu_tokenize;
    boolean _strict_file_io;

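    /**
     * Captures all job configuration up front; Spark serializes this
     * object out to the executors, so fields are plain values plus an
     * accumulator.
     */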
    public PerVolumeJSON(String input_dir, String whitelist_filename,
                         String solr_url, String output_dir, int verbosity,
                         DoubleAccumulator progress_accum, double progress_step,
                         boolean icu_tokenize, boolean strict_file_io)
    {
        _input_dir = input_dir;
        _whitelist_filename = whitelist_filename;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;

        _progress_accum = progress_accum;
        _progress_step = progress_step;

        _icu_tokenize = icu_tokenize;
        _strict_file_io = strict_file_io;

        _whitelist_bloomfilter = null;
    }

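    /**
     * Processes one volume. Returns the page IDs generated for the
     * volume, or an empty iterator if the file could not be read (and
     * strict file I/O is off). With strict file I/O on, a read failure
     * raises IOException.
     */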
    public Iterator<String> call(String json_file_in) throws IOException
    {
        // Build the whitelist Bloom filter lazily, once per executor
        if ((_whitelist_filename != null) && (_whitelist_bloomfilter == null)) {
            _whitelist_bloomfilter = new WhitelistBloomFilter(_whitelist_filename, true);
        }

        ArrayList<String> ids = null;

        String full_json_file_in = _input_dir + "/" + json_file_in;
        JSONObject extracted_feature_record = JSONClusterFileIO.readJSONFile(full_json_file_in);

        if (extracted_feature_record != null) {
            String volume_id = extracted_feature_record.getString("id");

            JSONObject ef_features = extracted_feature_record.getJSONObject("features");

            int ef_page_count = ef_features.getInt("pageCount");

            if (_verbosity >= 1) {
                System.out.println("Processing: " + json_file_in);
                System.out.println("  pageCount = " + ef_page_count);
            }

            JSONArray ef_pages = ef_features.getJSONArray("pages");
            int ef_num_pages = ef_pages.length();

            // Guard against metadata that disagrees with the actual page array;
            // iterate over the pages that are actually present
            if (ef_page_count != ef_num_pages) {
                System.err.println("Warning: pageCount (" + ef_page_count + ") != pages.length() ("
                                   + ef_num_pages + ") for " + volume_id);
            }

            // Make directory for page-level JSON output
            String json_dir = ClusterFileIO.removeSuffix(json_file_in, ".json.bz2");
            String page_json_dir = json_dir + "/pages";

            if (_output_dir != null) {
                ClusterFileIO.createDirectoryAll(_output_dir + "/" + page_json_dir);
            }

            ids = new ArrayList<String>(ef_num_pages);
            for (int i = 0; i < ef_num_pages; i++) {
                String formatted_i = String.format("page-%06d", i);
                String page_id = volume_id + "." + formatted_i;

                if (_verbosity >= 2) {
                    System.out.println("  Page: " + page_id);
                }

                String output_json_bz2 = page_json_dir + "/" + formatted_i + ".json.bz2";
                ids.add(page_id);

                if (i == 0) {
                    System.out.println("Sample output JSON page file: " + output_json_bz2);
                }

                // optJSONObject() returns null (rather than throwing) for a missing page entry
                JSONObject ef_page = ef_pages.optJSONObject(i);

                if (ef_page != null) {
                    // Convert to Solr add form
                    JSONObject solr_add_doc_json
                        = SolrDocJSON.generateSolrDocJSON(volume_id, page_id, ef_page,
                                                          _whitelist_bloomfilter, _icu_tokenize);

                    // Page 20 is an arbitrary choice for printing one sample document
                    if ((_verbosity >= 2) && (i == 20)) {
                        System.out.println("==================");
                        System.out.println("Sample output Solr add JSON [page 20]: " + solr_add_doc_json.toString());
                        System.out.println("==================");
                    }

                    if (_solr_url != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Posting to: " + _solr_url);
                            System.out.println("==================");
                        }
                        SolrDocJSON.postSolrDoc(_solr_url, solr_add_doc_json);
                    }

                    if (_output_dir != null) {
                        if ((_verbosity >= 2) && (i == 20)) {
                            System.out.println("==================");
                            System.out.println("Saving to: " + _output_dir);
                            System.out.println("==================");
                        }
                        SolrDocJSON.saveSolrDoc(solr_add_doc_json, _output_dir + "/" + output_json_bz2);
                    }
                }
                else {
                    System.err.println("Skipping: " + page_id);
                }
            }
        }
        else {
            // File did not exist, or could not be parsed
            String mess = "Failed to read in bzipped JSON file '" + full_json_file_in + "'";
            if (_strict_file_io) {
                throw new IOException(mess);
            }
            else {
                // Echoed to both streams so the warning shows up in either captured log
                System.err.println("Warning: " + mess);
                System.out.println("Warning: " + mess);
            }
        }

        _progress_accum.add(_progress_step);

        // Guard against the failed-read path above, where ids was never allocated
        if (ids == null) {
            ids = new ArrayList<String>();
        }

        return ids.iterator();
    }
}
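For context, here is a minimal sketch of how this class might be wired into a Spark driver. The driver class name, file paths, Solr URL, and argument values below are illustrative assumptions, not part of this file:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.DoubleAccumulator;

public class PerVolumeJSONDriver {
    public static void main(String[] args) {
        // Hypothetical inputs: a text file listing one volume JSON path per line
        String input_dir = "hdfs:///user/htrc/ef-json";       // assumed location
        String volume_list = input_dir + "/volume-list.txt";  // assumed listing file

        JavaSparkContext jsc
            = new JavaSparkContext(new SparkConf().setAppName("EF Per-Volume Solr Ingest"));

        JavaRDD<String> json_files = jsc.textFile(volume_list);

        // Progress is reported by each call() adding one "step" to the accumulator
        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");
        double progress_step = 100.0 / json_files.count();

        PerVolumeJSON per_vol_json
            = new PerVolumeJSON(input_dir, null /* no whitelist */,
                                "http://localhost:8983/solr/htrc/update", null /* no output dir */,
                                1 /* verbosity */, progress_accum, progress_step,
                                false /* icu_tokenize */, false /* strict_file_io */);

        // flatMap() invokes call() once per volume; count() forces evaluation
        long num_page_ids = json_files.flatMap(per_vol_json).count();
        System.out.println("Generated " + num_page_ids + " page-level Solr docs");

        jsc.close();
    }
}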