source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForSolrIngest.java@ 31226

Last change on this file since 31226 was 31220, checked in by davidb, 7 years ago

Use of whitelist Bloom filter added to words going into Solr index

  • Property svn:executable set to *
File size: 10.9 KB
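
The change summarized above adds a whitelist Bloom-filter check to the words sent to the Solr index; that check lives in the companion classes (PerVolumeJSON / PerPageJSONFlatmap) rather than in this driver file. As a hedged illustration only, a membership test of this kind might look like the sketch below, which assumes Guava's BloomFilter and uses hypothetical names (loadWhitelist, word); it is not the project's actual implementation.

    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;
    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    public class WhitelistBloomFilterSketch {
        // Build a Bloom filter from a one-word-per-line whitelist file (hypothetical format).
        static BloomFilter<CharSequence> loadWhitelist(String filename, int expected_entries) throws IOException {
            BloomFilter<CharSequence> bloom_filter =
                BloomFilter.create(Funnels.stringFunnel(StandardCharsets.UTF_8), expected_entries, 0.01);
            try (BufferedReader br = new BufferedReader(new FileReader(filename))) {
                String line;
                while ((line = br.readLine()) != null) {
                    bloom_filter.put(line.trim());
                }
            }
            return bloom_filter;
        }

        public static void main(String[] args) throws IOException {
            BloomFilter<CharSequence> whitelist = loadWhitelist(args[0], 1_000_000);
            String word = "example";
            // Only words that (probably) appear in the whitelist would be added to the Solr document.
            if (whitelist.mightContain(word)) {
                System.out.println("'" + word + "' passes the whitelist check");
            }
        }
    }
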
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;
import org.apache.spark.SparkConf;
public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Details on the number of partitions to use are given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    protected static final int DEFAULT_NUM_CORES = 6;
    protected static final int DEFAULT_NUM_PARTITIONS = 3*DEFAULT_NUM_CORES;
    protected String _input_dir;
    protected String _json_list_filename;
    protected String _whitelist_filename;
    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

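    // Records the input locations and ingest targets. Whether a whitelist is
    // applied (and which file it is read from) is controlled by the system
    // properties wcsa-ef-ingest.use-whitelist and wcsa-ef-ingest.whitelist-filename.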
    public ProcessForSolrIngest(String input_dir, String json_list_filename,
                                String solr_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

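    // Builds a human-readable Spark application name recording the execution
    // mode, the JSON file list, and (when set) the Solr URL and output directory.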
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        spark_app_name += " [" + _json_list_filename + "]";

        if (_solr_url != null) {
            spark_app_name += " solr_url=" + _solr_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

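    // Expands the single --solr-url value into one endpoint per node listed in
    // the wcsa-ef-ingest.solr-cloud-nodes property (comma-separated), substituting
    // each node into the URL's host:port position; if the property is unset,
    // the original URL is used as the only endpoint.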
    public ArrayList<String> extrapolateSolrEndpoints()
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_url != null) {
            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String[] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = _solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(_solr_url);
            }
        }

        return solr_endpoints;
    }

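    // Per-volume mode: reads the list of JSON file paths, then applies
    // PerVolumeJSON to each entry so a whole volume is processed (and posted
    // to Solr and/or written out) in a single foreach step.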
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        if (_verbosity >= 2) {
            System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
            System.out.println("Default Parallelism: " + jsc.defaultParallelism());
        }

        int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,num_partitions).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        //JavaRDD<String> json_list_data_rp = json_list_data.repartition((int)(num_volumes/100));

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        System.err.println();
        System.err.println();
        System.err.println();
        System.err.println("****##### _input_dir = " + _input_dir);
        System.err.println();
        System.err.println();
        System.err.println();

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity, progress_accum,per_vol);

        json_list_data.foreach(per_vol_json);

        long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of volume ids: " + num_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }


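    // Per-page mode: flat-maps each volume into one Solr JSON object per page
    // (PerPageJSONFlatmap), then maps each page object to its id while posting
    // it to one of the Solr endpoints (PerPageJSONMap). The final count()
    // triggers evaluation of the whole lazy pipeline.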
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        if (_verbosity >= 2) {
            System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
            System.out.println("Default Parallelism: " + jsc.defaultParallelism());
        }

        int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,num_partitions).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        //JavaRDD<String> json_list_data_rp = json_list_data.repartition((int)(num_volumes/100));

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        //String strict_file_io_str = System.getProperty("wcsa-ef-ingest.strict-file-io","true");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_whitelist_filename,
                                     _solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     strict_file_io);
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data.flatMap(paged_solr_json_flatmap).cache();

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        /*
        System.out.println("");
        System.out.println("############");
        System.out.println("# Progress Accumulator: " + progress_accum.value());
        System.out.println("############");
        System.out.println("");
        */

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        /*
        if (_output_dir != null) {
            String rdd_save_file = "rdd-solr-json-page-files";
            json_ids.saveAsTextFile(rdd_save_file);
            System.out.println("############");
            System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
            System.out.println("# " + rdd_save_file);
            System.out.println("############");
            System.out.println("");
        }
        */

        jsc.close();
    }


    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

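    // Entry point: parses the command-line options, optionally loads a Java
    // properties file, validates that at least one of --solr-url/--output-dir
    // is given (unless --read-only), and then runs either per-volume or
    // per-page processing according to wcsa-ef-ingest.process-ef-json-mode.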
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_url_opt = new Option("u", "solr-url", true,
                "If specified, the URL to post the Solr JSON data to");
        solr_url_opt.setRequired(false);
        options.addOption(solr_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_url = cmd.getOptionValue("solr-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            // try-with-resources ensures the property file streams are closed
            try (FileInputStream fis = new FileInputStream(property_filename);
                 BufferedInputStream bis = new BufferedInputStream(fis)) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_url==null))) {
            System.err.println("Need to specify either --solr-url or --output-dir, otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_url = null;
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,json_list_filename,solr_url,output_dir,verbosity);

        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }
    }
}