source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForSolrIngest.java@31374

Last change on this file since 31374 was 31374, checked in by davidb, 7 years ago

simplified command line usage

  • Property svn:executable set to *
File size: 12.5 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;
import org.apache.spark.SparkConf;

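/**
 * Spark driver for the HathiTrust Extracted Features workset: reads per-volume
 * Extracted Features JSON packed into Hadoop sequence files and, for each volume,
 * posts the generated Solr JSON to the configured Solr URL and/or saves it under
 * the given output directory. Behaviour is further tuned through JVM system
 * properties prefixed "wcsa-ef-ingest." (see the constructor and main()), which
 * can be supplied with -D flags or via the optional --properties file.
 */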
public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Details on the number of partitions to use are given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    protected static final int DEFAULT_NUM_CORES = 10;
    protected static final int MINIMUM_NUM_PARTITIONS = 10*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    //protected String _json_list_filename;
    protected String _whitelist_filename;
    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    public ProcessForSolrIngest(String input_dir, /*String json_list_filename,*/
                                String solr_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        //_json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        // The whitelist file is only consulted when the "wcsa-ef-ingest.use-whitelist"
        // system property is set to true
        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

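    /**
     * Builds the application name shown in the Spark UI, recording the execution
     * mode plus the Solr URL and/or output directory being targeted.
     */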
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        //spark_app_name += " [" + _json_list_filename + "]";

        if (_solr_url != null) {
            spark_app_name += " solr_url=" + _solr_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

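    /**
     * Expands the base Solr URL into one endpoint per SolrCloud node. The
     * comma-separated "wcsa-ef-ingest.solr-cloud-nodes" system property supplies
     * the host:port of each node, which replaces the host:port part of the base
     * URL; if the property is unset, the base URL is the only endpoint returned.
     */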
    public ArrayList<String> extrapolateSolrEndpoints()
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_url != null) {
            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String [] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = _solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(_solr_url);
            }
        }

        return solr_endpoints;
    }

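    /**
     * Main processing path: reads the packed per-volume Extracted Features JSON
     * from the input sequence file(s), applies PerVolumeJSON to each volume to
     * generate and ingest/save the Solr documents, and sums the per-volume page
     * counts so the total number of pages processed can be reported.
     */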
    public void execPerVolumeSequenceFile()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.hadoopConfiguration().set("io.compression.codec.bzip2.library", "java-builtin");

        //String packed_sequence_path = "hdfs:///user/capitanu/data/packed-ef";
        String packed_sequence_path = _input_dir;

        JavaPairRDD<Text, Text> input_pair_rdd = jsc.sequenceFile(packed_sequence_path, Text.class, Text.class);

        JavaRDD<Text> json_text_rdd = input_pair_rdd.map(item -> item._2);

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity,
                                                       icu_tokenize,strict_file_io);

        JavaRDD<Integer> per_volume_page_count = json_text_rdd.map(per_vol_json);

        Integer num_page_ids = per_volume_page_count.reduce((a, b) -> a + b);

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

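    // The two methods below are earlier processing modes (per-volume and per-page,
    // driven by a JSON file list) kept for reference; they are no longer called
    // from main(), whose mode-selection code is likewise commented out.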
    /*
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        if (num_partitions < MINIMUM_NUM_PARTITIONS) {
            num_partitions = MINIMUM_NUM_PARTITIONS;
        }
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity, progress_accum,per_vol,
                                                       icu_tokenize,strict_file_io);

        //json_list_data_rp.foreach(per_vol_json);
        JavaRDD<String> per_page_ids = json_list_data_rp.flatMap(per_vol_json);
        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol

        //long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }
    */

    /*
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_whitelist_filename,
                                     _solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     icu_tokenize,strict_file_io);
        //JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap).cache();
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap);

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        //if (_output_dir != null) {
        //String rdd_save_file = "rdd-solr-json-page-files";
        //json_ids.saveAsTextFile(rdd_save_file);
        //System.out.println("############");
        //System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
        //System.out.println("# " + rdd_save_file);
        //System.out.println("############");
        //System.out.println("");
        //}

        jsc.close();
    }
    */

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir", options);
    }

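    /**
     * Command-line entry point: parses the options, optionally loads additional
     * system properties from the --properties file, checks that a single input
     * directory was supplied along with a Solr URL and/or output directory (unless
     * running --read-only), and then kicks off the per-volume sequence-file run.
     */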
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_url_opt = new Option("u", "solr-url", true,
                "If specified, the URL to post the Solr JSON data to");
        solr_url_opt.setRequired(false);
        options.addOption(solr_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_url = cmd.getOptionValue("solr-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 1) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_url==null))) {
            System.err.println("Need to specify either --solr-url or --output-dir, otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_url = null;
        }

        String input_dir = filtered_args[0];
        //String json_list_filename = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,/*json_list_filename,*/solr_url,output_dir,verbosity);

        prep_for_ingest.execPerVolumeSequenceFile();

        /*
        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }
        */
    }
}