source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForSolrIngest.java@31375

Last change on this file since 31375 was 31375, checked in by davidb, 7 years ago

Initial cut at including POS information to solr index

  • Property svn:executable set to *
File size: 12.4 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;
import org.apache.spark.SparkConf;

public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    protected static final int DEFAULT_NUM_CORES = 10;
    protected static final int MINIMUM_NUM_PARTITIONS = 10*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    //protected String _json_list_filename;
    protected String _whitelist_filename;
    protected String _langmap_directory;

    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

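    /**
     * Reads the optional whitelist and language-map settings from JVM system
     * properties (wcsa-ef-ingest.use-whitelist, wcsa-ef-ingest.whitelist-filename,
     * wcsa-ef-ingest.use-langmap, wcsa-ef-ingest.langmap-directory), typically
     * supplied through the properties file loaded with -p/--properties in main().
     */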
    public ProcessForSolrIngest(String input_dir, /*String json_list_filename,*/
                                String solr_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        //_json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        boolean use_langmap = Boolean.getBoolean("wcsa-ef-ingest.use-langmap");
        _langmap_directory = (use_langmap) ? System.getProperty("wcsa-ef-ingest.langmap-directory") : null;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

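    // Builds a human-readable Spark application name that records the execution
    // mode and, when set, the Solr URL and output directory being used.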
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        //spark_app_name += " [" + _json_list_filename + "]";

        if (_solr_url != null) {
            spark_app_name += " solr_url=" + _solr_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

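    // Expands _solr_url into one endpoint per SolrCloud node. When the
    // wcsa-ef-ingest.solr-cloud-nodes property holds a comma-separated list of
    // host:port entries, the host:port part of _solr_url is replaced by each node.
    // Illustrative example (hypothetical values): with _solr_url
    // "http://solr-head:8983/solr/ef-core/update" and nodes "node1:8983,node2:8983",
    // the same URL is produced rewritten against node1:8983 and against node2:8983.
    // If no node list is set, the original _solr_url is returned on its own.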
    public ArrayList<String> extrapolateSolrEndpoints()
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_url != null) {
            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String [] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = _solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(_solr_url);
            }
        }

        return solr_endpoints;
    }

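    // Driver for the current ingest path: reads the packed Extracted Features
    // sequence file(s) from _input_dir, applies PerVolumeJSON to each volume's
    // JSON record, and sums the per-volume counts it returns (reported below as
    // the number of page ids).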
    public void execPerVolumeSequenceFile()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.hadoopConfiguration().set("io.compression.codec.bzip2.library", "java-builtin");

        //String packed_sequence_path = "hdfs:///user/capitanu/data/packed-ef";
        String packed_sequence_path = _input_dir;

        JavaPairRDD<Text, Text> input_pair_rdd = jsc.sequenceFile(packed_sequence_path, Text.class, Text.class);

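        // Only the value of each (key, value) pair is kept: the value carries the
        // volume's Extracted Features JSON text, which is what PerVolumeJSON consumes.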
        JavaRDD<Text> json_text_rdd = input_pair_rdd.map(item -> item._2);

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename, _langmap_directory,
                                                       _solr_url,_output_dir,_verbosity,
                                                       icu_tokenize,strict_file_io);

        JavaRDD<Integer> per_volume_page_count = json_text_rdd.map(per_vol_json);

        Integer num_page_ids = per_volume_page_count.reduce((a, b) -> a + b);

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

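    // The earlier drivers that worked from a JSON file list, execPerVolume() and
    // execPerPage(), are kept below, commented out, for reference.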
    /*
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        if (num_partitions < MINIMUM_NUM_PARTITIONS) {
            num_partitions = MINIMUM_NUM_PARTITIONS;
        }
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity, progress_accum,per_vol,
                                                       icu_tokenize,strict_file_io);

        //json_list_data_rp.foreach(per_vol_json);
        JavaRDD<String> per_page_ids = json_list_data_rp.flatMap(per_vol_json);
        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol

        //long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }
    */

    /*
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_whitelist_filename,
                                     _solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     icu_tokenize,strict_file_io);
        //JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap).cache();
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap);

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        //if (_output_dir != null) {
        //String rdd_save_file = "rdd-solr-json-page-files";
        //json_ids.saveAsTextFile(rdd_save_file);
        //System.out.println("############");
        //System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
        //System.out.println("# " + rdd_save_file);
        //System.out.println("############");
        //System.out.println("");
        //}

        jsc.close();
    }
    */

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir", options);
    }

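    // Entry point. Parses the command line, optionally loads a Java properties
    // file (which sets the wcsa-ef-ingest.* switches used above), and then runs
    // the per-volume sequence-file driver.
    // Illustrative invocation (hypothetical paths and URL):
    //   RUN.bash -p ef-ingest.properties -u http://solr-head:8983/solr/ef-core/update packed-ef-dir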
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_url_opt = new Option("u", "solr-url", true,
                "If specified, the URL to post the Solr JSON data to");
        solr_url_opt.setRequired(false);
        options.addOption(solr_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_url = cmd.getOptionValue("solr-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 1) {
            print_usage(formatter,options);
            System.exit(1);
        }

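        // Properties from the optional -p file are merged into the JVM's system
        // properties, so they can drive the Boolean.getBoolean()/System.getProperty()
        // switches referenced throughout this class.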
        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

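        // A run needs at least one sink (--solr-url or --output-dir) unless it is
        // read-only; in read-only mode both are cleared so nothing is posted or saved.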
        if (!read_only && ((output_dir == null) && (solr_url==null))) {
            System.err.println("Need to specify either --solr-url or --output-dir otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_url = null;
        }

        String input_dir = filtered_args[0];
        //String json_list_filename = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,/*json_list_filename,*/solr_url,output_dir,verbosity);

        prep_for_ingest.execPerVolumeSequenceFile();

        /*
        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }*/
    }
}