package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;
import org.apache.spark.SparkConf;

public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    protected static final int DEFAULT_NUM_CORES = 10;
    protected static final int MINIMUM_NUM_PARTITIONS = 10*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    protected String _solr_base_url;
    protected String _solr_collection;

    protected String _whitelist_filename;
    protected String _langmap_directory;

    //protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

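    // Whitelist and language-map settings are not passed in as constructor arguments;
    // they are read from JVM system properties (wcsa-ef-ingest.use-whitelist,
    // wcsa-ef-ingest.whitelist-filename, wcsa-ef-ingest.use-langmap,
    // wcsa-ef-ingest.langmap-directory), typically loaded from the --properties file.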
    public ProcessForSolrIngest(String input_dir, String solr_collection,
                                String solr_base_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _solr_collection = solr_collection;

        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        boolean use_langmap = Boolean.getBoolean("wcsa-ef-ingest.use-langmap");
        _langmap_directory = (use_langmap) ? System.getProperty("wcsa-ef-ingest.langmap-directory") : null;

        _solr_base_url = solr_base_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

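    // Build a descriptive Spark application name recording the execution mode, the
    // target Solr collection, and (when set) the Solr base URL and output directory.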
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        spark_app_name += " [" + _solr_collection + "]";

        if (_solr_base_url != null) {
            spark_app_name += " solr_base_url=" + _solr_base_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

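    // Derive the list of Solr update endpoints for the given collection. If the
    // wcsa-ef-ingest.solr-cloud-nodes system property holds a comma-separated list
    // of host:port entries, the host:port portion of the base URL is rewritten for
    // each node; otherwise the single base-URL endpoint is returned.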
    public ArrayList<String> extrapolateSolrEndpoints(String solr_collection)
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_base_url != null) {
            String solr_url = _solr_base_url + "/" + solr_collection + "/update";

            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes", null);
            if (solr_cloud_nodes != null) {
                String[] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(solr_url);
            }
        }

        return solr_endpoints;
    }

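    // Main processing pass: read the packed per-volume Extracted Features JSON from
    // the sequence file(s) under _input_dir, apply PerVolumeJSON to each record, and
    // force evaluation with count() so every volume is ingested/saved.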
    public void execPerVolumeSequenceFile()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);
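        // Force Hadoop's pure-Java bzip2 codec (assumption: so the bzipped sequence
        // files can be read on nodes without the native compression libraries)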
        jsc.hadoopConfiguration().set("io.compression.codec.bzip2.library", "java-builtin");

        //String packed_sequence_path = "hdfs:///user/capitanu/data/packed-ef";
        String packed_sequence_path = _input_dir;

        JavaPairRDD<Text, Text> input_pair_rdd = jsc.sequenceFile(packed_sequence_path, Text.class, Text.class);
        //JavaPairRDD<String, String> input_pair_rdd = jsc.wholeTextFiles(packed_sequence_path);

        //JavaPairRDD<Text, Text> input_pair_sampled_rdd = input_pair_rdd.sample(false,0.5,42);

        //JavaRDD<Text> json_text_rdd = input_pair_sampled_rdd.map(item -> item._2);
        //JavaRDD<Text> json_text_rdd = input_pair_rdd.map(item -> new Text(item._2));
        JavaRDD<Text> json_text_rdd = input_pair_rdd.map(item -> item._2);

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints(_solr_collection);

        System.out.println("*** About to create PerVolumeJSON instance, _langmap_directory = " + _langmap_directory);
        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir, _whitelist_filename, _langmap_directory,
                                                       solr_endpoints, _output_dir, _verbosity,
                                                       icu_tokenize, strict_file_io);

        JavaRDD<Integer> per_volume_page_count = json_text_rdd.map(per_vol_json);

        //Integer num_page_ids = per_volume_page_count.reduce((a, b) -> a + b);
        long num_vol_ids = per_volume_page_count.count();

        System.out.println("");
        System.out.println("############");
        //System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("# Number of volume ids: " + num_vol_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

    /*
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        if (num_partitions < MINIMUM_NUM_PARTITIONS) {
            num_partitions = MINIMUM_NUM_PARTITIONS;
        }
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir, _whitelist_filename,
                                                       _solr_url, _output_dir, _verbosity, progress_accum, per_vol,
                                                       icu_tokenize, strict_file_io);

        //json_list_data_rp.foreach(per_vol_json);
        JavaRDD<String> per_page_ids = json_list_data_rp.flatMap(per_vol_json);
        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol

        //long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }
    */

    /*
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir, _whitelist_filename,
                                     _solr_url, _output_dir, _verbosity,
                                     per_vol_progress_accum, per_vol,
                                     icu_tokenize, strict_file_io);
        //JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap).cache();
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap);

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir, solr_endpoints, _output_dir, _verbosity,
                                 per_page_progress_accum, 1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        //if (_output_dir != null) {
            //String rdd_save_file = "rdd-solr-json-page-files";
            //json_ids.saveAsTextFile(rdd_save_file);
            //System.out.println("############");
            //System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
            //System.out.println("# " + rdd_save_file);
            //System.out.println("############");
            //System.out.println("");
        //}

        jsc.close();
    }
    */

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir solr-collection", options);
    }

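    // Command-line entry point: parse options, optionally load a Java properties file
    // into the system properties, then run the per-volume sequence-file ingest.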
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_base_url_opt = new Option("u", "solr-base-url", true,
                "If specified, the base URL to post the Solr JSON data to");
        solr_base_url_opt.setRequired(false);
        options.addOption(solr_base_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter, options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity", "1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties", null);

        String output_dir = cmd.getOptionValue("output-dir", null);
        String solr_base_url = cmd.getOptionValue("solr-base-url", null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter, options);
            System.exit(1);
        }

        if (property_filename != null) {
            // Load additional wcsa-ef-ingest.* settings into the JVM system properties
            try (FileInputStream fis = new FileInputStream(property_filename);
                 BufferedInputStream bis = new BufferedInputStream(fis)) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_base_url == null))) {
            System.err.println("Need to specify either --solr-base-url or --output-dir otherwise generated files are not ingested/saved");
            print_usage(formatter, options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_base_url = null;
        }

        String input_dir = filtered_args[0];
        String solr_collection = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir, solr_collection, solr_base_url, output_dir, verbosity);

        prep_for_ingest.execPerVolumeSequenceFile();

        /*
        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }*/
    }
}