package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.json.JSONObject;
import org.apache.spark.SparkConf;

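/**
 * Spark driver for processing HathiTrust Extracted Features JSON volume files
 * for Solr ingest. Each line of the supplied json-filelist.txt names one
 * volume's JSON file under input-dir. Depending on the
 * 'wcsa-ef-ingest.process-ef-json-mode' system property, volumes are either
 * processed whole ("per-volume") or flat-mapped into per-page Solr JSON
 * documents ("per-page", the default), which are then posted to the given
 * Solr URL and/or saved as BZipped JSON files under the output directory.
 *
 * Illustrative invocation (the URL and core name are examples only):
 *   RUN.bash --solr-url http://localhost:8983/solr/ef-core input-dir json-filelist.txt
 */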
public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Following details on number of partitions to use given in
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    public static final int NUM_CORES = 6;
    public static final int NUM_PARTITIONS = 2*NUM_CORES; // default would appear to be 2

    protected String _input_dir;
    protected String _json_list_filename;
    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    public ProcessForSolrIngest(String input_dir, String json_list_filename,
                                String solr_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

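    // Builds the application name shown in the Spark UI, embedding the execution
    // mode, the JSON filelist and (when given) the Solr URL and output directory,
    // so a run's parameters can be read off the cluster monitor.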
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        spark_app_name += " [" + _json_list_filename + "]";

        if (_solr_url != null) {
            spark_app_name += " solr_url=" + _solr_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

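    /**
     * Expands the single configured Solr URL into one endpoint per IP address
     * listed in the comma-separated 'wcsa-ef-ingest.solr-endpoint-ips' system
     * property, substituting each IP for the host portion of the URL.
     *
     * For example (illustrative values only), with _solr_url set to
     * "http://solr-host:8983/solr/ef-core" and
     * -Dwcsa-ef-ingest.solr-endpoint-ips=10.0.0.1,10.0.0.2 the result is:
     *   http://10.0.0.1:8983/solr/ef-core
     *   http://10.0.0.2:8983/solr/ef-core
     * If the property is unset, the list contains just _solr_url itself.
     */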
    public ArrayList<String> extrapolateSolrEndpoints()
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_url != null) {
            String solr_endpoint_ips = System.getProperty("wcsa-ef-ingest.solr-endpoint-ips",null);
            if (solr_endpoint_ips != null) {
                String[] ips = solr_endpoint_ips.split(",");
                for (String ip : ips) {
                    String solr_endpoint = _solr_url.replaceFirst("//.*?:", "//"+ip+":");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(_solr_url);
            }
        }

        return solr_endpoints;
    }

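    // Per-volume mode: each line of the filelist is handed to a PerVolumeJSON
    // function, which processes (and, where configured, ingests/saves) the whole
    // volume in one step; a DoubleAccumulator tracks percentage progress.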
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        if (_verbosity >= 2) {
            System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
            System.out.println("Default Parallelism: " + jsc.defaultParallelism());
        }

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,NUM_PARTITIONS).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        if (_verbosity >= 1) {
            System.err.println("_input_dir = " + _input_dir);
        }

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_solr_url,_output_dir,_verbosity, progress_accum,per_vol);

        json_list_data.foreach(per_vol_json);

        long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of volume ids: " + num_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

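    // Per-page mode: volumes are first flat-mapped into one JSONObject per page
    // (PerPageJSONFlatmap), then each page object is passed through PerPageJSONMap,
    // which (given the Solr endpoints and/or output dir it is constructed with)
    // performs the post/save and yields the page id; the final count() forces
    // evaluation of the whole lazy pipeline.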
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        if (_verbosity >= 2) {
            System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
            System.out.println("Default Parallelism: " + jsc.defaultParallelism());
        }

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,NUM_PARTITIONS).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        // Default to strict file IO unless the property explicitly says otherwise
        // (Boolean.getBoolean() would return false whenever the property is unset)
        boolean strict_file_io = Boolean.parseBoolean(System.getProperty("wcsa-ef-ingest.strict-file-io","true"));

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     strict_file_io);
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data.flatMap(paged_solr_json_flatmap).cache();

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        /*
        System.out.println("");
        System.out.println("############");
        System.out.println("# Progress Accumulator: " + per_vol_progress_accum.value());
        System.out.println("############");
        System.out.println("");
        */

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        /*
        if (_output_dir != null) {
            String rdd_save_file = "rdd-solr-json-page-files";
            per_page_ids.saveAsTextFile(rdd_save_file);
            System.out.println("############");
            System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
            System.out.println("#   " + rdd_save_file);
            System.out.println("############");
            System.out.println("");
        }
        */

        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_url_opt = new Option("u", "solr-url", true,
                "If specified, the URL to post the Solr JSON data to");
        solr_url_opt.setRequired(false);
        options.addOption(solr_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_url = cmd.getOptionValue("solr-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            // try-with-resources ensures the stream is closed after loading
            try (BufferedInputStream bis = new BufferedInputStream(new FileInputStream(property_filename))) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_url == null))) {
            System.err.println("Need to specify either --solr-url or --output-dir, otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_url = null;
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,json_list_filename,solr_url,output_dir,verbosity);

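        // Select the execution mode via the 'wcsa-ef-ingest.process-ef-json-mode'
        // system property (e.g. -Dwcsa-ef-ingest.process-ef-json-mode=per-volume);
        // any other value falls through to the per-page default.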
        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }
    }
}