package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;

public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Following details on the number of partitions to use given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    //protected static final int DEFAULT_NUM_CORES = 6;
    //protected static final int DEFAULT_NUM_PARTITIONS = 3*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;
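
    // The partition count is derived from the length of the input list rather
    // than fixed up front: num_partitions = (num_volumes/files_per_partition)+1,
    // so a list of 250,000 volume files at the default 3000 files per partition
    // gives (250000/3000)+1 = 84 partitions.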

    protected String _input_dir;
    protected String _json_list_filename;
    protected String _whitelist_filename;
    protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    public ProcessForSolrIngest(String input_dir, String json_list_filename,
                                String solr_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

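        // Boolean.getBoolean() and System.getProperty() read JVM system
        // properties, set either as -D flags (e.g. -Dwcsa-ef-ingest.use-whitelist=true)
        // or loaded from the file given via --properties in main()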
        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        spark_app_name += " [" + _json_list_filename + "]";

        if (_solr_url != null) {
            spark_app_name += " solr_url=" + _solr_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

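    // Expands _solr_url into one endpoint per Solr cloud node when the
    // wcsa-ef-ingest.solr-cloud-nodes property is set (comma-separated
    // host:port values). Hostnames below are illustrative: with
    //   _solr_url = "http://gchead:8983/solr/htrc-full-ef/update"
    //   -Dwcsa-ef-ingest.solr-cloud-nodes=gc1:8983,gc2:8983
    // the returned list is:
    //   http://gc1:8983/solr/htrc-full-ef/update
    //   http://gc2:8983/solr/htrc-full-ef/update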
    public ArrayList<String> extrapolateSolrEndpoints()
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_url != null) {
            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String[] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = _solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(_solr_url);
            }
        }

        return solr_endpoints;
    }

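    // Per-volume mode: each volume listed in the JSON file list is handled by
    // a single PerVolumeJSON call, which here flatMaps a volume to the ids of
    // its processed pages; the count() below forces the lazy pipeline to run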
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION); // key corrected from "wcsa-ef-ingest.num-partitions"

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;

        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity, progress_accum,per_vol,
                                                       icu_tokenize,strict_file_io);

        //json_list_data_rp.foreach(per_vol_json);
        JavaRDD<String> per_page_ids = json_list_data_rp.flatMap(per_vol_json);
        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol

        //long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

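    // Per-page mode: a two-stage pipeline. PerPageJSONFlatmap expands each
    // volume's JSON into one JSONObject per page; PerPageJSONMap then maps
    // each page JSONObject to its id (and, judging by its solr_endpoints and
    // _output_dir arguments, ingests/saves the page as a side effect)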
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        /*
        if (_verbosity >= 2) {
            System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
            System.out.println("Default Parallelism: " + jsc.defaultParallelism());
        }
        */

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION); // key corrected from "wcsa-ef-ingest.num-partitions"

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_whitelist_filename,
                                     _solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     icu_tokenize,strict_file_io);
        //JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap).cache();
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap);

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        /*
        System.out.println("");
        System.out.println("############");
        System.out.println("# Progress Accumulator: " + per_vol_progress_accum.value());
        System.out.println("############");
        System.out.println("");
        */

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        /*
        if (_output_dir != null) {
            String rdd_save_file = "rdd-solr-json-page-files";
            per_page_ids.saveAsTextFile(rdd_save_file);
            System.out.println("############");
            System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
            System.out.println("# " + rdd_save_file);
            System.out.println("############");
            System.out.println("");
        }
        */

        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }
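
    // Illustrative invocation (paths and URL are placeholders):
    //   RUN.bash -v 1 --properties ef-ingest.properties \
    //       --solr-url http://localhost:8983/solr/htrc-full-ef/update \
    //       pd-ef-json-files json-filelist.txt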

    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_url_opt = new Option("u", "solr-url", true,
                "If specified, the URL to post the Solr JSON data to");
        solr_url_opt.setRequired(false);
        options.addOption(solr_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_url = cmd.getOptionValue("solr-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
                bis.close();
            }
            catch (FileNotFoundException e) {
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_url == null))) {
            System.err.println("Need to specify either --solr-url or --output-dir, otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_url = null;
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,json_list_filename,solr_url,output_dir,verbosity);

        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }
    }
}