package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.*;
import org.apache.spark.SparkConf;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.json.JSONObject;

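/**
 * Spark driver for ingesting HathiTrust Extracted Features JSON into Solr.
 * Volumes are read from a SequenceFile of per-volume JSON, converted into
 * Solr JSON documents, and then POSTed to one or more Solr endpoints and/or
 * saved as BZipped files under an output directory.
 *
 * Runtime behaviour is further controlled through JVM properties with the
 * "wcsa-ef-ingest." prefix (use-whitelist, use-langmap, solr-cloud-nodes,
 * icu-tokenize, strict-file-io, ...), typically loaded via --properties.
 */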
public class ProcessForSolrIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    protected static final int DEFAULT_NUM_CORES = 10;
    protected static final int MINIMUM_NUM_PARTITIONS = 10*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    protected String _solr_base_url;
    protected String _solr_collection;

    protected String _whitelist_filename;
    protected String _langmap_directory;

    //protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

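    /**
     * Records the core CLI settings, and picks up the optional whitelist
     * filename and language-map directory from JVM properties when their
     * corresponding "use-" flags are set.
     */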
    public ProcessForSolrIngest(String input_dir, String solr_collection,
                                String solr_base_url, String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _solr_collection = solr_collection;

        // Boolean.getBoolean() reads a JVM property (not an env var), so these
        // flags come from -D options or the loaded properties file
        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        boolean use_langmap = Boolean.getBoolean("wcsa-ef-ingest.use-langmap");
        _langmap_directory = (use_langmap) ? System.getProperty("wcsa-ef-ingest.langmap-directory") : null;

        _solr_base_url = solr_base_url;
        _output_dir = output_dir;
        _verbosity = verbosity;
    }

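    /** Builds a descriptive Spark application name out of the run settings. */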
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest";
        spark_app_name += " [" + _solr_collection + "]";

        if (_solr_base_url != null) {
            spark_app_name += " solr_base_url=" + _solr_base_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }

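    /**
     * Expands the base URL into the list of Solr update endpoints to post to.
     * If the "wcsa-ef-ingest.solr-cloud-nodes" property is set to a
     * comma-separated list of host:port pairs (for example, a hypothetical
     * "solr1:8983,solr2:8983"), one endpoint per node is generated by
     * substituting the host:port part of the base URL; otherwise the single
     * base-URL endpoint is used.
     */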
    public ArrayList<String> extrapolateSolrEndpoints(String solr_collection)
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_base_url != null) {
            String solr_url = _solr_base_url + "/" + solr_collection + "/update";

            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String[] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(solr_url);
            }
        }

        return solr_endpoints;
    }

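    /**
     * Main processing route: reads the packed per-volume Extracted Features
     * JSON from a Hadoop SequenceFile, applies PerVolumeJSON to each volume,
     * and sums the per-volume page counts to report the total processed.
     */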
    public void execPerVolumeSequenceFile()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.hadoopConfiguration().set("io.compression.codec.bzip2.library", "java-builtin");

        //String packed_sequence_path = "hdfs:///user/capitanu/data/packed-ef";
        String packed_sequence_path = _input_dir;

        JavaPairRDD<Text, Text> input_pair_rdd = jsc.sequenceFile(packed_sequence_path, Text.class, Text.class);

        // Keys are volume IDs; only the JSON values are needed downstream
        JavaRDD<Text> json_text_rdd = input_pair_rdd.map(item -> item._2);

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints(_solr_collection);

        System.out.println("*** About to create PerVolumeJSON instance, _langmap_directory = " + _langmap_directory);
        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename, _langmap_directory,
                                                       solr_endpoints,_output_dir,_verbosity,
                                                       icu_tokenize,strict_file_io);

        JavaRDD<Integer> per_volume_page_count = json_text_rdd.map(per_vol_json);

        // Trigger evaluation and total up the number of pages processed
        Integer num_page_ids = per_volume_page_count.reduce((a, b) -> a + b);

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }

    /*
    public void execPerVolume()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        if (num_partitions < MINIMUM_NUM_PARTITIONS) {
            num_partitions = MINIMUM_NUM_PARTITIONS;
        }
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerVolumeJSON per_vol_json = new PerVolumeJSON(_input_dir,_whitelist_filename,
                                                       _solr_url,_output_dir,_verbosity, progress_accum,per_vol,
                                                       icu_tokenize,strict_file_io);

        //json_list_data_rp.foreach(per_vol_json);
        JavaRDD<String> per_page_ids = json_list_data_rp.flatMap(per_vol_json);
        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol

        //long num_ids = num_volumes;

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }
    */

    /*
    public void execPerPage()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        PerPageJSONFlatmap paged_solr_json_flatmap
            = new PerPageJSONFlatmap(_input_dir,_whitelist_filename,
                                     _solr_url,_output_dir,_verbosity,
                                     per_vol_progress_accum,per_vol,
                                     icu_tokenize,strict_file_io);
        //JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap).cache();
        JavaRDD<JSONObject> per_page_jsonobjects = json_list_data_rp.flatMap(paged_solr_json_flatmap);

        //long num_page_ids = per_page_jsonobjects.count(); // trigger lazy eval of: flatmap:per-vol

        LongAccumulator per_page_progress_accum = jsc.sc().longAccumulator("Pages Processed");
        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints();

        PerPageJSONMap paged_json_id_map
            = new PerPageJSONMap(_input_dir,solr_endpoints,_output_dir,_verbosity,
                                 per_page_progress_accum,1);
        JavaRDD<String> per_page_ids = per_page_jsonobjects.map(paged_json_id_map);

        long num_page_ids = per_page_ids.count(); // trigger lazy eval of: flatmap:per-vol -> map:per-page

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of page ids: " + num_page_ids);
        System.out.println("############");
        System.out.println("");

        //if (_output_dir != null) {
        //    String rdd_save_file = "rdd-solr-json-page-files";
        //    json_ids.saveAsTextFile(rdd_save_file);
        //    System.out.println("############");
        //    System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
        //    System.out.println("# " + rdd_save_file);
        //    System.out.println("############");
        //    System.out.println("");
        //}

        jsc.close();
    }
    */

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir solr-collection", options);
    }

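    // Example invocation (illustrative values only; the input path, host and
    // collection name below are assumptions, not part of this repo):
    //
    //   RUN.bash -u http://solr-host:8983/solr -o solr-json-out \
    //       hdfs:///user/me/packed-ef my-ef-collection
    //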
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_base_url_opt = new Option("u", "solr-base-url", true,
                "If specified, the base URL to post the Solr JSON data to");
        solr_base_url_opt.setRequired(false);
        options.addOption(solr_base_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_base_url = cmd.getOptionValue("solr-base-url",null);
        boolean read_only = cmd.hasOption("read-only");

        // Two positional arguments are expected: input-dir and solr-collection
        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                // Merge the file's entries into the JVM's system properties
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_base_url==null))) {
            System.err.println("Need to specify either --solr-base-url or --output-dir otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-base-url and output-dir are null
            output_dir = null;
            solr_base_url = null;
        }

        String input_dir = filtered_args[0];
        String solr_collection = filtered_args[1];

        ProcessForSolrIngest prep_for_ingest
            = new ProcessForSolrIngest(input_dir,solr_collection,solr_base_url,output_dir,verbosity);

        prep_for_ingest.execPerVolumeSequenceFile();

        /*
        String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        if (process_ef_json_mode.equals("per-volume")) {
            prep_for_ingest.execPerVolume();
        }
        else {
            prep_for_ingest.execPerPage();
        }
        */
    }
}