package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;

import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.util.DoubleAccumulator;
import scala.Tuple2;

import org.apache.spark.SparkConf;

/*
import com.mongodb.spark.api.java.MongoSpark;
import org.bson.Document;
import static java.util.Arrays.asList;
*/
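/**
 * Spark driver for the HTRC Extracted Features workset: reads a list of
 * per-volume JSON filenames and maps each volume through
 * PerVolumeMongoDBDocumentsMap (defined elsewhere in this package), which
 * carries out the per-volume processing for MongoDB ingest.
 *
 * Invoked via the project's run script:
 *   RUN.bash [options] input-dir json-filelist.txt
 */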
public class ProcessForMongoDBIngest implements Serializable
{
    private static final long serialVersionUID = 1L;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    protected String _json_list_filename;

    protected int _verbosity;

    public ProcessForMongoDBIngest(String input_dir, String json_list_filename, int verbosity)
    {
        _input_dir = input_dir;
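        // Fall back to the input directory itself when no explicit file list is given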
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extracted Features: Process for MongoDB Ingest";
        spark_app_name += " [" + _json_list_filename + "]";

        return spark_app_name;
    }

    public void execMongoDBIngest()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf spark_conf = new SparkConf().setAppName(spark_app_name);
        //spark_conf.set("spark.mongodb.output.uri", "mongodb://127.0.0.1/htrc_ef.volumes");

        JavaSparkContext jsc = new JavaSparkContext(spark_conf);

        /*
        JavaRDD<Document> documents = jsc.parallelize(asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)).map
            (new Function<Integer, Document>() {
                public Document call(final Integer i) throws Exception {
                    return Document.parse("{test: " + i + "}");
                }
            });

        MongoSpark.save(documents);
        */

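        // Derive the output directory name from the file-list name, and refuse to run
        // if it already exists, since Spark cannot write into an existing output directory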
        String filename_root = _json_list_filename.replaceAll(".*/","").replaceAll("\\..*$","");
        String output_directory = "catalog-lang-" + filename_root + "-out";
        if (ClusterFileIO.exists(output_directory))
        {
            System.err.println("Error: " + output_directory + " already exists. Spark is unable to write output data");
            jsc.close();
            System.exit(1);
        }

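        // Partition size is tunable through the 'wcsa-ef-ingest.files-per-partition' Java system property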
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

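        // Integer division plus one guarantees at least one partition, each holding
        // at most files_per_partition volumes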
        int num_partitions = (int)(num_volumes/files_per_partition)+1;

        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);
        json_list_data_rp.setName("JSON-file-list--repartitioned");

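        // Executors add to this accumulator so the driver can track overall progress as a percentage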
        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

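        // Controls how PerVolumeMongoDBDocumentsMap reacts to file read errors
        // (the exact behaviour is defined in that class)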
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

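        // The per-volume work happens inside PerVolumeMongoDBDocumentsMap: each input line
        // (one JSON filename) maps to a per-volume page count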
        PerVolumeMongoDBDocumentsMap volume_mongodb_docs_map
            = new PerVolumeMongoDBDocumentsMap(_input_dir, _verbosity,
                                               per_vol_progress_accum, per_vol,
                                               strict_file_io);
        JavaRDD<Integer> volume_page_counts = json_list_data_rp.map(volume_mongodb_docs_map);
        volume_page_counts.persist(StorageLevel.MEMORY_AND_DISK());
        volume_page_counts.setName("volume-page-counts");

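        // reduce() is an action, so this is the point where the whole pipeline actually executes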
        Integer total_page_count = volume_page_counts.reduce((a, b) -> a + b);
        jsc.close();

        System.out.println("################");
        System.out.println("# Total Page Count = " + total_page_count);
        System.out.println("################");
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter, options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity", "1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties", null);

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter, options);
            System.exit(1);
        }

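        // Merge the properties file into the JVM system properties, so the
        // 'wcsa-ef-ingest.*' settings read via Integer.getInteger()/Boolean.getBoolean() pick them up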
        if (property_filename != null) {
            // try-with-resources ensures the stream is closed whether or not loading succeeds
            try (FileInputStream fis = new FileInputStream(property_filename);
                 BufferedInputStream bis = new BufferedInputStream(fis)) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForMongoDBIngest process_mongodb_ingest
            = new ProcessForMongoDBIngest(input_dir, json_list_filename, verbosity);

        process_mongodb_ingest.execMongoDBIngest();
    }
}