source: other-projects/hathitrust/solr-extracted-features/trunk/src/main/java/org/hathitrust/PrepareForIngest.java@ 30988

Last change on this file since 30988 was 30988, checked in by davidb, 8 years ago

Changed flag to 'read-only' and changed the field name the full text is saved under

  • Property svn:executable set to *
File size: 5.2 KB
package org.hathitrust;

import java.io.Serializable;
import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.SparkConf;

public class PrepareForIngest implements Serializable
{
	private static final long serialVersionUID = 1L;

	public static final int NUM_PARTITIONS = 6; // default would appear to be 2

	protected String _input_dir;
	protected String _json_list_filename;
	protected String _solr_url;
	protected String _output_dir;

	protected int _verbosity;

	public PrepareForIngest(String input_dir, String json_list_filename,
	                        String solr_url, String output_dir, int verbosity)
	{
		_input_dir = input_dir;
		_json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

		_solr_url = solr_url;
		_output_dir = output_dir;
		_verbosity = verbosity;
	}

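	// Configure and run the Spark job: read the list of volume JSON files and
	// process each volume with PagedJSON.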
	public void exec()
	{
		String spark_app_name = "HathiTrust Extract Features: Prepare for Solr Ingest";
		spark_app_name += " [" + _json_list_filename + "]";

		SparkConf conf = new SparkConf().setAppName(spark_app_name);
		JavaSparkContext jsc = new JavaSparkContext(conf);

		if (_verbosity >= 2) {
			System.out.println("Default Minimum Partitions: " + jsc.defaultMinPartitions());
			System.out.println("Default Parallelism: " + jsc.defaultParallelism());
		}

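		// Each line of the file list is expected to name one volume's JSON
		// file. Read the list as an RDD so the volumes can be processed in
		// parallel, and cache it since it is traversed twice below (count(),
		// then foreach()).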
		JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename, NUM_PARTITIONS).cache();

		long num_volumes = json_list_data.count();
		double per_vol = 100.0 / (double) num_volumes;

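		// Accumulator for reporting overall progress as a percentage.
		// PagedJSON (source not shown here) presumably adds per_vol, one
		// volume's share of 100%, as each volume completes; it receives both
		// the accumulator and per_vol below.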
		DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");

		PagedJSON paged_json = new PagedJSON(_input_dir, _solr_url, _output_dir, _verbosity, progress_accum, per_vol);
		//JavaRDD<String> json_ids = json_list_data.flatMap(paged_json).cache();

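		// Trigger the job: apply PagedJSON to every volume for its side
		// effects (Solr ingest and/or file output). The flatMap formulation
		// commented out above also returned the generated page ids; foreach
		// returns nothing, which is why num_ids falls back to num_volumes
		// below.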
		json_list_data.foreach(paged_json);

/*
		System.out.println("");
		System.out.println("############");
		System.out.println("# Progress Accumulator: " + progress_accum.value());
		System.out.println("############");
		System.out.println("");
*/

		//long num_ids = json_ids.count();
		long num_ids = num_volumes;

		System.out.println("");
		System.out.println("############");
		System.out.println("# Number of page ids: " + num_ids);
		System.out.println("############");
		System.out.println("");

		/*
		if (_output_dir != null) {
			String rdd_save_file = "rdd-solr-json-page-files";
			json_ids.saveAsTextFile(rdd_save_file);
			System.out.println("############");
			System.out.println("# Saved RDD of Solr JSON page files, top-level, as:");
			System.out.println("# " + rdd_save_file);
			System.out.println("############");
			System.out.println("");
		}
		*/

		jsc.close();
	}

	public static void print_usage(HelpFormatter formatter, Options options)
	{
		formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
	}
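
	// A hypothetical invocation, for illustration only (the paths and Solr
	// URL are made up; see print_usage() above for the real argument order):
	//
	//   RUN.bash --verbosity 1 --solr-url http://solr-host:8983/solr/update \
	//       ht-json-files/ json-filelist.txt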
	public static void main(String[] args)
	{
		Options options = new Options();

		Option verbosity_opt = new Option("v", "verbosity", true,
				"Set to control the level of debugging output [0=none, 1=some, 2=lots]");
		verbosity_opt.setRequired(false);
		options.addOption(verbosity_opt);

		Option output_dir_opt = new Option("o", "output-dir", true,
				"If specified, save BZipped Solr JSON files to this directory");
		output_dir_opt.setRequired(false);
		options.addOption(output_dir_opt);

		Option solr_url_opt = new Option("u", "solr-url", true,
				"If specified, the URL to post the Solr JSON data to");
		solr_url_opt.setRequired(false);
		options.addOption(solr_url_opt);

		Option read_only_opt = new Option("r", "read-only", false,
				"Used to initiate a run where the files are all read in, but nothing is ingested/saved");
		read_only_opt.setRequired(false);
		options.addOption(read_only_opt);

		// Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
		CommandLineParser parser = new GnuParser();
		//CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

		HelpFormatter formatter = new HelpFormatter();
		CommandLine cmd = null;

		try {
			cmd = parser.parse(options, args);
		}
		catch (ParseException e) {
			System.err.println(e.getMessage());
			print_usage(formatter, options);
			System.exit(1);
		}

		String verbosity_str = cmd.getOptionValue("verbosity", "0");
		int verbosity = Integer.parseInt(verbosity_str);

		String output_dir = cmd.getOptionValue("output-dir", null);
		String solr_url = cmd.getOptionValue("solr-url", null);
		boolean read_only = cmd.hasOption("read-only");

		String[] filtered_args = cmd.getArgs();

		if (filtered_args.length != 2) {
			print_usage(formatter, options);
			System.exit(1);
		}

		if (!read_only && (output_dir == null) && (solr_url == null)) {
			System.err.println("Need to specify either --solr-url or --output-dir, otherwise generated files are not ingested/saved");
			print_usage(formatter, options);
			System.exit(1);
		}

		String input_dir = filtered_args[0];
		String json_list_filename = filtered_args[1];

		PrepareForIngest prep_for_ingest
			= new PrepareForIngest(input_dir, json_list_filename, solr_url, output_dir, verbosity);
		prep_for_ingest.exec();
	}
}