source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForSolrIngestJSONFilelist.java@32106

Last change on this file since 32106 was 32106, checked in by davidb, 6 years ago

Rekindle ability to process a json-filelist.txt using Spark

  • Property svn:executable set to *
File size: 8.9 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;

import org.apache.commons.cli.*;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.*;
import org.apache.spark.util.DoubleAccumulator;
import org.apache.spark.util.LongAccumulator;
import org.hathitrust.extractedfeatures.PerPageJSONFlatmap;
import org.json.JSONObject;
import org.apache.spark.SparkConf;
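
/**
 * Spark driver that takes a text file listing Extracted Features JSON files
 * (one path per line) and processes each listed volume for Solr ingest.
 * Depending on the options given, the generated Solr JSON is posted to one or
 * more Solr endpoints and/or saved as BZipped files under an output directory.
 *
 * Runtime behaviour is further controlled through wcsa-ef-ingest.* Java system
 * properties (whitelist, language map, SolrCloud nodes, partitioning, etc.),
 * typically supplied via the --properties option.
 */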
public class ProcessForSolrIngestJSONFilelist implements Serializable
{
    private static final long serialVersionUID = 1L;

    protected static final int DEFAULT_NUM_CORES = 10;
    protected static final int MINIMUM_NUM_PARTITIONS = 10*DEFAULT_NUM_CORES;

    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    protected String _json_list_filename;

    protected String _solr_base_url;
    protected String _solr_collection;

    protected String _whitelist_filename;
    protected String _langmap_directory;

    //protected String _solr_url;
    protected String _output_dir;

    protected int _verbosity;

    public ProcessForSolrIngestJSONFilelist(String input_dir, String json_list_filename,
                                            String solr_collection, String solr_base_url,
                                            String output_dir, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = json_list_filename;

        _solr_collection = solr_collection;
        _solr_base_url = solr_base_url;

        boolean use_whitelist = Boolean.getBoolean("wcsa-ef-ingest.use-whitelist");
        _whitelist_filename = (use_whitelist) ? System.getProperty("wcsa-ef-ingest.whitelist-filename") : null;

        boolean use_langmap = Boolean.getBoolean("wcsa-ef-ingest.use-langmap");
        _langmap_directory = (use_langmap) ? System.getProperty("wcsa-ef-ingest.langmap-directory") : null;

        _output_dir = output_dir;
        _verbosity = verbosity;
    }
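
    // Builds the application name shown in the Spark UI, embedding the execution
    // mode, target Solr collection, and (when set) the Solr base URL and output
    // directory so a run can be identified at a glance.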
    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extract Features: Process for Solr Ingest from JSON-filelist";
        spark_app_name += " [" + _solr_collection + "]";

        if (_solr_base_url != null) {
            spark_app_name += " solr_base_url=" + _solr_base_url;
        }

        if (_output_dir != null) {
            spark_app_name += " output_dir=" + _output_dir;
        }

        return spark_app_name;
    }
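
    // Expands the Solr base URL into the list of update endpoints to post to.
    // If the wcsa-ef-ingest.solr-cloud-nodes property lists host:port pairs,
    // the host:port portion of the base URL is rewritten once per node;
    // otherwise the single <base-url>/<collection>/update endpoint is used.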
    public ArrayList<String> extrapolateSolrEndpoints(String solr_collection)
    {
        ArrayList<String> solr_endpoints = new ArrayList<String>();

        if (_solr_base_url != null) {
            String solr_url = _solr_base_url + "/" + solr_collection + "/update";

            String solr_cloud_nodes = System.getProperty("wcsa-ef-ingest.solr-cloud-nodes",null);
            if (solr_cloud_nodes != null) {
                String [] cloud_nodes = solr_cloud_nodes.split(",");
                for (String cn : cloud_nodes) {
                    String solr_endpoint = solr_url.replaceFirst("//.*?:\\d+/", "//"+cn+"/");
                    solr_endpoints.add(solr_endpoint);
                }
            }
            else {
                solr_endpoints.add(solr_url);
            }
        }

        return solr_endpoints;
    }
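
    // Runs the Spark job: reads the JSON file list into an RDD, repartitions it,
    // and maps a PerVolumeJSONList function object over it so each volume is
    // processed (ingested and/or saved) on the executors. The per-volume page
    // counts that come back are used only to report how many volume ids were
    // processed.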
    public void execPerVolumeJSONFilelist()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf spark_conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(spark_conf);

        //String filename_root = _json_list_filename.replaceAll(".*/","").replaceAll("\\..*$","");

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.files-per-partition", DEFAULT_FILES_PER_PARTITION);

        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;
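
        // Aim for roughly files_per_partition volumes per partition, but never
        // fewer than MINIMUM_NUM_PARTITIONS partitions, then repartition the
        // file list so the per-volume work is spread across the cluster.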
        int num_partitions = (int)(num_volumes/files_per_partition)+1;
        if (num_partitions < MINIMUM_NUM_PARTITIONS) {
            num_partitions = MINIMUM_NUM_PARTITIONS;
        }
        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);
        json_list_data_rp.setName("JSON-file-list--repartitioned");

        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        ArrayList<String> solr_endpoints = extrapolateSolrEndpoints(_solr_collection);
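
        // PerVolumeJSONList is the per-volume worker function that is mapped over
        // the repartitioned file list on the executors; it is handed the input
        // directory, whitelist/language-map settings, Solr endpoints and output
        // directory, and yields an Integer page count per volume processed.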
        PerVolumeJSONList per_vol_json = new PerVolumeJSONList(_input_dir,_whitelist_filename, _langmap_directory,
                                                               solr_endpoints,_output_dir,_verbosity,
                                                               icu_tokenize,strict_file_io);

        JavaRDD<Integer> per_volume_page_count = json_list_data_rp.map(per_vol_json);
        per_volume_page_count.setName("volume-page-counts");

        //Integer total_page_count = volume_page_counts.reduce((a, b) -> a + b);
        long num_vol_ids = per_volume_page_count.count();

        System.out.println("");
        System.out.println("############");
        System.out.println("# Number of volume ids: " + num_vol_ids);
        System.out.println("############");
        System.out.println("");

        jsc.close();
    }
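
    // Prints the usage banner, naming the RUN.bash wrapper script used to launch the job.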
    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt solr-collection", options);
    }
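
    // Command-line entry point: parses the options with Commons CLI, optionally
    // loads a Java properties file into the System properties, validates that the
    // run has somewhere to send its output (unless --read-only), then builds the
    // processor and launches the per-volume Spark job.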
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        Option output_dir_opt = new Option("o", "output-dir", true,
                "If specified, save BZipped Solr JSON files to this directory");
        output_dir_opt.setRequired(false);
        options.addOption(output_dir_opt);

        Option solr_base_url_opt = new Option("u", "solr-base-url", true,
                "If specified, the base URL to post the Solr JSON data to");
        solr_base_url_opt.setRequired(false);
        options.addOption(solr_base_url_opt);

        Option read_only_opt = new Option("r", "read-only", false,
                "Used to initiate a run where the files are all read in, but nothing is ingested/saved");
        read_only_opt.setRequired(false);
        options.addOption(read_only_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String output_dir = cmd.getOptionValue("output-dir",null);
        String solr_base_url = cmd.getOptionValue("solr-base-url",null);
        boolean read_only = cmd.hasOption("read-only");

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 3) {
            print_usage(formatter,options);
            System.exit(1);
        }
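
        // If a properties file was supplied, merge it into the System properties so
        // that the wcsa-ef-ingest.* settings consulted elsewhere take effect.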
        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        if (!read_only && ((output_dir == null) && (solr_base_url==null))) {
            System.err.println("Need to specify either --solr-base-url or --output-dir otherwise generated files are not ingested/saved");
            print_usage(formatter,options);
            System.exit(1);
        }
        if (read_only) {
            // For this case, need to ensure solr-url and output-dir are null
            output_dir = null;
            solr_base_url = null;
        }

        String input_dir = filtered_args[0];
        String json_file_list = filtered_args[1];
        String solr_collection = filtered_args[2];

        ProcessForSolrIngestJSONFilelist prep_for_ingest
            = new ProcessForSolrIngestJSONFilelist(input_dir,json_file_list,
                                                   solr_collection,solr_base_url,
                                                   output_dir,verbosity);

        prep_for_ingest.execPerVolumeJSONFilelist();

    }
}