source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForWhitelist.java@ 31308

Last change on this file since 31308 was 31308, checked in by davidb, 7 years ago

Minor tidy-up

  • Property svn:executable set to *
File size: 7.9 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;

import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.util.DoubleAccumulator;
import scala.Tuple2;

import org.apache.spark.SparkConf;
public class ProcessForWhitelist implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Follows the guidance on the number of partitions to use given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    protected static final int DEFAULT_NUM_CORES = 6;
    protected static final int DEFAULT_NUM_PARTITIONS = 3 * DEFAULT_NUM_CORES;
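    // The 3-partitions-per-core default sits within the Spark guide's
    // suggestion of roughly 2-4 partitions per CPU.  Only the partition
    // count is overridable at run time, via the wcsa-ef-ingest.num-partitions
    // system property read in execWordCount(). An illustrative invocation
    // (the exact wiring depends on how RUN.bash launches the driver):
    //
    //   spark-submit --driver-java-options "-Dwcsa-ef-ingest.num-partitions=24" ...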

    protected String _input_dir;
    protected String _json_list_filename;

    protected int _verbosity;

    public ProcessForWhitelist(String input_dir, String json_list_filename, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extracted Features: Process for Whitelist";
        spark_app_name += " [" + _json_list_filename + "]";

        return spark_app_name;
    }

    public void execWordCount()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        String filename_root = _json_list_filename.replaceAll(".*/", "").replaceAll("\\..*$", "");
        String output_directory = "whitelist-" + filename_root + "-out";
        if (ClusterFileIO.exists(output_directory))
        {
            System.err.println("Error: " + output_directory + " already exists. Spark is unable to write its output data");
            jsc.close();
            System.exit(1);
        }
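        // Note: this up-front check matters because saveAsTextFile() (called
        // at the end of this method) goes through Hadoop's output committer,
        // which fails rather than overwrite an existing output directory;
        // the results land there as part-NNNNN files.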
        int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename, num_partitions).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0 / (double)num_volumes;

        //JavaRDD<String> json_list_data_rp = json_list_data.repartition((int)(num_volumes/100));

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");
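        // per_vol is the percentage one volume contributes (100/num_volumes),
        // so the accumulator, presumably incremented by per_vol inside
        // PerVolumeWordStreamFlatmap as each volume completes, reads as
        // overall progress (0-100) in the Spark web UI.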

        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

        //System.err.println("***** icu_tokenize = " + icu_tokenize);
        //System.err.println("***** num_part = " + num_partitions);

        PerVolumeWordStreamFlatmap paged_solr_wordfreq_flatmap
            = new PerVolumeWordStreamFlatmap(_input_dir, _verbosity,
                                             per_vol_progress_accum, per_vol,
                                             icu_tokenize,
                                             strict_file_io);
        JavaRDD<String> words = json_list_data.flatMap(paged_solr_wordfreq_flatmap);
        words.setName("tokenized-words");
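        // flatMap() turns the one-filename-per-line volume list into a single
        // RDD of word tokens: PerVolumeWordStreamFlatmap (elsewhere in this
        // package) reads each volume's Extracted Features JSON under
        // _input_dir and emits its token stream, presumably switching
        // tokenizers on icu_tokenize.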

        JavaPairRDD<String, Long> pairs = words.mapToPair(new PairFunction<String, String, Long>() {
            public Tuple2<String, Long> call(String s) { return new Tuple2<String, Long>(s, 1L); }
        });
        pairs.setName("single-word-count");

        JavaPairRDD<String, Long> counts = pairs.reduceByKey(new Function2<Long, Long, Long>() {
            public Long call(Long a, Long b) { return a + b; }
        });
        counts.setName("word-frequency");
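        // Classic Spark word count: map each word w to the pair (w, 1), then
        // sum the counts per key.  For example, the token stream
        // ["the", "cat", "the"] maps to ("the",1), ("cat",1), ("the",1)
        // and reduces to ("the",2), ("cat",1).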

        /*
        JavaPairRDD<Long, String> swapped_pair = counts.mapToPair(new PairFunction<Tuple2<String, Long>, Long, String>() {
            @Override
            public Tuple2<Long, String> call(Tuple2<String, Long> item) throws Exception {
                return item.swap();
            }
        });
        swapped_pair.setName("frequency-word-swap");

        JavaPairRDD<Long, String> sorted_swapped_pair = swapped_pair.sortByKey(false, num_partitions);
        sorted_swapped_pair.setName("descending-sorted-frequency-word");

        JavaPairRDD<String, Long> sorted_swaped_back_pair = sorted_swapped_pair.mapToPair(new PairFunction<Tuple2<Long, String>, String, Long>() {
            @Override
            public Tuple2<String, Long> call(Tuple2<Long, String> item) throws Exception {
                return item.swap();
            }
        });
        sorted_swaped_back_pair.setName("descending-word-frequency");
        */

        JavaPairRDD<Long, String> counts_swapped_pair
            = counts.mapToPair(item -> item.swap());
        counts_swapped_pair.setName("frequency-word-swap");

        JavaPairRDD<Long, String> counts_swapped_pair_sorted
            = counts_swapped_pair.sortByKey(false, num_partitions);
        counts_swapped_pair_sorted.setName("descending-sorted-frequency-word");

        JavaPairRDD<String, Long> count_sorted
            = counts_swapped_pair_sorted.mapToPair(item -> item.swap());
        count_sorted.setName("descending-word-frequency");
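        // JavaPairRDD has sortByKey() but no sortByValue(), hence the
        // swap -> sortByKey(false) -> swap-back dance to order the words by
        // descending frequency; these lambdas do the same job as the
        // anonymous-class version kept above for reference.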

        //sorted_swaped_back_pair.saveAsTextFile(output_directory);
        count_sorted.saveAsTextFile(output_directory);

        //System.out.println("");
        //System.out.println("############");
        //System.out.println("# Number of page ids: " + num_page_ids);
        //System.out.println("############");

        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

    public static void main(String[] args)
    {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above
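        // (GnuParser is deprecated as of Commons CLI v1.3 in favour of
        // DefaultParser, but it is used deliberately here to stay compatible
        // with the v1.2 JAR on the cluster classpath.)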

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter, options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity", "1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties", null);

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter, options);
            System.exit(1);
        }

        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
                bis.close(); // close the stream once the properties have been read in
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }
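        // Loading into System.getProperties() merges the file's key=value
        // pairs into the JVM's system properties, which is how the
        // wcsa-ef-ingest.* settings read by Integer.getInteger() and
        // Boolean.getBoolean() in execWordCount() receive their values.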

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForWhitelist prep_for_whitelist
            = new ProcessForWhitelist(input_dir, json_list_filename, verbosity);

        //String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        prep_for_whitelist.execWordCount();
    }
}