package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.util.DoubleAccumulator;
import scala.Tuple2;

import org.apache.spark.SparkConf;

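/**
 * Spark driver that reads a list of HathiTrust Extracted Features JSON volume files,
 * tokenizes their word content, and writes a descending word-frequency list to a
 * "whitelist-*-out" directory, as input for building a whitelist.
 */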
public class ProcessForWhitelist implements Serializable
{
    private static final long serialVersionUID = 1L;

    // The default number of partitions follows the guidance given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    protected static final int DEFAULT_NUM_CORES = 6;
    protected static final int DEFAULT_NUM_PARTITIONS = 3*DEFAULT_NUM_CORES;

    protected String _input_dir;
    protected String _json_list_filename;

    protected int _verbosity;

    public ProcessForWhitelist(String input_dir, String json_list_filename, int verbosity)
    {
        _input_dir = input_dir;
        // Fall back on the input directory if no explicit JSON file-list was given
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extracted Features: Process for Whitelist";
        spark_app_name += " [" + _json_list_filename + "]";

        return spark_app_name;
    }

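    /**
     * Runs the word-count job: reads the JSON file list into an RDD, streams the
     * tokenized words out of each volume, counts the occurrences of each word,
     * sorts the result by descending frequency, and saves it as text files.
     */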
    public void execWordCount()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

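        // Derive the output directory name from the file-list filename (path and
        // extension stripped); refuse to run if it already exists, since Spark
        // cannot overwrite an existing output directory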
        String filename_root = _json_list_filename.replaceAll(".*/","").replaceAll("\\..*$","");
        String output_directory = "whitelist-" + filename_root + "-out";
        if (ClusterFileIO.exists(output_directory))
        {
            System.err.println("Error: " + output_directory + " already exists. Spark unable to write output data");
            jsc.close();
            System.exit(1);
        }

        int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,num_partitions).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        //JavaRDD<String> json_list_data_rp = json_list_data.repartition((int)(num_volumes/100));

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");
        boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");

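        // Flat-map each volume listed in the JSON file list to its stream of tokenized
        // words; the accumulator records per-volume progress as a percentage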
        PerVolumeWordStreamFlatmap paged_solr_wordfreq_flatmap
            = new PerVolumeWordStreamFlatmap(_input_dir,_verbosity,
                                             per_vol_progress_accum,per_vol,
                                             icu_tokenize,
                                             strict_file_io);
        JavaRDD<String> words = json_list_data.flatMap(paged_solr_wordfreq_flatmap);
        words.setName("tokenized-words");

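        // Classic map/reduce word count: map each word to a (word, 1) pair,
        // then sum the counts for each word with reduceByKey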
        JavaPairRDD<String, Long> pairs = words.mapToPair(new PairFunction<String, String, Long>() {
            public Tuple2<String, Long> call(String s) { return new Tuple2<String, Long>(s, 1L); }
        });
        pairs.setName("single-word-count");

        JavaPairRDD<String, Long> counts = pairs.reduceByKey(new Function2<Long, Long, Long>() {
            public Long call(Long a, Long b) { return a + b; }
        });
        counts.setName("word-frequency");

        /*
        JavaPairRDD<Long, String> swapped_pair = counts.mapToPair(new PairFunction<Tuple2<String, Long>, Long, String>() {
            @Override
            public Tuple2<Long, String> call(Tuple2<String, Long> item) throws Exception {
                return item.swap();
            }
        });
        swapped_pair.setName("frequency-word-swap");

        JavaPairRDD<Long, String> sorted_swapped_pair = swapped_pair.sortByKey(false,num_partitions);
        sorted_swapped_pair.setName("descending-sorted-frequency-word");

        JavaPairRDD<String, Long> sorted_swaped_back_pair = sorted_swapped_pair.mapToPair(new PairFunction<Tuple2<Long, String>, String, Long>() {
            @Override
            public Tuple2<String, Long> call(Tuple2<Long, String> item) throws Exception {
                return item.swap();
            }
        });
        sorted_swaped_back_pair.setName("descending-word-frequency");
        */

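        // Lambda form of the swap/sort/swap commented out above: flip to (count, word)
        // so the counts can be sorted in descending order, then flip back to (word, count)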
        JavaPairRDD<Long, String> counts_swapped_pair
            = counts.mapToPair(item -> item.swap());
        counts_swapped_pair.setName("frequency-word-swap");

        JavaPairRDD<Long, String> counts_swapped_pair_sorted
            = counts_swapped_pair.sortByKey(false, num_partitions);
        counts_swapped_pair_sorted.setName("descending-sorted-frequency-word");

        JavaPairRDD<String, Long> count_sorted
            = counts_swapped_pair_sorted.mapToPair(item -> item.swap());
        count_sorted.setName("descending-word-frequency");

        //sorted_swaped_back_pair.saveAsTextFile(output_directory);
        count_sorted.saveAsTextFile(output_directory);

        //System.out.println("");
        //System.out.println("############");
        //System.out.println("# Number of page ids: " + num_page_ids);
        //System.out.println("############");

        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

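    /**
     * Command-line entry point: parses the options, optionally loads a Java
     * properties file, and then runs the word-count job over the given input
     * directory and JSON file list.
     */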
    public static void main(String[] args) {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

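        // Load any additional settings (e.g. the wcsa-ef-ingest.* values read
        // elsewhere through System properties) from the given properties file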
        if (property_filename != null) {
            try {
                FileInputStream fis = new FileInputStream(property_filename);
                BufferedInputStream bis = new BufferedInputStream(fis);

                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForWhitelist prep_for_whitelist
            = new ProcessForWhitelist(input_dir,json_list_filename,verbosity);

        //String process_ef_json_mode = System.getProperty("wcsa-ef-ingest.process-ef-json-mode","per-page");
        prep_for_whitelist.execWordCount();
    }
}