source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForPOSCount.java@ 31259

Last change on this file since 31259 was 31259, checked in by davidb, 7 years ago

Lambda sort had wrong boolean arg to sort descending. Now fixed

  • Property svn:executable set to *
File size: 6.7 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.util.DoubleAccumulator;
import scala.Tuple2;

import org.apache.spark.SparkConf;
public class ProcessForPOSCount implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Guidance on the number of partitions to use is given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/
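    //
    // Those docs suggest 2-4 partitions per CPU core as a rule of thumb;
    // DEFAULT_NUM_PARTITIONS below takes the middle value of 3 per core.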

    protected static final int DEFAULT_NUM_CORES = 6;
    protected static final int DEFAULT_NUM_PARTITIONS = 3*DEFAULT_NUM_CORES;

    protected String _input_dir;
    protected String _json_list_filename;

    protected int _verbosity;

    public ProcessForPOSCount(String input_dir, String json_list_filename, int verbosity)
    {
        _input_dir = input_dir;
        // Fall back to the input directory itself if no separate JSON file-list was given
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extracted Features: Process for POS";
        spark_app_name += " [" + _json_list_filename + "]";

        return spark_app_name;
    }

    public void execPOSCount()
    {
        String spark_app_name = generateSparkAppName("Per Page");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Strip any leading path and file extension, e.g. "/foo/bar/file-list.txt" -> "file-list"
        String filename_root = _json_list_filename.replaceAll(".*/","").replaceAll("\\..*$","");
        String output_directory = "pos-" + filename_root + "-out";
        if (ClusterFileIO.exists(output_directory))
        {
            System.err.println("Error: " + output_directory + " already exists. Spark is unable to write output data");
            jsc.close();
            System.exit(1);
        }

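        // Run-time configuration is read from JVM system properties, set with -D
        // on the command line, e.g. -Dwcsa-ef-ingest.num-partitions=24 (the value
        // is illustrative); Integer.getInteger and Boolean.getBoolean read system
        // properties, not environment variables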
        int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename,num_partitions).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");
        //boolean icu_tokenize = Boolean.getBoolean("wcsa-ef-ingest.icu-tokenize");

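        // Flatmap each volume's JSON file (one filename per line of the file-list)
        // into a stream of POS tags, updating the progress accumulator per volume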
        PerVolumePOSStreamFlatmap paged_solr_wordfreq_flatmap
            = new PerVolumePOSStreamFlatmap(_input_dir,_verbosity,
                                            per_vol_progress_accum,per_vol,
                                            strict_file_io);
        JavaRDD<String> pos_list = json_list_data.flatMap(paged_solr_wordfreq_flatmap);
        pos_list.setName("pos-stream");

        /*
        JavaPairRDD<String, Integer> pos_pairs = pos_list.mapToPair(new PairFunction<String, String, Integer>() {
            public Tuple2<String, Integer> call(String s) { return new Tuple2<String, Integer>(s, 1); }
        });
        pos_pairs.setName("single-pos-count");

        JavaPairRDD<String, Integer> pos_counts = pos_pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
            public Integer call(Integer a, Integer b) { return a + b; }
        });
        pos_counts.setName("pos-frequency");
        */

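        // Java 8 lambda equivalents of the commented-out anonymous classes above:
        // classic word-count, pairing each POS tag with a 1 and summing per tag,
        // e.g. the stream "NN","DT","NN" reduces to ("NN",2) and ("DT",1)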
        JavaPairRDD<String, Integer> pos_pairs = pos_list.mapToPair(s -> new Tuple2<String, Integer>(s, 1));
        pos_pairs.setName("single-pos-count");

        JavaPairRDD<String, Integer> pos_counts = pos_pairs.reduceByKey((a, b) -> a + b);
        pos_counts.setName("pos-frequency");

        // To sort by frequency: swap (tag,count) to (count,tag), sort descending
        // on the count key (sortByKey(false) means descending), then swap back
        JavaPairRDD<Integer, String> pos_counts_swapped_pair
            = pos_counts.mapToPair(item -> item.swap());
        pos_counts_swapped_pair.setName("frequency-pos-swap");

        JavaPairRDD<Integer, String> pos_counts_swapped_pair_sorted
            = pos_counts_swapped_pair.sortByKey(false, num_partitions);
        pos_counts_swapped_pair_sorted.setName("descending-sorted-frequency-pos");

        JavaPairRDD<String, Integer> pos_count_sorted
            = pos_counts_swapped_pair_sorted.mapToPair(item -> item.swap());
        pos_count_sorted.setName("descending-pos-frequency");
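        // Each line of the text output is a Tuple2 rendered by its toString(),
        // e.g. "(NN,1517036)" (the tag and count shown are illustrative only)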
        pos_count_sorted.saveAsTextFile(output_directory);
        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }
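
    // Example invocation (the file names and option values are illustrative):
    //   RUN.bash -v 1 -p ef-ingest.properties json-files-dir json-filelist.txt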

    public static void main(String[] args)
    {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        // Need to work with CLI v1.2 as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter,options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity","1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties",null);

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter,options);
            System.exit(1);
        }

        if (property_filename != null) {
            // Merge the file's entries into the JVM system properties, making them
            // visible to the getInteger/getBoolean look-ups in execPOSCount();
            // try-with-resources ensures the streams are closed
            try (FileInputStream fis = new FileInputStream(property_filename);
                 BufferedInputStream bis = new BufferedInputStream(fis)) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }
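        // The properties file read above might contain entries such as
        // (the property keys appear in execPOSCount(); values are illustrative):
        //   wcsa-ef-ingest.num-partitions = 24
        //   wcsa-ef-ingest.strict-file-io = true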

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForPOSCount prep_for_pos
            = new ProcessForPOSCount(input_dir,json_list_filename,verbosity);

        prep_for_pos.execPOSCount();
    }
}