source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/ProcessForLangCount.java @ 31270

Last change on this file since 31270 was 31270, checked in by davidb, 7 years ago

Changed over to repartition approach

  • Property svn:executable set to *
File size: 6.5 KB
package org.hathitrust.extractedfeatures;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import org.apache.commons.cli.*;

import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.util.DoubleAccumulator;
import scala.Tuple2;

import org.apache.spark.SparkConf;

public class ProcessForLangCount implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Details on the number of partitions to use are given in the
    // "Parallelized collections" section of:
    //   https://spark.apache.org/docs/2.0.1/programming-guide.html
    //
    // For a more detailed discussion see:
    //   http://blog.cloudera.com/blog/2015/03/how-to-tune-your-apache-spark-jobs-part-2/

    //protected static final int DEFAULT_NUM_CORES = 6;
    //protected static final int DEFAULT_NUM_PARTITIONS = 3*DEFAULT_NUM_CORES;
    protected static final int DEFAULT_FILES_PER_PARTITION = 3000;

    protected String _input_dir;
    protected String _json_list_filename;

    protected int _verbosity;

    public ProcessForLangCount(String input_dir, String json_list_filename, int verbosity)
    {
        _input_dir = input_dir;
        _json_list_filename = (json_list_filename != null) ? json_list_filename : input_dir;

        _verbosity = verbosity;
    }

    protected String generateSparkAppName(String exec_mode)
    {
        String spark_app_name = "[" + exec_mode + "] Extracted Features: Process for Language Labels";
        spark_app_name += " [" + _json_list_filename + "]";

        return spark_app_name;
    }

    public void execLangCount()
    {
        String spark_app_name = generateSparkAppName("Per Volume");

        SparkConf conf = new SparkConf().setAppName(spark_app_name);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        String filename_root = _json_list_filename.replaceAll(".*/","").replaceAll("\\..*$","");
        String output_directory = "lang-" + filename_root + "-out";
        if (ClusterFileIO.exists(output_directory))
        {
            System.err.println("Error: " + output_directory + " already exists. Spark is unable to write output data");
            jsc.close();
            System.exit(1);
        }

        //int num_partitions = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_NUM_PARTITIONS);
        int files_per_partition = Integer.getInteger("wcsa-ef-ingest.num-partitions", DEFAULT_FILES_PER_PARTITION);
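        // NOTE: the property key "wcsa-ef-ingest.num-partitions" is retained from
        // the earlier fixed-partitions approach (commented out above); its value
        // is now interpreted as the number of files per partition.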
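        // Read the list of JSON file paths, one path per line. The second argument
        // to textFile() is Spark's minPartitions hint; the RDD is explicitly
        // repartitioned below once the actual number of volumes is known.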
        JavaRDD<String> json_list_data = jsc.textFile(_json_list_filename, files_per_partition).cache();
        json_list_data.setName("JSON-file-list");

        long num_volumes = json_list_data.count();
        double per_vol = 100.0/(double)num_volumes;

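        // One partition for roughly every files_per_partition volumes; the +1
        // absorbs any remainder and guarantees at least one partition.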
        int num_partitions = (int)(num_volumes/files_per_partition)+1;

        JavaRDD<String> json_list_data_rp = json_list_data.repartition(num_partitions);

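        // Driver-side accumulator that the workers update to report progress,
        // in percent, over the full set of volumes.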
        DoubleAccumulator per_vol_progress_accum = jsc.sc().doubleAccumulator("Per Volume Progress Percent");

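        // Boolean.getBoolean() reads a JVM system property (not an environment
        // variable); an absent or malformed value defaults to false.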
        boolean strict_file_io = Boolean.getBoolean("wcsa-ef-ingest.strict-file-io");

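        // PerVolumeLangStreamFlatmap (defined elsewhere in this package) opens
        // each volume's Extracted Features JSON file under _input_dir and emits
        // its language labels, updating the progress accumulator as it goes.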
        PerVolumeLangStreamFlatmap paged_solr_langfreq_flatmap
            = new PerVolumeLangStreamFlatmap(_input_dir, _verbosity,
                                             per_vol_progress_accum, per_vol,
                                             strict_file_io);
        JavaRDD<String> lang_list = json_list_data_rp.flatMap(paged_solr_langfreq_flatmap);
        lang_list.setName("lang-stream");

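        // Classic word-count pattern: map each language label to a (label, 1)
        // pair, then sum the counts per label.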
        JavaPairRDD<String, Long> lang_pairs = lang_list.mapToPair(s -> new Tuple2<String, Long>(s, 1L));
        lang_pairs.setName("single-lang-count");

        JavaPairRDD<String, Long> lang_counts = lang_pairs.reduceByKey((a, b) -> a + b);
        lang_counts.setName("lang-frequency");

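        // JavaPairRDD has no sortByValue(), so sort by count using the usual
        // swap -> sortByKey(descending) -> swap idiom.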
        JavaPairRDD<Long, String> lang_counts_swapped_pair
            = lang_counts.mapToPair(item -> item.swap());
        lang_counts_swapped_pair.setName("frequency-lang-swap");

        JavaPairRDD<Long, String> lang_counts_swapped_pair_sorted
            = lang_counts_swapped_pair.sortByKey(false, num_partitions);
        lang_counts_swapped_pair_sorted.setName("descending-sorted-frequency-lang");

        JavaPairRDD<String, Long> lang_count_sorted
            = lang_counts_swapped_pair_sorted.mapToPair(item -> item.swap());
        lang_count_sorted.setName("descending-lang-frequency");

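        // Each (language, count) tuple becomes one line of text in the part-*
        // files written under the output directory.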
        lang_count_sorted.saveAsTextFile(output_directory);
        jsc.close();
    }

    public static void print_usage(HelpFormatter formatter, Options options)
    {
        formatter.printHelp("RUN.bash [options] input-dir json-filelist.txt", options);
    }

    public static void main(String[] args)
    {
        Options options = new Options();

        Option verbosity_opt = new Option("v", "verbosity", true,
                "Set to control the level of debugging output [0=none, 1=some, 2=lots]");
        verbosity_opt.setRequired(false);
        options.addOption(verbosity_opt);

        Option properties_opt = new Option("p", "properties", true,
                "Read in the specified Java properties file");
        properties_opt.setRequired(false);
        options.addOption(properties_opt);

        // Need to work with CLI v1.2, as this is the JAR that is bundled with Hadoop/Spark
        CommandLineParser parser = new GnuParser();
        //CommandLineParser parser = new DefaultParser(); // if working with CLI v1.3 and above

        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;

        try {
            cmd = parser.parse(options, args);
        }
        catch (ParseException e) {
            System.err.println(e.getMessage());
            print_usage(formatter, options);
            System.exit(1);
        }

        String verbosity_str = cmd.getOptionValue("verbosity", "1");
        int verbosity = Integer.parseInt(verbosity_str);

        String property_filename = cmd.getOptionValue("properties", null);

        String[] filtered_args = cmd.getArgs();

        if (filtered_args.length != 2) {
            print_usage(formatter, options);
            System.exit(1);
        }

        if (property_filename != null) {
            // try-with-resources ensures the property file stream is closed
            try (FileInputStream fis = new FileInputStream(property_filename);
                 BufferedInputStream bis = new BufferedInputStream(fis)) {
                System.getProperties().load(bis);
            }
            catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("File not found: '" + property_filename + "'. Skipping property file read");
            }
            catch (IOException e) {
                System.err.println("IO Exception for: '" + property_filename + "'. Malformed syntax? Skipping property file read");
            }
        }

        String input_dir = filtered_args[0];
        String json_list_filename = filtered_args[1];

        ProcessForLangCount prep_for_lang
            = new ProcessForLangCount(input_dir, json_list_filename, verbosity);

        prep_for_lang.execLangCount();
    }
}
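
Programmatic use mirrors what main() sets up; a minimal sketch, with hypothetical
input paths and property values (the wcsa-ef-ingest.* properties are normally
passed to the JVM as -D flags):

    System.setProperty("wcsa-ef-ingest.num-partitions", "2000");
    System.setProperty("wcsa-ef-ingest.strict-file-io", "false");

    ProcessForLangCount lang_count
        = new ProcessForLangCount("/hdfs/ef-json", "volume-list.txt", 1);
    lang_count.execLangCount();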