source: other-projects/hathitrust/wcsa/extracted-features-solr/trunk/solr-ingest/src/main/java/org/hathitrust/extractedfeatures/PerPageJSONFlatmap.java@31221

Last change on this file since 31221 was 31221, checked in by davidb, 7 years ago

Missing argument added in

  • Property svn:executable set to *
File size: 4.5 KB
package org.hathitrust.extractedfeatures;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;

import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.util.DoubleAccumulator;
import org.json.JSONArray;
import org.json.JSONObject;

/*
class PagedJSON implements Function<String, Boolean> {

    private static final long serialVersionUID = 1L;

    public Boolean call(String s) { return s.contains("a"); }
}
 */

// Spark flat-map function: given the (relative) filename of one Extracted Features
// volume JSON file, emits one Solr 'add' JSONObject per page of that volume.
class PerPageJSONFlatmap implements FlatMapFunction<String, JSONObject>
//public class PagedJSON implements VoidFunction<String>
{
    private static final long serialVersionUID = 1L;

    protected String _input_dir;

    protected WhitelistBloomFilter _whitelist_bloomfilter;

    protected String _solr_url;
    protected String _output_dir;
    protected int _verbosity;

    protected DoubleAccumulator _progress_accum;
    protected double _progress_step;

    boolean _strict_file_io;

    public PerPageJSONFlatmap(String input_dir, String whitelist_filename,
                              String solr_url, String output_dir, int verbosity,
                              DoubleAccumulator progress_accum, double progress_step,
                              boolean strict_file_io)
    {
        _input_dir = input_dir;

        if (whitelist_filename != null) {
            _whitelist_bloomfilter = new WhitelistBloomFilter(whitelist_filename, true);
        }

        _solr_url = solr_url;
        _output_dir = output_dir;
        _verbosity = verbosity;

        _progress_accum = progress_accum;
        _progress_step = progress_step;

        _strict_file_io = strict_file_io;
    }

    public Iterator<JSONObject> call(String json_file_in) throws IOException
    {
        String full_json_file_in = _input_dir + "/" + json_file_in;
        JSONObject extracted_feature_record = JSONClusterFileIO.readJSONFile(full_json_file_in);

        ArrayList<JSONObject> json_pages = new ArrayList<JSONObject>();

        if (extracted_feature_record != null) {
            String volume_id = extracted_feature_record.getString("id");

            //JSONObject ef_metadata = extracted_feature_record.getJSONObject("metadata");
            //String title = ef_metadata.getString("title");

            JSONObject ef_features = extracted_feature_record.getJSONObject("features");

            int ef_page_count = ef_features.getInt("pageCount");

            if (_verbosity >= 1) {
                System.out.println("Processing: " + json_file_in);
                System.out.println("  pageCount = " + ef_page_count);
            }

            JSONArray ef_pages = ef_features.getJSONArray("pages");
            int ef_num_pages = ef_pages.length();
            if (ef_num_pages != ef_page_count) {
                System.err.println("Warning: number of page elements in JSON (" + ef_num_pages + ")"
                        + " does not match 'pageCount' metadata (" + ef_page_count + ")");
            }

            // Make directory for page-level JSON output
            String json_dir = ClusterFileIO.removeSuffix(json_file_in, ".json.bz2");
            String page_json_dir = json_dir + "/pages";

            if (_output_dir != null) {
                // Only needs to be done once per volume, so it is easier to do here
                // than in the per-page map
                ClusterFileIO.createDirectoryAll(_output_dir + "/" + page_json_dir);
            }
            if (_verbosity >= 2) {
                System.out.print("  Pages: ");
            }

            for (int i = 0; i < ef_page_count; i++) {
                String formatted_i = String.format("page-%06d", i);
                String page_id = volume_id + "." + formatted_i;

                if (_verbosity >= 2) {
                    if (i > 0) {
                        System.out.print(", ");
                    }
                    System.out.print(page_id);
                }

                String output_json_bz2 = page_json_dir + "/" + formatted_i + ".json.bz2";

                if (i == (ef_page_count - 1)) {
                    if (_verbosity >= 2) {
                        System.out.println();
                    }
                    System.out.println("Sample output JSON page file: " + output_json_bz2);
                }

                JSONObject ef_page = ef_pages.getJSONObject(i);

                if (ef_page != null) {
                    // Convert the page record to Solr 'add' document form
                    JSONObject solr_add_doc_json
                        = SolrDocJSON.generateSolrDocJSON(volume_id, page_id, ef_page, _whitelist_bloomfilter);
                    solr_add_doc_json.put("filename_json_bz2", output_json_bz2);

                    json_pages.add(solr_add_doc_json);
                }
                else {
                    System.err.println("Skipping: " + page_id);
                }
            }
        }
        else {
            // File did not exist, or could not be parsed
            String mess = "Failed to read in bzipped JSON file '" + full_json_file_in + "'";
            if (_strict_file_io) {
                throw new IOException(mess);
            }
            else {
                System.err.println("Warning: " + mess);
                System.out.println("Warning: " + mess);
            }
        }

        _progress_accum.add(_progress_step);

        return json_pages.iterator();
    }

}
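For context, this flat-map is applied to an RDD of per-volume JSON filenames by driver code elsewhere in the project. The following is a minimal driver sketch, assuming the Spark 2.x Java API; the driver class name, input list, whitelist file, Solr URL and output directory are placeholder assumptions, not values taken from this repository.

// Hypothetical driver sketch (not part of this file): wires PerPageJSONFlatmap
// into a Spark job. All paths and the Solr URL below are placeholder assumptions.
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.DoubleAccumulator;
import org.json.JSONObject;

public class PerPageJSONFlatmapDriver {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("Extracted Features: per-page flat-map");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // One volume-level .json.bz2 filename per line, relative to the input directory
        JavaRDD<String> json_files_rdd = jsc.textFile("hdfs:///user/example/volume-list.txt");
        long num_volumes = json_files_rdd.count();

        // Accumulator the flat-map uses to report progress; each volume adds its share
        DoubleAccumulator progress_accum = jsc.sc().doubleAccumulator("Progress Percent");
        double per_vol_step = 100.0 / (double) num_volumes;

        PerPageJSONFlatmap flatmap = new PerPageJSONFlatmap(
                "hdfs:///user/example/ef-json",   // input_dir (assumed)
                "whitelist.txt",                  // whitelist_filename (assumed; may be null)
                "http://localhost:8983/solr/ef",  // solr_url (assumed)
                "hdfs:///user/example/ef-pages",  // output_dir (assumed; may be null)
                1,                                // verbosity
                progress_accum, per_vol_step,
                false);                           // strict_file_io

        // One JSONObject per page, ready to be posted to Solr or written out
        JavaRDD<JSONObject> json_pages_rdd = json_files_rdd.flatMap(flatmap);
        System.out.println("Total pages: " + json_pages_rdd.count());

        jsc.close();
    }
}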