source: other-projects/maori-lang-detection/src/org/greenstone/atea/NutchTextDumpToMongoDB.java@ 33811

Last change on this file: revision 33811, checked in by ak19, 4 years ago

Returning to using a single variable, urlContainsLangCodeInPath, to record whether any page on a site contains /mi(/) or http(s)://mi.* in its URL path.

File size: 15.8 KB
package org.greenstone.atea;

import java.io.*;
import java.lang.ArrayIndexOutOfBoundsException;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.csv.*;
import org.apache.log4j.Logger;

//import org.bson.types.ObjectId;

import org.greenstone.atea.morphia.*;

/**
 * Class to process the dump text files produced FOR EACH SITE (e.g. site "00001") that
 * Nutch has finished crawling and whose text has been dumped out to a file called dump.txt.
 * This reads in the dump.txt file contained in each site folder within the input folder.
 * (e.g. input folder "crawled" could contain folders 00001 to 01465. Each contains a dump.txt.)
 * Each dump.txt could contain the text contents for an entire site, or for individual pages.
 * This class then uses class TextDumpPage to parse each webpage within a dump.txt,
 * which parses out the actual text body content of each webpage's section within a dump.txt.
 * Finally, MaoriTextDetector is run over that to determine whether the full body text is
 * likely to be in Maori or not.
 *
 * Potential issues: since a web page's text is dumped out by nutch with neither a paragraph
 * nor even a newline separator, it's hard to be sure that the entire page is in one language.
 * If a page is in multiple languages, there's no way to be sure it doesn't contain promising
 * Maori-language paragraphs when the majority or the remainder of the page happens to be in
 * English.
 *
 * So if we're looking for any paragraphs in Maori to store in a DB, perhaps it's better to run
 * the MaoriTextDetector.isTextInMaori(BufferedReader reader) over two "lines" at a time,
 * instead of running it over the entire html body's text.
 *
 * TO COMPILE OR RUN, FIRST DO:
 *   cd maori-lang-detection/apache-opennlp-1.9.1
 *   export OPENNLP_HOME=`pwd`
 *   cd maori-lang-detection/src
 *
 * TO COMPILE:
 *   maori-lang-detection/src$
 *     javac -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB.java
 *
 * TO RUN:
 *   maori-lang-detection/src$
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small
 *
 * or:
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small > ../crawled-small/bla.txt 2>&1
 *
 */
public class NutchTextDumpToMongoDB {
    static Logger logger = Logger.getLogger(org.greenstone.atea.NutchTextDumpToMongoDB.class.getName());

    static boolean DEBUG_MODE = true; // this is set to false in main() at the end of this class

    /** Counter for the number of sites.
     * Should be equal to the number of times the NutchTextDumpToMongoDB constructor
     * is called: once per site.
     */
    static private int SITE_COUNTER = 0;
    static private long WEBPAGE_COUNTER = 0;

    private final MaoriTextDetector maoriTxtDetector;
    private final MongoDBAccess mongodbAccess;

    public final String siteID;
    public final boolean siteCrawlUnfinished;
    public final long siteCrawledTimestamp; /** when the crawl of the site terminated */

    private int countOfWebPagesWithBodyText = 0;

    private String geoLocationCountryCode = null; /** 2 letter country code */
    private boolean urlContainsLangCodeInPath = false; /** whether any URL on this site contains /mi(/) or http(s)://mi.* in its URL path */

    private String domainOfSite;
    private int numPagesInMRI = 0;
    private int numPagesContainingMRI = 0;

    /** keep a list to store the text of each page */
    private ArrayList<TextDumpPage> pages;



    /** Number of language and confidence results to return for storing in MongoDB.
     * MongoDB runs out of space if we store too many, since this info is stored per
     * sentence, and a long text then presumably becomes a very large MongoDB document. */
    private static final int NUM_TOP_LANGUAGES = 3; // 103 max, in current version of opennlp lang model


    private boolean isStartOfNewWebPageRecord(String prevLine, String line) {
        // The start of a new web page's record in nutch's text dump of an entire site
        // is denoted by a newline followed by a URL (protocol),
        // or by the very start of the file followed by a URL (protocol).
        return ((prevLine == null || prevLine.equals(""))
                && (line.startsWith("http://") || line.startsWith("https://")));
    }
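
    /** Illustrative sketch only, not called anywhere: the class comment above suggests
     * running the detector over two "lines" at a time rather than over a page's whole
     * body text. Assuming MaoriTextDetector.getAllSentences(String) and
     * isTextInMaori(String) behave as they are used in prepareSiteStats() below,
     * a pairwise check could look like this. */
    private boolean anySentencePairInMaori(String text) {
        String[] sentences = maoriTxtDetector.getAllSentences(text);
        for(int i = 0; i + 1 < sentences.length; i++) {
            // test each pair of consecutive sentences as a single unit of text
            if(maoriTxtDetector.isTextInMaori(sentences[i] + " " + sentences[i+1])) {
                return true;
            }
        }
        return false;
    }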

    public void debugPageDump(StringBuilder pageDump) {
        if(DEBUG_MODE) {
            // START DEBUG
            logger.debug("__________________________________________");
            logger.debug("@@@ Found page entry: ");
            logger.debug("__________________________________________");
            logger.debug(pageDump.toString());
            logger.debug("------------------------------------------");
            // END DEBUG
        }
    }

    /** A NutchTextDumpToMongoDB instance processes the dump.txt for one site */
    public NutchTextDumpToMongoDB(MongoDBAccess mongodbAccess,
                                  MaoriTextDetector maoriTxtDetector, String siteID,
                                  File txtDumpFile, long lastModified, boolean siteCrawlUnfinished)
        throws IOException
    {
        // increment static counter of sites processed by a NutchTextDumpToMongoDB instance
        SITE_COUNTER++;

        // siteID is of the form %5d (e.g. 00020) and is just the name of a site folder
        this.siteID = siteID;
        this.siteCrawlUnfinished = siteCrawlUnfinished;
        this.siteCrawledTimestamp = lastModified;

        this.maoriTxtDetector = maoriTxtDetector;
        this.mongodbAccess = mongodbAccess;

        pages = new ArrayList<TextDumpPage>();

        String line = null;
        StringBuilder pageDump = null;
        try (
             BufferedReader reader = new BufferedReader(new FileReader(txtDumpFile));
        ) {

            boolean readingText = false;
            String prevLine = null;

            while((line = reader.readLine()) != null) { // readLine removes the newline separator
                line = line.trim();
                // When outside of a page's body text, an empty line marks the end of a page
                // in nutch's text dump of a site.
                // Note, though, that there can be one or more empty lines between the start
                // and end markers of a page's text.

                if(isStartOfNewWebPageRecord(prevLine, line)) {

                    if(pageDump != null) { // should also be the case then: if(prevLine != null)
                        // finish old pageDump and begin new one

                        //debugPageDump(pageDump);

                        TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                        // parses the fields and body text of a webpage in nutch's txt dump of the entire site
                        //page.parseFields();
                        //page.getText();
                        pages.add(page);
                        inspectPageURLPath(page);
                        pageDump = null;

                    }

                    // begin new webpage dump
                    pageDump = new StringBuilder();
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                else if(!line.equals("")) {
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                // can throw away any newlines between text start and end markers.

                prevLine = line;
            }

            // process the final webpage record:
            //debugPageDump(pageDump);

            if(pageDump == null) {
                logger.warn("siteID " + siteID + " had an empty dump.txt file. Reinspect site.");
            } else {
                TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                pages.add(page);
                inspectPageURLPath(page);
                pageDump = null;
            }


        } catch (IOException ioe) {
            logger.error("@@@@@@@@@ Error reading in nutch txtdump file " + txtDumpFile, ioe);
        }

        // Just do this once: get and store the domain of the site.
        // Passing true to get the domain with its protocol prefix.
        if(pages.size() > 0) {
            TextDumpPage firstPage = pages.get(0);
            String url = firstPage.getPageURL();
            this.domainOfSite = Utility.getDomainForURL(url, true);
        }
        else {
            this.domainOfSite = "UNKNOWN";
        }

        /* No need to loop again through all pages. Instead, just inspectPageURLPath() as each page is created above.
        // For any site, we just need to work out if any of its pages contains /mi(/) or http(s)://mi.* in its URL path
        for(TextDumpPage aPage : pages) {
            inspectPageURLPath(aPage);
        }
        */
        prepareSiteStats(mongodbAccess);
    }

    /** For every site, we just need to work out if any of its pages contains /mi(/) or http(s)://mi. in its URL.
     * This method is called on each page of a site as the page is created. */
    private void inspectPageURLPath(TextDumpPage page) {
        String url = page.getPageURL();
        //logger.debug("@@@@ pageURL: " + url);

        if(!this.urlContainsLangCodeInPath) { // if not already set to true for any previous page in this site,
            // check if this page of the site contains /mi(/) or http(s)://mi in its URL path
            if(url.contains("/mi/") || url.endsWith("/mi") || url.startsWith("https://mi.") || url.startsWith("http://mi.")) {
                this.urlContainsLangCodeInPath = true;
            }
        }
    }
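
    /** Illustrative alternative, not used by this class: the four string checks in
     * inspectPageURLPath() above could equally be expressed as a single regular
     * expression. A minimal sketch, assuming those four URL shapes are the only
     * cases of interest. */
    private static final java.util.regex.Pattern MI_URL_PATTERN =
        java.util.regex.Pattern.compile("^https?://mi\\.|/mi(/|$)");

    private boolean urlHasMiLangCode(String url) {
        // find() succeeds if the URL starts with http(s)://mi. or contains /mi(/) in its path
        return MI_URL_PATTERN.matcher(url).find();
    }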


    private void prepareSiteStats(MongoDBAccess mongodbAccess) throws IOException {

        TextDumpPage page = null;
        for(int i = 0; i < pages.size(); i++) {

            page = pages.get(i);

            String text = page.getPageText();

            if(text.equals("")) {
                // don't care about empty pages
                continue;
            }
            else {
                WEBPAGE_COUNTER++; // count of cumulative total of webpages for all sites
                countOfWebPagesWithBodyText++; // of this site alone

                boolean isMRI = maoriTxtDetector.isTextInMaori(text);
                if(isMRI) {
                    numPagesInMRI++;
                }

                String[] sentences = maoriTxtDetector.getAllSentences(text);
                int totalSentences = sentences.length;
                int numSentencesInMRI = 0;
                ArrayList<SentenceInfo> singleSentences = maoriTxtDetector.getAllSentencesInfo(sentences, NUM_TOP_LANGUAGES);
                ArrayList<SentenceInfo> overlappingSentences = maoriTxtDetector.getAllOverlappingSentencesInfo(sentences, NUM_TOP_LANGUAGES);

                WebpageInfo webpage = page.convertStoredDataToWebpageInfo(WEBPAGE_COUNTER/*new ObjectId()*/,
                                                                          this.siteID/*SITE_COUNTER*/,
                                                                          isMRI,
                                                                          totalSentences,
                                                                          singleSentences,
                                                                          overlappingSentences);


                for(SentenceInfo si : singleSentences) {
                    //LanguageInfo bestLanguage = si.languagesInfo[0];
                    //if(bestLanguage.langCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                    if(si.bestLangCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                        numSentencesInMRI++;
                    }
                }


                webpage.setMRISentenceCount(numSentencesInMRI);
                webpage.setContainsMRI((numSentencesInMRI > 0));
                if(numSentencesInMRI > 0) { // if(numSentencesInMRI >= 5) {
                    // Not sure if we can trust that a single sentence detected as Maori on a page really is Maori.
                    // But if at least 5 sentences are detected as Maori, the page is more likely to really contain MRI.
                    numPagesContainingMRI++;
                }

                //mongodbAccess.insertWebpageInfo(webpage);
                // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
                mongodbAccess.datastore.save(webpage);
            }
        }
    }
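
    /** Illustrative sketch only, not called anywhere: how a site-level ratio could be
     * derived from the counters maintained in prepareSiteStats() above, e.g. to gauge
     * how strongly a site as a whole leans towards MRI. */
    private double fractionOfTextPagesInMRI() {
        return (countOfWebPagesWithBodyText == 0)
            ? 0.0
            : (double)numPagesInMRI / countOfWebPagesWithBodyText;
    }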


    public void websiteDataToDB() {


        // https://stackoverflow.com/questions/35183146/how-can-i-create-a-java-8-localdate-from-a-long-epoch-time-in-milliseconds
        // LocalDateTime date =
        //     LocalDateTime.ofInstant(Instant.ofEpochMilli(this.siteCrawledTimestamp), ZoneId.systemDefault());
        // String crawlTimestamp =
        //     date.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + " " + date.format(DateTimeFormatter.ofPattern("HH:mm:ss"));

        boolean redoCrawl = false;

        if(this.siteCrawlUnfinished) {
            // arbitrary decision, but we need some indication that the MRI content was more than a one-off in the website
            if(this.numPagesInMRI > 2) {
                redoCrawl = true;
            }
        }

        File geoLiteCityDatFile = new File(this.getClass().getClassLoader().getResource("GeoLiteCity.dat").getFile());
        try {
            if(this.domainOfSite.equals("UNKNOWN")) { // for sites that had 0 webpages downloaded, we have no domain
                this.geoLocationCountryCode = "UNKNOWN";
            } else {
                this.geoLocationCountryCode = Utility.getCountryCodeOfDomain(this.domainOfSite, geoLiteCityDatFile);
            }
        } catch(Exception e) {
            logger.error("*** For SiteID " + siteID + ", got exception: " + e.getMessage(), e);

            //if(this.domainOfSite.endsWith(".nz")) { // nz TLDs are worth counting
            //this.geoLocationCountryCode = "NZ";
            //}

            // Help along identification of the domain's country by inspecting the TLD
            // when there are 2 letters after the last period mark.
            int periodIndex = domainOfSite.length()-3;
            // .com|org etc extensions that have 3 chars after the period mark will remain unknown;
            // 2 letter extensions will be considered the TLD.
            if(periodIndex >= 0 && domainOfSite.charAt(periodIndex) == '.' && ((periodIndex+1) < domainOfSite.length())) {
                // has a 2 letter TLD. Make it uppercase to match the return value of Utility.getCountryCodeOfDomain() above
                String TLD = domainOfSite.substring(periodIndex+1);
                this.geoLocationCountryCode = TLD.toUpperCase();
            } else {
                this.geoLocationCountryCode = "UNKNOWN"; // couldn't get the country code, so it should also be UNKNOWN, not null
            }
        }

        int totalPages = pages.size();

        WebsiteInfo website = new WebsiteInfo(/*SITE_COUNTER,*/ this.siteID, this.domainOfSite,
                                              totalPages, this.countOfWebPagesWithBodyText,
                                              this.numPagesInMRI, this.numPagesContainingMRI,
                                              this.siteCrawledTimestamp, this.siteCrawlUnfinished, redoCrawl,
                                              this.geoLocationCountryCode, this.urlContainsLangCodeInPath);

        //mongodbAccess.insertWebsiteInfo(website);
        // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
        mongodbAccess.datastore.save(website);
    }
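
    /** Illustrative sketch only, not called anywhere: the TLD fallback in the catch
     * block of websiteDataToDB() above, factored out as a helper. Returns the
     * uppercased 2-letter TLD if the domain ends in a period followed by exactly
     * 2 characters, else "UNKNOWN". */
    private static String guessCountryCodeFromTLD(String domain) {
        int periodIndex = domain.length() - 3;
        if(periodIndex >= 0 && domain.charAt(periodIndex) == '.') {
            return domain.substring(periodIndex + 1).toUpperCase();
        }
        return "UNKNOWN";
    }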


    // --------------- STATIC METHODS AND INNER CLASSES USED BY MAIN -------------- //

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tNutchTextDumpToMongoDB <path to 'crawled' folder>");
    }

    public static void main(String[] args) {
        if(args.length != 1) {
            printUsage();
            return;
        }

        File sitesDir = new File(args[0]);
        if(!sitesDir.exists() || !sitesDir.isDirectory()) {
            logger.error("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        NutchTextDumpToMongoDB.DEBUG_MODE = false;


        try (
             MongoDBAccess mongodb = new MongoDBAccess();
        ) {

            mongodb.connectToDB();
            //mongodb.showCollections();

            // print out the column headers for the websites csv file
            // https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVPrinter.html
            // OPTIONAL TODO: creating collections can be done here if dropping and recreating

            MaoriTextDetector mriTxtDetector = new MaoriTextDetector(true); // true: run silent
            File[] sites = sitesDir.listFiles();

            // sort site folders in alphabetical order
            // https://stackoverflow.com/questions/7199911/how-to-file-listfiles-in-alphabetical-order
            Arrays.sort(sites);

            for(File siteDir : sites) { // e.g. 00001
                if(siteDir.isDirectory()) {
                    // look for dump.txt
                    File txtDumpFile = new File(siteDir, "dump.txt");
                    if(!txtDumpFile.exists()) {
                        logger.error("Text dump file " + txtDumpFile + " did not exist");
                        continue;
                    }

                    else {
                        File UNFINISHED_FILE = new File(siteDir, "UNFINISHED");

                        String siteID = siteDir.getName();
                        if(siteID.contains("_")) {
                            logger.warn("*** Skipping site " + siteID + " as its dir name indicates it wasn't crawled properly.");
                            continue;
                        }

                        long lastModified = siteDir.lastModified();
                        logger.debug("@@@ Processing siteID: " + siteID);
                        NutchTextDumpToMongoDB nutchTxtDump = new NutchTextDumpToMongoDB(
                            mongodb, mriTxtDetector,
                            siteID, txtDumpFile, lastModified, UNFINISHED_FILE.exists());
                        // now it has parsed all the web pages in the site's text dump

                        // Let's print stats on each web page's detected language being MRI or not
                        // and how many pages there were in the site in total.

                        //nutchTxtDump.printSiteStats();

                        nutchTxtDump.websiteDataToDB();
                    }
                }

            }

        } catch(Exception e) {
            // can get an exception when instantiating a NutchTextDumpToMongoDB instance
            // or when working with the CSV file
            logger.error(e.getMessage(), e);
        }
    }
}