source: other-projects/maori-lang-detection/src/org/greenstone/atea/NutchTextDumpToMongoDB.java@ 33810

Last change on this file since 33810 was 33810, checked in by ak19, 4 years ago

Bugfix: 'mi' in the URL path should be checked for on each page of a site, not just the last page. The closing bracket of the loop was in the wrong place.

File size: 16.5 KB
package org.greenstone.atea;

import java.io.*;
import java.lang.ArrayIndexOutOfBoundsException;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.csv.*;
import org.apache.log4j.Logger;

//import org.bson.types.ObjectId;

import org.greenstone.atea.morphia.*;

/**
 * Class to process the dump text files produced FOR EACH SITE (e.g. site "00001") that
 * Nutch has finished crawling and whose text has been dumped out to a file called dump.txt.
 * This reads in the dump.txt file contained in each site folder within the input folder.
 * (e.g. input folder "crawled" could contain folders 00001 to 01465. Each contains a dump.txt)
 * Each dump.txt could contain the text contents for an entire site, or for individual pages.
 * This class then uses class TextDumpPage to parse each webpage within a dump.txt,
 * which parses out the actual text body content of each webpage's section within a dump.txt.
 * Finally, MaoriTextDetector is run over that to determine whether the full body text is
 * likely to be in Maori or not.
 *
 * Potential issues: since a web page's text is dumped out by nutch with neither paragraph
 * nor even newline separators, it's hard to be sure that the entire page is in one language.
 * If it's in multiple languages, there's no way to be sure a page doesn't contain promising
 * Maori-language paragraphs when the majority (or the remainder) of the page happens to be
 * in English.
 *
 * So if we're looking for any paragraphs in Maori to store in a DB, perhaps it's better to run
 * the MaoriTextDetector.isTextInMaori(BufferedReader reader) over two "lines" at a time,
 * instead of running it over the entire html body's text. (A sketch of this idea appears
 * just after this comment block.)
 *
 * TO COMPILE OR RUN, FIRST DO:
 *     cd maori-lang-detection/apache-opennlp-1.9.1
 *     export OPENNLP_HOME=`pwd`
 *     cd maori-lang-detection/src
 *
 * TO COMPILE:
 *     maori-lang-detection/src$
 *     javac -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB.java
 *
 * TO RUN:
 *     maori-lang-detection/src$
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small
 *
 * or:
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small > ../crawled-small/bla.txt 2>&1
 *
 */
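/* A minimal sketch (not used anywhere in this class) of the two-"lines"-at-a-time idea
 * suggested in the class comment above. It assumes only the
 * MaoriTextDetector.isTextInMaori(BufferedReader) method mentioned there; the variable
 * names and the way the body text is split are hypothetical:
 *
 *     String[] lines = bodyText.split("\n");
 *     for (int i = 0; i + 1 < lines.length; i++) {
 *         // slide a two-line window over the text, so a short run of Maori
 *         // isn't drowned out by a majority-English page
 *         String window = lines[i] + "\n" + lines[i + 1];
 *         if (maoriTxtDetector.isTextInMaori(new BufferedReader(new StringReader(window)))) {
 *             // record that this page contains at least one Maori-looking passage
 *         }
 *     }
 */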
public class NutchTextDumpToMongoDB {
    static Logger logger = Logger.getLogger(org.greenstone.atea.NutchTextDumpToMongoDB.class.getName());

    static boolean DEBUG_MODE = true; // this is set to false in main() at the end of this class

    /** Counter for number of sites.
     * Should be equal to the number of times the NutchTextDumpToMongoDB constructor
     * is called: once per site.
     */
    private static int SITE_COUNTER = 0;
    private static long WEBPAGE_COUNTER = 0;

    private final MaoriTextDetector maoriTxtDetector;
    private final MongoDBAccess mongodbAccess;

    public final String siteID;
    public final boolean siteCrawlUnfinished;
    /** When the crawl of the site terminated */
    public final long siteCrawledTimestamp;

    private int countOfWebPagesWithBodyText = 0;

    /** 2 letter country code */
    private String geoLocationCountryCode = null;
    /** Whether any URL on this site contains a /mi(/) in its path */
    private boolean urlContainsLangCodeInPathSuffix = false;
    /** Whether any URL on this site starts with http(s)://mi. */
    private boolean urlContainsLangCodeInPathPrefix = false;

    private String domainOfSite;
    private int numPagesInMRI = 0;
    private int numPagesContainingMRI = 0;

    /** keep a list to store the text of each page */
    private ArrayList<TextDumpPage> pages;


    /** Number of language and confidence results to return for storing in MongoDB.
     * MongoDB runs out of space if too many are stored, since we store this info per
     * sentence, and a long text document then presumably becomes a very large MongoDB document. */
    private static final int NUM_TOP_LANGUAGES = 3; // 103 max, in current version of opennlp lang model


    private boolean isStartOfNewWebPageRecord(String prevLine, String line) {
        // The start of a new web page's record in nutch's text dump of an entire site
        // is denoted by an empty line followed by a URL (protocol) line,
        // or by a URL (protocol) line at the very start of the file.
        return ((prevLine == null || prevLine.equals(""))
                && (line.startsWith("http://") || line.startsWith("https://")));
    }
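    /* Illustration (hypothetical content; the exact field lines and text markers are
     * whatever TextDumpPage parses out) of the record boundary the test above detects:
     *
     *     http://example.org/page1       <- start of record 1 (very start of file)
     *     ...field lines and body text of page 1...
     *                                    <- empty line ends record 1
     *     https://example.org/page2      <- start of record 2
     *     ...
     */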

    public void debugPageDump(StringBuilder pageDump) {
        if(DEBUG_MODE) {
            // START DEBUG
            logger.debug("__________________________________________");
            logger.debug("@@@ Found page entry: ");
            logger.debug("__________________________________________");
            logger.debug(pageDump.toString());
            logger.debug("------------------------------------------");
            // END DEBUG
        }
    }

    /** A NutchTextDumpToMongoDB processes the dump.txt for one site */
    public NutchTextDumpToMongoDB(MongoDBAccess mongodbAccess,
                                  MaoriTextDetector maoriTxtDetector, String siteID,
                                  File txtDumpFile, long lastModified, boolean siteCrawlUnfinished)
        throws IOException
    {
        // increment static counter of sites processed by a NutchTextDumpToMongoDB instance
        SITE_COUNTER++;

        // siteID is of the form %5d (e.g. 00020) and is just the name of a site folder
        this.siteID = siteID;
        this.siteCrawlUnfinished = siteCrawlUnfinished;
        this.siteCrawledTimestamp = lastModified;

        this.maoriTxtDetector = maoriTxtDetector;
        this.mongodbAccess = mongodbAccess;

        pages = new ArrayList<TextDumpPage>();

        String line = null;
        StringBuilder pageDump = null;
        try (
             BufferedReader reader = new BufferedReader(new FileReader(txtDumpFile));
        ) {

            boolean readingText = false;
            String prevLine = null;

            while((line = reader.readLine()) != null) { // readLine removes newline separator
                line = line.trim();
                // Outside of a page's body text, an empty line marks the end of a page
                // in nutch's text dump of a site.
                // But note, there can be an empty line (or more) between the start and end
                // markers of a page's text.

                if(isStartOfNewWebPageRecord(prevLine, line)) {

                    if(pageDump != null) { // should also be the case then: if(prevLine != null)
                        // finish old pageDump and begin new one

                        //debugPageDump(pageDump);

                        TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                        // parses the fields and body text of a webpage in nutch's txt dump of an entire site
                        //page.parseFields();
                        //page.getText();
                        pages.add(page);
                        inspectPageURLPath(page);
                        pageDump = null;
                    }

                    // begin new webpage dump
                    pageDump = new StringBuilder();
                    pageDump.append(line);
                    pageDump.append("\n");
                }
                else if(pageDump != null && !line.equals("")) {
                    // the null check guards against a malformed dump.txt that doesn't
                    // begin with a URL line
                    pageDump.append(line);
                    pageDump.append("\n");
                }
                // can throw away any newlines between text start and end markers.

                prevLine = line;
            }

            // process final webpage record:
            //debugPageDump(pageDump);

            if(pageDump == null) {
                logger.warn("siteID " + siteID + " had an empty dump.txt file. Reinspect site.");
            } else {
                TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                pages.add(page);
                inspectPageURLPath(page);
                pageDump = null;
            }

        } catch (IOException ioe) {
            logger.error("@@@@@@@@@ Error reading in nutch txtdump file " + txtDumpFile, ioe);
        }

        // Just do this once: get and store the domain of the site.
        // Passing true to get the domain with its protocol prefix.
        if(pages.size() > 0) {
            TextDumpPage firstPage = pages.get(0);
            String url = firstPage.getPageURL();
            this.domainOfSite = Utility.getDomainForURL(url, true);
        }
        else {
            this.domainOfSite = "UNKNOWN";
        }

        /*
        // for every site, we just need to work out if any of its pages
        // contains /mi(/) in its URL
        for(TextDumpPage aPage : pages) {
            String url = aPage.getPageURL();
            logger.debug("@@@@ pageURL: " + url);
            if(!this.urlContainsLangCodeInPathSuffix && (url.contains("/mi/") || url.endsWith("/mi"))) {
                this.urlContainsLangCodeInPathSuffix = true;
                logger.info("*********** URL CONTAINS SUFFIX");
            }
            // And if any contains http(s)://mi. in its URL
            if(!this.urlContainsLangCodeInPathPrefix && (url.startsWith("https://mi.") || url.startsWith("http://mi."))) {
                this.urlContainsLangCodeInPathPrefix = true;
            }
        }
        */
        prepareSiteStats(mongodbAccess);
    }

    /** For every site, we just need to work out whether any of its pages contains /mi(/)
     * or http(s)://mi. in its URL.
     * This method is called on each page of a site as the page is created. */
    private void inspectPageURLPath(TextDumpPage page) {
        String url = page.getPageURL();
        //logger.debug("@@@@ pageURL: " + url);

        // check whether each page in the site contains /mi(/) in its URL, and if so set a site-level variable accordingly
        if(!this.urlContainsLangCodeInPathSuffix && (url.contains("/mi/") || url.endsWith("/mi"))) {
            this.urlContainsLangCodeInPathSuffix = true;
        }
        // And if any page contains http(s)://mi. in its URL, then set the site-level variable for this accordingly
        if(!this.urlContainsLangCodeInPathPrefix && (url.startsWith("https://mi.") || url.startsWith("http://mi."))) {
            this.urlContainsLangCodeInPathPrefix = true;
        }
    }
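    /* Examples (hypothetical URLs) of what the two flags set above pick up:
     *   https://mi.example.org/page    -> urlContainsLangCodeInPathPrefix
     *   https://example.org/mi/page    -> urlContainsLangCodeInPathSuffix (contains "/mi/")
     *   https://example.org/about/mi   -> urlContainsLangCodeInPathSuffix (ends with "/mi")
     *   https://example.org/mind/page  -> neither flag ("mi" must be a whole path segment)
     */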

    private void prepareSiteStats(MongoDBAccess mongodbAccess) throws IOException {

        TextDumpPage page = null;
        for(int i = 0; i < pages.size(); i++) {

            page = pages.get(i);

            String text = page.getPageText();

            if(text.equals("")) {
                // don't care about empty pages
                continue;
            }
            else {
                WEBPAGE_COUNTER++; // count of cumulative total of webpages for all sites
                countOfWebPagesWithBodyText++; // of this site alone

                boolean isMRI = maoriTxtDetector.isTextInMaori(text);
                if(isMRI) {
                    numPagesInMRI++;
                }

                String[] sentences = maoriTxtDetector.getAllSentences(text);
                int totalSentences = sentences.length;
                int numSentencesInMRI = 0;
                ArrayList<SentenceInfo> singleSentences = maoriTxtDetector.getAllSentencesInfo(sentences, NUM_TOP_LANGUAGES);
                ArrayList<SentenceInfo> overlappingSentences = maoriTxtDetector.getAllOverlappingSentencesInfo(sentences, NUM_TOP_LANGUAGES);

                WebpageInfo webpage = page.convertStoredDataToWebpageInfo(WEBPAGE_COUNTER/*new ObjectId()*/,
                                                                          this.siteID/*SITE_COUNTER*/,
                                                                          isMRI,
                                                                          totalSentences,
                                                                          singleSentences,
                                                                          overlappingSentences);

                for(SentenceInfo si : singleSentences) {
                    //LanguageInfo bestLanguage = si.languagesInfo[0];
                    //if(bestLanguage.langCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                    if(si.bestLangCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                        numSentencesInMRI++;
                    }
                }

                webpage.setMRISentenceCount(numSentencesInMRI);
                webpage.setContainsMRI((numSentencesInMRI > 0));
                if(numSentencesInMRI > 0) { // if(numSentencesInMRI >= 5) {
                    // Not sure if we can trust that a single sentence detected as Maori on a page is really Maori.
                    // But if at least 5 sentences are detected as Maori, the page is more likely to really contain MRI.
                    numPagesContainingMRI++;
                }

                //mongodbAccess.insertWebpageInfo(webpage);
                // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
                mongodbAccess.datastore.save(webpage);
            }
        }
    }
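    /* Worked example (hypothetical numbers) of the two page-level counters used above:
     * a page with 10 sentences, 3 of whose bestLangCode equals
     * MaoriTextDetector.MAORI_3LETTER_CODE, will typically fail the whole-text
     * isTextInMaori() check, so numPagesInMRI is not incremented; but since
     * numSentencesInMRI == 3 > 0, numPagesContainingMRI is incremented.
     * So, in practice, numPagesContainingMRI >= numPagesInMRI for a site.
     */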

    public void websiteDataToDB() {

        // https://stackoverflow.com/questions/35183146/how-can-i-create-a-java-8-localdate-from-a-long-epoch-time-in-milliseconds
        // LocalDateTime date =
        //     LocalDateTime.ofInstant(Instant.ofEpochMilli(this.siteCrawledTimestamp), ZoneId.systemDefault());
        // String crawlTimestamp =
        //     date.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + " " + date.format(DateTimeFormatter.ofPattern("HH:mm:ss"));

        boolean redoCrawl = false;

        if(this.siteCrawlUnfinished) {
            // arbitrary decision, but we need some indication that the MRI content was not close to a one-off in the website
            if(this.numPagesInMRI > 2) {
                redoCrawl = true;
            }
        }

        File geoLiteCityDatFile = new File(this.getClass().getClassLoader().getResource("GeoLiteCity.dat").getFile());
        try {
            if(this.domainOfSite.equals("UNKNOWN")) { // for sites that had 0 webpages downloaded, we have no domain
                this.geoLocationCountryCode = "UNKNOWN";
            } else {
                this.geoLocationCountryCode = Utility.getCountryCodeOfDomain(this.domainOfSite, geoLiteCityDatFile);
            }
        } catch(Exception e) {
            logger.error("*** For SiteID " + siteID + ", got exception: " + e.getMessage(), e);

            //if(this.domainOfSite.endsWith(".nz")) { // nz TLDs are worth counting
            //    this.geoLocationCountryCode = "NZ";
            //}

            // Help along identification of the domain's country by construing a 2-letter
            // extension after the last period mark as a country-code TLD.
            int periodIndex = domainOfSite.length()-3;
            // .com|.org etc extensions that have 3 chars after the period mark will remain unknown;
            // 2 letter extensions will be considered TLDs
            if(periodIndex >= 0 && domainOfSite.charAt(periodIndex) == '.' && ((periodIndex+1) < domainOfSite.length())) {
                // has a 2 letter TLD. Make it uppercase to match the return value of Utility.getCountryCodeOfDomain() above
                String TLD = domainOfSite.substring(periodIndex+1);
                this.geoLocationCountryCode = TLD.toUpperCase();
            } else {
                this.geoLocationCountryCode = "UNKNOWN"; // couldn't get the country code, so should also be UNKNOWN, not null
            }
        }

        int totalPages = pages.size();

        WebsiteInfo website = new WebsiteInfo(/*SITE_COUNTER,*/ this.siteID, this.domainOfSite,
                                              totalPages, this.countOfWebPagesWithBodyText,
                                              this.numPagesInMRI, this.numPagesContainingMRI,
                                              this.siteCrawledTimestamp, this.siteCrawlUnfinished, redoCrawl,
                                              this.geoLocationCountryCode, this.urlContainsLangCodeInPathSuffix, this.urlContainsLangCodeInPathPrefix);

        //mongodbAccess.insertWebsiteInfo(website);
        // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
        mongodbAccess.datastore.save(website);
    }
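    /* Worked example of the 2-letter TLD fallback above (hypothetical domains;
     * recall that domainOfSite keeps its protocol prefix):
     *   "https://mi.example.nz" -> charAt(length-3) == '.' -> TLD "nz" -> geoLocationCountryCode "NZ"
     *   "https://example.com"   -> charAt(length-3) == 'c' -> geoLocationCountryCode stays "UNKNOWN"
     */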

    // --------------- STATIC METHODS AND INNER CLASSES USED BY MAIN -------------- //

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tNutchTextDumpToMongoDB <path to 'crawled' folder>");
    }

    public static void main(String[] args) {
        if(args.length != 1) {
            printUsage();
            return;
        }

        File sitesDir = new File(args[0]);
        if(!sitesDir.exists() || !sitesDir.isDirectory()) {
            logger.error("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        NutchTextDumpToMongoDB.DEBUG_MODE = false;


        try (
             MongoDBAccess mongodb = new MongoDBAccess();
        ) {

            mongodb.connectToDB();
            //mongodb.showCollections();

            // print out the column headers for the websites csv file
            // https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVPrinter.html
            // OPTIONAL TODO: creating collections can be done here if dropping and recreating

            MaoriTextDetector mriTxtDetector = new MaoriTextDetector(true); // true: run silent
            File[] sites = sitesDir.listFiles();

            // sort site folders in alphabetical order
            // https://stackoverflow.com/questions/7199911/how-to-file-listfiles-in-alphabetical-order
            Arrays.sort(sites);

            for(File siteDir : sites) { // e.g. 00001
                if(siteDir.isDirectory()) {
                    // look for dump.txt
                    File txtDumpFile = new File(siteDir, "dump.txt");
                    if(!txtDumpFile.exists()) {
                        logger.error("Text dump file " + txtDumpFile + " did not exist");
                        continue;
                    }
                    else {
                        File UNFINISHED_FILE = new File(siteDir, "UNFINISHED");

                        String siteID = siteDir.getName();
                        if(siteID.contains("_")) {
                            logger.warn("*** Skipping site " + siteID + " as its dir name indicates it wasn't crawled properly.");
                            continue;
                        }

                        long lastModified = siteDir.lastModified();
                        logger.debug("@@@ Processing siteID: " + siteID);
                        NutchTextDumpToMongoDB nutchTxtDump = new NutchTextDumpToMongoDB(
                            mongodb, mriTxtDetector,
                            siteID, txtDumpFile, lastModified, UNFINISHED_FILE.exists());
                        // now it's parsed all the web pages in the site's text dump

                        // Let's print stats on each web page's detected language being MRI or not
                        // and how many pages there were in the site in total.

                        //nutchTxtDump.printSiteStats();

                        nutchTxtDump.websiteDataToDB();
                    }
                }
            }

        } catch(Exception e) {
            // can get an exception when instantiating a NutchTextDumpToMongoDB instance
            // or with the CSV file
            logger.error(e.getMessage(), e);
        }
    }
}