source: other-projects/maori-lang-detection/src/org/greenstone/atea/NutchTextDumpToMongoDB.java

Last change on this file was 34005, checked in by ak19, 4 years ago

InfoOnEmptyPagesNotInMongoDB.txt is now written out as a file of its own, instead of redirecting all System.err output into a file. It's also now a CSV file with additional information besides the URL: the (fetch) status, protocolStatus and parseStatus.

File size: 17.4 KB
package org.greenstone.atea;

import java.io.*;
import java.lang.ArrayIndexOutOfBoundsException;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.csv.*;
import org.apache.log4j.Logger;

//import org.bson.types.ObjectId;

import org.greenstone.atea.morphia.*;

/**
 * Class to process the dump text files produced FOR EACH SITE (e.g. site "00001") that
 * Nutch has finished crawling and whose text has been dumped out to a file called dump.txt.
 * This reads in the dump.txt file contained in each site folder within the input folder.
 * (e.g. input folder "crawled" could contain folders 00001 to 01465. Each contains a dump.txt)
 * Each dump.txt could contain the text contents for an entire site, or for individual pages.
 * This class then uses class TextDumpPage to parse each webpage within a dump.txt,
 * which parses out the actual text body content of each webpage's section within a dump.txt.
 * Finally, MaoriTextDetector is run over that to determine whether the full body text is
 * likely to be in Maori or not.
 *
 * Potential issues: since a web page's text is dumped out by nutch with neither paragraph
 * nor even newline separators, it's hard to be sure that the entire page is in a single
 * language. If a page is in multiple languages, there's no way to be sure it doesn't contain
 * promising Maori language paragraphs when the majority/the remainder happen to be in English.
 *
 * So if we're looking for any paragraphs in Maori to store in a DB, perhaps it's better to run
 * the MaoriTextDetector.isTextInMaori(BufferedReader reader) over two "lines" at a time,
 * instead of running it over the entire html body's text. (See the illustrative sketch in
 * anyLinePairInMaori() below.)
 *
 * TO COMPILE OR RUN, FIRST DO:
 *     cd maori-lang-detection/apache-opennlp-1.9.1
 *     export OPENNLP_HOME=`pwd`
 *     cd maori-lang-detection/src
 *
 * MORE IMPORTANT PRELIMINARIES:
 * - Make sure the MongoDB is up and running and accessible.
 * - If you want to keep any existing MongoDB collections called Websites and Webpages, then
 *   first rename those collections in MongoDB (using Robo3T makes renaming easy) before
 *   running this program.
 *
 * TO COMPILE:
 *     maori-lang-detection/src$
 *     javac -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB.java
 *
 * TO RUN:
 *     maori-lang-detection/src$
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small
 *
 * or:
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small > ../crawled-small/bla.txt 2>&1
 */
public class NutchTextDumpToMongoDB {
    static Logger logger = Logger.getLogger(org.greenstone.atea.NutchTextDumpToMongoDB.class.getName());

    static boolean DEBUG_MODE = true; // this is set to false in main() at the end of this class

    /** Counter for the number of sites.
     * Should be equal to the number of times the NutchTextDumpToMongoDB constructor
     * is called: once per site.
     */
    static private int SITE_COUNTER = 0;
    static private long WEBPAGE_COUNTER = 0;

    private final MaoriTextDetector maoriTxtDetector;
    private final MongoDBAccess mongodbAccess;

    public final String siteID;
    public final boolean siteCrawlUnfinished;
    public final long siteCrawledTimestamp; /** When the crawl of the site terminated */

    // private handle to a csv writer
    private CSVPrinter emptyWebPageInfoCSVPrinter;

    private int countOfWebPagesWithBodyText = 0;

    private String geoLocationCountryCode = null; /** 2 letter country code */
    private boolean urlContainsLangCodeInPath = false; /** If any URL on this site contains a /mi(/) or http(s)://mi.* in its URL path */

    private String domainOfSite;
    //private String baseSiteDomain; // domainOfSite stripped of any http(s)://www.
    private int numPagesInMRI = 0;
    private int numPagesContainingMRI = 0;

    /** keep a list to store the text of each page */
    private ArrayList<TextDumpPage> pages;


    /** Number of language and confidence results to return for storing in MongoDB.
     * MongoDB runs out of space if we store too many, as we store this info per sentence,
     * and a long text document then presumably becomes a very large MongoDB document. */
    private static final int NUM_TOP_LANGUAGES = 3; // 103 max, in current version of opennlp lang model

    private boolean isStartOfNewWebPageRecord(String prevLine, String line) {
        // The start of a new web page's record in nutch's text dump of an entire site
        // is denoted by a newline followed by a URL (protocol),
        // or by the very start of the file with a URL (protocol).
        return ((prevLine == null || prevLine.equals(""))
                && (line.startsWith("http://") || line.startsWith("https://")));
    }

    public void debugPageDump(StringBuilder pageDump) {
        if(DEBUG_MODE) {
            // START DEBUG
            logger.debug("__________________________________________");
            logger.debug("@@@ Found page entry: ");
            logger.debug("__________________________________________");
            logger.debug(pageDump.toString());
            logger.debug("------------------------------------------");
            // END DEBUG
        }
    }
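
    /* A minimal sketch of the idea raised in the class comment above: run
     * MaoriTextDetector.isTextInMaori(BufferedReader) over two "lines" at a time instead of
     * over the entire body text, so that Maori paragraphs on a mostly-English page are not
     * drowned out. Purely illustrative and not called anywhere: the method name is
     * hypothetical, and it assumes the body text passed in still contains some newlines.
     * (Single-line pages would need the plain isTextInMaori(String) overload used below.) */
    private boolean anyLinePairInMaori(String bodyText) throws IOException {
        String[] lines = bodyText.split("\n");
        for(int i = 0; i + 1 < lines.length; i++) {
            String linePair = lines[i] + "\n" + lines[i+1];
            // isTextInMaori(BufferedReader) is the overload mentioned in the class comment
            if(maoriTxtDetector.isTextInMaori(new BufferedReader(new StringReader(linePair)))) {
                return true;
            }
        }
        return false;
    }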

    /** A NutchTextDumpToMongoDB processes the dump.txt for one site */
    public NutchTextDumpToMongoDB(MongoDBAccess mongodbAccess, CSVPrinter emptyWebPageInfoCSVPrinter,
                                  MaoriTextDetector maoriTxtDetector, String siteID,
                                  File txtDumpFile, long lastModified, boolean siteCrawlUnfinished)
        throws IOException
    {
        // increment static counter of sites processed by a NutchTextDumpToMongoDB instance
        SITE_COUNTER++;

        // keep a handle to the csv file writer
        this.emptyWebPageInfoCSVPrinter = emptyWebPageInfoCSVPrinter;

        // siteID is of the form %5d (e.g. 00020) and is just the name of a site folder
        this.siteID = siteID;
        this.siteCrawlUnfinished = siteCrawlUnfinished;
        this.siteCrawledTimestamp = lastModified;

        this.maoriTxtDetector = maoriTxtDetector;
        this.mongodbAccess = mongodbAccess;

        pages = new ArrayList<TextDumpPage>();

        String line = null;
        StringBuilder pageDump = null;
        try (
            BufferedReader reader = new BufferedReader(new FileReader(txtDumpFile));
        ) {

            boolean readingText = false;
            String prevLine = null;

            while((line = reader.readLine()) != null) { // readLine removes the newline separator
                line = line.trim();
                // iff outside of a page's body text, an empty line marks the end of a page
                // in nutch's text dump of a site.
                // Note, though, that there can be one or more empty lines between the start
                // and end markers of a page's text.

                if(isStartOfNewWebPageRecord(prevLine, line)) {

                    if(pageDump != null) { // should also be the case then: if(prevLine != null)
                        // finish old pageDump and begin new one

                        //debugPageDump(pageDump);

                        TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                        // parses the fields and body text of a webpage in nutch's txt dump of the entire site
                        //page.parseFields();
                        //page.getText();
                        pages.add(page);
                        inspectPageURLPath(page);
                        pageDump = null;

                    }

                    // begin new webpage dump
                    pageDump = new StringBuilder();
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                else if(pageDump != null && !line.equals("")) {
                    // (null guard added: skip any stray non-empty lines preceding the first
                    // URL record, which would otherwise cause a NullPointerException)
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                // can throw away any newlines between text start and end markers.

                prevLine = line;
            }

            // process final webpage record:
            //debugPageDump(pageDump);

            if(pageDump == null) {
                logger.warn("siteID " + siteID + " had an empty dump.txt file. Reinspect site.");
            } else {
                TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                pages.add(page);
                inspectPageURLPath(page);
                pageDump = null;
            }


        } catch (IOException ioe) {
            logger.error("@@@@@@@@@ Error reading in nutch txtdump file " + txtDumpFile, ioe);
        }

        // Just do this once: get and store domain of site.
        // Passing true to get domain with protocol prefix
        if(pages.size() > 0) {
            TextDumpPage firstPage = pages.get(0);
            String url = firstPage.getPageURL();
            this.domainOfSite = Utility.getDomainForURL(url, true);
            //this.baseSiteDomain = Utility.stripProtocolAndWWWFromURL(this.domainOfSite);
        }
        else {
            this.domainOfSite = "UNKNOWN";
            //this.baseSiteDomain = "UNKNOWN";
        }

        /* No need to loop again through all pages. Instead, just inspectPageURLPath() as each page is created above.
        // For any site, we just need to work out if any of its pages contains /mi(/) or http(s)://mi.* in its URL path
        for(TextDumpPage aPage : pages) {
            inspectPageURLPath(aPage);
        }
        */
        webPageDataToMongoDB(mongodbAccess);
    }


    /** For every site, we just need to work out if any of its pages contains /mi(/) or http(s)://mi. in its URL.
     * This method is called on each page of a site as the page is created. */
    private void inspectPageURLPath(TextDumpPage page) {
        String url = page.getPageURL();
        //logger.debug("@@@@ pageURL: " + url);

        if(!this.urlContainsLangCodeInPath) { // if not already set to true for any previous page in this site,
            // check if this page of the site contains /mi(/) or http(s)://mi in its URL path
            if(url.contains("/mi/") || url.endsWith("/mi") || url.startsWith("https://mi.") || url.startsWith("http://mi.")) {
                this.urlContainsLangCodeInPath = true;
            }
        }
    }
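
    /* The same check as in inspectPageURLPath() above, expressed as a single regex for
     * comparison. A sketch only: this field is unused and its name is hypothetical.
     * MI_URL_PATTERN.matcher(url).find() is true for e.g. https://mi.wikipedia.org/ or
     * https://example.org/mi/page (and for URLs ending in /mi), but not for
     * https://example.org/mio/page. */
    private static final java.util.regex.Pattern MI_URL_PATTERN =
        java.util.regex.Pattern.compile("^https?://mi\\.|/mi(/|$)");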


    private void webPageDataToMongoDB(MongoDBAccess mongodbAccess) throws IOException {

        TextDumpPage page = null;
        for(int i = 0; i < pages.size(); i++) {

            page = pages.get(i);

            String text = page.getPageText();

            if(text.equals("")) {
                System.err.println(siteID + ",Empty page " + i + "," + page.getPageURL()
                        + "," + page.get("status")
                        + "," + page.get("protocolStatus")
                        + "," + page.get("parseStatus"));
                // write information about any empty web page into the emptyPage csv file
                emptyWebPageInfoCSVPrinter.printRecord(siteID, i, page.getPageURL(),
                        page.get("status"), page.get("protocolStatus"), page.get("parseStatus"));

                // don't care about empty pages
                continue;
            }
            else {
                WEBPAGE_COUNTER++; // count of cumulative total of webpages for all sites
                countOfWebPagesWithBodyText++; // of this site alone

                boolean isMRI = maoriTxtDetector.isTextInMaori(text);
                if(isMRI) {
                    numPagesInMRI++;
                }

                String[] sentences = maoriTxtDetector.getAllSentences(text);
                int totalSentences = sentences.length;
                int numSentencesInMRI = 0;
                ArrayList<SentenceInfo> singleSentences = maoriTxtDetector.getAllSentencesInfo(sentences, NUM_TOP_LANGUAGES);
                ArrayList<SentenceInfo> overlappingSentences = maoriTxtDetector.getAllOverlappingSentencesInfo(sentences, NUM_TOP_LANGUAGES);

                WebpageInfo webpage = page.convertStoredDataToWebpageInfo(WEBPAGE_COUNTER/*new ObjectId()*/,
                        this.siteID/*SITE_COUNTER*/,
                        isMRI,
                        totalSentences,
                        singleSentences,
                        overlappingSentences);


                for(SentenceInfo si : singleSentences) {
                    //LanguageInfo bestLanguage = si.languagesInfo[0];
                    //if(bestLanguage.langCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                    if(si.bestLangCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                        numSentencesInMRI++;
                    }
                }


                webpage.setMRISentenceCount(numSentencesInMRI);
                webpage.setContainsMRI((numSentencesInMRI > 0));
                if(numSentencesInMRI > 0) { // if(numSentencesInMRI >= 5) {
                    // Not sure that we can trust a single sentence detected as Maori on a page to really be Maori,
                    // but if at least 5 sentences are detected as Maori, the page is more likely to really contain MRI?
                    numPagesContainingMRI++;
                }
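
                // A sketch of how the >= 5 sentence idea in the comment above could be made
                // explicit with a named threshold (the constant name and its value of 5 are
                // hypothetical, taken from the commented-out alternative condition):
                //     private static final int MIN_MRI_SENTENCES = 5;
                //     webpage.setContainsMRI(numSentencesInMRI >= MIN_MRI_SENTENCES);
                //     if(numSentencesInMRI >= MIN_MRI_SENTENCES) { numPagesContainingMRI++; }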

                //mongodbAccess.insertWebpageInfo(webpage);
                // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
                mongodbAccess.datastore.save(webpage);
            }
        }
    }


    public void websiteDataToDB() {


        // https://stackoverflow.com/questions/35183146/how-can-i-create-a-java-8-localdate-from-a-long-epoch-time-in-milliseconds
        // LocalDateTime date =
        //     LocalDateTime.ofInstant(Instant.ofEpochMilli(this.siteCrawledTimestamp), ZoneId.systemDefault());
        // String crawlTimestamp =
        //     date.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + " " + date.format(DateTimeFormatter.ofPattern("HH:mm:ss"));

        boolean redoCrawl = false;

        if(this.siteCrawlUnfinished) {
            // arbitrary decision, but we need some indication that the MRI content was not just a one-off in the website
            if(this.numPagesInMRI > 2) {
                redoCrawl = true;
            }
        }
        File geoLiteCityDatFile = new File(this.getClass().getClassLoader().getResource("GeoLiteCity.dat").getFile());
        try {
            if(this.domainOfSite.equals("UNKNOWN")) { // for sites that had 0 webpages downloaded, we have no domain
                this.geoLocationCountryCode = "UNKNOWN";
            } else {
                this.geoLocationCountryCode = Utility.getCountryCodeOfDomain(this.domainOfSite, geoLiteCityDatFile);
            }
        } catch(Exception e) {
            logger.error("*** For SiteID " + siteID + ", got exception: " + e.getMessage(), e);

            //if(this.domainOfSite.endsWith(".nz")) { // nz TLDs are worth counting
            //    this.geoLocationCountryCode = "NZ";
            //}

            // Help along identification of the domain's country by construing a TLD from the last 2 letters after the final period mark
            int periodIndex = domainOfSite.length()-3;
            // .com|org etc extensions that have 3 chars after the period mark will remain unknown
            // 2 letter extensions will be considered TLDs
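            // Worked example for the fallback heuristic below (the domains are hypothetical):
            //     "http://example.nz"  -> charAt(length-3) is '.' -> 2 letter TLD "nz" -> country code "NZ"
            //     "http://example.com" -> charAt(length-3) is 'c' -> no 2 letter TLD   -> "UNKNOWN"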
            if(periodIndex >= 0 && domainOfSite.charAt(periodIndex) == '.' && ((periodIndex+1) < domainOfSite.length())) {
                // has a 2 letter TLD. Make it uppercase to match the return value of Utility.getCountryCodeOfDomain() above
                String TLD = domainOfSite.substring(periodIndex+1);
                this.geoLocationCountryCode = TLD.toUpperCase();
            } else {
                this.geoLocationCountryCode = "UNKNOWN"; // couldn't get the country code, so it should also be UNKNOWN, not null
            }
        }

        int totalPages = pages.size();

        WebsiteInfo website = new WebsiteInfo(/*SITE_COUNTER,*/ this.siteID,
                this.domainOfSite, //this.baseSiteDomain,
                totalPages, this.countOfWebPagesWithBodyText,
                this.numPagesInMRI, this.numPagesContainingMRI,
                this.siteCrawledTimestamp, this.siteCrawlUnfinished, redoCrawl,
                this.geoLocationCountryCode, this.urlContainsLangCodeInPath);

        //mongodbAccess.insertWebsiteInfo(website);
        // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
        mongodbAccess.datastore.save(website);
    }


    // --------------- STATIC METHODS AND INNER CLASSES USED BY MAIN -------------- //

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tNutchTextDumpToMongoDB <path to 'crawled' folder>");
    }

    public static void main(String[] args) {
        if(args.length != 1) {
            printUsage();
            return;
        }

        File sitesDir = new File(args[0]);
        if(!sitesDir.exists() || !sitesDir.isDirectory()) {
            logger.error("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        NutchTextDumpToMongoDB.DEBUG_MODE = false;

        try (
            MongoDBAccess mongodb = new MongoDBAccess();
            CSVPrinter emptyWebPageInfoCSVPrinter = new CSVPrinter(new FileWriter("InfoOnEmptyPagesNotInMongoDB.csv"), CSVFormat.DEFAULT.withQuoteMode(QuoteMode.MINIMAL));
        ) {

            mongodb.connectToDB();
            //mongodb.showCollections();

            // write out csv column headings into the csv file on empty web pages
            emptyWebPageInfoCSVPrinter.printRecord("siteID","pagenum","URL","(fetch)status","protocolStatus","parseStatus");
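            // A record later written for an empty page then has the shape (values here are made up):
            //     00001,3,https://example.org/somepage.html,<fetch status>,<protocolStatus>,<parseStatus>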

            // print out the column headers for the websites csv file
            // https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVPrinter.html
            // OPTIONAL TODO: creating collections can be done here if dropping and recreating

            MaoriTextDetector mriTxtDetector = new MaoriTextDetector(true); // true: run silent
            File[] sites = sitesDir.listFiles();

            // sort site folders in alphabetical order
            // https://stackoverflow.com/questions/7199911/how-to-file-listfiles-in-alphabetical-order
            Arrays.sort(sites);

            for(File siteDir : sites) { // e.g. 00001
                if(siteDir.isDirectory()) {
                    // look for dump.txt
                    File txtDumpFile = new File(siteDir, "dump.txt");
                    if(!txtDumpFile.exists()) {
                        logger.error("Text dump file " + txtDumpFile + " did not exist");
                        continue;
                    }
                    else {
                        File UNFINISHED_FILE = new File(siteDir, "UNFINISHED");

                        String siteID = siteDir.getName();
                        if(siteID.contains("_")) {
                            logger.warn("*** Skipping site " + siteID + " as its dir name indicates it wasn't crawled properly.");
                            continue;
                        }

                        long lastModified = siteDir.lastModified();
                        logger.debug("@@@ Processing siteID: " + siteID);
                        NutchTextDumpToMongoDB nutchTxtDump = new NutchTextDumpToMongoDB(
                            mongodb, emptyWebPageInfoCSVPrinter, mriTxtDetector,
                            siteID, txtDumpFile, lastModified, UNFINISHED_FILE.exists());
                        // now it's parsed all the web pages in the site's text dump

                        // Let's print stats on each web page's detected language being MRI or not
                        // and how many pages there were in the site in total.

                        //nutchTxtDump.printSiteStats();

                        nutchTxtDump.websiteDataToDB();
                    }
                }

            }

        } catch(Exception e) {
            // can get an exception when instantiating a NutchTextDumpToMongoDB instance
            // or with the CSV file
            logger.error(e.getMessage(), e);
        }
    }
}