source: other-projects/maori-lang-detection/src/org/greenstone/atea/NutchTextDumpToMongoDB.java@ 33657

Last change on this file since 33657 was 33657, checked in by ak19, 4 years ago

Some fixes after brief testing against 1/3 of the crawl. Restarted processing of crawledNode2 set of crawls.

File size: 14.2 KB
package org.greenstone.atea;

import java.io.*;
import java.lang.ArrayIndexOutOfBoundsException;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.csv.*;
import org.apache.log4j.Logger;

//import org.bson.types.ObjectId;

import org.greenstone.atea.morphia.*;

/**
 * Class to process the dump text files produced FOR EACH SITE (e.g. site "00001") that
 * Nutch has finished crawling and whose text has been dumped out to a file called dump.txt.
 * This reads in the dump.txt file contained in each site folder within the input folder.
 * (e.g. input folder "crawled" could contain folders 00001 to 01465, each containing a dump.txt.)
 * Each dump.txt could contain the text contents for an entire site, or for individual pages.
 * This class then uses class TextDumpPage to parse each webpage within a dump.txt,
 * which parses out the actual text body content of each webpage's section within a dump.txt.
 * Finally, MaoriTextDetector is run over that to determine whether the full body text is
 * likely to be in Maori or not.
 *
 * Potential issues: since a web page's text is dumped out by nutch with neither paragraph
 * nor even newline separators, it's hard to be sure that the entire page is in one language.
 * If a page is in multiple languages, there's no way to be sure there aren't promising Maori
 * language paragraphs in it when the majority, or the remainder, happens to be in English.
 *
 * So if we're looking for any paragraphs in Maori to store in a DB, perhaps it's better to run
 * MaoriTextDetector.isTextInMaori(BufferedReader reader) over two "lines" at a time,
 * instead of running it over the entire html body's text.
 *
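 * A minimal sketch of that two-lines-at-a-time idea (hypothetical, not implemented in
 * this class; it assumes the body text has already been split into a "lines" array):
 * <pre>
 *   // Test each consecutive pair of lines separately, so that a bilingual
 *   // page can still yield its Maori passages.
 *   for (int i = 0; i + 1 < lines.length; i += 2) {
 *       String pair = lines[i] + "\n" + lines[i + 1];
 *       BufferedReader pairReader = new BufferedReader(new StringReader(pair));
 *       if (maoriTxtDetector.isTextInMaori(pairReader)) {
 *           // store this pair of lines in the DB
 *       }
 *   }
 * </pre>
 *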
 * TO COMPILE OR RUN, FIRST DO:
 *    cd maori-lang-detection/apache-opennlp-1.9.1
 *    export OPENNLP_HOME=`pwd`
 *    cd maori-lang-detection/src
 *
 * TO COMPILE:
 *    maori-lang-detection/src$
 *       javac -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB.java
 *
 * TO RUN:
 *    maori-lang-detection/src$
 *       java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small
 *
 * or:
 *    java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small > ../crawled-small/bla.txt 2>&1
 *
 */
public class NutchTextDumpToMongoDB {
    static Logger logger = Logger.getLogger(org.greenstone.atea.NutchTextDumpToMongoDB.class.getName());

    static boolean DEBUG_MODE = true;

    /** Counter for number of sites.
     * Should be equal to the number of times the NutchTextDumpToMongoDB constructor
     * is called: once per site.
     */
    static private int SITE_COUNTER = 0;
    static private long WEBPAGE_COUNTER = 0;

    private final MaoriTextDetector maoriTxtDetector;
    private final MongoDBAccess mongodbAccess;

    public final String siteID;
    public final boolean siteCrawlUnfinished;
    /** When the crawl of the site terminated */
    public final long siteCrawledTimestamp;

    private int countOfWebPagesWithBodyText = 0;

    /** 2 letter country code */
    private String geoLocationCountryCode = null;
    /** Whether any URL on this site contains /mi(/) in its path */
    private boolean urlContainsLangCodeInPath = false;

    private String domainOfSite;
    private int numPagesInMRI = 0;

    /** keep a list to store the text of each page */
    private ArrayList<TextDumpPage> pages;
    private boolean isStartOfNewWebPageRecord(String prevLine, String line) {
        // The start of a new web page's record in nutch's text dump of an entire site
        // is denoted by a newline followed by a URL (protocol),
        // or by the very start of the file with a URL (protocol).
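        // Illustrative (assumed) sketch of the dump layout this detects; the actual
        // field lines and text markers are handled by TextDumpPage:
        //
        //   http://example.org/page1      <- start of record 1 (very start of file)
        //   ...metadata key:value lines and body text...
        //                                 <- empty line ends record 1
        //   http://example.org/page2      <- start of record 2 (follows an empty line)
        //   ...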
        return ((prevLine == null || prevLine.equals(""))
                && (line.startsWith("http://") || line.startsWith("https://")));
    }

    public void debugPageDump(StringBuilder pageDump) {
        if(DEBUG_MODE) {
            // START DEBUG
            logger.debug("__________________________________________");
            logger.debug("@@@ Found page entry: ");
            logger.debug("__________________________________________");
            logger.debug(pageDump.toString());
            logger.debug("------------------------------------------");
            // END DEBUG
        }
    }

    /** A NutchTextDumpToMongoDB processes the dump.txt for one site */
    public NutchTextDumpToMongoDB(MongoDBAccess mongodbAccess,
                                  MaoriTextDetector maoriTxtDetector, String siteID,
                                  File txtDumpFile, long lastModified, boolean siteCrawlUnfinished)
        throws IOException
    {
        // increment static counter of sites processed by a NutchTextDumpToMongoDB instance
        SITE_COUNTER++;

        // siteID is of the form %5d (e.g. 00020) and is just the name of a site folder
        this.siteID = siteID;
        this.siteCrawlUnfinished = siteCrawlUnfinished;
        this.siteCrawledTimestamp = lastModified;

        this.maoriTxtDetector = maoriTxtDetector;
        this.mongodbAccess = mongodbAccess;

        pages = new ArrayList<TextDumpPage>();

        String line = null;
        StringBuilder pageDump = null;
        try (
            BufferedReader reader = new BufferedReader(new FileReader(txtDumpFile));
        ) {

            boolean readingText = false;
            String prevLine = null;
            while((line = reader.readLine()) != null) { // readLine removes the newline separator
                line = line.trim();
                // Outside of a page's body text, an empty line marks the end of a page
                // in nutch's text dump of a site.
                // But note, there can be one or more empty lines between the start and
                // end markers of a page's text.

                if(isStartOfNewWebPageRecord(prevLine, line)) {

                    if(pageDump != null) { // in which case prevLine != null too
                        // finish old pageDump and begin new one

                        //debugPageDump(pageDump);

                        TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                        // parses the fields and body text of a webpage in nutch's txt dump of an entire site
                        //page.parseFields();
                        //page.getText();
                        pages.add(page);

                        // check each completed page's URL for /mi(/) here too, not just the
                        // final page's (which is checked after the loop), since we want to
                        // know if ANY of the site's pages contains the lang code
                        String url = page.getPageURL();
                        if(!this.urlContainsLangCodeInPath && (url.contains("/mi/") || url.endsWith("/mi"))) {
                            this.urlContainsLangCodeInPath = true;
                        }

                        pageDump = null;
                    }

                    // begin new webpage dump
                    pageDump = new StringBuilder();
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                else if(pageDump != null && !line.equals("")) {
                    // the null check guards against a dump.txt that doesn't begin with a URL line
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                // can throw away any empty lines between the text start and end markers

                prevLine = line;
            }

            // process the final webpage record:
            //debugPageDump(pageDump);

            if(pageDump == null) {
                logger.warn("siteID " + siteID + " had an empty dump.txt file. Reinspect site.");
            } else {
                TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                pages.add(page);
                pageDump = null;

                // for every site, we just need to work out if any of its pages
                // contains /mi(/) in its URL
                String url = page.getPageURL();
                if(!this.urlContainsLangCodeInPath && (url.contains("/mi/") || url.endsWith("/mi"))) {
                    this.urlContainsLangCodeInPath = true;
                }
            }

        } catch (IOException ioe) {
            logger.error("@@@@@@@@@ Error reading in nutch txtdump file " + txtDumpFile, ioe);
        }

        // Just do this once: get and store domain of site.
        // Passing true to get domain with protocol prefix.
        if(pages.size() > 0) {
            TextDumpPage firstPage = pages.get(0);
            String url = firstPage.getPageURL();
            this.domainOfSite = Utility.getDomainForURL(url, true);
        }
        else {
            this.domainOfSite = "UNKNOWN";
        }


        prepareSiteStats(mongodbAccess);
    }


    private void prepareSiteStats(MongoDBAccess mongodbAccess) throws IOException {

        TextDumpPage page = null;
        for(int i = 0; i < pages.size(); i++) {

            page = pages.get(i);

            String text = page.getPageText();

            if(text.equals("")) {
                // don't care about empty pages
                continue;
            }
            else {
                WEBPAGE_COUNTER++; // count of cumulative total of webpages for all sites
                countOfWebPagesWithBodyText++; // of this site alone

                boolean isMRI = maoriTxtDetector.isTextInMaori(text);
                if(isMRI) {
                    numPagesInMRI++;
                }

                String[] sentences = maoriTxtDetector.getAllSentences(text);
                int totalSentences = sentences.length;
                int numSentencesInMRI = 0;
                ArrayList<SentenceInfo> singleSentences = maoriTxtDetector.getAllSentencesInfo(sentences);
                ArrayList<SentenceInfo> overlappingSentences = maoriTxtDetector.getAllOverlappingSentencesInfo(sentences);

                WebpageInfo webpage = page.convertStoredDataToWebpageInfo(WEBPAGE_COUNTER/*new ObjectId()*/,
                                                                          this.siteID/*SITE_COUNTER*/,
                                                                          isMRI,
                                                                          totalSentences,
                                                                          singleSentences,
                                                                          overlappingSentences);

                for(SentenceInfo si : singleSentences) {
                    if(si.langCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                        numSentencesInMRI++;
                    }
                }
                webpage.setMRISentenceCount(numSentencesInMRI);
                webpage.setContainsMRI((numSentencesInMRI > 0));

                //mongodbAccess.insertWebpageInfo(webpage);
                mongodbAccess.datastore.save(webpage);
            }
        }
    }

    /*
    public void printSiteStats() {


        logger.info("------------- " + this.siteID + " SITE STATS -----------");

        logger.info("SITE DOMAIN: " + this.domainOfSite);
        logger.info("Total number of web pages in site: " + pages.size());
        logger.info("Of these, the number of pages in Māori (mri) were: " + this.pagesInMRI.size());

        if(pagesInMRI.size() > 0) {
            logger.info("The following were the pages detected by OpenNLP as being in Māori with " + maoriTxtDetector.MINIMUM_CONFIDENCE + " confidence");
            for(MRIWebPageStats mriWebPageInfo : pagesInMRI) {
                logger.info(mriWebPageInfo.toString());
            }
        }

        logger.info(" ----------- ");
        if(pagesContainingMRI.size() > 0) {
            logger.info("The following pages weren't detected as primarily being in Māori");
            logger.info("But still contained sentences detected as Māori");
            for(MRIWebPageStats mriWebPageInfo : pagesContainingMRI) {
                logger.info(mriWebPageInfo.toString());
            }

        } else {
            logger.info("No further pages detected as containing any sentences in MRI");
        }
        logger.info(" ----------- ");
    }
    */


    public void websiteDataToDB() {


        // https://stackoverflow.com/questions/35183146/how-can-i-create-a-java-8-localdate-from-a-long-epoch-time-in-milliseconds
        // LocalDateTime date =
        //     LocalDateTime.ofInstant(Instant.ofEpochMilli(this.siteCrawledTimestamp), ZoneId.systemDefault());
        // String crawlTimestamp =
        //     date.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + " " + date.format(DateTimeFormatter.ofPattern("HH:mm:ss"));

        boolean redoCrawl = false;

        if(this.siteCrawlUnfinished) {
            // arbitrary decision, but we need some indication that the MRI content
            // wasn't just a one-off in the website
            if(this.numPagesInMRI > 2) {
                redoCrawl = true;
            }
        }

        File geoLiteCityDatFile = new File(this.getClass().getClassLoader().getResource("GeoLiteCity.dat").getFile());
        try {
            if(this.domainOfSite.equals("UNKNOWN")) {
                this.geoLocationCountryCode = "UNKNOWN";
            } else {
                this.geoLocationCountryCode = Utility.getCountryCodeOfDomain(this.domainOfSite, geoLiteCityDatFile);
            }
        } catch(Exception e) {
            logger.error("*** For SiteID " + siteID + ", got exception: " + e.getMessage(), e);
            this.geoLocationCountryCode = null;
        }

        int totalPages = pages.size();

        WebsiteInfo website = new WebsiteInfo(/*SITE_COUNTER,*/ this.siteID, this.domainOfSite,
                                              totalPages, this.countOfWebPagesWithBodyText, this.numPagesInMRI,
                                              this.siteCrawledTimestamp, this.siteCrawlUnfinished, redoCrawl,
                                              this.geoLocationCountryCode, this.urlContainsLangCodeInPath);

        //mongodbAccess.insertWebsiteInfo(website);
        mongodbAccess.datastore.save(website);
    }
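
    // For reference, a rough sketch of the kind of document the datastore.save(website)
    // call above might produce, assuming field names mirror the WebsiteInfo constructor
    // parameters (the actual names depend on WebsiteInfo's Morphia annotations):
    //   {
    //     "siteID": "00001",
    //     "domain": "https://example.org",
    //     "totalPages": 42,
    //     "countOfWebPagesWithBodyText": 40,
    //     "numPagesInMRI": 3,
    //     "siteCrawledTimestamp": 1575072000000,
    //     "siteCrawlUnfinished": false,
    //     "redoCrawl": false,
    //     "geoLocationCountryCode": "NZ",
    //     "urlContainsLangCodeInPath": true
    //   }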


    // --------------- STATIC METHODS AND INNER CLASSES USED BY MAIN --------------- //

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tNutchTextDumpToMongoDB <path to 'crawled' folder>");
    }

    public static void main(String[] args) {
        if(args.length != 1) {
            printUsage();
            return;
        }

        File sitesDir = new File(args[0]);
        if(!sitesDir.exists() || !sitesDir.isDirectory()) {
            logger.error("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        NutchTextDumpToMongoDB.DEBUG_MODE = false;


        try (
            MongoDBAccess mongodb = new MongoDBAccess();
        ) {

            mongodb.connectToDB();
            //mongodb.showCollections();

            // print out the column headers for the websites csv file
            // https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVPrinter.html
            // OPTIONAL TODO: creating collections can be done here if dropping and recreating

            MaoriTextDetector mriTxtDetector = new MaoriTextDetector(true); // true: run silent
            File[] sites = sitesDir.listFiles();

            // sort site folders in alphabetical order
            // https://stackoverflow.com/questions/7199911/how-to-file-listfiles-in-alphabetical-order
            Arrays.sort(sites);

            for(File siteDir : sites) { // e.g. 00001
                if(siteDir.isDirectory()) {
                    // look for dump.txt
                    File txtDumpFile = new File(siteDir, "dump.txt");
                    if(!txtDumpFile.exists()) {
                        logger.error("Text dump file " + txtDumpFile + " did not exist");
                        continue;
                    }

                    else {
                        File UNFINISHED_FILE = new File(siteDir, "UNFINISHED");

                        String siteID = siteDir.getName();
                        if(siteID.contains("_")) {
                            logger.warn("*** Skipping site " + siteID + " as its dir name indicates it wasn't crawled properly.");
                            continue;
                        }

                        long lastModified = siteDir.lastModified();
                        logger.debug("@@@ Processing siteID: " + siteID);
                        NutchTextDumpToMongoDB nutchTxtDump = new NutchTextDumpToMongoDB(
                            mongodb, mriTxtDetector,
                            siteID, txtDumpFile, lastModified, UNFINISHED_FILE.exists());
                        // now it's parsed all the web pages in the site's text dump

                        // Let's print stats on each web page's detected language being MRI or not
                        // and how many pages there were in the site in total.

                        //nutchTxtDump.printSiteStats();

                        nutchTxtDump.websiteDataToDB();
                    }
                }

            }

        } catch(Exception e) {
            // can get an exception when instantiating a NutchTextDumpToMongoDB instance
            // or with the CSV file
            logger.error(e.getMessage(), e);
        }
    }
}