package org.greenstone.atea;

import java.io.*;
import java.lang.ArrayIndexOutOfBoundsException;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.csv.*;
import org.apache.log4j.Logger;

//import org.bson.types.ObjectId;

import org.greenstone.atea.morphia.*;


/**
 * Class to process the dump text files produced FOR EACH SITE (e.g. site "00001") that
 * Nutch has finished crawling and whose text has been dumped out to a file called dump.txt.
 * This reads in the dump.txt file contained in each site folder within the input folder
 * (e.g. input folder "crawled" could contain folders 00001 to 01465, each containing a
 * dump.txt; see the layout sketch below).
 * Each dump.txt could contain the text contents for an entire site, or for individual pages.
 * This class then uses class TextDumpPage to parse each webpage within a dump.txt,
 * extracting the actual body text content of each webpage's section of the dump.
 * Finally, MaoriTextDetector is run over that body text to determine whether it is
 * likely to be in Maori or not.
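 *
 * For example, an input layout like the following is assumed (illustrative sketch only):
 *   crawled/
 *     00001/dump.txt
 *     00002/dump.txt
 *     ...
 *     01465/dump.txt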
 *
 * Potential issues: since a web page's text is dumped out by nutch with neither paragraph
 * nor even newline separators, it's hard to be sure that the entire page is in one language.
 * If a page is in multiple languages, there's no way to be sure we aren't missing promising
 * Maori-language paragraphs when the majority or remainder of the page happens to be in English.
 *
 * So if we're looking for any paragraphs in Maori to store in a DB, perhaps it's better to run
 * MaoriTextDetector.isTextInMaori(BufferedReader reader) over two "lines" (sentences) at a time,
 * instead of running it over the entire html body's text.
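 * A minimal sketch of that idea (hypothetical; not what this class currently does,
 * and it assumes a detector instance and one page's text string):
 *
 *   String[] sentences = maoriTxtDetector.getAllSentences(text);
 *   for(int i = 0; i < sentences.length - 1; i++) {
 *       String sentencePair = sentences[i] + " " + sentences[i+1];
 *       BufferedReader reader = new BufferedReader(new StringReader(sentencePair));
 *       if(maoriTxtDetector.isTextInMaori(reader)) {
 *           // found a promising pair of adjacent sentences to store in the DB
 *       }
 *   }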
 *
 * TO COMPILE OR RUN, FIRST DO:
 *     cd maori-lang-detection/apache-opennlp-1.9.1
 *     export OPENNLP_HOME=`pwd`
 *     cd maori-lang-detection/src
 *
 * TO COMPILE:
 *     maori-lang-detection/src$
 *         javac -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB.java
 *
 * TO RUN:
 *     maori-lang-detection/src$
 *         java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small
 *
 * or:
 *     java -cp ".:../conf:../lib/*:$OPENNLP_HOME/lib/opennlp-tools-1.9.1.jar" org/greenstone/atea/NutchTextDumpToMongoDB ../crawled-small > ../crawled-small/bla.txt 2>&1
 *
 */
public class NutchTextDumpToMongoDB {
    static Logger logger = Logger.getLogger(org.greenstone.atea.NutchTextDumpToMongoDB.class.getName());

    static boolean DEBUG_MODE = true;

    /** Counter for number of sites.
     * Should be equal to the number of times the NutchTextDumpToMongoDB constructor
     * is called: once per site.
     */
    static private int SITE_COUNTER = 0;
    static private long WEBPAGE_COUNTER = 0;

    private final MaoriTextDetector maoriTxtDetector;
    private final MongoDBAccess mongodbAccess;

    public final String siteID;
    public final boolean siteCrawlUnfinished;
    public final long siteCrawledTimestamp; /** When the crawl of the site terminated */

    private int countOfWebPagesWithBodyText = 0;

    private String geoLocationCountryCode = null; /** 2 letter country code */
    private boolean urlContainsLangCodeInPath = false; /** whether any URL on this site contains /mi(/) in its path */

    private String domainOfSite;
    private int numPagesInMRI = 0;
    private int numPagesContainingMRI = 0;

    /** keep a list to store the text of each page */
    private ArrayList<TextDumpPage> pages;



    /** Number of language and confidence results to return for storing in MongoDB.
     * MongoDB runs out of space if we store too many, as we store this info per sentence,
     * and a long text document then presumably becomes a very large MongoDB document. */
    private static final int NUM_TOP_LANGUAGES = 3; // 103 max, in current version of opennlp lang model
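    // e.g. with NUM_TOP_LANGUAGES = 3, each sentence's stored info would carry its top 3
    // detected (language, confidence) pairs, such as mri/eng/... (illustrative; see SentenceInfo)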


    private boolean isStartOfNewWebPageRecord(String prevLine, String line) {
        // The start of a new web page's record in nutch's text dump of an entire site
        // is denoted by a newline followed by a URL (protocol),
        // or by the very start of the file beginning with a URL (protocol).
        return ((prevLine == null || prevLine.equals(""))
                && (line.startsWith("http://") || line.startsWith("https://")));
    }

    public void debugPageDump(StringBuilder pageDump) {
        if(DEBUG_MODE) {
            // START DEBUG
            logger.debug("__________________________________________");
            logger.debug("@@@ Found page entry: ");
            logger.debug("__________________________________________");
            logger.debug(pageDump.toString());
            logger.debug("------------------------------------------");
            // END DEBUG
        }
    }

    /** A NutchTextDumpToMongoDB instance processes the dump.txt for one site. */
    public NutchTextDumpToMongoDB(MongoDBAccess mongodbAccess,
                                  MaoriTextDetector maoriTxtDetector, String siteID,
                                  File txtDumpFile, long lastModified, boolean siteCrawlUnfinished)
        throws IOException
    {
        // increment static counter of sites processed by a NutchTextDumpToMongoDB instance
        SITE_COUNTER++;

        // siteID is of the form %5d (e.g. 00020) and is just the name of a site folder
        this.siteID = siteID;
        this.siteCrawlUnfinished = siteCrawlUnfinished;
        this.siteCrawledTimestamp = lastModified;

        this.maoriTxtDetector = maoriTxtDetector;
        this.mongodbAccess = mongodbAccess;

        pages = new ArrayList<TextDumpPage>();

        String line = null;
        StringBuilder pageDump = null;
        try (
            BufferedReader reader = new BufferedReader(new FileReader(txtDumpFile));
        ) {

            boolean readingText = false;
            String prevLine = null;

            while((line = reader.readLine()) != null) { // readLine removes the newline separator
                line = line.trim();
                // iff outside of a page's body text, an empty line marks the end of a page
                // in nutch's text dump of a site.
                // But note, there can be an empty line (or more?) between the start and end
                // markers of a page's text, though.
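                // Illustrative layout of one record in dump.txt (the exact field lines and
                // text markers vary; TextDumpPage does the real parsing):
                //   http://site-00001.example/index.html   <- record begins with its URL
                //   <metadata field lines>
                //   <text start marker>
                //   ...body text, possibly containing empty lines...
                //   <text end marker>
                //                                          <- empty line ends the record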

                if(isStartOfNewWebPageRecord(prevLine, line)) {

                    if(pageDump != null) { // should also be the case then: if(prevLine != null)
                        // finish old pageDump and begin new one

                        //debugPageDump(pageDump);

                        TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                        // parses the fields and body text of a webpage in nutch's txt dump of the entire site
                        //page.parseFields();
                        //page.getText();
                        pages.add(page);
                        pageDump = null;

                        // if any page of this site contains /mi(/) in its URL, note that fact
                        String pageURL = page.getPageURL();
                        if(!this.urlContainsLangCodeInPath && (pageURL.contains("/mi/") || pageURL.endsWith("/mi"))) {
                            this.urlContainsLangCodeInPath = true;
                        }
                    }

                    // begin new webpage dump
                    pageDump = new StringBuilder();
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                else if(pageDump != null && !line.equals("")) { // null guard in case dump.txt doesn't start with a URL
                    pageDump.append(line);
                    pageDump.append("\n");

                }
                // can throw away any newlines between text start and end markers.

                prevLine = line;
            }

            // process final webpage record:
            //debugPageDump(pageDump);

            if(pageDump == null) {
                logger.warn("siteID " + siteID + " had an empty dump.txt file. Reinspect site.");
            } else {
                TextDumpPage page = new TextDumpPage(siteID, pageDump.toString());
                pages.add(page);
                pageDump = null;

                // as in the loop above, we just need to work out if any of this site's pages
                // (here, the final page) contains /mi(/) in its URL
                String url = page.getPageURL();
                if(!this.urlContainsLangCodeInPath && (url.contains("/mi/") || url.endsWith("/mi"))) {
                    this.urlContainsLangCodeInPath = true;
                }
            }

        } catch (IOException ioe) {
            logger.error("@@@@@@@@@ Error reading in nutch txtdump file " + txtDumpFile, ioe);
        }

        // Just do this once: get and store the domain of the site.
        // Passing true to get the domain with its protocol prefix.
        if(pages.size() > 0) {
            TextDumpPage firstPage = pages.get(0);
            String url = firstPage.getPageURL();
            this.domainOfSite = Utility.getDomainForURL(url, true);
        }
        else {
            this.domainOfSite = "UNKNOWN";
        }


        prepareSiteStats(mongodbAccess);
    }


    private void prepareSiteStats(MongoDBAccess mongodbAccess) throws IOException {

        TextDumpPage page = null;
        for(int i = 0; i < pages.size(); i++) {

            page = pages.get(i);

            String text = page.getPageText();

            if(text.equals("")) {
                // don't care about empty pages
                continue;
            }
            else {
                WEBPAGE_COUNTER++; // count of cumulative total of webpages for all sites
                countOfWebPagesWithBodyText++; // of this site alone

                boolean isMRI = maoriTxtDetector.isTextInMaori(text);
                if(isMRI) {
                    numPagesInMRI++;
                }

                String[] sentences = maoriTxtDetector.getAllSentences(text);
                int totalSentences = sentences.length;
                int numSentencesInMRI = 0;
                ArrayList<SentenceInfo> singleSentences = maoriTxtDetector.getAllSentencesInfo(sentences, NUM_TOP_LANGUAGES);
                ArrayList<SentenceInfo> overlappingSentences = maoriTxtDetector.getAllOverlappingSentencesInfo(sentences, NUM_TOP_LANGUAGES);
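                // overlappingSentences presumably implements the class comment's idea of
                // running detection over two adjacent "lines" (sentences) at a time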

                WebpageInfo webpage = page.convertStoredDataToWebpageInfo(WEBPAGE_COUNTER/*new ObjectId()*/,
                                                                          this.siteID/*SITE_COUNTER*/,
                                                                          isMRI,
                                                                          totalSentences,
                                                                          singleSentences,
                                                                          overlappingSentences);


                for(SentenceInfo si : singleSentences) {
                    //LanguageInfo bestLanguage = si.languagesInfo[0];
                    //if(bestLanguage.langCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                    if(si.bestLangCode.equals(MaoriTextDetector.MAORI_3LETTER_CODE)) {
                        numSentencesInMRI++;
                    }
                }


                webpage.setMRISentenceCount(numSentencesInMRI);
                webpage.setContainsMRI((numSentencesInMRI > 0));
                if(numSentencesInMRI > 0) { // if(numSentencesInMRI >= 5) {
                    // Not sure if we can trust that a single sentence detected as Maori on a page really is Maori.
                    // But if at least 5 sentences are detected as Maori, the page is more likely to genuinely contain MRI?
                    numPagesContainingMRI++;
                }

                //mongodbAccess.insertWebpageInfo(webpage);
                // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
                mongodbAccess.datastore.save(webpage);
            }
        }
    }


    public void websiteDataToDB() {


        // https://stackoverflow.com/questions/35183146/how-can-i-create-a-java-8-localdate-from-a-long-epoch-time-in-milliseconds
        // LocalDateTime date =
        //     LocalDateTime.ofInstant(Instant.ofEpochMilli(this.siteCrawledTimestamp), ZoneId.systemDefault());
        // String crawlTimestamp =
        //     date.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + " " + date.format(DateTimeFormatter.ofPattern("HH:mm:ss"));
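        // (note: uncommenting the above would also need imports for java.time.Instant,
        // java.time.ZoneId and java.time.format.DateTimeFormatter)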

        boolean redoCrawl = false;

        if(this.siteCrawlUnfinished) {
            // arbitrary decision, but we need some indication that the MRI content was not just a one-off on the website
            if(this.numPagesInMRI > 2) {
                redoCrawl = true;
            }
        }

        File geoLiteCityDatFile = new File(this.getClass().getClassLoader().getResource("GeoLiteCity.dat").getFile());
        try {
            if(this.domainOfSite.equals("UNKNOWN")) { // for sites that had 0 webpages downloaded, we have no domain
                this.geoLocationCountryCode = "UNKNOWN";
            } else {
                this.geoLocationCountryCode = Utility.getCountryCodeOfDomain(this.domainOfSite, geoLiteCityDatFile);
            }
        } catch(Exception e) {
            logger.error("*** For SiteID " + siteID + ", got exception: " + e.getMessage(), e);

            //if(this.domainOfSite.endsWith(".nz")) { // nz TLDs are worth counting
            //this.geoLocationCountryCode = "NZ";
            //}

            // Help along identification of the domain's country by inspecting the TLD,
            // if there are 2 letters after the last period mark.
            int periodIndex = domainOfSite.length()-3;
            // .com|.org etc extensions that have 3 chars after the period mark will remain UNKNOWN;
            // 2 letter extensions will be considered the TLD
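            // e.g. (illustrative) for "http://example.co.nz", periodIndex lands on the '.' before "nz" -> "NZ";
            // for "http://example.com", charAt(periodIndex) is 'c', not '.', so the result stays "UNKNOWN"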
            if(periodIndex >= 0 && domainOfSite.charAt(periodIndex) == '.' && ((periodIndex+1) < domainOfSite.length())) {
                // has a 2 letter TLD. Make it uppercase to match the return value of Utility.getCountryCodeOfDomain() above
                String TLD = domainOfSite.substring(periodIndex+1);
                this.geoLocationCountryCode = TLD.toUpperCase();
            } else {
                this.geoLocationCountryCode = "UNKNOWN"; // couldn't get the country code, so it should also be UNKNOWN, not null
            }
        }

        int totalPages = pages.size();

        WebsiteInfo website = new WebsiteInfo(/*SITE_COUNTER,*/ this.siteID, this.domainOfSite,
                                              totalPages, this.countOfWebPagesWithBodyText,
                                              this.numPagesInMRI, this.numPagesContainingMRI,
                                              this.siteCrawledTimestamp, this.siteCrawlUnfinished, redoCrawl,
                                              this.geoLocationCountryCode, this.urlContainsLangCodeInPath);

        //mongodbAccess.insertWebsiteInfo(website);
        // Uses morphia to save to mongodb, see https://www.baeldung.com/mongodb-morphia
        mongodbAccess.datastore.save(website);
    }


    // --------------- STATIC METHODS AND INNER CLASSES USED BY MAIN -------------- //

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tNutchTextDumpToMongoDB <path to 'crawled' folder>");
    }

    public static void main(String[] args) {
        if(args.length != 1) {
            printUsage();
            return;
        }

        File sitesDir = new File(args[0]);
        if(!sitesDir.exists() || !sitesDir.isDirectory()) {
            logger.error("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        NutchTextDumpToMongoDB.DEBUG_MODE = false;


        try (
            MongoDBAccess mongodb = new MongoDBAccess();
        ) {

            mongodb.connectToDB();
            //mongodb.showCollections();

            // print out the column headers for the websites csv file
            // https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVPrinter.html
            // OPTIONAL TODO: creating collections can be done here if dropping and recreating

            MaoriTextDetector mriTxtDetector = new MaoriTextDetector(true); // true: run silent
            File[] sites = sitesDir.listFiles();

            // sort site folders in alphabetical order
            // https://stackoverflow.com/questions/7199911/how-to-file-listfiles-in-alphabetical-order
            Arrays.sort(sites);

            for(File siteDir : sites) { // e.g. 00001
                if(siteDir.isDirectory()) {
                    // look for dump.txt
                    File txtDumpFile = new File(siteDir, "dump.txt");
                    if(!txtDumpFile.exists()) {
                        logger.error("Text dump file " + txtDumpFile + " did not exist");
                        continue;
                    }

                    else {
                        File UNFINISHED_FILE = new File(siteDir, "UNFINISHED");

                        String siteID = siteDir.getName();
                        if(siteID.contains("_")) {
                            logger.warn("*** Skipping site " + siteID + " as its dir name indicates it wasn't crawled properly.");
                            continue;
                        }

                        long lastModified = siteDir.lastModified();
                        logger.debug("@@@ Processing siteID: " + siteID);
                        NutchTextDumpToMongoDB nutchTxtDump = new NutchTextDumpToMongoDB(
                            mongodb, mriTxtDetector,
                            siteID, txtDumpFile, lastModified, UNFINISHED_FILE.exists());
                        // now it has parsed all the web pages in the site's text dump

                        // Let's print stats on each web page's detected language being MRI or not
                        // and how many pages there were in the site in total.

                        //nutchTxtDump.printSiteStats();

                        nutchTxtDump.websiteDataToDB();
                    }
                }

            }

        } catch(Exception e) {
            // can get an exception when instantiating a NutchTextDumpToMongoDB instance
            // or with the CSV file
            logger.error(e.getMessage(), e);
        }
    }
}