package org.greenstone.atea;


import java.io.*;
import java.util.Properties;
import java.util.zip.GZIPInputStream;
import java.util.Iterator;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.log4j.Logger;

/**
 * The main() method of this class takes a folder of warc.wet(.gz) files and goes through
 * the WET records in each, putting each WET record into a file. Each file is put into a
 * keep, discard or greyListed folder, and its url is written into a keep, discard
 * or greylisted text file, based on:
 *
 * 1. whether it's whitelisted, else greylisted, else blacklisted
 * 2. and, if explicitly whitelisted or else neither greylisted nor blacklisted, whether
 * there's enough content. Formerly, content-length and number of lines were used to determine
 * if the content was sufficient. Now it's just the word count, together with the MAXIMUM
 * (not minimum) number of characters a string may contain to still count as a word.
 * These settings can be adjusted in conf/config.properties.
 *
 * Put a url-blacklist-filter.txt and/or url-greylist-filter.txt and/or url-whitelist-filter.txt
 * into the conf folder to control any url patterns that are explicitly included, excluded or
 * set aside for inspecting later. These filter text files don't use regexes; instead their
 * format is:
 * - precede a URL with ^ to blacklist urls that match the given prefix
 * - follow a URL with $ to blacklist urls that match the given suffix
 * - ^url$ will blacklist urls that match the given url completely
 * - without either ^ or $ symbol, urls containing the given url will get blacklisted
 *
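 * As a purely illustrative sketch (these hypothetical entries are not from the actual
 * conf files), a url-blacklist-filter.txt could contain lines like:
 *    ^http://ads.example.com
 *    .example-shop.com$
 *    ^https://example.net/spam/page.html$
 *    machine-translated
 * which would respectively blacklist urls starting with the first entry, ending with the
 * second, exactly matching the third, and containing the fourth.
 *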
 * In WETProcessor.java's current implementation, explicit whitelisting takes precedence
 * over greylisting, which in turn takes precedence over blacklisting. However, even
 * explicitly whitelisted urls still need to have sufficient content to end up in keepURLs.txt
 * and in the seedURLs.txt file used for nutch, along with their domains in regex-urlfilter.txt,
 * also for nutch.
 *
 * A CCWETProcessor instance can be configured to process all the .warc.wet(.gz) files
 * in the given input folder. Then use a single instance of the WETProcessor class to process
 * each single unzipped warc.wet file.
 *
 * To compile, include the jars in lib/:
 * maori-lang-detection/src$ javac -cp ".:../lib/*" org/greenstone/atea/CCWETProcessor.java
 *
 * To run, pass in the log4j and other properties files in the conf/ folder:
 * maori-lang-detection/src$ java -cp ".:../conf:../lib/*" org.greenstone.atea.CCWETProcessor <folder containing warc.wet(.gz) files> <outputFolder>
 *
 * e.g.
 * - java -cp ".:../conf:../lib/*" org.greenstone.atea.CCWETProcessor ../tmp/processWET /Scratch/ak19/gs3-extensions/maori-lang-detection/tmp/processedWET
 * - java -cp ".:../conf:../lib/*" org.greenstone.atea.CCWETProcessor ../tmp/processWET /Scratch/ak19/gs3-extensions/maori-lang-detection/tmp/processedWET 2>&1 | less
 *
 */

public class CCWETProcessor {
    private static Logger logger = Logger.getLogger(org.greenstone.atea.CCWETProcessor.class.getName());

    // Properties shared across WETProcessor instances
    public final int MAX_WORD_LENGTH;
    public final int MIN_NUM_WORDS;
    public final int MAX_WORDS_CAMELCASE;

    private Properties configProperties = new Properties();

    // File paths shared across WETProcessor instances
    public final File commoncrawlDir;
    public final File outputFolder;
    public final File discardFolder;
    public final File keepFolder;
    public final File greyListedFolder;
    public final File keepURLsFile;
    public final File discardURLsFile;
    public final File greyListedFile;

    /** Possible values stored in the blackList/whiteList/greyList Maps */
    private final Integer LIST_ENTRY_CONTAINS = new Integer(0);
    private final Integer LIST_ENTRY_STARTSWITH = new Integer(1);
    private final Integer LIST_ENTRY_ENDSWITH = new Integer(2);
    private final Integer LIST_ENTRY_MATCHES = new Integer(3);

    /**
     * Store url patterns as keys and values indicating whether a url should
     * match it exactly, start/end with it, or contain it
     */
    private HashMap<String, Integer> blackList;
    private HashMap<String, Integer> greyList;
    private HashMap<String, Integer> whiteList;

    /** Map of topsites with allowable regexes: sites too big to exhaustively crawl,
     * with an optional regex defining allowed exceptions, like subdomains or url suffixes
     * off that top site. For example, wikipedia.org is a topsite, but mi.wikipedia.org
     * is relevant. Or blogspot.com is a top site, but someone's pages in Maori off blogspot
     * would be relevant.
     * The map stores the top site domain suffix and an optional regex string for allowable
     * url patterns.
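     *
     * As a purely hypothetical sketch (the pairings below are invented for illustration),
     * entries in conf/sites-too-big-to-exhaustively-crawl.txt consist of a topsite domain,
     * optionally followed by a tab and an allowed-url value, e.g.:
     *    pinterest.com
     *    wikipedia.org <TAB> mi.wikipedia.org
     *    blogspot.com <TAB> COPY
     *    some-big-site.example.com <TAB> SINGLEPAGE
     * where COPY and SINGLEPAGE are the special values recognised by createSeedURLsFiles().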
     */
    private HashMap<String, String> topSitesMap;

    /** Map of domains we keep and the full urls we're keeping that are of that domain.
     * No need to use a TreeMap which preserves natural (alphabetical) ordering of keys,
     * while a HashMap has no notion of ordering, because we just need to store urls with
     * their domains. Whether the domains are sorted or the urls per domain are sorted becomes
     * irrelevant. (Does it really? What if we have urls followed vs preceded by urls with the
     * same prefix, e.g. pinky.com/toto/index.html and pinky.com/toto/nono/file.html
     * Is there any benefit to nutch when crawling if these seedURLs are ordered or not?)
     */
    private Map<String, Set<String>> domainsToURLsMap;

    // Keep a count of all the records that all WETProcessors instantiated
    // by our main method combined have processed
    private int totalRecordCount = 0;

    private int wetFileCount = 0;

    public CCWETProcessor(File inFolder, File outFolder) throws Exception {
        this.commoncrawlDir = inFolder;
        this.outputFolder = outFolder;

        // load up the properties from the config file
        try (InputStream infile = org.greenstone.atea.CCWETProcessor.class.getClassLoader().getResourceAsStream("config.properties")) {
            configProperties = new Properties();
            configProperties.load(infile);
            //infile.close(); // not explicitly called in examples of try-with-resources

        } catch(Exception e) {
            System.err.println("Exception attempting to read properties from config.properties.");
            logger.error("Exception attempting to read properties from config.properties.");
            e.printStackTrace();
        }

        if(configProperties.size() == 0) {
            System.err.println("*** Warning: no values read into config properties. Using defaults.");
        }

        MAX_WORD_LENGTH = Integer.parseInt(configProperties.getProperty("WETprocessor.max.word.length", "15"));
        MIN_NUM_WORDS = Integer.parseInt(configProperties.getProperty("WETprocessor.min.num.words", "20"));
        MAX_WORDS_CAMELCASE = Integer.parseInt(configProperties.getProperty("WETprocessor.max.words.camelcase", "10"));
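        // As an illustrative sketch only (not necessarily the shipped conf file), a minimal
        // conf/config.properties matching the defaults above might contain:
        //    WETprocessor.max.word.length=15
        //    WETprocessor.min.num.words=20
        //    WETprocessor.max.words.camelcase=10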


        this.discardFolder = new File(outFolder, "discard");
        if(!discardFolder.exists()) {
            discardFolder.mkdir();
        }
        this.keepFolder = new File(outFolder, "keep");
        if(!keepFolder.exists()) {
            keepFolder.mkdir();
        }

        this.greyListedFolder = new File(outFolder, "greylisted");
        if(!greyListedFolder.exists()) {
            greyListedFolder.mkdir();
        }

        this.keepURLsFile = new File(outFolder, "keepURLs.txt");
        if(keepURLsFile.exists() && !keepURLsFile.delete()) {
            throw new Exception("Warning: Unable to delete " + this.keepURLsFile + ". Unable to proceed.");
        }
        this.discardURLsFile = new File(outFolder, "discardURLs.txt");
        if(discardURLsFile.exists() && !discardURLsFile.delete()) {
            throw new Exception("Warning: Unable to delete " + discardURLsFile + ". Unable to proceed.");
        }
        this.greyListedFile = new File(outFolder, "greyListed.txt");
        if(greyListedFile.exists() && !greyListedFile.delete()) {
            throw new Exception("Warning: Unable to delete " + greyListedFile + ". Unable to proceed.");
        }

        // prepare our blacklist, greylist (for inspection) and whitelist
        System.err.println("Loading blacklist.");
        blackList = new HashMap<String, Integer>();
        initURLFilterList(blackList, "url-blacklist-filter.txt");

        System.err.println("Loading greylist.");
        greyList = new HashMap<String, Integer>();
        initURLFilterList(greyList, "url-greylist-filter.txt");

        System.err.println("Loading whitelist.");
        whiteList = new HashMap<String, Integer>();
        initURLFilterList(whiteList, "url-whitelist-filter.txt");

        // Create the map of topSites
        System.err.println("Loading map of topsites with regex of allowable url patterns for each topsite.");
        topSitesMap = new HashMap<String, String>();
        //File topSitesFile = new File(outFolder, "sites-too-big-to-exhaustively-crawl.txt");

        try (
             BufferedReader reader = new BufferedReader(new InputStreamReader(org.greenstone.atea.CCWETProcessor.class.getClassLoader().getResourceAsStream("sites-too-big-to-exhaustively-crawl.txt"), "UTF-8"));
             ) {

            String str = null;
            while((str = reader.readLine()) != null) {
                str = str.trim();
                if(str.equals("") || str.startsWith("#")) {
                    continue;
                }

                int tabindex = str.indexOf("\t");
                if(tabindex == -1) {
                    topSitesMap.put(str, "");
                } else {
                    String topsite = str.substring(0, tabindex).trim();
                    String allowed_url_pattern = str.substring(tabindex+1).trim();
                    topSitesMap.put(topsite, allowed_url_pattern);
                }
            }
        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.err.println("\n@@@@@@@@@ Error reading in from top sites file conf/sites-too-big-to-exhaustively-crawl.txt");
        }

        //System.err.println("Prematurely terminating for testing purposes.");
        //System.exit(-1);
    }

    /** Work out the 'domain' for a given url.
     * This retains any www. or subdomain prefix.
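     * For example, getDomainForURL("https://mi.wikipedia.org/wiki/Page") returns
     * "mi.wikipedia.org", and a url without a protocol prefix, such as
     * "example.com/index.html", returns "example.com".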
     */
    private String getDomainForURL(String url) {
        int startIndex = url.indexOf("//"); // http:// or https:// prefix
        startIndex = (startIndex == -1) ? 0 : (startIndex+2); // skip past the protocol's // portion
        String domain = url.substring(startIndex);
        int endIndex = domain.indexOf("/");
        if(endIndex == -1) endIndex = domain.length();
        domain = domain.substring(0, endIndex);

        return domain;
    }

    /**
     * Using the keepURLs.txt file generated by running WETProcessor instances, this produces
     * as output the URL seed list and regex-urlfilter text files required by nutch, see
     * https://cwiki.apache.org/confluence/display/nutch/NutchTutorial
     */
    public void createSeedURLsFiles(File seedURLsFile, File urlFilterFile,
                                    File domainURLsFile, File topSiteMatchesFile) {
        // Maintain Sets of unique domains and urls
        // TreeSet: by default, "the elements are ordered using their natural ordering"
        // (or by a Comparator provided at set creation time).
        // Whereas HashSet doesn't guarantee ordering.
        // So we get alphabetic sorting for free. And guaranteed log(n) for basic operations.

        //Set<String> domainsSet = new TreeSet<String>();
        //Set<String> urlsSet = new TreeSet<String>();
        domainsToURLsMap = new TreeMap<String, Set<String>>();

        final String FILTER_REGEX_PREFIX = "+https?://([a-z0-9-]+\\.)*"; // https?://([a-z0-9-]+\.)* for nutch's regex-urlfilter.txt

        try (
             BufferedReader reader = new BufferedReader(new FileReader(this.keepURLsFile));
             ) {

            // read a URL at a time from urlsFile
            String url = null;
            String domain = null;
            while((url = reader.readLine()) != null) { // readLine removes newline separator

                // work out domain. This retains any www. or subdomain prefix
                domain = getDomainForURL(url);

                //urlsSet.add(url);
                //domainsSet.add(domain);
                Set<String> urlsSet;
                if(!domainsToURLsMap.containsKey(domain)) {
                    urlsSet = new TreeSet<String>();
                    urlsSet.add(url);
                    domainsToURLsMap.put(domain, urlsSet);
                } else {
                    urlsSet = domainsToURLsMap.get(domain);
                    urlsSet.add(url);
                }

            }
        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.err.println("\n@@@@@@@@@ Error reading in urls from file " + this.keepURLsFile);
        }

        // We'd have pruned out duplicates by now and have a sorted list of domains,
        // each of which maps to seed URLs in the commoncrawl for that domain

        int domainCount = 0;
        File sitesFolder = new File(outputFolder, "sites");
        if(!sitesFolder.exists()) {
            sitesFolder.mkdir();
        }
        final String FORMATSTR = "%05d";

        // write out each domain followed in sequence by all urls we found in that domain
        // (urls with tab up front)
        try (
             // global lists of all domains, seedURLs and regex-urlfilters across all wet files of all commoncrawls
             // Also a global file listing any urls that matched top sites that didn't specify
             // allowed regex patterns
             BufferedWriter domainURLsWriter = new BufferedWriter(new FileWriter(domainURLsFile));
             BufferedWriter seedURLsWriter = new BufferedWriter(new FileWriter(seedURLsFile));
             BufferedWriter urlFilterWriter = new BufferedWriter(new FileWriter(urlFilterFile));
             BufferedWriter topSiteMatchesWriter = new BufferedWriter(new FileWriter(topSiteMatchesFile))
             ) {

            // initialise topSiteMatchesFile with some instructional text.
            topSiteMatchesWriter.write("The following domains with seedURLs are on a major/top 500 site\n");
            topSiteMatchesWriter.write("for which no allowed URL pattern regex has been specified.\n");
            topSiteMatchesWriter.write("Specify one for each such domain in the tab-separated sites-too-big-to-exhaustively-crawl.txt file\n");

            //Set<Map.Entry<String, Set<String>>> domainsSet = domainsToURLsMap.keySet();
            Set<String> domainsSet = domainsToURLsMap.keySet();
            Iterator<String> domainIterator = domainsSet.iterator();

            /*
            // DEBUG
            String value = topSitesMap.get("wikipedia.org");
            if(value == null) {
                System.err.println("### wikipedia.org had null value");
            } else {
                System.err.println("### wikipedia.org had value: " + value);
            } // DEBUG
            */

            while(domainIterator.hasNext()) {
                String domain = domainIterator.next();

                String allowedURLPatternRegex = isURLinTopSitesMap(domain);
                // If the domain is of a topsite for which no allowed URL pattern has been provided
                // in sites-too-big-to-exhaustively-crawl.txt,
                // then we don't know how to crawl the site. Warn the user by writing the affected
                // domain and seedURLs to the topSiteMatchesFile.
                if(allowedURLPatternRegex != null && allowedURLPatternRegex.equals("")) {
                    // topsite, but we don't (yet) know what portion can be crawled
                    // Append the top site and url to a global/toplevel file that
                    // the user needs to check later and we're done with this domain as it
                    // won't go into any other file hereafter

                    Set<String> urlsForDomainSet = domainsToURLsMap.get(domain);
                    Iterator<String> urlIterator = urlsForDomainSet.iterator();
                    while(urlIterator.hasNext()) {
                        String url = urlIterator.next();
                        topSiteMatchesWriter.write("\t" + url + "\n");
                    }

                    continue; // done with this domain
                }

                // start counting the domains we're actually going to process
                domainCount++;

                String siteID = String.format(FORMATSTR, domainCount);
                File domainFolder = new File(sitesFolder, siteID);
                domainFolder.mkdir();

                // write out the domain
                //seedURLsWriter.write(domain + "\n");


                // for every domain, we need a sites/0000x/ folder, where x is domain#, containing
                // its own INDIVIDUAL seedURLs.txt and regex-urlfilter.txt
                // We still have a global seedURLs.txt and regex-urlfilter.txt too.
                File siteSeedsFile = new File(domainFolder, "seedURLs.txt"); // e.g. sites/00001/seedURLs.txt
                File siteRegexFile = new File(domainFolder, "regex-urlfilter.txt"); // e.g. sites/00001/regex-urlfilter.txt
                try (
                     BufferedWriter siteURLsWriter = new BufferedWriter(new FileWriter(siteSeedsFile));
                     BufferedWriter siteRegexWriter = new BufferedWriter(new FileWriter(siteRegexFile));
                     ) {

                    // write all sorted unique domains into global domains file
                    domainURLsWriter.write(domain + "\n");

                    // Only write urls and no domain into single global seedurls file
                    // But write domain and tabbed urls into individual sites/0000#/seedURLs.txt
                    // files (and write regexed domain into each sites/0000#/regex-urlfilter.txt)
                    // If we ever run nutch on a single seedURLs listing containing
                    // all seed pages to crawl sites from, the above two files will work for that.


                    if(allowedURLPatternRegex == null) { // entire site can be crawled
                        siteURLsWriter.write(domain + "\n");

                        // Write out filter in the following form for a site, e.g. for nutch.apache.org:
                        // nutch.apache.org => +^https?://([a-z0-9-]+\.)*nutch\.apache\.org/
                        String regexed_domain = FILTER_REGEX_PREFIX + domain.replace(".", "\\.") + "/";
                        urlFilterWriter.write(regexed_domain + "\n"); //global file
                        siteRegexWriter.write(regexed_domain + "\n"); // site file
                    }
                    else { // domain belongs to a top site where only portion of site can be crawled

                        if(allowedURLPatternRegex.equals("COPY")) { // COPY existing domain as url-filter
                            siteURLsWriter.write(domain + "\n");
                            // e.g. pinky.blogspot.com will add a filter for pinky.blogspot.com
                            // and not for all of blogspot.com

                            String regexed_domain = "+https?://"+domain.replace(".", "\\.") + "/";
                            urlFilterWriter.write(regexed_domain + "\n");
                            siteRegexWriter.write(regexed_domain + "\n");

                        } else if(allowedURLPatternRegex.equals("SINGLEPAGE")) {
                            // don't write out domain. We want individual pages
                            //DON'T DO: siteURLsWriter.write(domain + "\n");

                            // don't write out domain as a regex expression url filter
                            // write out the individual seed urls for the domain instead
                            // since we will only be downloading the single page

                            Set<String> urlsForDomainSet = domainsToURLsMap.get(domain);
                            for(String urlInDomain : urlsForDomainSet) {
                                String regexed_url = "+^"+urlInDomain.replace(".", "\\.");
                                urlFilterWriter.write(regexed_url + "\n");
                                siteRegexWriter.write(regexed_url + "\n");
                            }
                        } else { // allowedURLPatternRegex is a url-form - convert to regex
                            String regexed_pattern = "+^https?://"+allowedURLPatternRegex.replace(".", "\\.");
                            siteURLsWriter.write(domain + "\n");
                            urlFilterWriter.write(regexed_pattern + "\n");
                            siteRegexWriter.write(regexed_pattern + "\n");

                        }
                    }

                    // next write out the urls for the domain into the sites/0000x/seedURLs.txt file
                    // also write into the global seeds file
                    // (with a tab prefixed to each url?)
                    Set<String> urlsForDomainSet = domainsToURLsMap.get(domain);
                    Iterator<String> urlIterator = urlsForDomainSet.iterator();
                    while(urlIterator.hasNext()) {
                        String url = urlIterator.next();
                        seedURLsWriter.write("\t" + url + "\n"); // global seedURLs file
                        siteURLsWriter.write("\t" + url + "\n");
                    }
                } catch (IOException ioe) {
                    ioe.printStackTrace();
                    System.err.println("\n@@@@@@@@@ Error writing to " + siteSeedsFile + " or " + siteRegexFile);
                }

            }

        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.err.println("\n@@@@@@@@@ Error writing to " + seedURLsFile + " or " + urlFilterFile);
        }

        // write out domains as regular expressions into "regex-urlfilter.txt" file
        try (BufferedWriter urlFilterWriter = new BufferedWriter(new FileWriter(urlFilterFile))) {
            Set<String> domainsSet = domainsToURLsMap.keySet();
            Iterator<String> i = domainsSet.iterator();
            // nutch.apache.org => +^https?://([a-z0-9-]+\.)*nutch\.apache\.org/
            while(i.hasNext()) {
                String domain = i.next();
                domain = FILTER_REGEX_PREFIX + domain.replace(".", "\\.") + "/";
                urlFilterWriter.write(domain + "\n");
            }

        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.err.println("\n@@@@@@@@@ Error writing to " + urlFilterFile);
        }

        /*
        // BEGIN DEBUG
        System.err.println("@@@@ TopSitesMap contains: ");
        for(Map.Entry<String, String> entry : topSitesMap.entrySet()) {
            String topSite = entry.getKey();
            String urlPattern = entry.getValue();
            System.err.println(topSite + " - " + urlPattern);
        } // END DEBUG
        */
    }

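    /** Strips the first segment (up to and including the first '.') off the given domain,
     * e.g. stripSubDomain("mi.wikipedia.org") returns "wikipedia.org". If the string
     * contains no '.', it is returned unchanged.
     */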
    private String stripSubDomain(String url) {
        int index = url.indexOf(".");
        if(index != -1) {
            url = url.substring(index+1);
        }
        return url;
    }

    /**
     * Check if the domain of the url, either in its entirety or when stripped of www/subdomains,
     * is in the list of top sites.
     * If it is, the allowed-url pattern stored for that topsite in the topSitesMap is returned
     * (this may be the empty string); if no topsite matches, null is returned.
     */
    private String isURLinTopSitesMap(String domain) {
        boolean keepLooping = true;

        // The domain parameter will have retained www or subdomains, but is stripped of the protocol

        // keep looping, stripping subdomains from url and checking if it matches a topsite domain
        // if it does, return the value for that topsite domain in the topSitesMap
        // If no match at all, return null.
        do {
            if(domain.contains("pinterest.com")) {
                System.err.println("@@@@@@@@@ Checking for url " + domain + " in the top sites map");
            }

            String allowed_url_pattern = topSitesMap.get(domain);
            if(allowed_url_pattern != null) { // if topSitesMap.containsKey(domain);
                // there's an entry for the URL in the topSitesMap
                System.err.println("##### A top site matches URL domain " + domain);
                return allowed_url_pattern;
            }
            // else, no entry for the URL in the topSitesMap
            // Not done yet: strip the subdomain from the URL and check it against the topSitesMap

            String newURL = stripSubDomain(domain);
            if(domain.equals(newURL)) keepLooping = false;
            else domain = newURL;
        } while(keepLooping);

        // url in entirety or stripped of subdomains did not match any of the topsites
        return null;
    }

    private boolean isListedInFilterList(Map<String, Integer> filterListMap, String url) {
        //Set<Map.Entry<String,Integer>> entries = filterListMap.entrySet();
        //Iterator<Map.Entry<String, Integer>> i = entries.iterator();
        //while(i.hasNext()) {
        //    Map.Entry<String, Integer> entry = i.next();
        for(Map.Entry<String,Integer> entry : filterListMap.entrySet()) {
            String urlPattern = entry.getKey();
            Integer matchRule = entry.getValue();

            if(matchRule == LIST_ENTRY_CONTAINS && url.contains(urlPattern)) {
                return true;
            }
            else if(matchRule == LIST_ENTRY_STARTSWITH && url.startsWith(urlPattern)) {
                return true;
            }
            else if(matchRule == LIST_ENTRY_ENDSWITH && url.endsWith(urlPattern)) {
                return true;
            }
            else if(matchRule == LIST_ENTRY_MATCHES && url.equals(urlPattern)) {
                return true;
            }
            // else check the rest of the filter list against this url
            // before returning false to be certain it's not been listed in the filter list
        }

        return false;
    }

    /**
     * Returns true if the url or pattern is found in the blacklist file.
     * Note that if eventually the same url pattern is found in the greylist or whitelist too,
     * it won't get blacklisted after all. But that's not implemented here.
     */
    public boolean isBlacklisted(String url) {
        return isListedInFilterList(blackList, url);
    }

    /**
     * Returns true if the url or pattern is explicitly mentioned in the greylist file.
     * Will eventually take precedence if the same URL pattern was also mentioned in the blacklist.
     * Will eventually be pre-empted into the whitelist if mentioned in the whitelist.
     */
    public boolean isGreylisted(String url) {
        // auto-translated product sites
        return isListedInFilterList(greyList, url);
    }

    /**
     * Returns true if the url or pattern is explicitly mentioned in the whitelist file.
     * Its mention in the whitelist moreover overrides any mention in the blacklist and greylist.
     */
    public boolean isWhitelisted(String url) {
        return isListedInFilterList(whiteList, url);
    }

    /**
     * Checks the URL parameter against each line ("filter") of conf/url-black|grey|whitelist-filter.txt to decide
     * whether it is in the mentioned black|grey|white list.
     * Filters don't represent actual regex, just ^ and $ as start and end terminators.
     * By not having this method deal with actual regex for filters, this has the advantage that
     * we don't have to remember to escape or double escape each filter to turn it into a regex.
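     * For example, hypothetical filter lines (invented here, not from the real conf files)
     * would be stored as follows:
     *    ^https://ads.example.com      - LIST_ENTRY_STARTSWITH for "https://ads.example.com"
     *    .example-shop.com$            - LIST_ENTRY_ENDSWITH for ".example-shop.com"
     *    ^https://example.net/page$    - LIST_ENTRY_MATCHES for "https://example.net/page"
     *    translated-products           - LIST_ENTRY_CONTAINS for "translated-products"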
     */
    public void initURLFilterList(Map<String, Integer> list, String filterListFilename) {

        // if filterListFilename does not exist in the conf folder, just return
        if(org.greenstone.atea.CCWETProcessor.class.getClassLoader().getResource(filterListFilename) == null) {
            System.err.println(filterListFilename + " does not exist");
            return;
        }

        try (
             BufferedReader reader = new BufferedReader(new InputStreamReader(org.greenstone.atea.CCWETProcessor.class.getClassLoader().getResourceAsStream(filterListFilename), "UTF-8"));
             ) {
            String filter = null;
            while((filter = reader.readLine()) != null) {
                // skip comments and empty lines
                filter = filter.trim();
                if(filter.equals("") || filter.startsWith("#")) {
                    continue;
                }

                if(filter.startsWith("^") && filter.endsWith("$")) {
                    filter = filter.substring(1, filter.length()-1);
                    list.put(filter, LIST_ENTRY_MATCHES);
                }
                else if(filter.startsWith("^")) {
                    filter = filter.substring(1);
                    list.put(filter, LIST_ENTRY_STARTSWITH);
                    System.err.println("Match filter startswith: " + filter);
                }
                else if(filter.endsWith("$")) {
                    filter = filter.substring(0, filter.length()-1);
                    list.put(filter, LIST_ENTRY_ENDSWITH);
                }
                else {
                    list.put(filter, LIST_ENTRY_CONTAINS);
                }
                //System.err.println("Got filter: " + filter);
            }

        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.err.println("\n@@@@@@@@@ Error reading into map from file " + filterListFilename);
        }

    }

    /** Maintain a count of all WET files processed. */
    public void setWETFileCount(int count) { this.wetFileCount = count; }

    /** Maintain a count of all WET records processed. */
    //public int getRecordCount() { return this.totalRecordCount; }
    //public void addToRecordCount(int count) { this.totalRecordCount += count; }
    public void setRecordCount(int count) { this.totalRecordCount = count; }

    public void processAllWETFilesOfCrawl(File ccrawlWETFileDir) {

        // Will list all the warc.wet files in the input directory or else their gzipped versions
        File[] WETFiles = ccrawlWETFileDir.listFiles(new WETFilenameFilter());

        int wetRecordCount = 0;
        int wetFileCount = 0;

        for(int i = 0; i < WETFiles.length; i++) {
            File WETFile = WETFiles[i];
            logger.debug("Processing WETfile: " + WETFile);

            // Any .gz files listed means they haven't been unzipped yet. So unzip.
            String WETFilename = WETFile.toString();
            if(WETFilename.endsWith(".gz")) {
                File GZippedWETFile = WETFile;
                String WETGZippedFilename = WETFilename;
                WETFilename = WETFilename.substring(0, WETFilename.lastIndexOf(".gz"));

                WETFile = new File(WETFilename);
                Utility.unzipFile(GZippedWETFile, WETFile);
            }
            // hereafter all WETFiles should refer to the unzipped version
            // Check the unzipped WETFile exists

            if(!WETFile.exists() || !WETFile.isFile()) {
                System.err.println("Error: " + WETFile + " does not exist (failure to unzip?)");
                logger.error("Error: " + WETFile + " does not exist (failure to unzip?)");
                return;
            }

            // Finally, we can process this WETFile's records into the keep and discard pile
            wetFileCount++;
            logger.debug("Off to process " + WETFile);
            String crawlID = ccrawlWETFileDir.getName(); // something like CC-MAIN-YYYY-##-wet-files
            crawlID = crawlID.substring("CC-MAIN-".length(), crawlID.indexOf("-wet-files")); // YYYY-##
            WETProcessor wetFileProcessor = new WETProcessor(WETFile, crawlID, this);
            wetFileProcessor.processWETFile();
            wetRecordCount += wetFileProcessor.getRecordCount();
        }

        // for information purposes
        this.setWETFileCount(wetFileCount);
        this.setRecordCount(wetRecordCount);
    }

    public static void printUsage() {
        System.err.println("Run this program as:");
        System.err.println("\tCCWETProcessor <folder containing wet(.gz) files> <output folder path>");
    }

    /** Filename filter to only list warc.wet files or else warc.wet.gz files
     * for which unzipped warc.wet equivalents don't yet exist.
     */
    private static class WETFilenameFilter implements FilenameFilter {

        public boolean accept(File dir, String name) {
            if(name.endsWith(".warc.wet")) {
                logger.debug("Will include " + name + " for processing.");
                return true;
            }

            if(name.endsWith(".warc.wet.gz")) {
                String nameWithoutGZext = name.substring(0, name.lastIndexOf(".gz"));
                File unzippedVersion = new File(dir, nameWithoutGZext);
                if(unzippedVersion.exists()) {
                    logger.debug("--- Unzipped version " + unzippedVersion + " exists.");
                    logger.debug("Skipping " + name);
                    return false; // don't count gzipped version if unzipped version exists.
                }
                else {
                    logger.debug("Only zipped version " + name + " exists.");
                    return true; // No unzipped version, so have to work with gzipped version
                }
            }

            // we're not even interested in any other file extensions
            logger.debug("Not a WET file. Skipping " + name);
            return false;
        }
    }


    private static class CCrawlWETFolderFilenameFilter implements FilenameFilter {

        public boolean accept(File dir, String name) {
            File f = new File(dir, name);
            if(f.isDirectory()) {
                if(name.matches("CC-MAIN-\\d{4}-\\d{2}-wet-files")) {
                    return true;
                }
            }
            else {
                System.err.println("File " + f + " is not a directory");
            }
            return false;
        }
    }

    public static void main(String[] args) {
        if(args.length != 2) {
            printUsage();
            return;
        }

        File commoncrawlDir = new File(args[0]);
        if(!commoncrawlDir.exists() || !commoncrawlDir.isDirectory()) {
            System.out.println("Error: " + args[0] + " does not exist or is not a directory");
            return;
        }

        File outFolder = new File(args[1]);
        if(!outFolder.exists() || !outFolder.isDirectory()) {
            System.out.println("Error: " + args[1] + " does not exist or is not a directory.");
            return;
        }

        try {
            CCWETProcessor ccWETFilesProcessor = new CCWETProcessor(commoncrawlDir, outFolder);

            File[] ccrawlFolders = commoncrawlDir.listFiles(new CCrawlWETFolderFilenameFilter());

            for(int i = 0; i < ccrawlFolders.length; i++) {
                File ccrawlFolder = ccrawlFolders[i];
                System.err.println("About to process commoncrawl WET files folder: " + ccrawlFolder);
                ccWETFilesProcessor.processAllWETFilesOfCrawl(ccrawlFolder);
            }


            // create the global files of all domains, seedURLs and regex-urlfilters across all wet files of all commoncrawls
            // The former is the only unique one. seedURLs and regex-urlfilters are
            // repeated on a per site/domain basis too, stored in the sites folder
            File seedURLsFile = new File(outFolder, "seedURLs.txt");
            File urlFilterFile = new File(outFolder, "regex-urlfilter.txt");
            File domainURLsFile = new File(outFolder, "all-domain-urls.txt");
            File topSitesMatchedFile = new File(outFolder, "unprocessed-topsite-matches.txt");

            ccWETFilesProcessor.createSeedURLsFiles(seedURLsFile, urlFilterFile, domainURLsFile, topSitesMatchedFile);

            System.out.println("\n*** Inspect urls in greylist at " + ccWETFilesProcessor.greyListedFile + "\n");

            System.out.println("\n*** Check " + topSitesMatchedFile + " for sites not prepared for crawling because they matched top sites but had no regex of allowed url patterns.\n");


        } catch(Exception e) {
            // can get an exception when instantiating CCWETProcessor instance
            e.printStackTrace();
            System.err.println(e.getMessage());
        }

        return;

    }
}