source: gs2-extensions/parallel-building/trunk/src/bin/script/hadoop_import.pl@26949

Last change on this file since 26949 was 26949, checked in by jmt12, 11 years ago

Parallel import using Hadoop

  • Property svn:executable set to *
File size: 7.6 KB
#!/usr/bin/perl
use strict;
use warnings;

# Requires setup.bash to have been sourced
BEGIN
{
  die "GSDLHOME not set\n" unless defined $ENV{'GSDLHOME'};
  die "GSDLOS not set\n" unless defined $ENV{'GSDLOS'};
}

print "===== Greenstone Import using Hadoop =====\n";

# 0. Init
my $collection = 'test';
my $debug = 0;
my $dry_run = 0;
my $cluster_head = ''; # e.g. 'medusa.local'

my $gsdl_home = $ENV{'GSDLHOME'};
my $gsdl_hadoop_ext = $ENV{'GEXTHADOOP_INSTALLED'};
my $hadoop_exe = 'hadoop'; # may need to be a full path
my $hdfs_fs_prefix = 'hdfs://localhost:54310';
my $refresh_import = 0;
my $username = `whoami`;
chomp($username);

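# Expected invocation (cf. printUsage() below):
#   hadoop_import.pl <collection> [<refresh_import>] [<"removeold"|"keepold">]
# Note that at present only the first argument is actually read from @ARGV.
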
# 1. Read and validate parameters
if (defined $ARGV[0])
{
  $collection = $ARGV[0];
}
my $gs_collection_dir = $gsdl_home . '/collect/' . $collection;
my $gs_import_dir = $gs_collection_dir . '/import';
if (!-d $gs_import_dir)
{
  die("Error! Collection's import directory cannot be found: " . $gs_import_dir . "\n");
}
my $gs_results_dir = $gs_collection_dir . '/results';
if (!-d $gs_results_dir)
{
  mkdir($gs_results_dir, 0755);
}
$gs_results_dir .= '/' . time();
if (!-d $gs_results_dir)
{
  mkdir($gs_results_dir, 0755);
}
# - directories within HDFS
my $hdfs_input_dir = &urlCat($hdfs_fs_prefix, 'user', $username, 'gsdl', 'collect', $collection, 'import');
my $hdfs_output_dir = &urlCat($hdfs_fs_prefix, 'user', $username, 'gsdl', 'collect', $collection, 'archives');

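# For illustration: with the default prefix above these resolve to paths of the
# form hdfs://localhost:54310/user/<username>/gsdl/collect/<collection>/import
# and .../archives.
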
# 2. Copy the import directory into HDFS
print " * Replicating import directory in HDFS...";
# - check if the import directory already exists
my $hdfs_import_exists = &hdfsTest('d', 0, $hdfs_input_dir);
if ($refresh_import || !$hdfs_import_exists)
{
  # - clear out the old import directory
  if ($hdfs_import_exists)
  {
    &hdfsCommand('rmr', $hdfs_input_dir);
  }
  # - now recursively copy the contents of the import directory into HDFS,
  #   ensuring that relative paths are maintained
  my $file_count = &recursiveCopy($gs_import_dir, $hdfs_input_dir);
  &debugPrint($file_count . " files 'putted'");
  print "Done!\n";
}
else
{
  print "Already exists!\n";
}
# - clear out the archives regardless
if (&hdfsTest('d', 0, $hdfs_output_dir))
{
  print " * Clearing existing archives directory for this collection... ";
  &hdfsCommand('rmr', $hdfs_output_dir);
  print "Done!\n";
}

# 3. Special case for *Server type infodbs (namely TDBServer and GDBMServer)
#    where we start the server now to ensure it lives on the head node
my $server_host = 'localhost';
my $server_port = '8191';
my $configuration_path = $gs_collection_dir . '/etc/collect.cfg';
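# For illustration: collect.cfg is expected to contain a line such as
# "infodbtype gdbmserver" or "infodbtype tdbserver" for the match below to
# succeed.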
my $infodbtype = `grep -P "^infodbtype" $configuration_path`;
my $server_prefix = '';
if ($infodbtype =~ /^infodbtype\s+(gdbm|tdb)server/i)
{
  $server_prefix = uc($1);
  print " * Starting " . $server_prefix . "Server... ";
  # - start the server on the head node and retrieve the host and port from
  #   the output
  my $launcher_command = $server_prefix . "Server.pl " . $$ . " " . $collection;
  my $launcher_output = &shellCommand($launcher_command);
  if ($launcher_output =~ /Server now listening on ([^:]+):(\d+)/)
  {
    $server_host = $1;
    $server_port = $2;
    print "running on " . $server_host . ":" . $server_port . "\n";
  }
  else
  {
    print "Failed!\n";
    exit;
  }
  # - use the client tool to add ourselves as a listener
  print " * Registering as listener... ";
  my $client_command = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#a:" . $$ . "\"";
  &shellCommand($client_command);
  print "Done!\n";
}
else
{
  print "Error! True Hadoop processing is only available when Greenstone is\n";
  print "       configured to use either GDBMServer or TDBServer.\n";
  exit;
}

# 4. Running Hadoop - we hand in the import directory (within HDFS) as the
#    input and allow the FileInputFormat to split it up into files to be
#    processed in Greenstone. This works for collections with one file per
#    document, like Lorem and ReplayMe, but might not work well with
#    multiple-file documents such as the Demo collection.
print " * Running import using Hadoop...";
my $hadoop_log = $gs_results_dir . '/hadoop.log';
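# The ingest job below is handed the Greenstone home directory, the collection
# name, and the HDFS input/output directories; stdout and stderr are captured
# in hadoop.log under the results directory.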
my $hadoop_command = $hadoop_exe . " jar " . $gsdl_hadoop_ext . "/lib/hadoop-greenstone.jar org.nzdl.gsdl.HadoopGreenstoneIngest " . $gsdl_home . " " . $collection . " " . $hdfs_input_dir . " " . $hdfs_output_dir . " > " . $hadoop_log . " 2>&1";
&shellCommand($hadoop_command);
print "Done!\n";

# 5. If we ran *Server infodbs, we now need to shut them down
if ($server_prefix ne '')
{
  print " * Deregistering as listener and shutting down... ";
  # - deregister as a listener
  my $client_command1 = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#r:" . $$ . "\"";
  &shellCommand($client_command1);
  # - send quit command
  my $client_command2 = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#q:" . $$ . "\"";
  &shellCommand($client_command2);
  print "Done!\n";
}

# 6. Gather logs
print " * Gathering logs from compute nodes... ";
# - local files
&shellCommand('cp /tmp/greenstone/*.* ' . $gs_results_dir);
if (-d $gs_collection_dir . '/logs')
{
  &shellCommand('cp ' . $gs_collection_dir . '/logs/*.* ' . $gs_results_dir);
}
# - remote files: each compute node scp's its logs back to the cluster head
if ($cluster_head ne '')
{
  &shellCommand('rocks run host "scp /tmp/greenstone/*.* ' . $cluster_head . ':' . $gs_results_dir . '"');
  &shellCommand('rocks run host "scp /tmp/gsimport-*/logs/*.log ' . $cluster_head . ':' . $gs_results_dir . '"');
}
print "Done!\n";

# 7. Done - clean up
print " * Cleaning up temporary files... ";
&shellCommand('rm -rf /tmp/greenstone');
if ($cluster_head ne '')
{
  &shellCommand('rocks run host "rm -rf /tmp/greenstone"');
}
print "Done!\n";
print "Complete!\n\n";

exit;

# /** @function debugPrint
#  * Print a message, but only when debugging output is enabled.
#  */
sub debugPrint
{
  my $msg = shift(@_);
  if ($debug)
  {
    print "[Debug] " . $msg . "\n";
  }
}
# /** debugPrint() **/

# /** @function hdfsCommand
#  * Run a Hadoop filesystem command ('hadoop fs -<command> <paths>') and
#  * return its exit status.
#  */
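# For example, &hdfsCommand('rmr', $hdfs_output_dir) builds and runs a command
# of the form: hadoop fs -rmr "<path>" 2>&1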
sub hdfsCommand
{
  my $command = shift(@_);
  my $paths = '"' . join('" "', @_) . '"';
  my $hdfs_command = $hadoop_exe . ' fs -' . $command . ' ' . $paths . ' 2>&1';
  &shellCommand($hdfs_command);
  return $?;
}
# /** hdfsCommand() **/

# /** @function hdfsTest
#  * Test a path in HDFS (e.g. 'd' for directory existence) and compare the
#  * resulting exit status against the expected value.
#  */
sub hdfsTest
{
  my $command = shift(@_);
  my $test_target = shift(@_);
  my $result = &hdfsCommand('test -' . $command, @_);
  return ($result == $test_target);
}
# /** hdfsTest() **/

# /**
#  * Print usage information and exit.
#  */
sub printUsage
{
  print "usage: hadoop_import.pl <collection> [<refresh_import>] [<\"removeold\"|\"keepold\">]\n";
  exit;
}
# /** printUsage() **/

# /**
#  * Recursively copy the contents of a local directory into HDFS, preserving
#  * the relative directory structure, and return the number of files copied.
#  */
sub recursiveCopy
{
  my ($src_dir, $hdfs_dir) = @_;
  my $file_count = 0;
  # - create the directory in HDFS
  &hdfsCommand('mkdir', $hdfs_dir);
  # - search $src_dir for files
  opendir(DH, $src_dir) or die("Error! Cannot open directory for reading: " . $src_dir);
  my @files = readdir(DH);
  closedir(DH);
  foreach my $file (@files)
  {
    # - skip dot prefix files
    if ($file !~ /^\./)
    {
      my $src_path = $src_dir . '/' . $file;
      # - recurse into directories, remembering to extend the HDFS dir too
      if (-d $src_path)
      {
        my $new_hdfs_dir = $hdfs_dir . '/' . $file;
        $file_count += &recursiveCopy($src_path, $new_hdfs_dir);
      }
      # - and use 'put' to copy files
      else
      {
        my $hdfs_path = $hdfs_dir . '/' . $file;
        &hdfsCommand('put', $src_path, $hdfs_path);
        $file_count++;
      }
    }
  }
  return $file_count;
}
# /** recursiveCopy() **/

# /** @function shellCommand
#  * Run a shell command (unless this is a dry run) and return its captured
#  * output.
#  */
sub shellCommand
{
  my $cmd = shift(@_);
  my $output = '';
  &debugPrint($cmd);
  if (!$dry_run)
  {
    $output = `$cmd`;
  }
  return $output;
}
# /** shellCommand() **/

# /** @function urlCat
#  * Join the given parts with '/' to form a URL or path, e.g.
#  * &urlCat('hdfs://localhost:54310', 'user', 'bob') yields
#  * 'hdfs://localhost:54310/user/bob'.
#  */
sub urlCat
{
  my $url = join('/', @_);
  return $url;
}
# /** urlCat() **/