#!/usr/bin/perl
use strict;
use warnings;

# Requires setup.bash to have been sourced
BEGIN
{
  die "GSDLHOME not set\n" unless (defined $ENV{'GSDLHOME'} && $ENV{'GSDLHOME'} ne '');
  die "GSDLOS not set\n" unless defined $ENV{'GSDLOS'};
  die "GEXTPARALLELBUILDING not set\n" unless defined $ENV{'GEXTPARALLELBUILDING'};
  die "GEXTPARALLELBUILDING_INSTALLED not set\n" unless defined $ENV{'GEXTPARALLELBUILDING_INSTALLED'};
  die "HDFS HOST not set (set in <gsdl>/ext/parallel_processing/setup.bash)\n" unless defined $ENV{'HDFSHOST'};
  die "HDFS PORT not set (set in <gsdl>/ext/parallel_processing/setup.bash)\n" unless defined $ENV{'HDFSPORT'};
}

print "===== Greenstone Import using Hadoop =====\n";

# 0. Init
my $collection = 'test';
my $use_thrift = 0;
my $start_thrift = 0;
my $debug = 0;
my $dry_run = 0;
my $stagger = 0;
my $flush_diskcache = 0;
my $use_nfs = 0;

my $gsdl_home = $ENV{'GSDLHOME'};
my $gsdl_hadoop_ext = $ENV{'GEXTPARALLELBUILDING_INSTALLED'};
my $hadoop_exe = 'hadoop'; # add the full path here if hadoop isn't on PATH
my $java_library = 'HadoopGreenstoneIngest2';
my $cluster_head = $ENV{'HDFSHOST'}; # may not be true on advanced configurations
my $hdfs_fs_prefix = 'HDThriftFS://';
my $refresh_import = 0;
my $remove_old = 0;
my $username = `whoami`;
chomp($username);
my $gs_results_dir = '';

# - probe for the Rocks cluster tools; exit status 0 means we are on a cluster
`rocks > /dev/null 2>&1`;
my $is_rocks_cluster = ($? == 0);

# 1. Read and validate parameters
print 'Options: ' . join(' ', @ARGV) . "\n";
if (defined $ARGV[0] && $ARGV[0] =~ /^[a-z0-9]+$/i)
{
  $collection = $ARGV[0];
  print ' collection: ' . $collection . "\n";
}
else
{
  print STDERR "usage: hadoop_import.pl <collection> [-debug] [-enable_thrift] [-dry_run] [-start_thrift] [-refresh_import] [-flush_diskcache] [-use_nfs] [-stagger] [-removeold] [-logdir <dir>]\n";
  print STDERR "where: [-debug] print more debug messages to STDERR\n";
  print STDERR "       [-dry_run] don't actually perform any file actions\n";
  exit;
}
my $offset = 1;
while (defined $ARGV[$offset])
{
  if ($ARGV[$offset] eq '-debug')
  {
    $debug = 1;
  }
  if ($ARGV[$offset] eq '-enable_thrift')
  {
    $use_thrift = 1;
  }
  if ($ARGV[$offset] eq '-dry_run')
  {
    $dry_run = 1;
  }
  if ($ARGV[$offset] eq '-refresh_import')
  {
    $refresh_import = 1;
  }
  if ($ARGV[$offset] eq '-stagger')
  {
    $stagger = 1;
  }
  if ($ARGV[$offset] eq '-flush_diskcache')
  {
    $flush_diskcache = 1;
  }
  if ($ARGV[$offset] eq '-start_thrift')
  {
    $start_thrift = 1;
  }
  if ($ARGV[$offset] eq '-use_nfs')
  {
    $use_nfs = 1;
  }
  if ($ARGV[$offset] eq '-removeold')
  {
    $remove_old = 1;
  }
  if ($ARGV[$offset] eq '-logdir')
  {
    $offset++;
    $gs_results_dir = $ARGV[$offset];
  }
  $offset++;
}
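# Example invocation (with a hypothetical collection named 'demo'), assuming
# setup.bash has already been sourced:
#   ./hadoop_import.pl demo -enable_thrift -start_thrift -refresh_import -removeold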

if (!$use_thrift)
{
  $hdfs_fs_prefix = 'HDFSShell://';
}
if ($use_nfs)
{
  $hdfs_fs_prefix = '/hdfs';
}

my $gs_collection_dir = $gsdl_home . '/collect/' . $collection;
my $gs_import_dir = $gs_collection_dir . '/import';
if (!-d $gs_import_dir)
{
  die("Error! Collection's import directory cannot be found: " . $gs_import_dir . "\n");
}
if ($gs_results_dir eq '')
{
  $gs_results_dir = $gs_collection_dir . '/results';
  if (!-d $gs_results_dir)
  {
    mkdir($gs_results_dir, 0755);
  }
  $gs_results_dir .= '/' . time();
}
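# e.g. <gsdl>/collect/demo/results/1367623416, where the final component is
# the epoch timestamp of this run (collection name and timestamp illustrative)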
if (!-d $gs_results_dir)
{
  mkdir($gs_results_dir, 0755);
}
# - directories within HDFS
my $hdfs_input_dir = &urlCat('hdfs://' . $ENV{'HDFSHOST'} . ':' . $ENV{'HDFSPORT'}, 'user', $username, 'gsdl', 'collect', $collection, 'import');
print "HDFS Import Directory: " . $hdfs_input_dir . "\n";
my $nfs_input_dir = &urlCat('/hdfs', 'user', $username, 'gsdl', 'collect', $collection, 'import');
if ($use_nfs)
{
  print "=> NFS Import Directory: " . $nfs_input_dir . "\n";
}
my $hdfs_output_dir = &urlCat('hdfs://' . $ENV{'HDFSHOST'} . ':' . $ENV{'HDFSPORT'}, 'user', $username, 'gsdl', 'collect', $collection, 'archives');
print "HDFS Archives Directory: " . $hdfs_output_dir . "\n";
my $nfs_output_dir = &urlCat('/hdfs', 'user', $username, 'gsdl', 'collect', $collection, 'archives');
if ($use_nfs)
{
  print "=> NFS Archives Directory: " . $nfs_output_dir . "\n";
}

# 2. Copy the import directory into HDFS
print " * Replicating import directory in HDFS...";
# - check if import directory already exists
my $hdfs_import_exists = 0;
if ($use_nfs)
{
  if (-d $nfs_input_dir)
  {
    $hdfs_import_exists = 1;
  }
}
else
{
  $hdfs_import_exists = &hdfsTest('d', 0, $hdfs_input_dir);
}
if ($refresh_import || !$hdfs_import_exists)
{
  # - clear out the old import directory
  if ($hdfs_import_exists)
  {
    if ($use_nfs)
    {
      &recursiveDelete($nfs_input_dir, '/hdfs');
    }
    else
    {
      &hdfsCommand('rmr', $hdfs_input_dir);
    }
  }
  # - now recursively copy the contents of import directory into HDFS ensuring
  #   that relative paths are maintained
  my $file_count = 0;
  if ($use_nfs)
  {
    $file_count = &recursiveCopy($gs_import_dir, $nfs_input_dir);
  }
  else
  {
    $file_count = &recursiveCopy($gs_import_dir, $hdfs_input_dir);
  }
  &debugPrint($file_count . " files 'putted'");
  print "Done!\n";
}
else
{
  print "Already exists!\n";
}

# - clear out the archives regardless
my $gs_archives_dir = $gs_collection_dir . '/archives';
my $deleted_archives = 0;
if (-e $gs_archives_dir)
{
  print " * Clearing existing archives directory for this collection... ";
  &recursiveDelete($gs_archives_dir, $gsdl_home);
  $deleted_archives = 1;
}
mkdir($gs_archives_dir, 0755);
my $hdfs_archives_exists = 0;
if ($use_nfs)
{
  if (-d $nfs_output_dir)
  {
    $hdfs_archives_exists = 1;
  }
}
else
{
  $hdfs_archives_exists = &hdfsTest('d', 0, $hdfs_output_dir);
}
if ($hdfs_archives_exists)
{
  if (!$deleted_archives)
  {
    print " * Clearing existing archives directory for this collection... ";
  }
  if ($use_nfs)
  {
    &recursiveDelete($nfs_output_dir, '/hdfs');
  }
  else
  {
    &hdfsCommand('rmr', $hdfs_output_dir);
  }
  $deleted_archives = 1;
}
if ($deleted_archives)
{
  print "Done!\n";
}

# - watch for cached directories of media-based collections
my $gs_cached_dir = $gs_collection_dir . '/cached';
if (-e $gs_cached_dir)
{
  print " * Clearing existing cached media directory for this collection... ";
  &recursiveDelete($gs_cached_dir, $gsdl_home);
  print "Done!\n";
}

# - clear out any old logs
print " * Clearing existing logs for this collection... ";
my $gs_logs_dir = $gs_collection_dir . '/logs';
if (!&dirIsEmpty($gs_logs_dir))
{
  &recursiveDelete($gs_logs_dir . '/*.*', $gsdl_home);
}
if (!&dirIsEmpty('/tmp/greenstone'))
{
  &shellCommand('rm -f /tmp/greenstone/*.*');
  &shellCommand('rm -rf /tmp/gsimport*');
  &shellCommand('rm -rf /tmp/thrift');
}
if ($is_rocks_cluster)
{
  &shellCommand('rocks run host "rm -f /tmp/greenstone/*.*"');
  &shellCommand('rocks run host "rm -rf /tmp/gsimport*"');
  &shellCommand('rocks run host "rm -rf /tmp/thrift"');
}
print "Done!\n";

# - flush the disk cache too, so we are playing on a level playing field
if ($flush_diskcache)
{
  print " * Flushing disk cache... ";
  &shellCommand('flush_caches.pl');
  if ($is_rocks_cluster)
  {
    &shellCommand('rocks run host "flush_caches.pl"');
  }
  print "Done!\n";
}

# - if we've been asked to stagger start-up, add "delay.me" files to the
#   compute nodes
if ($is_rocks_cluster && $stagger)
{
  &shellCommand('rocks run host "touch /tmp/greenstone/delay.me && chmod a+rw /tmp/greenstone/delay.me"');
}

# 3. Special case for *Server type infodbs (namely TDBServer and GDBMServer)
#    where we start the server now to ensure it lives on the head node
my $server_host = '';
my $server_port = '';
my $configuration_path = $gs_collection_dir . '/etc/collect.cfg';
my $infodbtype = `grep -P "^infodbtype" $configuration_path`;
my $server_prefix = '';
if ($infodbtype =~ /^infodbtype\s+(gdbm|tdb)server/i)
{
  $server_prefix = uc($1);
  print " * Starting " . $server_prefix . "Server... ";
  # - start the server on the head node and retrieve the host and port from
  #   the output
  my $launcher_command = $server_prefix . "Server.pl " . $$ . " " . $collection;
  my $launcher_output = &shellCommand($launcher_command);
  if ($launcher_output =~ /Server now listening on ([^:]+):(\d+)/)
  {
    $server_host = $1;
    $server_port = $2;
    print "running on " . $server_host . ":" . $server_port . "\n";
  }
  else
  {
    print "Failed!\n";
    exit;
  }
  # - use the client tool to add ourselves as a listener
  print " * Registering as listener... ";
  my $client_command = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#a:" . $$ . "\"";
  &shellCommand($client_command);
  print "Done!\n";
}
elsif ($infodbtype =~ /stdoutxml/)
{
  print " * InfoDB messages will be written to STDOUT... Cool bananas!\n";
}
else
{
  print "Error! True Hadoop processing is only available when Greenstone is\n";
  print "       configured to use either GDBMServer or TDBServer.\n";
  exit;
}

# 3.5 Start up the thrift server(s) if we've been asked to
my $thrift_log = $gs_results_dir . '/thriftctl.log';
if ($start_thrift)
{
  if ($is_rocks_cluster)
  {
    print " * Starting Thrift Servers (on compute nodes)... ";
    &shellCommand('rocks run host "cd ' . $ENV{'GEXTPARALLELBUILDING'} . '/packages/ThriftFS-0.9.0/bin && ./thriftctl.sh start" > "' . $thrift_log . '" 2>&1');
  }
  # single server
  else
  {
    print " * Starting Thrift Server... ";
    &shellCommand('cd ' . $ENV{'GEXTPARALLELBUILDING'} . '/packages/ThriftFS-0.9.0/bin && thriftctl.sh start > "' . $thrift_log . '" 2>&1');
  }
  print "Done!\n";
}

my $actual_archives_dir;
if ($use_nfs)
{
  $actual_archives_dir = $nfs_output_dir;
}
else
{
  $actual_archives_dir = $hdfs_output_dir;
  $actual_archives_dir =~ s/hdfs:\/\//$hdfs_fs_prefix/;
}
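# In the non-NFS case the substitution above means that, for example,
# 'hdfs://head:8000/user/bob/gsdl/collect/demo/archives' becomes
# 'HDFSShell://head:8000/...' (or 'HDThriftFS://...' with -enable_thrift);
# hostname, username and collection name here are illustrative.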

# 4. Running Hadoop - we hand in the import directory (within HDFS) as the input
#    and allow the FileInputFormat to split it up into files to be processed
#    in Greenstone. This works for collections with one file per document, like
#    Lorem and ReplayMe, but might not work well with multiple file documents
#    such as the Demo collection
print " * Running import using Hadoop...";
my $hadoop_log = $gs_results_dir . '/hadoop.log';
&shellCommand('echo "host:' . $ENV{'HDFSHOST'} . '" > ' . $hadoop_log);
my $hadoop_command = $hadoop_exe . ' jar ' . $gsdl_hadoop_ext . '/lib/hadoop-greenstone.jar org.nzdl.gsdl.' . $java_library . ' ';
$hadoop_command .= '"' . $gsdl_home . '" ';            # Greenstone's home dir
$hadoop_command .= '"' . $ENV{'HADOOP_PREFIX'} . '" '; # Hadoop's home dir
$hadoop_command .= $collection . ' ';                  # The collection name
$hadoop_command .= '"' . $actual_archives_dir . '" ';  # Collection archive dir
$hadoop_command .= '"' . $hdfs_fs_prefix . '" ';       # Prefix for talking to HDFS (driver)
$hadoop_command .= '"' . $hdfs_input_dir . '" ';       # HDFS in
$hadoop_command .= '"' . $hdfs_output_dir . '" ';      # HDFS out
$hadoop_command .= ' >> ' . $hadoop_log . ' 2>&1';     # Redirect to log
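# The assembled command therefore has this shape (placeholder values shown):
#   hadoop jar <ext>/lib/hadoop-greenstone.jar org.nzdl.gsdl.HadoopGreenstoneIngest2 \
#     "<gsdl_home>" "<hadoop_prefix>" demo "<archives dir>" "HDFSShell://" \
#     "<hdfs import dir>" "<hdfs archives dir>" >> hadoop.log 2>&1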
&shellCommand($hadoop_command);
print "Done!\n";

# 5. If we ran *Server infodbs, we now need to shut them down
if ($server_prefix ne '')
{
  print " * Deregistering as listener and shutting down... ";
  # - deregister as a listener
  my $client_command1 = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#r:" . $$ . "\"";
  &shellCommand($client_command1);
  # - send quit command
  my $client_command2 = $server_prefix . "Client.pl " . $server_host . " " . $server_port . " \"#q:" . $$ . "\"";
  &shellCommand($client_command2);
  print "Done!\n";
}

# 5.5 We started the thrift server(s) - so we had better stop them too
if ($start_thrift)
{
  if ($is_rocks_cluster)
  {
    print " * Stopping Thrift Servers (on compute nodes)... ";
    &shellCommand('rocks run host "cd ' . $ENV{'GEXTPARALLELBUILDING'} . '/packages/ThriftFS-0.9.0/bin && ./thriftctl.sh stop" >> "' . $thrift_log . '" 2>&1');
  }
  # single server
  else
  {
    print " * Stopping Thrift Server... ";
    &shellCommand('cd ' . $ENV{'GEXTPARALLELBUILDING'} . '/packages/ThriftFS-0.9.0/bin && thriftctl.sh stop >> "' . $thrift_log . '" 2>&1');
  }
  print "Done!\n";
}

# 6. Gather logs
print " * Gathering logs from compute nodes... ";
# - local files
if (!&dirIsEmpty('/tmp/greenstone'))
{
  &shellCommand('cp /tmp/greenstone/*.* ' . $gs_results_dir);
}
if (-d $gs_collection_dir . '/logs' && !&dirIsEmpty($gs_collection_dir . '/logs'))
{
  &shellCommand('cp ' . $gs_collection_dir . '/logs/*.* ' . $gs_results_dir);
}
if ($start_thrift && -d '/tmp/thrift')
{
  &shellCommand('cp /tmp/thrift/*.log ' . $gs_results_dir);
}
# - remote files
if ($is_rocks_cluster)
{
  &shellCommand('rocks run host "scp /tmp/greenstone/*.* ' . $cluster_head . ':' . $gs_results_dir . '"');
  &shellCommand('rocks run host "scp /tmp/gsimport-*/logs/*.log ' . $cluster_head . ':' . $gs_results_dir . '"');
  if ($start_thrift)
  {
    &shellCommand('rocks run host "scp /tmp/thrift/*.log ' . $cluster_head . ':' . $gs_results_dir . '"');
  }
}
print "Done!\n";

# - generate data locality report...
if (!$use_nfs && !$use_thrift)
{
  &shellCommand('parse_task_info_from_hadoop_log.pl "' . $gs_results_dir . '"');
}

# - hadoop report...
&shellCommand('hadoop_report.pl "' . $gs_results_dir . '"');

# - and gantt chart
&shellCommand('generate_gantt.pl -width 1600 "' . $gs_results_dir . '"');

# 7. Done - clean up
print " * Cleaning up temporary files... ";
&shellCommand('rm -rf /tmp/greenstone');
&shellCommand('rm -rf /tmp/gsimport*');
if ($is_rocks_cluster)
{
  &shellCommand('rocks run host "rm -rf /tmp/greenstone"');
  &shellCommand('rocks run host "rm -rf /tmp/gsimport*"');
}
print "Done!\n";
print "Complete!\n\n";

exit;

# /** @function debugPrint
# */
sub debugPrint
{
  my $msg = shift(@_);
  if ($debug)
  {
    print "[Debug] " . $msg . "\n";
  }
}
# /** debugPrint() **/

# /** @function hdfsCommand
#     Run a 'hadoop fs' subcommand over the given paths and return the exit
#     status of the invocation
# */
sub hdfsCommand
{
  my $command = shift(@_);
  my $paths = '"' . join('" "', @_) . '"';
  my $hdfs_command = $hadoop_exe . ' fs -' . $command . ' ' . $paths . ' 2>&1';
  &shellCommand($hdfs_command);
  return $?;
}
# /** hdfsCommand() **/

# /** @function hdfsTest
# */
sub hdfsTest
{
  my $command = shift(@_);
  my $test_target = shift(@_);
  my $result = &hdfsCommand('test -' . $command, @_);
  return ($result == $test_target);
}
# /** hdfsTest() **/
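# For example, &hdfsTest('d', 0, $hdfs_input_dir) runs 'hadoop fs -test -d'
# and is true when that command exits with status 0, i.e. when the path
# exists in HDFS and is a directory.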

# /**
# */
sub printUsage
{
  print "usage: hadoop_import.pl <collection> [-debug] [-enable_thrift] [-dry_run] [-start_thrift] [-refresh_import] [-flush_diskcache] [-use_nfs] [-stagger] [-removeold] [-logdir <dir>]\n";
  exit;
}
# /** printUsage() **/


## @function recursiveCopy()
#
sub recursiveCopy
{
  my ($src_dir, $hdfs_dir) = @_;
  my $file_count = 0;
  # - create the directory in HDFS
  if ($use_nfs)
  {
    &shellCommand('mkdir "' . $hdfs_dir . '"');
  }
  else
  {
    &hdfsCommand('mkdir', $hdfs_dir);
  }
  # - search $src_dir for files
  opendir(DH, $src_dir) or die("Error! Cannot open directory for reading: " . $src_dir);
  my @files = readdir(DH);
  closedir(DH);
  foreach my $file (@files)
  {
    # - skip dot prefix files
    if ($file !~ /^\./)
    {
      my $src_path = $src_dir . '/' . $file;
      # - recurse directories, remembering to extend HDFS dir too
      if (-d $src_path)
      {
        my $new_hdfs_dir = $hdfs_dir . '/' . $file;
        $file_count += &recursiveCopy($src_path, $new_hdfs_dir);
      }
      # - and use 'put' to copy files
      else
      {
        my $hdfs_path = $hdfs_dir . '/' . $file;
        if ($use_nfs)
        {
          &shellCommand('nice -n 5 cp "' . $src_path . '" "' . $hdfs_path . '"');
        }
        else
        {
          &hdfsCommand('put', $src_path, $hdfs_path);
        }
        $file_count++;
      }
    }
  }
  return $file_count;
}
## recursiveCopy() ##
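# Typical use (from step 2 above): &recursiveCopy($gs_import_dir, $hdfs_input_dir)
# mirrors the local import tree into HDFS one file at a time and returns the
# number of files copied.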


# /** @function shellCommand
# */
sub shellCommand
{
  my $cmd = shift(@_);
  my $output = '';
  &debugPrint($cmd);
  if (!$dry_run)
  {
    $output = `$cmd`;
  }
  return $output;
}
# /** shellCommand() **/

# /** @function urlCat
# */
sub urlCat
{
  my $url = join('/', @_);
  return $url;
}
# /** urlCat() **/
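# e.g. &urlCat('hdfs://head:8000', 'user', 'bob', 'import') yields
# 'hdfs://head:8000/user/bob/import' (hostname and username illustrative)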

# /**
# */
sub dirIsEmpty
{
  my $dir = shift(@_);
  my @files;
  if (-e $dir)
  {
    opendir(DIR, $dir) or die $!;
    @files = grep { !m/\A\.{1,2}\Z/ } readdir(DIR);
    closedir(DIR);
  }
  return @files ? 0 : 1;
}
# /** dirIsEmpty() **/


## @function recursiveDelete()
#
sub recursiveDelete
{
  my ($dir, $prefix) = @_;
  # - safety guard: only delete paths that fall under the given prefix
  if ($dir =~ /^$prefix/)
  {
    &shellCommand('rm -rf "' . $dir . '"');
  }
}
## recursiveDelete() ##