1 | # -*- Mode: cperl; cperl-indent-level: 4 -*-
|
---|
2 |
|
---|
3 | package Test::Harness;
|
---|
4 |
|
---|
5 | require 5.00405;
|
---|
6 | use Test::Harness::Straps;
|
---|
7 | use Test::Harness::Assert;
|
---|
8 | use Exporter;
|
---|
9 | use Benchmark;
|
---|
10 | use Config;
|
---|
11 | use strict;
|
---|
12 |
|
---|
13 |
|
---|
# Package globals.  "use vars" is obsolete in modern Perl (our() is the
# current form) but is retained for the very old perls (5.00405) this
# module still supports.
use vars qw(
    $VERSION
    @ISA @EXPORT @EXPORT_OK
    $Verbose $Switches $Debug
    $verbose $switches $debug
    $Curtest
    $Columns
    $Timer
    $ML $Last_ML_Print
    $Strap
    $has_time_hires
);

# Prefer Time::HiRes's sub-second time() when it is installed; the
# string eval keeps the dependency optional, and $has_time_hires
# records whether it worked.
BEGIN {
    eval "use Time::HiRes 'time'";
    $has_time_hires = !$@;
}
|
---|
31 |
|
---|
32 | =head1 NAME
|
---|
33 |
|
---|
34 | Test::Harness - Run Perl standard test scripts with statistics
|
---|
35 |
|
---|
36 | =head1 VERSION
|
---|
37 |
|
---|
38 | Version 2.56
|
---|
39 |
|
---|
40 | =cut
|
---|
41 |
|
---|
$VERSION = "2.56";

# Backwards compatibility for exportable variable names.
*verbose  = *Verbose;
*switches = *Switches;
*debug    = *Debug;

# Let test scripts detect that they are running under the harness,
# and which version of it (documented in the POD below).
$ENV{HARNESS_ACTIVE}  = 1;
$ENV{HARNESS_VERSION} = $VERSION;

END {
    # For VMS.
    delete $ENV{HARNESS_ACTIVE};
    delete $ENV{HARNESS_VERSION};
}

# Some experimental versions of OS/2 build have broken $?
my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

# Directory watched for files leaked by test scripts (see the
# HARNESS_FILELEAK_IN_DIR POD entry).
my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};

# The single Straps object used to run and parse every test script.
$Strap = Test::Harness::Straps->new;

# Accessor for the shared Straps object.
sub strap { return $Strap };

@ISA       = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

# Runtime configuration, overridable from the environment.
$Verbose  = $ENV{HARNESS_VERBOSE} || 0;
$Debug    = $ENV{HARNESS_DEBUG} || 0;
$Switches = "-w";
$Columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
$Columns--;       # Some shells have trouble with a full line of text.
$Timer    = $ENV{HARNESS_TIMER} || 0;
|
---|
77 |
|
---|
78 | =head1 SYNOPSIS
|
---|
79 |
|
---|
80 | use Test::Harness;
|
---|
81 |
|
---|
82 | runtests(@test_files);
|
---|
83 |
|
---|
84 | =head1 DESCRIPTION
|
---|
85 |
|
---|
86 | B<STOP!> If all you want to do is write a test script, consider
|
---|
87 | using Test::Simple. Test::Harness is the module that reads the
|
---|
88 | output from Test::Simple, Test::More and other modules based on
|
---|
89 | Test::Builder. You don't need to know about Test::Harness to use
|
---|
90 | those modules.
|
---|
91 |
|
---|
92 | Test::Harness runs tests and expects output from the test in a
|
---|
93 | certain format. That format is called TAP, the Test Anything
|
---|
94 | Protocol. It is defined in L<Test::Harness::TAP>.
|
---|
95 |
|
---|
96 | C<Test::Harness::runtests(@tests)> runs all the test scripts named
|
---|
97 | as arguments and checks standard output for the expected strings
|
---|
98 | in TAP format.
|
---|
99 |
|
---|
100 | The F<prove> utility is a thin wrapper around Test::Harness.
|
---|
101 |
|
---|
102 | =head2 Taint mode
|
---|
103 |
|
---|
104 | Test::Harness will honor the C<-T> or C<-t> in the #! line on your
|
---|
105 | test files. So if you begin a test with:
|
---|
106 |
|
---|
107 | #!perl -T
|
---|
108 |
|
---|
109 | the test will be run with taint mode on.
|
---|
110 |
|
---|
111 | =head2 Configuration variables.
|
---|
112 |
|
---|
113 | These variables can be used to configure the behavior of
|
---|
114 | Test::Harness. They are exported on request.
|
---|
115 |
|
---|
116 | =over 4
|
---|
117 |
|
---|
118 | =item C<$Test::Harness::Verbose>
|
---|
119 |
|
---|
120 | The package variable C<$Test::Harness::Verbose> is exportable and can be
|
---|
121 | used to let C<runtests()> display the standard output of the script
|
---|
122 | without altering the behavior otherwise. The F<prove> utility's C<-v>
|
---|
123 | flag will set this.
|
---|
124 |
|
---|
125 | =item C<$Test::Harness::switches>
|
---|
126 |
|
---|
127 | The package variable C<$Test::Harness::switches> is exportable and can be
|
---|
128 | used to set perl command line options used for running the test
|
---|
129 | script(s). The default value is C<-w>. It overrides C<HARNESS_SWITCHES>.
|
---|
130 |
|
---|
131 | =item C<$Test::Harness::Timer>
|
---|
132 |
|
---|
133 | If set to true, and C<Time::HiRes> is available, print elapsed seconds
|
---|
134 | after each test file.
|
---|
135 |
|
---|
136 | =back
|
---|
137 |
|
---|
138 |
|
---|
139 | =head2 Failure
|
---|
140 |
|
---|
141 | When tests fail, analyze the summary report:
|
---|
142 |
|
---|
143 | t/base..............ok
|
---|
144 | t/nonumbers.........ok
|
---|
145 | t/ok................ok
|
---|
146 | t/test-harness......ok
|
---|
147 | t/waterloo..........dubious
|
---|
148 | Test returned status 3 (wstat 768, 0x300)
|
---|
149 | DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
|
---|
150 | Failed 10/20 tests, 50.00% okay
|
---|
151 | Failed Test Stat Wstat Total Fail Failed List of Failed
|
---|
152 | -----------------------------------------------------------------------
|
---|
153 | t/waterloo.t 3 768 20 10 50.00% 1 3 5 7 9 11 13 15 17 19
|
---|
154 | Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
|
---|
155 |
|
---|
156 | Everything passed but F<t/waterloo.t>. It failed 10 of 20 tests and
|
---|
157 | exited with non-zero status indicating something dubious happened.
|
---|
158 |
|
---|
159 | The columns in the summary report mean:
|
---|
160 |
|
---|
161 | =over 4
|
---|
162 |
|
---|
163 | =item B<Failed Test>
|
---|
164 |
|
---|
165 | The test file which failed.
|
---|
166 |
|
---|
167 | =item B<Stat>
|
---|
168 |
|
---|
169 | If the test exited with non-zero, this is its exit status.
|
---|
170 |
|
---|
171 | =item B<Wstat>
|
---|
172 |
|
---|
173 | The wait status of the test.
|
---|
174 |
|
---|
175 | =item B<Total>
|
---|
176 |
|
---|
177 | Total number of tests expected to run.
|
---|
178 |
|
---|
179 | =item B<Fail>
|
---|
180 |
|
---|
181 | Number which failed, either from "not ok" or because they never ran.
|
---|
182 |
|
---|
183 | =item B<Failed>
|
---|
184 |
|
---|
185 | Percentage of the total tests which failed.
|
---|
186 |
|
---|
187 | =item B<List of Failed>
|
---|
188 |
|
---|
189 | A list of the tests which failed. Successive failures may be
|
---|
190 | abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
|
---|
191 | 20 failed).
|
---|
192 |
|
---|
193 | =back
|
---|
194 |
|
---|
195 |
|
---|
196 | =head2 Functions
|
---|
197 |
|
---|
198 | Test::Harness currently only has one function, here it is.
|
---|
199 |
|
---|
200 | =over 4
|
---|
201 |
|
---|
202 | =item B<runtests>
|
---|
203 |
|
---|
204 | my $allok = runtests(@test_files);
|
---|
205 |
|
---|
206 | This runs all the given I<@test_files> and divines whether they passed
|
---|
207 | or failed based on their output to STDOUT (details above). It prints
|
---|
208 | out each individual test which failed along with a summary report and
|
---|
209 | how long it all took.
|
---|
210 |
|
---|
211 | It returns true if everything was ok. Otherwise it will C<die()> with
|
---|
212 | one of the messages in the DIAGNOSTICS section.
|
---|
213 |
|
---|
214 | =cut
|
---|
215 |
|
---|
# Public entry point: run every test file given, print the summary
# report, and return true iff the whole run passed.  Dies (see
# DIAGNOSTICS in the POD) on failure.
sub runtests {
    my(@tests) = @_;

    # Neutralize the caller's output-record/field separators so they
    # cannot distort the report we print.
    local ($\, $,);

    my($tot, $failedtests) = _run_all_tests(@tests);
    _show_results($tot, $failedtests);

    my $ok = _all_ok($tot);

    # Sanity check: overall success must agree with an empty failure list.
    assert(($ok xor keys %$failedtests),
           q{ok status jives with $failedtests});

    return $ok;
}
|
---|
231 |
|
---|
232 | =begin _private
|
---|
233 |
|
---|
234 | =item B<_all_ok>
|
---|
235 |
|
---|
236 | my $ok = _all_ok(\%tot);
|
---|
237 |
|
---|
238 | Tells you if this test run is overall successful or not.
|
---|
239 |
|
---|
240 | =cut
|
---|
241 |
|
---|
# Whole-run success test: no script failed, and at least one test was
# actually run (or the whole run was deliberately skipped).
sub _all_ok {
    my $tot = shift;

    my $none_bad = ($tot->{bad} == 0);
    my $ran_some = ($tot->{max} || $tot->{skipped});

    return( ($none_bad && $ran_some) ? 1 : 0 );
}
|
---|
247 |
|
---|
248 | =item B<_globdir>
|
---|
249 |
|
---|
250 | my @files = _globdir $dir;
|
---|
251 |
|
---|
252 | Returns all the files in a directory. This is shorthand for backwards
|
---|
253 | compatibility on systems where C<glob()> doesn't work right.
|
---|
254 |
|
---|
255 | =cut
|
---|
256 |
|
---|
# Returns all the files in a directory.  This is shorthand for backwards
# compatibility on systems where glob() doesn't work right.
#
# Fixed to use a lexical directory handle and to check opendir() for
# failure (the original used a bareword DIRH and read from it
# unchecked).  On failure we return an empty list, which is what the
# old code effectively produced anyway.
sub _globdir {
    my $dir = shift;

    opendir my $dirh, $dir or return ();
    my @files = readdir $dirh;
    closedir $dirh;

    return @files;
}
|
---|
264 |
|
---|
265 | =item B<_run_all_tests>
|
---|
266 |
|
---|
267 | my($total, $failed) = _run_all_tests(@test_files);
|
---|
268 |
|
---|
269 | Runs all the given C<@test_files> (as C<runtests()>) but does it
|
---|
270 | quietly (no report). $total is a hash ref summary of all the tests
|
---|
271 | run. Its keys and values are this:
|
---|
272 |
|
---|
273 | bonus Number of individual todo tests unexpectedly passed
|
---|
274 | max Number of individual tests ran
|
---|
275 | ok Number of individual tests passed
|
---|
276 | sub_skipped Number of individual tests skipped
|
---|
277 | todo Number of individual todo tests
|
---|
278 |
|
---|
279 | files Number of test files ran
|
---|
280 | good Number of test files passed
|
---|
281 | bad Number of test files failed
|
---|
282 | tests Number of test files originally given
|
---|
283 | skipped Number of test files skipped
|
---|
284 |
|
---|
285 | If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
|
---|
286 | got a successful test.
|
---|
287 |
|
---|
288 | $failed is a hash ref of all the test scripts which failed. Each key
|
---|
289 | is the name of a test script, each value is another hash representing
|
---|
290 | how that script failed. Its keys are these:
|
---|
291 |
|
---|
292 | name Name of the test which failed
|
---|
293 | estat Script's exit value
|
---|
294 | wstat Script's wait status
|
---|
295 | max Number of individual tests
|
---|
296 | failed Number which failed
|
---|
297 | percent Percentage of tests which failed
|
---|
298 | canon List of tests which failed (as string).
|
---|
299 |
|
---|
300 | C<$failed> should be empty if everything passed.
|
---|
301 |
|
---|
302 | B<NOTE> Currently this function is still noisy. I'm working on it.
|
---|
303 |
|
---|
304 | =cut
|
---|
305 |
|
---|
306 | # Turns on autoflush for the handle passed
|
---|
307 | sub _autoflush {
|
---|
308 | my $flushy_fh = shift;
|
---|
309 | my $old_fh = select $flushy_fh;
|
---|
310 | $| = 1;
|
---|
311 | select $old_fh;
|
---|
312 | }
|
---|
313 |
|
---|
# Runs all the given test files (as runtests() does) but quietly: no
# summary report.  Returns (\%tot, \%failedtests) -- %tot holds the
# run-wide totals documented in the POD above, %failedtests maps each
# failing script's name to a hash describing how it failed.
sub _run_all_tests {
    my @tests = @_;

    _autoflush(\*STDOUT);
    _autoflush(\*STDERR);

    my(%failedtests);

    # Test-wide totals.
    my(%tot) = (
                bonus    => 0,
                max      => 0,
                ok       => 0,
                files    => 0,
                bad      => 0,
                good     => 0,
                tests    => scalar @tests,
                sub_skipped => 0,
                todo     => 0,
                skipped  => 0,
                bench    => 0,
               );

    # BUGFIX: was "my @dir_files = _globdir $Files_In_Dir if defined
    # $Files_In_Dir;" -- "my ... if COND" is documented as undefined
    # behaviour in Perl (perlsyn); declare first, assign conditionally.
    my @dir_files;
    @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;

    # Direct method call instead of indirect-object "new Benchmark".
    my $run_start_time = Benchmark->new;

    my $width = _leader_width(@tests);
    foreach my $tfile (@tests) {
        $Last_ML_Print = 0;  # so each test prints at least once
        my($leader, $ml) = _mk_leader($tfile, $width);
        local $ML = $ml;

        print $leader;

        $tot{files}++;

        $Strap->{_seen_header} = 0;
        if ( $Test::Harness::Debug ) {
            print "# Running: ", $Strap->_command_line($tfile), "\n";
        }
        my $test_start_time = $Timer ? time : 0;

        # Run the script and parse its TAP output; skip it (with a
        # warning) if Straps couldn't execute it at all.
        my %results = $Strap->analyze_file($tfile) or
          do { warn $Strap->{error}, "\n";  next };

        # Per-file elapsed-time string, only when $Timer is on.
        my $elapsed;
        if ( $Timer ) {
            $elapsed = time - $test_start_time;
            if ( $has_time_hires ) {
                $elapsed = sprintf( " %8.3fs", $elapsed );
            }
            else {
                $elapsed = sprintf( " %8ss", $elapsed ? $elapsed : "<1" );
            }
        }
        else {
            $elapsed = "";
        }

        # state of the current test.
        my @failed = grep { !$results{details}[$_-1]{ok} }
                     1..@{$results{details}};
        my %test = (
                    ok          => $results{ok},
                    'next'      => $Strap->{'next'},
                    max         => $results{max},
                    failed      => \@failed,
                    bonus       => $results{bonus},
                    skipped     => $results{skip},
                    skip_reason => $results{skip_reason},
                    skip_all    => $Strap->{skip_all},
                    ml          => $ml,
                   );

        $tot{bonus}       += $results{bonus};
        $tot{max}         += $results{max};
        $tot{ok}          += $results{ok};
        $tot{todo}        += $results{todo};
        $tot{sub_skipped} += $results{skip};

        my($estatus, $wstatus) = @results{qw(exit wait)};

        if ($results{passing}) {
            # XXX Combine these first two
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
                    if $test{bonus};
                print "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
            }
            elsif ( $test{max} ) {
                print "$test{ml}ok$elapsed\n";
            }
            elsif ( defined $test{skip_all} and length $test{skip_all} ) {
                print "skipped\n all skipped: $test{skip_all}\n";
                $tot{skipped}++;
            }
            else {
                print "skipped\n all skipped: no reason given\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        }
        else {
            # List unrun tests as failures.
            if ($test{'next'} <= $test{max}) {
                push @{$test{failed}}, $test{'next'}..$test{max};
            }
            # List overruns as failures.
            else {
                my $details = $results{details};
                foreach my $overrun ($test{max}+1..@$details) {
                    next unless ref $details->[$overrun-1];
                    push @{$test{failed}}, $overrun
                }
            }

            if ($wstatus) {
                # Non-zero wait status: "dubious" exit.
                $failedtests{$tfile} = _dubious_return(\%test, \%tot,
                                                       $estatus, $wstatus);
                $failedtests{$tfile}{name} = $tfile;
            }
            elsif($results{seen}) {
                if (@{$test{failed}} and $test{max}) {
                    my ($txt, $canon) = _canonfailed($test{max},$test{skipped},
                                                     @{$test{failed}});
                    print "$test{ml}$txt";
                    $failedtests{$tfile} = { canon   => $canon,
                                             max     => $test{max},
                                             failed  => scalar @{$test{failed}},
                                             name    => $tfile,
                                             percent => 100*(scalar @{$test{failed}})/$test{max},
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                else {
                    print "Don't know which tests failed: got $test{ok} ok, ".
                          "expected $test{max}\n";
                    $failedtests{$tfile} = { canon   => '??',
                                             max     => $test{max},
                                             failed  => '??',
                                             name    => $tfile,
                                             percent => undef,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                $tot{bad}++;
            }
            else {
                # The script produced no TAP output at all.
                print "FAILED before any test output arrived\n";
                $tot{bad}++;
                $failedtests{$tfile} = { canon   => '??',
                                         max     => '??',
                                         failed  => '??',
                                         name    => $tfile,
                                         percent => undef,
                                         estat   => '',
                                         wstat   => '',
                                       };
            }
        }

        # File-leak detection: report anything new in $Files_In_Dir.
        if (defined $Files_In_Dir) {
            my @new_dir_files = _globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    } # foreach test

    $tot{bench} = timediff(Benchmark->new, $run_start_time);

    $Strap->_restore_PERL5LIB;

    return(\%tot, \%failedtests);
}
|
---|
496 |
|
---|
497 | =item B<_mk_leader>
|
---|
498 |
|
---|
499 | my($leader, $ml) = _mk_leader($test_file, $width);
|
---|
500 |
|
---|
501 | Generates the 't/foo........' leader for the given C<$test_file> as well
|
---|
502 | as a similar version which will overwrite the current line (by use of
|
---|
503 | \r and such). C<$ml> may be empty if Test::Harness doesn't think you're
|
---|
504 | on TTY.
|
---|
505 |
|
---|
506 | The C<$width> is the width of the "yada/blah.." string.
|
---|
507 |
|
---|
508 | =cut
|
---|
509 |
|
---|
# Build the "t/foo........" leader for a test file, plus an $ml variant
# that rewrites the current terminal line in place.  $ml is "" when
# STDOUT is not a tty, HARNESS_NOTTY is set, or we are in verbose mode.
sub _mk_leader {
    my($name, $width) = @_;

    chomp $name;
    $name =~ s/\.\w+$/./;          # trim the file suffix down to a dot

    $name =~ s/^.*\.t\./\[.t./s if $^O eq 'VMS';

    my $pad    = '.' x ($width - length $name);
    my $leader = $name . $pad;

    my $ml = "";
    if ( -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose ) {
        $ml = "\r" . (' ' x 77) . "\r$leader";
    }

    return($leader, $ml);
}
|
---|
527 |
|
---|
528 | =item B<_leader_width>
|
---|
529 |
|
---|
530 | my($width) = _leader_width(@test_files);
|
---|
531 |
|
---|
532 | Calculates how wide the leader should be based on the length of the
|
---|
533 | longest test name.
|
---|
534 |
|
---|
535 | =cut
|
---|
536 |
|
---|
# Width of the dotted leader: the longest test-file name, plus three
# dots, minus the longest suffix (which _mk_leader trims off).
sub _leader_width {
    my $longest_name   = 0;
    my $longest_suffix = 0;

    for my $file (@_) {
        my $suffix = $file =~ /\.(\w+)$/ ? $1 : '';

        $longest_name   = length $file   if length $file   > $longest_name;
        $longest_suffix = length $suffix if length $suffix > $longest_suffix;
    }

    # + 3 : we want three dots between the test name and the "ok"
    return $longest_name + 3 - $longest_suffix;
}
|
---|
550 |
|
---|
551 |
|
---|
# Print the human-readable summary for a finished run: either the
# "All tests successful" line, or the failure table (rendered through
# the formats built by _create_fmts) followed by a die() describing
# the damage; finally the Files=/Tests= timing line.
sub _show_results {
    my($tot, $failedtests) = @_;

    my $pct;
    my $bonusmsg = _bonusmsg($tot);

    if (_all_ok($tot)) {
        print "All tests successful$bonusmsg.\n";
    }
    elsif (!$tot->{tests}){
        die "FAILED--no tests were run for some reason.\n";
    }
    elsif (!$tot->{max}) {
        # Scripts ran, but none of them produced any TAP output.
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    }
    else {
        $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
        my $percent_ok = 100*$tot->{ok}/$tot->{max};
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                              $tot->{max} - $tot->{ok}, $tot->{max},
                              $percent_ok;

        # Build and install the STDOUT/STDOUT_TOP formats, sized to
        # fit the longest failing-script name.
        my($fmt_top, $fmt) = _create_fmts($failedtests);

        # Now write to formats
        for my $script (sort keys %$failedtests) {
            $Curtest = $failedtests->{$script};   # the formats read $Curtest
            write;
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
                "$subpct\n";
        }
    }

    printf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
}
|
---|
594 |
|
---|
595 |
|
---|
# Dispatch table mapping Straps line types to the printing handlers
# defined below.  Line types with no entry are silently ignored.
my %Handlers = (
    header  => \&header_handler,
    test    => \&test_handler,
    bailout => \&bailout_handler,
);

# Straps invokes this callback for every TAP line it parses.
$Strap->{callback} = \&strap_callback;
sub strap_callback {
    my($self, $line, $type, $totals) = @_;
    print $line if $Verbose;   # echo raw test output in verbose mode

    my $meth = $Handlers{$type};
    $meth->($self, $line, $type, $totals) if $meth;
};
|
---|
610 |
|
---|
611 |
|
---|
# Callback for a TAP plan line ("1..M").  Warns when a plan shows up
# more than once, or in the middle of the test stream.
sub header_handler {
    my($self, $line, $type, $totals) = @_;

    if ( $self->{_seen_header} ) {
        warn "Test header seen more than once!\n";
    }
    $self->{_seen_header}++;

    if ( $totals->{seen} and $totals->{max} < $totals->{seen} ) {
        warn "1..M can only appear at the beginning or end of tests\n";
    }
};
|
---|
623 |
|
---|
# Callback for each "ok"/"not ok" TAP line.  Updates the one-line
# progress display and complains when the test numbering is out of
# sequence.
sub test_handler {
    my($self, $line, $type, $totals) = @_;

    my $curr = $totals->{seen};        # number of the test just seen
    my $next = $self->{'next'};        # number we expected to see
    my $max  = $totals->{max};
    my $detail = $totals->{details}[-1];

    if( $detail->{ok} ) {
        # Throttled printing: at most one update per second.
        _print_ml_less("ok $curr/$max");

        if( $detail->{type} eq 'skip' ) {
            # Remember a single skip reason; degrade to a generic one
            # when different tests give different reasons.
            $totals->{skip_reason} = $detail->{reason}
              unless defined $totals->{skip_reason};
            $totals->{skip_reason} = 'various reasons'
              if $totals->{skip_reason} ne $detail->{reason};
        }
    }
    else {
        _print_ml("NOK $curr");
    }

    if( $curr > $next ) {
        print "Test output counter mismatch [test $curr]\n";
    }
    elsif( $curr < $next ) {
        print "Confused test output: test $curr answered after ".
              "test ", $next - 1, "\n";
    }

};
|
---|
655 |
|
---|
# Callback for a TAP "Bail out!" line: abort the entire run by dying,
# including the script's reason when it gave one.
sub bailout_handler {
    my($self, $line, $type, $totals) = @_;

    my $why  = $self->{bailout_reason};
    my $tail = $why ? ": $why\n" : ".\n";

    die "FAILED--Further testing stopped" . $tail;
};
|
---|
662 |
|
---|
663 |
|
---|
# Overwrite the current progress line with the given text, but only
# when multiline ($ML) mode is active.
sub _print_ml {
    return unless $ML;
    print join('', $ML, @_);
}
|
---|
667 |
|
---|
668 |
|
---|
669 | # Print updates only once per second.
|
---|
# Throttled _print_ml(): emit at most one progress update per second
# so fast test scripts don't flood the terminal.
sub _print_ml_less {
    my $now = CORE::time;
    return if $Last_ML_Print == $now;   # already printed this second

    _print_ml(@_);
    $Last_ML_Print = $now;
}
|
---|
677 |
|
---|
# Compose the "(N subtests UNEXPECTEDLY SUCCEEDED), N tests ... skipped"
# tail for the final summary line.  Returns '' when nothing applies.
sub _bonusmsg {
    my($tot) = @_;

    my $msg = '';

    if ( $tot->{bonus} ) {
        my $s = $tot->{bonus} > 1 ? 's' : '';
        $msg = " ($tot->{bonus} subtest$s UNEXPECTEDLY SUCCEEDED)";
    }

    if ( $tot->{skipped} ) {
        my $s = $tot->{skipped} != 1 ? 's' : '';
        $msg .= ", $tot->{skipped} test$s";
        if ( $tot->{sub_skipped} ) {
            my $sub_s = $tot->{sub_skipped} != 1 ? 's' : '';
            $msg .= " and $tot->{sub_skipped} subtest$sub_s";
        }
        $msg .= ' skipped';
    }
    elsif ( $tot->{sub_skipped} ) {
        my $sub_s = $tot->{sub_skipped} != 1 ? 's' : '';
        $msg .= ", $tot->{sub_skipped} subtest$sub_s skipped";
    }

    return $msg;
}
|
---|
703 |
|
---|
704 | # Test program go boom.
|
---|
# Test program go boom.
#
# Called when a test script exits with a non-zero wait status.  Prints
# the "dubious" diagnostics, bumps $tot->{bad}, marks any unreached
# tests as failed, and returns the failure record used in the report
# table.  $percent stays undef unless it can be computed below.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;
    my ($failed, $canon, $percent) = ('??', '??');

    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n",
           $wstatus,$wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
            # Every subtest passed before the script blew up.
            print "\tafter all the subtests completed successfully\n";
            $percent = 0;
            $failed = 0;        # But we do not set $canon!
        }
        else {
            # Count everything from the next expected test onward as failed.
            push @{$test->{failed}}, $test->{'next'}..$test->{max};
            $failed = @{$test->{failed}};
            (my $txt, $canon) = _canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
            $percent = 100*(scalar @{$test->{failed}})/$test->{max};
            print "DIED. ",$txt;
        }
    }

    return { canon => $canon,  max => $test->{max} || '??',
             failed => $failed,
             percent => $percent,
             estat => $estatus, wstat => $wstatus,
           };
}
|
---|
737 |
|
---|
738 |
|
---|
# Build (and install via string eval) the STDOUT_TOP / STDOUT formats
# used to print the failure table.  Column widths are derived from the
# longest failing-script name and clamped so the table fits $Columns;
# $Columns itself is widened as a last resort.  See perlform for the
# picture-line syntax assembled below.
sub _create_fmts {
    my($failedtests) = @_;

    my $failed_str = "Failed Test";
    my $middle_str = " Stat Wstat Total Fail Failed ";
    my $list_str = "List of Failed";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    # Shrink the list column on narrow terminals, but never below its
    # own header; then try the name column; then give up and widen.
    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top = "format STDOUT_TOP =\n"
                  . sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n.\n";

    # The body format reads its fields from the $Curtest package var.
    my $fmt = "format STDOUT =\n"
              . "@" . "<" x ($max_namelen - 1)
              . " @>> @>>>> @>>>> @>>> ^##.##% "
              . "^" . "<" x ($list_len - 1) . "\n"
              . '{ $Curtest->{name}, $Curtest->{estat},'
              . ' $Curtest->{wstat}, $Curtest->{max},'
              . ' $Curtest->{failed}, $Curtest->{percent},'
              . ' $Curtest->{canon}'
              . "\n}\n"
              . "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n"
              . '$Curtest->{canon}'
              . "\n.\n";

    # Installing a format requires eval of its source text.
    eval $fmt_top;
    die $@ if $@;
    eval $fmt;
    die $@ if $@;

    return($fmt_top, $fmt);
}
|
---|
791 |
|
---|
# Turn a list of failed test numbers into a human-readable summary
# ("FAILED tests 2-4, 7\n\tFailed 4/10 tests, 60.00% okay\n") plus a
# compact canonical string ("2-4 7").  Returns ($txt, $canon).
sub _canonfailed ($$@) {
    my($max, $skipped, @failed) = @_;

    # Unique-ify and numerically sort the failure list.
    my %seen;
    @failed = sort { $a <=> $b } grep { !$seen{$_}++ } @failed;
    my $failed_count = @failed;

    my @lines;
    my $canon;
    my $min;
    my $last = $min = shift @failed;

    if (@failed) {
        my @ranges;
        # Repeating the final element acts as a sentinel that flushes
        # the last open range out of the loop.
        for my $num (@failed, $failed[-1]) {
            if ($num > $last + 1 || $num == $last) {
                push @ranges, $min == $last ? $last : "$min-$last";
                $min = $num;
            }
            $last = $num;
        }
        $canon = join ' ', @ranges;
        push @lines, 'FAILED tests ' . join(', ', @ranges) . "\n";
    }
    else {
        $canon = $last;
        push @lines, "FAILED test $last\n";
    }

    push @lines, "\tFailed $failed_count/$max tests, ";
    push @lines, $max
        ? ( sprintf("%.2f", 100 * (1 - $failed_count / $max)), "% okay" )
        : "?% okay";

    if ($skipped) {
        my $ender = $skipped > 1 ? 's' : '';
        my $good  = $max - $failed_count - $skipped;
        my $note  = " (less $skipped skipped test$ender: $good okay, ";
        $note .= $max ? sprintf("%.2f", 100 * ($good / $max)) . "%)" : "?%)";
        push @lines, $note;
    }
    push @lines, "\n";

    return( join('', @lines), $canon );
}
|
---|
843 |
|
---|
844 | =end _private
|
---|
845 |
|
---|
846 | =back
|
---|
847 |
|
---|
848 | =cut
|
---|
849 |
|
---|
850 |
|
---|
851 | 1;
|
---|
852 | __END__
|
---|
853 |
|
---|
854 |
|
---|
855 | =head1 EXPORT
|
---|
856 |
|
---|
857 | C<&runtests> is exported by Test::Harness by default.
|
---|
858 |
|
---|
859 | C<$verbose>, C<$switches> and C<$debug> are exported upon request.
|
---|
860 |
|
---|
861 | =head1 DIAGNOSTICS
|
---|
862 |
|
---|
863 | =over 4
|
---|
864 |
|
---|
865 | =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
|
---|
866 |
|
---|
867 | If all tests are successful some statistics about the performance are
|
---|
868 | printed.
|
---|
869 |
|
---|
870 | =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
|
---|
871 |
|
---|
872 | For any single script that has failing subtests statistics like the
|
---|
873 | above are printed.
|
---|
874 |
|
---|
875 | =item C<Test returned status %d (wstat %d)>
|
---|
876 |
|
---|
877 | Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
|
---|
878 | and C<$?> are printed in a message similar to the above.
|
---|
879 |
|
---|
880 | =item C<Failed 1 test, %.2f%% okay. %s>
|
---|
881 |
|
---|
882 | =item C<Failed %d/%d tests, %.2f%% okay. %s>
|
---|
883 |
|
---|
884 | If not all tests were successful, the script dies with one of the
|
---|
885 | above messages.
|
---|
886 |
|
---|
887 | =item C<FAILED--Further testing stopped: %s>
|
---|
888 |
|
---|
889 | If a single subtest decides that further testing will not make sense,
|
---|
890 | the script dies with this message.
|
---|
891 |
|
---|
892 | =back
|
---|
893 |
|
---|
894 | =head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
|
---|
895 |
|
---|
896 | Test::Harness sets these before executing the individual tests.
|
---|
897 |
|
---|
898 | =over 4
|
---|
899 |
|
---|
900 | =item C<HARNESS_ACTIVE>
|
---|
901 |
|
---|
902 | This is set to a true value. It allows the tests to determine if they
|
---|
903 | are being executed through the harness or by any other means.
|
---|
904 |
|
---|
905 | =item C<HARNESS_VERSION>
|
---|
906 |
|
---|
907 | This is the version of Test::Harness.
|
---|
908 |
|
---|
909 | =back
|
---|
910 |
|
---|
911 | =head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS
|
---|
912 |
|
---|
913 | =over 4
|
---|
914 |
|
---|
915 | =item C<HARNESS_COLUMNS>
|
---|
916 |
|
---|
917 | This value will be used for the width of the terminal. If it is not
|
---|
918 | set then it will default to C<COLUMNS>. If this is not set, it will
|
---|
919 | default to 80. Note that users of Bourne-sh based shells will need to
|
---|
920 | C<export COLUMNS> for this module to use that variable.
|
---|
921 |
|
---|
922 | =item C<HARNESS_COMPILE_TEST>
|
---|
923 |
|
---|
924 | When true it will make harness attempt to compile the test using
|
---|
925 | C<perlcc> before running it.
|
---|
926 |
|
---|
927 | B<NOTE> This currently only works when sitting in the perl source
|
---|
928 | directory!
|
---|
929 |
|
---|
930 | =item C<HARNESS_DEBUG>
|
---|
931 |
|
---|
932 | If true, Test::Harness will print debugging information about itself as
|
---|
933 | it runs the tests. This is different from C<HARNESS_VERBOSE>, which prints
|
---|
934 | the output from the test being run. Setting C<$Test::Harness::Debug> will
|
---|
935 | override this, or you can use the C<-d> switch in the F<prove> utility.
|
---|
936 |
|
---|
937 | =item C<HARNESS_FILELEAK_IN_DIR>
|
---|
938 |
|
---|
939 | When set to the name of a directory, harness will check after each
|
---|
940 | test whether new files appeared in that directory, and report them as
|
---|
941 |
|
---|
942 | LEAKED FILES: scr.tmp 0 my.db
|
---|
943 |
|
---|
944 | If relative, directory name is with respect to the current directory at
|
---|
945 | the moment runtests() was called. Putting absolute path into
|
---|
946 | C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
|
---|
947 |
|
---|
948 | =item C<HARNESS_IGNORE_EXITCODE>
|
---|
949 |
|
---|
950 | Makes harness ignore the exit status of child processes when defined.
|
---|
951 |
|
---|
952 | =item C<HARNESS_NOTTY>
|
---|
953 |
|
---|
954 | When set to a true value, forces it to behave as though STDOUT were
|
---|
955 | not a console. You may need to set this if you don't want harness to
|
---|
956 | output more frequent progress messages using carriage returns. Some
|
---|
957 | consoles may not handle carriage returns properly (which results in a
|
---|
958 | somewhat messy output).
|
---|
959 |
|
---|
960 | =item C<HARNESS_PERL>
|
---|
961 |
|
---|
962 | Usually your tests will be run by C<$^X>, the currently-executing Perl.
|
---|
963 | However, you may want to have it run by a different executable, such as
|
---|
964 | a threading perl, or a different version.
|
---|
965 |
|
---|
966 | If you're using the F<prove> utility, you can use the C<--perl> switch.
|
---|
967 |
|
---|
968 | =item C<HARNESS_PERL_SWITCHES>
|
---|
969 |
|
---|
970 | Its value will be prepended to the switches used to invoke perl on
|
---|
971 | each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
|
---|
972 | run all tests with all warnings enabled.
|
---|
973 |
|
---|
974 | =item C<HARNESS_VERBOSE>
|
---|
975 |
|
---|
976 | If true, Test::Harness will output the verbose results of running
|
---|
977 | its tests. Setting C<$Test::Harness::verbose> will override this,
|
---|
978 | or you can use the C<-v> switch in the F<prove> utility.
|
---|
979 |
|
---|
980 | =back
|
---|
981 |
|
---|
982 | =head1 EXAMPLE
|
---|
983 |
|
---|
984 | Here's how Test::Harness tests itself
|
---|
985 |
|
---|
986 | $ cd ~/src/devel/Test-Harness
|
---|
987 | $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
|
---|
988 | $verbose=0; runtests @ARGV;' t/*.t
|
---|
989 | Using /home/schwern/src/devel/Test-Harness/blib
|
---|
990 | t/base..............ok
|
---|
991 | t/nonumbers.........ok
|
---|
992 | t/ok................ok
|
---|
993 | t/test-harness......ok
|
---|
994 | All tests successful.
|
---|
995 | Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
|
---|
996 |
|
---|
997 | =head1 SEE ALSO
|
---|
998 |
|
---|
999 | The included F<prove> utility for running test scripts from the command line,
|
---|
1000 | L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
|
---|
1001 | the underlying timing routines, and L<Devel::Cover> for test coverage
|
---|
1002 | analysis.
|
---|
1003 |
|
---|
1004 | =head1 TODO
|
---|
1005 |
|
---|
1006 | Provide a way of running tests quietly (ie. no printing) for automated
|
---|
1007 | validation of tests. This will probably take the form of a version
|
---|
1008 | of runtests() which rather than printing its output returns raw data
|
---|
1009 | on the state of the tests. (Partially done in Test::Harness::Straps)
|
---|
1010 |
|
---|
1011 | Document the format.
|
---|
1012 |
|
---|
1013 | Fix HARNESS_COMPILE_TEST without breaking its core usage.
|
---|
1014 |
|
---|
1015 | Figure a way to report test names in the failure summary.
|
---|
1016 |
|
---|
1017 | Rework the test summary so long test names are not truncated as badly.
|
---|
1018 | (Partially done with new skip test styles)
|
---|
1019 |
|
---|
1020 | Add option for coverage analysis.
|
---|
1021 |
|
---|
1022 | Trap STDERR.
|
---|
1023 |
|
---|
1024 | Implement Straps total_results()
|
---|
1025 |
|
---|
1026 | Remember exit code
|
---|
1027 |
|
---|
1028 | Completely redo the print summary code.
|
---|
1029 |
|
---|
1030 | Implement Straps callbacks. (experimentally implemented)
|
---|
1031 |
|
---|
1032 | Straps->analyze_file() not taint clean; don't know if it can be fixed.
|
---|
1033 |
|
---|
1034 | Fix that damned VMS nit.
|
---|
1035 |
|
---|
1036 | HARNESS_TODOFAIL to display TODO failures
|
---|
1037 |
|
---|
1038 | Add a test for verbose.
|
---|
1039 |
|
---|
1040 | Change internal list of test results to a hash.
|
---|
1041 |
|
---|
1042 | Fix stats display when there's an overrun.
|
---|
1043 |
|
---|
1044 | Fix so perls with spaces in the filename work.
|
---|
1045 |
|
---|
1046 | Keep whittling away at _run_all_tests()
|
---|
1047 |
|
---|
1048 | Clean up how the summary is printed. Get rid of those damned formats.
|
---|
1049 |
|
---|
1050 | =head1 BUGS
|
---|
1051 |
|
---|
1052 | HARNESS_COMPILE_TEST currently assumes it's run from the Perl source
|
---|
1053 | directory.
|
---|
1054 |
|
---|
1055 | Please use the CPAN bug ticketing system at L<http://rt.cpan.org/>.
|
---|
1056 | You can also mail bugs, fixes and enhancements to
|
---|
1057 | C<< <bug-test-harness at rt.cpan.org> >>.
|
---|
1058 |
|
---|
1059 | =head1 AUTHORS
|
---|
1060 |
|
---|
1061 | Either Tim Bunce or Andreas Koenig, we don't know. What we know for
|
---|
1062 | sure is, that it was inspired by Larry Wall's TEST script that came
|
---|
1063 | with perl distributions for ages. Numerous anonymous contributors
|
---|
1064 | exist. Andreas Koenig held the torch for many years, and then
|
---|
1065 | Michael G Schwern.
|
---|
1066 |
|
---|
1067 | Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
|
---|
1068 |
|
---|
1069 | =head1 COPYRIGHT
|
---|
1070 |
|
---|
1071 | Copyright 2002-2005
|
---|
1072 | by Michael G Schwern C<< <schwern at pobox.com> >>,
|
---|
1073 | Andy Lester C<< <andy at petdance.com> >>.
|
---|
1074 |
|
---|
1075 | This program is free software; you can redistribute it and/or
|
---|
1076 | modify it under the same terms as Perl itself.
|
---|
1077 |
|
---|
1078 | See L<http://www.perl.com/perl/misc/Artistic.html>.
|
---|
1079 |
|
---|
1080 | =cut
|
---|