File manager - Edit - /home/c14075/dragmet-ural.ru/www/Test.tar
Back
More.pm 0000644 00000146731 15137540314 0006023 0 ustar 00 package Test::More; use 5.006; use strict; use warnings; #---- perlcritic exemptions. ----# # We use a lot of subroutine prototypes ## no critic (Subroutines::ProhibitSubroutinePrototypes) # Can't use Carp because it might cause C<use_ok()> to accidentally succeed # even though the module being used forgot to use Carp. Yes, this # actually happened. sub _carp { my( $file, $line ) = ( caller(1) )[ 1, 2 ]; return warn @_, " at $file line $line\n"; } our $VERSION = '1.302175'; use Test::Builder::Module; our @ISA = qw(Test::Builder::Module); our @EXPORT = qw(ok use_ok require_ok is isnt like unlike is_deeply cmp_ok skip todo todo_skip pass fail eq_array eq_hash eq_set $TODO plan done_testing can_ok isa_ok new_ok diag note explain subtest BAIL_OUT ); =head1 NAME Test::More - yet another framework for writing test scripts =head1 SYNOPSIS use Test::More tests => 23; # or use Test::More skip_all => $reason; # or use Test::More; # see done_testing() require_ok( 'Some::Module' ); # Various ways to say "ok" ok($got eq $expected, $test_name); is ($got, $expected, $test_name); isnt($got, $expected, $test_name); # Rather than print STDERR "# here's what went wrong\n" diag("here's what went wrong"); like ($got, qr/expected/, $test_name); unlike($got, qr/expected/, $test_name); cmp_ok($got, '==', $expected, $test_name); is_deeply($got_complex_structure, $expected_complex_structure, $test_name); SKIP: { skip $why, $how_many unless $have_some_feature; ok( foo(), $test_name ); is( foo(42), 23, $test_name ); }; TODO: { local $TODO = $why; ok( foo(), $test_name ); is( foo(42), 23, $test_name ); }; can_ok($module, @methods); isa_ok($object, $class); pass($test_name); fail($test_name); BAIL_OUT($why); # UNIMPLEMENTED!!! my @status = Test::More::status; =head1 DESCRIPTION B<STOP!> If you're just getting started writing tests, have a look at L<Test2::Suite> first. 
This is a drop in replacement for Test::Simple which you can switch to once you get the hang of basic testing. The purpose of this module is to provide a wide range of testing utilities. Various ways to say "ok" with better diagnostics, facilities to skip tests, test future features and compare complicated data structures. While you can do almost anything with a simple C<ok()> function, it doesn't provide good diagnostic output. =head2 I love it when a plan comes together Before anything else, you need a testing plan. This basically declares how many tests your script is going to run to protect against premature failure. The preferred way to do this is to declare a plan when you C<use Test::More>. use Test::More tests => 23; There are cases when you will not know beforehand how many tests your script is going to run. In this case, you can declare your tests at the end. use Test::More; ... run your tests ... done_testing( $number_of_tests_run ); B<NOTE> C<done_testing()> should never be called in an C<END { ... }> block. Sometimes you really don't know how many tests were run, or it's too difficult to calculate. In which case you can leave off $number_of_tests_run. In some cases, you'll want to completely skip an entire testing script. use Test::More skip_all => $skip_reason; Your script will declare a skip with the reason why you skipped and exit immediately with a zero (success). See L<Test::Harness> for details. If you want to control what functions Test::More will export, you have to use the 'import' option. For example, to import everything but 'fail', you'd do: use Test::More tests => 23, import => ['!fail']; Alternatively, you can use the C<plan()> function. Useful for when you have to calculate the number of tests. 
use Test::More; plan tests => keys %Stuff * 3; or for deciding between running the tests at all: use Test::More; if( $^O eq 'MacOS' ) { plan skip_all => 'Test irrelevant on MacOS'; } else { plan tests => 42; } =cut sub plan { my $tb = Test::More->builder; return $tb->plan(@_); } # This implements "use Test::More 'no_diag'" but the behavior is # deprecated. sub import_extra { my $class = shift; my $list = shift; my @other = (); my $idx = 0; my $import; while( $idx <= $#{$list} ) { my $item = $list->[$idx]; if( defined $item and $item eq 'no_diag' ) { $class->builder->no_diag(1); } elsif( defined $item and $item eq 'import' ) { if ($import) { push @$import, @{$list->[ ++$idx ]}; } else { $import = $list->[ ++$idx ]; push @other, $item, $import; } } else { push @other, $item; } $idx++; } @$list = @other; if ($class eq __PACKAGE__ && (!$import || grep $_ eq '$TODO', @$import)) { my $to = $class->builder->exported_to; no strict 'refs'; *{"$to\::TODO"} = \our $TODO; if ($import) { @$import = grep $_ ne '$TODO', @$import; } else { push @$list, import => [grep $_ ne '$TODO', @EXPORT]; } } return; } =over 4 =item B<done_testing> done_testing(); done_testing($number_of_tests); If you don't know how many tests you're going to run, you can issue the plan when you're done running tests. $number_of_tests is the same as C<plan()>, it's the number of tests you expected to run. You can omit this, in which case the number of tests you ran doesn't matter, just the fact that your tests ran to conclusion. This is safer than and replaces the "no_plan" plan. B<Note:> You must never put C<done_testing()> inside an C<END { ... }> block. The plan is there to ensure your test does not exit before testing has completed. If you use an END block you completely bypass this protection. =back =cut sub done_testing { my $tb = Test::More->builder; $tb->done_testing(@_); } =head2 Test names By convention, each test is assigned a number in order. This is largely done automatically for you. 
However, it's often very useful to assign a name to each test. Which would you rather see: ok 4 not ok 5 ok 6 or ok 4 - basic multi-variable not ok 5 - simple exponential ok 6 - force == mass * acceleration The later gives you some idea of what failed. It also makes it easier to find the test in your script, simply search for "simple exponential". All test functions take a name argument. It's optional, but highly suggested that you use it. =head2 I'm ok, you're not ok. The basic purpose of this module is to print out either "ok #" or "not ok #" depending on if a given test succeeded or failed. Everything else is just gravy. All of the following print "ok" or "not ok" depending on if the test succeeded or failed. They all also return true or false, respectively. =over 4 =item B<ok> ok($got eq $expected, $test_name); This simply evaluates any expression (C<$got eq $expected> is just a simple example) and uses that to determine if the test succeeded or failed. A true expression passes, a false one fails. Very simple. For example: ok( $exp{9} == 81, 'simple exponential' ); ok( Film->can('db_Main'), 'set_db()' ); ok( $p->tests == 4, 'saw tests' ); ok( !grep(!defined $_, @items), 'all items defined' ); (Mnemonic: "This is ok.") $test_name is a very short description of the test that will be printed out. It makes it very easy to find a test in your script when it fails and gives others an idea of your intentions. $test_name is optional, but we B<very> strongly encourage its use. Should an C<ok()> fail, it will produce some diagnostics: not ok 18 - sufficient mucus # Failed test 'sufficient mucus' # in foo.t at line 42. This is the same as L<Test::Simple>'s C<ok()> routine. 
=cut sub ok ($;$) { my( $test, $name ) = @_; my $tb = Test::More->builder; return $tb->ok( $test, $name ); } =item B<is> =item B<isnt> is ( $got, $expected, $test_name ); isnt( $got, $expected, $test_name ); Similar to C<ok()>, C<is()> and C<isnt()> compare their two arguments with C<eq> and C<ne> respectively and use the result of that to determine if the test succeeded or failed. So these: # Is the ultimate answer 42? is( ultimate_answer(), 42, "Meaning of Life" ); # $foo isn't empty isnt( $foo, '', "Got some foo" ); are similar to these: ok( ultimate_answer() eq 42, "Meaning of Life" ); ok( $foo ne '', "Got some foo" ); C<undef> will only ever match C<undef>. So you can test a value against C<undef> like this: is($not_defined, undef, "undefined as expected"); (Mnemonic: "This is that." "This isn't that.") So why use these? They produce better diagnostics on failure. C<ok()> cannot know what you are testing for (beyond the name), but C<is()> and C<isnt()> know what the test was and why it failed. For example this test: my $foo = 'waffle'; my $bar = 'yarblokos'; is( $foo, $bar, 'Is foo the same as bar?' ); Will produce something like this: not ok 17 - Is foo the same as bar? # Failed test 'Is foo the same as bar?' # in foo.t at line 139. # got: 'waffle' # expected: 'yarblokos' So you can figure out what went wrong without rerunning the test. You are encouraged to use C<is()> and C<isnt()> over C<ok()> where possible, however do not be tempted to use them to find out if something is true or false! # XXX BAD! is( exists $brooklyn{tree}, 1, 'A tree grows in Brooklyn' ); This does not check if C<exists $brooklyn{tree}> is true, it checks if it returns 1. Very different. Similar caveats exist for false and 0. In these cases, use C<ok()>. 
ok( exists $brooklyn{tree}, 'A tree grows in Brooklyn' ); A simple call to C<isnt()> usually does not provide a strong test but there are cases when you cannot say much more about a value than that it is different from some other value: new_ok $obj, "Foo"; my $clone = $obj->clone; isa_ok $obj, "Foo", "Foo->clone"; isnt $obj, $clone, "clone() produces a different object"; For those grammatical pedants out there, there's an C<isn't()> function which is an alias of C<isnt()>. =cut sub is ($$;$) { my $tb = Test::More->builder; return $tb->is_eq(@_); } sub isnt ($$;$) { my $tb = Test::More->builder; return $tb->isnt_eq(@_); } *isn't = \&isnt; # ' to unconfuse syntax higlighters =item B<like> like( $got, qr/expected/, $test_name ); Similar to C<ok()>, C<like()> matches $got against the regex C<qr/expected/>. So this: like($got, qr/expected/, 'this is like that'); is similar to: ok( $got =~ m/expected/, 'this is like that'); (Mnemonic "This is like that".) The second argument is a regular expression. It may be given as a regex reference (i.e. C<qr//>) or (for better compatibility with older perls) as a string that looks like a regex (alternative delimiters are currently not supported): like( $got, '/expected/', 'this is like that' ); Regex options may be placed on the end (C<'/expected/i'>). Its advantages over C<ok()> are similar to that of C<is()> and C<isnt()>. Better diagnostics on failure. =cut sub like ($$;$) { my $tb = Test::More->builder; return $tb->like(@_); } =item B<unlike> unlike( $got, qr/expected/, $test_name ); Works exactly as C<like()>, only it checks if $got B<does not> match the given pattern. =cut sub unlike ($$;$) { my $tb = Test::More->builder; return $tb->unlike(@_); } =item B<cmp_ok> cmp_ok( $got, $op, $expected, $test_name ); Halfway between C<ok()> and C<is()> lies C<cmp_ok()>. This allows you to compare two arguments using any binary perl operator. The test passes if the comparison is true and fails otherwise. 
# ok( $got eq $expected ); cmp_ok( $got, 'eq', $expected, 'this eq that' ); # ok( $got == $expected ); cmp_ok( $got, '==', $expected, 'this == that' ); # ok( $got && $expected ); cmp_ok( $got, '&&', $expected, 'this && that' ); ...etc... Its advantage over C<ok()> is when the test fails you'll know what $got and $expected were: not ok 1 # Failed test in foo.t at line 12. # '23' # && # undef It's also useful in those cases where you are comparing numbers and C<is()>'s use of C<eq> will interfere: cmp_ok( $big_hairy_number, '==', $another_big_hairy_number ); It's especially useful when comparing greater-than or smaller-than relation between values: cmp_ok( $some_value, '<=', $upper_limit ); =cut sub cmp_ok($$$;$) { my $tb = Test::More->builder; return $tb->cmp_ok(@_); } =item B<can_ok> can_ok($module, @methods); can_ok($object, @methods); Checks to make sure the $module or $object can do these @methods (works with functions, too). can_ok('Foo', qw(this that whatever)); is almost exactly like saying: ok( Foo->can('this') && Foo->can('that') && Foo->can('whatever') ); only without all the typing and with a better interface. Handy for quickly testing an interface. No matter how many @methods you check, a single C<can_ok()> call counts as one test. If you desire otherwise, use: foreach my $meth (@methods) { can_ok('Foo', $meth); } =cut sub can_ok ($@) { my( $proto, @methods ) = @_; my $class = ref $proto || $proto; my $tb = Test::More->builder; unless($class) { my $ok = $tb->ok( 0, "->can(...)" ); $tb->diag(' can_ok() called with empty class or reference'); return $ok; } unless(@methods) { my $ok = $tb->ok( 0, "$class->can(...)" ); $tb->diag(' can_ok() called with no methods'); return $ok; } my @nok = (); foreach my $method (@methods) { $tb->_try( sub { $proto->can($method) } ) or push @nok, $method; } my $name = (@methods == 1) ? 
"$class->can('$methods[0]')" : "$class->can(...)" ; my $ok = $tb->ok( !@nok, $name ); $tb->diag( map " $class->can('$_') failed\n", @nok ); return $ok; } =item B<isa_ok> isa_ok($object, $class, $object_name); isa_ok($subclass, $class, $object_name); isa_ok($ref, $type, $ref_name); Checks to see if the given C<< $object->isa($class) >>. Also checks to make sure the object was defined in the first place. Handy for this sort of thing: my $obj = Some::Module->new; isa_ok( $obj, 'Some::Module' ); where you'd otherwise have to write my $obj = Some::Module->new; ok( defined $obj && $obj->isa('Some::Module') ); to safeguard against your test script blowing up. You can also test a class, to make sure that it has the right ancestor: isa_ok( 'Vole', 'Rodent' ); It works on references, too: isa_ok( $array_ref, 'ARRAY' ); The diagnostics of this test normally just refer to 'the object'. If you'd like them to be more specific, you can supply an $object_name (for example 'Test customer'). =cut sub isa_ok ($$;$) { my( $thing, $class, $thing_name ) = @_; my $tb = Test::More->builder; my $whatami; if( !defined $thing ) { $whatami = 'undef'; } elsif( ref $thing ) { $whatami = 'reference'; local($@,$!); require Scalar::Util; if( Scalar::Util::blessed($thing) ) { $whatami = 'object'; } } else { $whatami = 'class'; } # We can't use UNIVERSAL::isa because we want to honor isa() overrides my( $rslt, $error ) = $tb->_try( sub { $thing->isa($class) } ); if($error) { die <<WHOA unless $error =~ /^Can't (locate|call) method "isa"/; WHOA! I tried to call ->isa on your $whatami and got some weird error. Here's the error. $error WHOA } # Special case for isa_ok( [], "ARRAY" ) and like if( $whatami eq 'reference' ) { $rslt = UNIVERSAL::isa($thing, $class); } my($diag, $name); if( defined $thing_name ) { $name = "'$thing_name' isa '$class'"; $diag = defined $thing ? 
"'$thing_name' isn't a '$class'" : "'$thing_name' isn't defined"; } elsif( $whatami eq 'object' ) { my $my_class = ref $thing; $thing_name = qq[An object of class '$my_class']; $name = "$thing_name isa '$class'"; $diag = "The object of class '$my_class' isn't a '$class'"; } elsif( $whatami eq 'reference' ) { my $type = ref $thing; $thing_name = qq[A reference of type '$type']; $name = "$thing_name isa '$class'"; $diag = "The reference of type '$type' isn't a '$class'"; } elsif( $whatami eq 'undef' ) { $thing_name = 'undef'; $name = "$thing_name isa '$class'"; $diag = "$thing_name isn't defined"; } elsif( $whatami eq 'class' ) { $thing_name = qq[The class (or class-like) '$thing']; $name = "$thing_name isa '$class'"; $diag = "$thing_name isn't a '$class'"; } else { die; } my $ok; if($rslt) { $ok = $tb->ok( 1, $name ); } else { $ok = $tb->ok( 0, $name ); $tb->diag(" $diag\n"); } return $ok; } =item B<new_ok> my $obj = new_ok( $class ); my $obj = new_ok( $class => \@args ); my $obj = new_ok( $class => \@args, $object_name ); A convenience function which combines creating an object and calling C<isa_ok()> on that object. It is basically equivalent to: my $obj = $class->new(@args); isa_ok $obj, $class, $object_name; If @args is not given, an empty list will be used. This function only works on C<new()> and it assumes C<new()> will return just a single object which isa C<$class>. 
=cut sub new_ok { my $tb = Test::More->builder; $tb->croak("new_ok() must be given at least a class") unless @_; my( $class, $args, $object_name ) = @_; $args ||= []; my $obj; my( $success, $error ) = $tb->_try( sub { $obj = $class->new(@$args); 1 } ); if($success) { local $Test::Builder::Level = $Test::Builder::Level + 1; isa_ok $obj, $class, $object_name; } else { $class = 'undef' if !defined $class; $tb->ok( 0, "$class->new() died" ); $tb->diag(" Error was: $error"); } return $obj; } =item B<subtest> subtest $name => \&code, @args; C<subtest()> runs the &code as its own little test with its own plan and its own result. The main test counts this as a single test using the result of the whole subtest to determine if its ok or not ok. For example... use Test::More tests => 3; pass("First test"); subtest 'An example subtest' => sub { plan tests => 2; pass("This is a subtest"); pass("So is this"); }; pass("Third test"); This would produce. 1..3 ok 1 - First test # Subtest: An example subtest 1..2 ok 1 - This is a subtest ok 2 - So is this ok 2 - An example subtest ok 3 - Third test A subtest may call C<skip_all>. No tests will be run, but the subtest is considered a skip. subtest 'skippy' => sub { plan skip_all => 'cuz I said so'; pass('this test will never be run'); }; Returns true if the subtest passed, false otherwise. Due to how subtests work, you may omit a plan if you desire. This adds an implicit C<done_testing()> to the end of your subtest. The following two subtests are equivalent: subtest 'subtest with implicit done_testing()', sub { ok 1, 'subtests with an implicit done testing should work'; ok 1, '... and support more than one test'; ok 1, '... no matter how many tests are run'; }; subtest 'subtest with explicit done_testing()', sub { ok 1, 'subtests with an explicit done testing should work'; ok 1, '... and support more than one test'; ok 1, '... 
no matter how many tests are run'; done_testing(); }; Extra arguments given to C<subtest> are passed to the callback. For example: sub my_subtest { my $range = shift; ... } for my $range (1, 10, 100, 1000) { subtest "testing range $range", \&my_subtest, $range; } =cut sub subtest { my $tb = Test::More->builder; return $tb->subtest(@_); } =item B<pass> =item B<fail> pass($test_name); fail($test_name); Sometimes you just want to say that the tests have passed. Usually the case is you've got some complicated condition that is difficult to wedge into an C<ok()>. In this case, you can simply use C<pass()> (to declare the test ok) or fail (for not ok). They are synonyms for C<ok(1)> and C<ok(0)>. Use these very, very, very sparingly. =cut sub pass (;$) { my $tb = Test::More->builder; return $tb->ok( 1, @_ ); } sub fail (;$) { my $tb = Test::More->builder; return $tb->ok( 0, @_ ); } =back =head2 Module tests Sometimes you want to test if a module, or a list of modules, can successfully load. For example, you'll often want a first test which simply loads all the modules in the distribution to make sure they work before going on to do more complicated testing. For such purposes we have C<use_ok> and C<require_ok>. =over 4 =item B<require_ok> require_ok($module); require_ok($file); Tries to C<require> the given $module or $file. If it loads successfully, the test will pass. Otherwise it fails and displays the load error. C<require_ok> will guess whether the input is a module name or a filename. No exception will be thrown if the load fails. # require Some::Module require_ok "Some::Module"; # require "Some/File.pl"; require_ok "Some/File.pl"; # stop testing if any of your modules will not load for my $module (@module) { require_ok $module or BAIL_OUT "Can't load $module"; } =cut sub require_ok ($) { my($module) = shift; my $tb = Test::More->builder; my $pack = caller; # Try to determine if we've been given a module name or file. # Module names must be barewords, files not. 
$module = qq['$module'] unless _is_module_name($module); my $code = <<REQUIRE; package $pack; require $module; 1; REQUIRE my( $eval_result, $eval_error ) = _eval($code); my $ok = $tb->ok( $eval_result, "require $module;" ); unless($ok) { chomp $eval_error; $tb->diag(<<DIAGNOSTIC); Tried to require '$module'. Error: $eval_error DIAGNOSTIC } return $ok; } sub _is_module_name { my $module = shift; # Module names start with a letter. # End with an alphanumeric. # The rest is an alphanumeric or :: $module =~ s/\b::\b//g; return $module =~ /^[a-zA-Z]\w*$/ ? 1 : 0; } =item B<use_ok> BEGIN { use_ok($module); } BEGIN { use_ok($module, @imports); } Like C<require_ok>, but it will C<use> the $module in question and only loads modules, not files. If you just want to test a module can be loaded, use C<require_ok>. If you just want to load a module in a test, we recommend simply using C<use> directly. It will cause the test to stop. It's recommended that you run C<use_ok()> inside a BEGIN block so its functions are exported at compile-time and prototypes are properly honored. If @imports are given, they are passed through to the use. So this: BEGIN { use_ok('Some::Module', qw(foo bar)) } is like doing this: use Some::Module qw(foo bar); Version numbers can be checked like so: # Just like "use Some::Module 1.02" BEGIN { use_ok('Some::Module', 1.02) } Don't try to do this: BEGIN { use_ok('Some::Module'); ...some code that depends on the use... ...happening at compile time... } because the notion of "compile-time" is relative. Instead, you want: BEGIN { use_ok('Some::Module') } BEGIN { ...some code that depends on the use... } If you want the equivalent of C<use Foo ()>, use a module but not import anything, use C<require_ok>. 
BEGIN { require_ok "Foo" } =cut sub use_ok ($;@) { my( $module, @imports ) = @_; @imports = () unless @imports; my $tb = Test::More->builder; my %caller; @caller{qw/pack file line sub args want eval req strict warn/} = caller(0); my ($pack, $filename, $line, $warn) = @caller{qw/pack file line warn/}; $filename =~ y/\n\r/_/; # so it doesn't run off the "#line $line $f" line my $code; if( @imports == 1 and $imports[0] =~ /^\d+(?:\.\d+)?$/ ) { # probably a version check. Perl needs to see the bare number # for it to work with non-Exporter based modules. $code = <<USE; package $pack; BEGIN { \${^WARNING_BITS} = \$args[-1] if defined \$args[-1] } #line $line $filename use $module $imports[0]; 1; USE } else { $code = <<USE; package $pack; BEGIN { \${^WARNING_BITS} = \$args[-1] if defined \$args[-1] } #line $line $filename use $module \@{\$args[0]}; 1; USE } my ($eval_result, $eval_error) = _eval($code, \@imports, $warn); my $ok = $tb->ok( $eval_result, "use $module;" ); unless($ok) { chomp $eval_error; $@ =~ s{^BEGIN failed--compilation aborted at .*$} {BEGIN failed--compilation aborted at $filename line $line.}m; $tb->diag(<<DIAGNOSTIC); Tried to use '$module'. Error: $eval_error DIAGNOSTIC } return $ok; } sub _eval { my( $code, @args ) = @_; # Work around oddities surrounding resetting of $@ by immediately # storing it. my( $sigdie, $eval_result, $eval_error ); { local( $@, $!, $SIG{__DIE__} ); # isolate eval $eval_result = eval $code; ## no critic (BuiltinFunctions::ProhibitStringyEval) $eval_error = $@; $sigdie = $SIG{__DIE__} || undef; } # make sure that $code got a chance to set $SIG{__DIE__} $SIG{__DIE__} = $sigdie if defined $sigdie; return( $eval_result, $eval_error ); } =back =head2 Complex data structures Not everything is a simple eq check or regex. There are times you need to see if two data structures are equivalent. For these instances Test::More provides a handful of useful functions. B<NOTE> I'm not quite sure what will happen with filehandles. 
=over 4 =item B<is_deeply> is_deeply( $got, $expected, $test_name ); Similar to C<is()>, except that if $got and $expected are references, it does a deep comparison walking each data structure to see if they are equivalent. If the two structures are different, it will display the place where they start differing. C<is_deeply()> compares the dereferenced values of references, the references themselves (except for their type) are ignored. This means aspects such as blessing and ties are not considered "different". C<is_deeply()> currently has very limited handling of function reference and globs. It merely checks if they have the same referent. This may improve in the future. L<Test::Differences> and L<Test::Deep> provide more in-depth functionality along these lines. B<NOTE> is_deeply() has limitations when it comes to comparing strings and refs: my $path = path('.'); my $hash = {}; is_deeply( $path, "$path" ); # ok is_deeply( $hash, "$hash" ); # fail This happens because is_deeply will unoverload all arguments unconditionally. It is probably best not to use is_deeply with overloading. For legacy reasons this is not likely to ever be fixed. If you would like a much better tool for this you should see L<Test2::Suite> Specifically L<Test2::Tools::Compare> has an C<is()> function that works like C<is_deeply> with many improvements. =cut our( @Data_Stack, %Refs_Seen ); my $DNE = bless [], 'Does::Not::Exist'; sub _dne { return ref $_[0] eq ref $DNE; } ## no critic (Subroutines::RequireArgUnpacking) sub is_deeply { my $tb = Test::More->builder; unless( @_ == 2 or @_ == 3 ) { my $msg = <<'WARNING'; is_deeply() takes two or three args, you gave %d. 
This usually means you passed an array or hash instead of a reference to it WARNING chop $msg; # clip off newline so carp() will put in line/file _carp sprintf $msg, scalar @_; return $tb->ok(0); } my( $got, $expected, $name ) = @_; $tb->_unoverload_str( \$expected, \$got ); my $ok; if( !ref $got and !ref $expected ) { # neither is a reference $ok = $tb->is_eq( $got, $expected, $name ); } elsif( !ref $got xor !ref $expected ) { # one's a reference, one isn't $ok = $tb->ok( 0, $name ); $tb->diag( _format_stack({ vals => [ $got, $expected ] }) ); } else { # both references local @Data_Stack = (); if( _deep_check( $got, $expected ) ) { $ok = $tb->ok( 1, $name ); } else { $ok = $tb->ok( 0, $name ); $tb->diag( _format_stack(@Data_Stack) ); } } return $ok; } sub _format_stack { my(@Stack) = @_; my $var = '$FOO'; my $did_arrow = 0; foreach my $entry (@Stack) { my $type = $entry->{type} || ''; my $idx = $entry->{'idx'}; if( $type eq 'HASH' ) { $var .= "->" unless $did_arrow++; $var .= "{$idx}"; } elsif( $type eq 'ARRAY' ) { $var .= "->" unless $did_arrow++; $var .= "[$idx]"; } elsif( $type eq 'REF' ) { $var = "\${$var}"; } } my @vals = @{ $Stack[-1]{vals} }[ 0, 1 ]; my @vars = (); ( $vars[0] = $var ) =~ s/\$FOO/ \$got/; ( $vars[1] = $var ) =~ s/\$FOO/\$expected/; my $out = "Structures begin differing at:\n"; foreach my $idx ( 0 .. $#vals ) { my $val = $vals[$idx]; $vals[$idx] = !defined $val ? 'undef' : _dne($val) ? "Does not exist" : ref $val ? "$val" : "'$val'"; } $out .= "$vars[0] = $vals[0]\n"; $out .= "$vars[1] = $vals[1]\n"; $out =~ s/^/ /msg; return $out; } sub _type { my $thing = shift; return '' if !ref $thing; for my $type (qw(Regexp ARRAY HASH REF SCALAR GLOB CODE VSTRING)) { return $type if UNIVERSAL::isa( $thing, $type ); } return ''; } =back =head2 Diagnostics If you pick the right test function, you'll usually get a good idea of what went wrong when it failed. But sometimes it doesn't work out that way. 
So here we have ways for you to write your own diagnostic messages which are safer than just C<print STDERR>. =over 4 =item B<diag> diag(@diagnostic_message); Prints a diagnostic message which is guaranteed not to interfere with test output. Like C<print> @diagnostic_message is simply concatenated together. Returns false, so as to preserve failure. Handy for this sort of thing: ok( grep(/foo/, @users), "There's a foo user" ) or diag("Since there's no foo, check that /etc/bar is set up right"); which would produce: not ok 42 - There's a foo user # Failed test 'There's a foo user' # in foo.t at line 52. # Since there's no foo, check that /etc/bar is set up right. You might remember C<ok() or diag()> with the mnemonic C<open() or die()>. B<NOTE> The exact formatting of the diagnostic output is still changing, but it is guaranteed that whatever you throw at it won't interfere with the test. =item B<note> note(@diagnostic_message); Like C<diag()>, except the message will not be seen when the test is run in a harness. It will only be visible in the verbose TAP stream. Handy for putting in notes which might be useful for debugging, but don't indicate a problem. note("Tempfile is $tempfile"); =cut sub diag { return Test::More->builder->diag(@_); } sub note { return Test::More->builder->note(@_); } =item B<explain> my @dump = explain @diagnostic_message; Will dump the contents of any references in a human readable format. Usually you want to pass this into C<note> or C<diag>. Handy for things like... is_deeply($have, $want) || diag explain $have; or note explain \%args; Some::Class->method(%args); =cut sub explain { return Test::More->builder->explain(@_); } =back =head2 Conditional tests Sometimes running a test under certain conditions will cause the test script to die. A certain function or method isn't implemented (such as C<fork()> on MacOS), some resource isn't available (like a net connection) or a module isn't available. 
In these cases it's necessary to skip tests, or declare that they are supposed to fail but will work in the future (a todo test). For more details on the mechanics of skip and todo tests see L<Test::Harness>. The way Test::More handles this is with a named block. Basically, a block of tests which can be skipped over or made todo. It's best if I just show you... =over 4 =item B<SKIP: BLOCK> SKIP: { skip $why, $how_many if $condition; ...normal testing code goes here... } This declares a block of tests that might be skipped, $how_many tests there are, $why and under what $condition to skip them. An example is the easiest way to illustrate: SKIP: { eval { require HTML::Lint }; skip "HTML::Lint not installed", 2 if $@; my $lint = new HTML::Lint; isa_ok( $lint, "HTML::Lint" ); $lint->parse( $html ); is( $lint->errors, 0, "No errors found in HTML" ); } If the user does not have HTML::Lint installed, the whole block of code I<won't be run at all>. Test::More will output special ok's which Test::Harness interprets as skipped, but passing, tests. It's important that $how_many accurately reflects the number of tests in the SKIP block so the # of tests run will match up with your plan. If your plan is C<no_plan> $how_many is optional and will default to 1. It's perfectly safe to nest SKIP blocks. Each SKIP block must have the label C<SKIP>, or Test::More can't work its magic. You don't skip tests which are failing because there's a bug in your program, or for which you don't yet have code written. For that you use TODO. Read on. =cut ## no critic (Subroutines::RequireFinalReturn) sub skip { my( $why, $how_many ) = @_; my $tb = Test::More->builder; # If the plan is set, and is static, then skip needs a count. If the plan # is 'no_plan' we are fine. As well if plan is undefined then we are # waiting for done_testing. 
unless (defined $how_many) { my $plan = $tb->has_plan; _carp "skip() needs to know \$how_many tests are in the block" if $plan && $plan =~ m/^\d+$/; $how_many = 1; } if( defined $how_many and $how_many =~ /\D/ ) { _carp "skip() was passed a non-numeric number of tests. Did you get the arguments backwards?"; $how_many = 1; } for( 1 .. $how_many ) { $tb->skip($why); } no warnings 'exiting'; last SKIP; } =item B<TODO: BLOCK> TODO: { local $TODO = $why if $condition; ...normal testing code goes here... } Declares a block of tests you expect to fail and $why. Perhaps it's because you haven't fixed a bug or haven't finished a new feature: TODO: { local $TODO = "URI::Geller not finished"; my $card = "Eight of clubs"; is( URI::Geller->your_card, $card, 'Is THIS your card?' ); my $spoon; URI::Geller->bend_spoon; is( $spoon, 'bent', "Spoon bending, that's original" ); } With a todo block, the tests inside are expected to fail. Test::More will run the tests normally, but print out special flags indicating they are "todo". L<Test::Harness> will interpret failures as being ok. Should anything succeed, it will report it as an unexpected success. You then know the thing you had todo is done and can remove the TODO flag. The nice part about todo tests, as opposed to simply commenting out a block of tests, is that it is like having a programmatic todo list. You know how much work is left to be done, you're aware of what bugs there are, and you'll know immediately when they're fixed. Once a todo test starts succeeding, simply move it outside the block. When the block is empty, delete it. =item B<todo_skip> TODO: { todo_skip $why, $how_many if $condition; ...normal testing code... } With todo tests, it's best to have the tests actually run. That way you'll know when they start passing. Sometimes this isn't possible. Often a failing test will cause the whole program to die or hang, even inside an C<eval BLOCK> with and using C<alarm>. 
In these extreme cases you have no choice but to skip over the broken tests entirely. The syntax and behavior is similar to a C<SKIP: BLOCK> except the tests will be marked as failing but todo. L<Test::Harness> will interpret them as passing. =cut sub todo_skip { my( $why, $how_many ) = @_; my $tb = Test::More->builder; unless( defined $how_many ) { # $how_many can only be avoided when no_plan is in use. _carp "todo_skip() needs to know \$how_many tests are in the block" unless $tb->has_plan eq 'no_plan'; $how_many = 1; } for( 1 .. $how_many ) { $tb->todo_skip($why); } no warnings 'exiting'; last TODO; } =item When do I use SKIP vs. TODO? B<If it's something the user might not be able to do>, use SKIP. This includes optional modules that aren't installed, running under an OS that doesn't have some feature (like C<fork()> or symlinks), or maybe you need an Internet connection and one isn't available. B<If it's something the programmer hasn't done yet>, use TODO. This is for any code you haven't written yet, or bugs you have yet to fix, but want to put tests in your testing script (always a good idea). =back =head2 Test control =over 4 =item B<BAIL_OUT> BAIL_OUT($reason); Indicates to the harness that things are going so badly all testing should terminate. This includes the running of any additional test scripts. This is typically used when testing cannot continue such as a critical module failing to compile or a necessary external utility not being available such as a database connection failing. The test will exit with 255. For even better control look at L<Test::Most>. =cut sub BAIL_OUT { my $reason = shift; my $tb = Test::More->builder; $tb->BAIL_OUT($reason); } =back =head2 Discouraged comparison functions The use of the following functions is discouraged as they are not actually testing functions and produce no diagnostics to help figure out what went wrong. 
They were written before C<is_deeply()> existed because I couldn't figure out how to display a useful diff of two arbitrary data structures. These functions are usually used inside an C<ok()>. ok( eq_array(\@got, \@expected) ); C<is_deeply()> can do that better and with diagnostics. is_deeply( \@got, \@expected ); They may be deprecated in future versions. =over 4 =item B<eq_array> my $is_eq = eq_array(\@got, \@expected); Checks if two arrays are equivalent. This is a deep check, so multi-level structures are handled correctly. =cut #'# sub eq_array { local @Data_Stack = (); _deep_check(@_); } sub _eq_array { my( $a1, $a2 ) = @_; if( grep _type($_) ne 'ARRAY', $a1, $a2 ) { warn "eq_array passed a non-array ref"; return 0; } return 1 if $a1 eq $a2; my $ok = 1; my $max = $#$a1 > $#$a2 ? $#$a1 : $#$a2; for( 0 .. $max ) { my $e1 = $_ > $#$a1 ? $DNE : $a1->[$_]; my $e2 = $_ > $#$a2 ? $DNE : $a2->[$_]; next if _equal_nonrefs($e1, $e2); push @Data_Stack, { type => 'ARRAY', idx => $_, vals => [ $e1, $e2 ] }; $ok = _deep_check( $e1, $e2 ); pop @Data_Stack if $ok; last unless $ok; } return $ok; } sub _equal_nonrefs { my( $e1, $e2 ) = @_; return if ref $e1 or ref $e2; if ( defined $e1 ) { return 1 if defined $e2 and $e1 eq $e2; } else { return 1 if !defined $e2; } return; } sub _deep_check { my( $e1, $e2 ) = @_; my $tb = Test::More->builder; my $ok = 0; # Effectively turn %Refs_Seen into a stack. This avoids picking up # the same referenced used twice (such as [\$a, \$a]) to be considered # circular. local %Refs_Seen = %Refs_Seen; { $tb->_unoverload_str( \$e1, \$e2 ); # Either they're both references or both not. my $same_ref = !( !ref $e1 xor !ref $e2 ); my $not_ref = ( !ref $e1 and !ref $e2 ); if( defined $e1 xor defined $e2 ) { $ok = 0; } elsif( !defined $e1 and !defined $e2 ) { # Shortcut if they're both undefined. 
$ok = 1; } elsif( _dne($e1) xor _dne($e2) ) { $ok = 0; } elsif( $same_ref and( $e1 eq $e2 ) ) { $ok = 1; } elsif($not_ref) { push @Data_Stack, { type => '', vals => [ $e1, $e2 ] }; $ok = 0; } else { if( $Refs_Seen{$e1} ) { return $Refs_Seen{$e1} eq $e2; } else { $Refs_Seen{$e1} = "$e2"; } my $type = _type($e1); $type = 'DIFFERENT' unless _type($e2) eq $type; if( $type eq 'DIFFERENT' ) { push @Data_Stack, { type => $type, vals => [ $e1, $e2 ] }; $ok = 0; } elsif( $type eq 'ARRAY' ) { $ok = _eq_array( $e1, $e2 ); } elsif( $type eq 'HASH' ) { $ok = _eq_hash( $e1, $e2 ); } elsif( $type eq 'REF' ) { push @Data_Stack, { type => $type, vals => [ $e1, $e2 ] }; $ok = _deep_check( $$e1, $$e2 ); pop @Data_Stack if $ok; } elsif( $type eq 'SCALAR' ) { push @Data_Stack, { type => 'REF', vals => [ $e1, $e2 ] }; $ok = _deep_check( $$e1, $$e2 ); pop @Data_Stack if $ok; } elsif($type) { push @Data_Stack, { type => $type, vals => [ $e1, $e2 ] }; $ok = 0; } else { _whoa( 1, "No type in _deep_check" ); } } } return $ok; } sub _whoa { my( $check, $desc ) = @_; if($check) { die <<"WHOA"; WHOA! $desc This should never happen! Please contact the author immediately! WHOA } } =item B<eq_hash> my $is_eq = eq_hash(\%got, \%expected); Determines if the two hashes contain the same keys and values. This is a deep check. =cut sub eq_hash { local @Data_Stack = (); return _deep_check(@_); } sub _eq_hash { my( $a1, $a2 ) = @_; if( grep _type($_) ne 'HASH', $a1, $a2 ) { warn "eq_hash passed a non-hash ref"; return 0; } return 1 if $a1 eq $a2; my $ok = 1; my $bigger = keys %$a1 > keys %$a2 ? $a1 : $a2; foreach my $k ( keys %$bigger ) { my $e1 = exists $a1->{$k} ? $a1->{$k} : $DNE; my $e2 = exists $a2->{$k} ? 
$a2->{$k} : $DNE; next if _equal_nonrefs($e1, $e2); push @Data_Stack, { type => 'HASH', idx => $k, vals => [ $e1, $e2 ] }; $ok = _deep_check( $e1, $e2 ); pop @Data_Stack if $ok; last unless $ok; } return $ok; } =item B<eq_set> my $is_eq = eq_set(\@got, \@expected); Similar to C<eq_array()>, except the order of the elements is B<not> important. This is a deep check, but the irrelevancy of order only applies to the top level. ok( eq_set(\@got, \@expected) ); Is better written: is_deeply( [sort @got], [sort @expected] ); B<NOTE> By historical accident, this is not a true set comparison. While the order of elements does not matter, duplicate elements do. B<NOTE> C<eq_set()> does not know how to deal with references at the top level. The following is an example of a comparison which might not work: eq_set([\1, \2], [\2, \1]); L<Test::Deep> contains much better set comparison functions. =cut sub eq_set { my( $a1, $a2 ) = @_; return 0 unless @$a1 == @$a2; no warnings 'uninitialized'; # It really doesn't matter how we sort them, as long as both arrays are # sorted with the same algorithm. # # Ensure that references are not accidentally treated the same as a # string containing the reference. # # Have to inline the sort routine due to a threading/sort bug. # See [rt.cpan.org 6782] # # I don't know how references would be sorted so we just don't sort # them. This means eq_set doesn't really work with refs. return eq_array( [ grep( ref, @$a1 ), sort( grep( !ref, @$a1 ) ) ], [ grep( ref, @$a2 ), sort( grep( !ref, @$a2 ) ) ], ); } =back =head2 Extending and Embedding Test::More Sometimes the Test::More interface isn't quite enough. Fortunately, Test::More is built on top of L<Test::Builder> which provides a single, unified backend for any test library to use. This means two test libraries which both use <Test::Builder> B<can> be used together in the same program>. 
If you simply want to do a little tweaking of how the tests behave, you can access the underlying L<Test::Builder> object like so: =over 4 =item B<builder> my $test_builder = Test::More->builder; Returns the L<Test::Builder> object underlying Test::More for you to play with. =back =head1 EXIT CODES If all your tests passed, L<Test::Builder> will exit with zero (which is normal). If anything failed it will exit with how many failed. If you run less (or more) tests than you planned, the missing (or extras) will be considered failures. If no tests were ever run L<Test::Builder> will throw a warning and exit with 255. If the test died, even after having successfully completed all its tests, it will still be considered a failure and will exit with 255. So the exit codes are... 0 all tests successful 255 test died or all passed but wrong # of tests run any other number how many failed (including missing or extras) If you fail more than 254 tests, it will be reported as 254. B<NOTE> This behavior may go away in future versions. =head1 COMPATIBILITY Test::More works with Perls as old as 5.8.1. Thread support is not very reliable before 5.10.1, but that's because threads are not very reliable before 5.10.1. Although Test::More has been a core module in versions of Perl since 5.6.2, Test::More has evolved since then, and not all of the features you're used to will be present in the shipped version of Test::More. If you are writing a module, don't forget to indicate in your package metadata the minimum version of Test::More that you require. For instance, if you want to use C<done_testing()> but want your test script to run on Perl 5.10.0, you will need to explicitly require Test::More > 0.88. Key feature milestones include: =over 4 =item subtests Subtests were released in Test::More 0.94, which came with Perl 5.12.0. Subtests did not implicitly call C<done_testing()> until 0.96; the first Perl with that fix was Perl 5.14.0 with 0.98. 
=item C<done_testing()> This was released in Test::More 0.88 and first shipped with Perl in 5.10.1 as part of Test::More 0.92. =item C<cmp_ok()> Although C<cmp_ok()> was introduced in 0.40, 0.86 fixed an important bug to make it safe for overloaded objects; the fixed first shipped with Perl in 5.10.1 as part of Test::More 0.92. =item C<new_ok()> C<note()> and C<explain()> These were was released in Test::More 0.82, and first shipped with Perl in 5.10.1 as part of Test::More 0.92. =back There is a full version history in the Changes file, and the Test::More versions included as core can be found using L<Module::CoreList>: $ corelist -a Test::More =head1 CAVEATS and NOTES =over 4 =item utf8 / "Wide character in print" If you use utf8 or other non-ASCII characters with Test::More you might get a "Wide character in print" warning. Using C<< binmode STDOUT, ":utf8" >> will not fix it. L<Test::Builder> (which powers Test::More) duplicates STDOUT and STDERR. So any changes to them, including changing their output disciplines, will not be seen by Test::More. One work around is to apply encodings to STDOUT and STDERR as early as possible and before Test::More (or any other Test module) loads. use open ':std', ':encoding(utf8)'; use Test::More; A more direct work around is to change the filehandles used by L<Test::Builder>. my $builder = Test::More->builder; binmode $builder->output, ":encoding(utf8)"; binmode $builder->failure_output, ":encoding(utf8)"; binmode $builder->todo_output, ":encoding(utf8)"; =item Overloaded objects String overloaded objects are compared B<as strings> (or in C<cmp_ok()>'s case, strings or numbers as appropriate to the comparison op). This prevents Test::More from piercing an object's interface allowing better blackbox testing. So if a function starts returning overloaded objects instead of bare strings your tests won't notice the difference. This is good. 
However, it does mean that functions like C<is_deeply()> cannot be used to test the internals of string overloaded objects. In this case I would suggest L<Test::Deep> which contains more flexible testing functions for complex data structures. =item Threads Test::More will only be aware of threads if C<use threads> has been done I<before> Test::More is loaded. This is ok: use threads; use Test::More; This may cause problems: use Test::More use threads; 5.8.1 and above are supported. Anything below that has too many bugs. =back =head1 HISTORY This is a case of convergent evolution with Joshua Pritikin's L<Test> module. I was largely unaware of its existence when I'd first written my own C<ok()> routines. This module exists because I can't figure out how to easily wedge test names into Test's interface (along with a few other problems). The goal here is to have a testing utility that's simple to learn, quick to use and difficult to trip yourself up with while still providing more flexibility than the existing Test.pm. As such, the names of the most common routines are kept tiny, special cases and magic side-effects are kept to a minimum. WYSIWYG. =head1 SEE ALSO =head2 =head2 ALTERNATIVES L<Test2::Suite> is the most recent and modern set of tools for testing. L<Test::Simple> if all this confuses you and you just want to write some tests. You can upgrade to Test::More later (it's forward compatible). L<Test::Legacy> tests written with Test.pm, the original testing module, do not play well with other testing libraries. Test::Legacy emulates the Test.pm interface and does play well with others. =head2 ADDITIONAL LIBRARIES L<Test::Differences> for more ways to test complex data structures. And it plays well with Test::More. L<Test::Class> is like xUnit but more perlish. L<Test::Deep> gives you more powerful complex data structure testing. L<Test::Inline> shows the idea of embedded testing. L<Mock::Quick> The ultimate mocking library. 
Easily spawn objects defined on the fly. Can also override, block, or reimplement packages as needed. L<Test::FixtureBuilder> Quickly define fixture data for unit tests. =head2 OTHER COMPONENTS L<Test::Harness> is the test runner and output interpreter for Perl. It's the thing that powers C<make test> and where the C<prove> utility comes from. =head2 BUNDLES L<Test::Most> Most commonly needed test functions and features. =head1 AUTHORS Michael G Schwern E<lt>schwern@pobox.comE<gt> with much inspiration from Joshua Pritikin's Test module and lots of help from Barrie Slaymaker, Tony Bowden, blackstar.co.uk, chromatic, Fergal Daly and the perl-qa gang. =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 BUGS See F<https://github.com/Test-More/test-more/issues> to report and view bugs. =head1 SOURCE The source code repository for Test::More can be found at F<http://github.com/Test-More/test-more/>. =head1 COPYRIGHT Copyright 2001-2008 by Michael G Schwern E<lt>schwern@pobox.comE<gt>. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See F<http://www.perl.com/perl/misc/Artistic.html> =cut 1; Tester/Capture.pm 0000644 00000010510 15137540314 0007753 0 ustar 00 use strict; package Test::Tester::Capture; our $VERSION = '1.302175'; use Test::Builder; use vars qw( @ISA ); @ISA = qw( Test::Builder ); # Make Test::Tester::Capture thread-safe for ithreads. BEGIN { use Config; *share = sub { 0 }; *lock = sub { 0 }; } my $Curr_Test = 0; share($Curr_Test); my @Test_Results = (); share(@Test_Results); my $Prem_Diag = {diag => ""}; share($Curr_Test); sub new { # Test::Tester::Capgture::new used to just return __PACKAGE__ # because Test::Builder::new enforced its singleton nature by # return __PACKAGE__. That has since changed, Test::Builder::new now # returns a blessed has and around version 0.78, Test::Builder::todo # started wanting to modify $self. 
To cope with this, we now return # a blessed hash. This is a short-term hack, the correct thing to do # is to detect which style of Test::Builder we're dealing with and # act appropriately. my $class = shift; return bless {}, $class; } sub ok { my($self, $test, $name) = @_; my $ctx = $self->ctx; # $test might contain an object which we don't want to accidentally # store, so we turn it into a boolean. $test = $test ? 1 : 0; lock $Curr_Test; $Curr_Test++; my($pack, $file, $line) = $self->caller; my $todo = $self->todo(); my $result = {}; share($result); unless( $test ) { @$result{ 'ok', 'actual_ok' } = ( ( $todo ? 1 : 0 ), 0 ); } else { @$result{ 'ok', 'actual_ok' } = ( 1, $test ); } if( defined $name ) { $name =~ s|#|\\#|g; # # in a name can confuse Test::Harness. $result->{name} = $name; } else { $result->{name} = ''; } if( $todo ) { my $what_todo = $todo; $result->{reason} = $what_todo; $result->{type} = 'todo'; } else { $result->{reason} = ''; $result->{type} = ''; } $Test_Results[$Curr_Test-1] = $result; unless( $test ) { my $msg = $todo ? "Failed (TODO)" : "Failed"; $result->{fail_diag} = (" $msg test ($file at line $line)\n"); } $result->{diag} = ""; $result->{_level} = $Test::Builder::Level; $result->{_depth} = Test::Tester::find_run_tests(); $ctx->release; return $test ? 
sub skip {
    my ( $self, $why ) = @_;
    $why ||= '';

    my $ctx = $self->ctx;

    lock($Curr_Test);
    $Curr_Test++;

    # Record the skip as a shared hash so ithreads see the same entry.
    my %entry;
    share(%entry);
    %entry = (
        ok        => 1,
        actual_ok => 1,
        name      => '',
        type      => 'skip',
        reason    => $why,
        diag      => "",
        _level    => $Test::Builder::Level,
        _depth    => Test::Tester::find_run_tests(),
    );
    $Test_Results[ $Curr_Test - 1 ] = \%entry;

    $ctx->release;

    return 1;
}
=head1 AUTHOR Most of the code here was lifted straight from Test::Builder and then had chunks removed by Fergal Daly <fergal@esatclear.ie>. =head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut Tester/CaptureRunner.pm 0000644 00000002426 15137540314 0011154 0 ustar 00 # $Header: /home/fergal/my/cvs/Test-Tester/lib/Test/Tester/CaptureRunner.pm,v 1.3 2003/03/05 01:07:55 fergal Exp $ use strict; package Test::Tester::CaptureRunner; our $VERSION = '1.302175'; use Test::Tester::Capture; require Exporter; sub new { my $pkg = shift; my $self = bless {}, $pkg; return $self; } sub run_tests { my $self = shift; my $test = shift; capture()->reset; $self->{StartLevel} = $Test::Builder::Level; &$test(); } sub get_results { my $self = shift; my @results = capture()->details; my $start = $self->{StartLevel}; foreach my $res (@results) { next if defined $res->{depth}; my $depth = $res->{_depth} - $res->{_level} - $start - 3; # print "my $depth = $res->{_depth} - $res->{_level} - $start - 1\n"; $res->{depth} = $depth; } return @results; } sub get_premature { return capture()->premature; } sub capture { return Test::Tester::Capture->new; } __END__ =head1 NAME Test::Tester::CaptureRunner - Help testing test modules built with Test::Builder =head1 DESCRIPTION This stuff if needed to allow me to play with other ways of monitoring the test results. =head1 AUTHOR Copyright 2003 by Fergal Daly <fergal@esatclear.ie>. 
=head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut Tester/Delegate.pm 0000644 00000001073 15137540314 0010066 0 ustar 00 use strict; use warnings; package Test::Tester::Delegate; our $VERSION = '1.302175'; use Scalar::Util(); use vars '$AUTOLOAD'; sub new { my $pkg = shift; my $obj = shift; my $self = bless {}, $pkg; return $self; } sub AUTOLOAD { my ($sub) = $AUTOLOAD =~ /.*::(.*?)$/; return if $sub eq "DESTROY"; my $obj = $_[0]->{Object}; my $ref = $obj->can($sub); shift(@_); unshift(@_, $obj); goto &$ref; } sub can { my $this = shift; my ($sub) = @_; return $this->{Object}->can($sub) if Scalar::Util::blessed($this); return $this->SUPER::can(@_); } 1; Tutorial.pod 0000644 00000045622 15137540314 0007067 0 ustar 00 =head1 NAME Test::Tutorial - A tutorial about writing really basic tests =head1 DESCRIPTION I<AHHHHHHH!!!! NOT TESTING! Anything but testing! Beat me, whip me, send me to Detroit, but don't make me write tests!> I<*sob*> I<Besides, I don't know how to write the damned things.> Is this you? Is writing tests right up there with writing documentation and having your fingernails pulled out? Did you open up a test and read ######## We start with some black magic and decide that's quite enough for you? It's ok. That's all gone now. We've done all the black magic for you. And here are the tricks... =head2 Nuts and bolts of testing. Here's the most basic test program. #!/usr/bin/perl -w print "1..1\n"; print 1 + 1 == 2 ? "ok 1\n" : "not ok 1\n"; Because 1 + 1 is 2, it prints: 1..1 ok 1 What this says is: C<1..1> "I'm going to run one test." [1] C<ok 1> "The first test passed". And that's about all magic there is to testing. Your basic unit of testing is the I<ok>. For each thing you test, an C<ok> is printed. Simple. L<Test::Harness> interprets your test results to determine if you succeeded or failed (more on that later). Writing all these print statements rapidly gets tedious. 
Fortunately, there's L<Test::Simple>. It has one function, C<ok()>. #!/usr/bin/perl -w use Test::Simple tests => 1; ok( 1 + 1 == 2 ); That does the same thing as the previous code. C<ok()> is the backbone of Perl testing, and we'll be using it instead of roll-your-own from here on. If C<ok()> gets a true value, the test passes. False, it fails. #!/usr/bin/perl -w use Test::Simple tests => 2; ok( 1 + 1 == 2 ); ok( 2 + 2 == 5 ); From that comes: 1..2 ok 1 not ok 2 # Failed test (test.pl at line 5) # Looks like you failed 1 tests of 2. C<1..2> "I'm going to run two tests." This number is a I<plan>. It helps to ensure your test program ran all the way through and didn't die or skip some tests. C<ok 1> "The first test passed." C<not ok 2> "The second test failed". Test::Simple helpfully prints out some extra commentary about your tests. It's not scary. Come, hold my hand. We're going to give an example of testing a module. For our example, we'll be testing a date library, L<Date::ICal>. It's on CPAN, so download a copy and follow along. [2] =head2 Where to start? This is the hardest part of testing, where do you start? People often get overwhelmed at the apparent enormity of the task of testing a whole module. The best place to start is at the beginning. L<Date::ICal> is an object-oriented module, and that means you start by making an object. Test C<new()>. #!/usr/bin/perl -w # assume these two lines are in all subsequent examples use strict; use warnings; use Test::Simple tests => 2; use Date::ICal; my $ical = Date::ICal->new; # create an object ok( defined $ical ); # check that we got something ok( $ical->isa('Date::ICal') ); # and it's the right class Run that and you should get: 1..2 ok 1 ok 2 Congratulations! You've written your first useful test. =head2 Names That output isn't terribly descriptive, is it? When you have two tests you can figure out which one is #2, but what if you have 102 tests? 
Each test can be given a little descriptive name as the second argument to C<ok()>. use Test::Simple tests => 2; ok( defined $ical, 'new() returned something' ); ok( $ical->isa('Date::ICal'), " and it's the right class" ); Now you'll see: 1..2 ok 1 - new() returned something ok 2 - and it's the right class =head2 Test the manual The simplest way to build up a decent testing suite is to just test what the manual says it does. [3] Let's pull something out of the L<Date::ICal/SYNOPSIS> and test that all its bits work. #!/usr/bin/perl -w use Test::Simple tests => 8; use Date::ICal; $ical = Date::ICal->new( year => 1964, month => 10, day => 16, hour => 16, min => 12, sec => 47, tz => '0530' ); ok( defined $ical, 'new() returned something' ); ok( $ical->isa('Date::ICal'), " and it's the right class" ); ok( $ical->sec == 47, ' sec()' ); ok( $ical->min == 12, ' min()' ); ok( $ical->hour == 16, ' hour()' ); ok( $ical->day == 17, ' day()' ); ok( $ical->month == 10, ' month()' ); ok( $ical->year == 1964, ' year()' ); Run that and you get: 1..8 ok 1 - new() returned something ok 2 - and it's the right class ok 3 - sec() ok 4 - min() ok 5 - hour() not ok 6 - day() # Failed test (- at line 16) ok 7 - month() ok 8 - year() # Looks like you failed 1 tests of 8. Whoops, a failure! [4] L<Test::Simple> helpfully lets us know on what line the failure occurred, but not much else. We were supposed to get 17, but we didn't. What did we get?? Dunno. You could re-run the test in the debugger or throw in some print statements to find out. Instead, switch from L<Test::Simple> to L<Test::More>. L<Test::More> does everything L<Test::Simple> does, and more! In fact, L<Test::More> does things I<exactly> the way L<Test::Simple> does. You can literally swap L<Test::Simple> out and put L<Test::More> in its place. That's just what we're going to do. L<Test::More> does more than L<Test::Simple>. The most important difference at this point is it provides more informative ways to say "ok". 
Although you can write almost any test with a generic C<ok()>, it can't tell you what went wrong. The C<is()> function lets us declare that something is supposed to be the same as something else: use Test::More tests => 8; use Date::ICal; $ical = Date::ICal->new( year => 1964, month => 10, day => 16, hour => 16, min => 12, sec => 47, tz => '0530' ); ok( defined $ical, 'new() returned something' ); ok( $ical->isa('Date::ICal'), " and it's the right class" ); is( $ical->sec, 47, ' sec()' ); is( $ical->min, 12, ' min()' ); is( $ical->hour, 16, ' hour()' ); is( $ical->day, 17, ' day()' ); is( $ical->month, 10, ' month()' ); is( $ical->year, 1964, ' year()' ); "Is C<< $ical->sec >> 47?" "Is C<< $ical->min >> 12?" With C<is()> in place, you get more information: 1..8 ok 1 - new() returned something ok 2 - and it's the right class ok 3 - sec() ok 4 - min() ok 5 - hour() not ok 6 - day() # Failed test (- at line 16) # got: '16' # expected: '17' ok 7 - month() ok 8 - year() # Looks like you failed 1 tests of 8. Aha. C<< $ical->day >> returned 16, but we expected 17. A quick check shows that the code is working fine, we made a mistake when writing the tests. Change it to: is( $ical->day, 16, ' day()' ); ... and everything works. Any time you're doing a "this equals that" sort of test, use C<is()>. It even works on arrays. The test is always in scalar context, so you can test how many elements are in an array this way. [5] is( @foo, 5, 'foo has 5 elements' ); =head2 Sometimes the tests are wrong This brings up a very important lesson. Code has bugs. Tests are code. Ergo, tests have bugs. A failing test could mean a bug in the code, but don't discount the possibility that the test is wrong. On the flip side, don't be tempted to prematurely declare a test incorrect just because you're having trouble finding the bug. Invalidating a test isn't something to be taken lightly, and don't use it as a cop out to avoid work. 
=head2 Testing lots of values We're going to be wanting to test a lot of dates here, trying to trick the code with lots of different edge cases. Does it work before 1970? After 2038? Before 1904? Do years after 10,000 give it trouble? Does it get leap years right? We could keep repeating the code above, or we could set up a little try/expect loop. use Test::More tests => 32; use Date::ICal; my %ICal_Dates = ( # An ICal string And the year, month, day # hour, minute and second we expect. '19971024T120000' => # from the docs. [ 1997, 10, 24, 12, 0, 0 ], '20390123T232832' => # after the Unix epoch [ 2039, 1, 23, 23, 28, 32 ], '19671225T000000' => # before the Unix epoch [ 1967, 12, 25, 0, 0, 0 ], '18990505T232323' => # before the MacOS epoch [ 1899, 5, 5, 23, 23, 23 ], ); while( my($ical_str, $expect) = each %ICal_Dates ) { my $ical = Date::ICal->new( ical => $ical_str ); ok( defined $ical, "new(ical => '$ical_str')" ); ok( $ical->isa('Date::ICal'), " and it's the right class" ); is( $ical->year, $expect->[0], ' year()' ); is( $ical->month, $expect->[1], ' month()' ); is( $ical->day, $expect->[2], ' day()' ); is( $ical->hour, $expect->[3], ' hour()' ); is( $ical->min, $expect->[4], ' min()' ); is( $ical->sec, $expect->[5], ' sec()' ); } Now we can test bunches of dates by just adding them to C<%ICal_Dates>. Now that it's less work to test with more dates, you'll be inclined to just throw more in as you think of them. Only problem is, every time we add to that we have to keep adjusting the C<< use Test::More tests => ## >> line. That can rapidly get annoying. There are ways to make this work better. First, we can calculate the plan dynamically using the C<plan()> function. use Test::More; use Date::ICal; my %ICal_Dates = ( ...same as before... ); # For each key in the hash we're running 8 tests. plan tests => keys(%ICal_Dates) * 8; ...and then your tests... To be even more flexible, use C<done_testing>. This means we're just running some tests, don't know how many. 
[6] use Test::More; # instead of tests => 32 ... # tests here done_testing(); # reached the end safely If you don't specify a plan, L<Test::More> expects to see C<done_testing()> before your program exits. It will warn you if you forget it. You can give C<done_testing()> an optional number of tests you expected to run, and if the number ran differs, L<Test::More> will give you another kind of warning. =head2 Informative names Take a look at the line: ok( defined $ical, "new(ical => '$ical_str')" ); We've added more detail about what we're testing and the ICal string itself we're trying out to the name. So you get results like: ok 25 - new(ical => '19971024T120000') ok 26 - and it's the right class ok 27 - year() ok 28 - month() ok 29 - day() ok 30 - hour() ok 31 - min() ok 32 - sec() If something in there fails, you'll know which one it was and that will make tracking down the problem easier. Try to put a bit of debugging information into the test names. Describe what the tests test, to make debugging a failed test easier for you or for the next person who runs your test. =head2 Skipping tests Poking around in the existing L<Date::ICal> tests, I found this in F<t/01sanity.t> [7] #!/usr/bin/perl -w use Test::More tests => 7; use Date::ICal; # Make sure epoch time is being handled sanely. my $t1 = Date::ICal->new( epoch => 0 ); is( $t1->epoch, 0, "Epoch time of 0" ); # XXX This will only work on unix systems. is( $t1->ical, '19700101Z', " epoch to ical" ); is( $t1->year, 1970, " year()" ); is( $t1->month, 1, " month()" ); is( $t1->day, 1, " day()" ); # like the tests above, but starting with ical instead of epoch my $t2 = Date::ICal->new( ical => '19700101Z' ); is( $t2->ical, '19700101Z', "Start of epoch in ICal notation" ); is( $t2->epoch, 0, " and back to ICal" ); The beginning of the epoch is different on most non-Unix operating systems [8]. Even though Perl smooths out the differences for the most part, certain ports do it differently. 
MacPerl is one off the top of my head. [9] Rather than putting a comment in the test and hoping someone will read the test while debugging the failure, we can explicitly say it's never going to work and skip the test. use Test::More tests => 7; use Date::ICal; # Make sure epoch time is being handled sanely. my $t1 = Date::ICal->new( epoch => 0 ); is( $t1->epoch, 0, "Epoch time of 0" ); SKIP: { skip('epoch to ICal not working on Mac OS', 6) if $^O eq 'MacOS'; is( $t1->ical, '19700101Z', " epoch to ical" ); is( $t1->year, 1970, " year()" ); is( $t1->month, 1, " month()" ); is( $t1->day, 1, " day()" ); # like the tests above, but starting with ical instead of epoch my $t2 = Date::ICal->new( ical => '19700101Z' ); is( $t2->ical, '19700101Z', "Start of epoch in ICal notation" ); is( $t2->epoch, 0, " and back to ICal" ); } A little bit of magic happens here. When running on anything but MacOS, all the tests run normally. But when on MacOS, C<skip()> causes the entire contents of the SKIP block to be jumped over. It never runs. Instead, C<skip()> prints special output that tells L<Test::Harness> that the tests have been skipped. 1..7 ok 1 - Epoch time of 0 ok 2 # skip epoch to ICal not working on MacOS ok 3 # skip epoch to ICal not working on MacOS ok 4 # skip epoch to ICal not working on MacOS ok 5 # skip epoch to ICal not working on MacOS ok 6 # skip epoch to ICal not working on MacOS ok 7 # skip epoch to ICal not working on MacOS This means your tests won't fail on MacOS. This means fewer emails from MacPerl users telling you about failing tests that you know will never work. You've got to be careful with skip tests. These are for tests which don't work and I<never will>. It is not for skipping genuine bugs (we'll get to that in a moment). The tests are wholly and completely skipped. [10] This will work. 
SKIP: { skip("I don't wanna die!"); die, die, die, die, die; } =head2 Todo tests While thumbing through the L<Date::ICal> man page, I came across this: ical $ical_string = $ical->ical; Retrieves, or sets, the date on the object, using any valid ICal date/time string. "Retrieves or sets". Hmmm. I didn't see a test for using C<ical()> to set the date in the Date::ICal test suite. So I wrote one: use Test::More tests => 1; use Date::ICal; my $ical = Date::ICal->new; $ical->ical('20201231Z'); is( $ical->ical, '20201231Z', 'Setting via ical()' ); Run that. I saw: 1..1 not ok 1 - Setting via ical() # Failed test (- at line 6) # got: '20010814T233649Z' # expected: '20201231Z' # Looks like you failed 1 tests of 1. Whoops! Looks like it's unimplemented. Assume you don't have the time to fix this. [11] Normally, you'd just comment out the test and put a note in a todo list somewhere. Instead, explicitly state "this test will fail" by wrapping it in a C<TODO> block: use Test::More tests => 1; TODO: { local $TODO = 'ical($ical) not yet implemented'; my $ical = Date::ICal->new; $ical->ical('20201231Z'); is( $ical->ical, '20201231Z', 'Setting via ical()' ); } Now when you run, it's a little different: 1..1 not ok 1 - Setting via ical() # TODO ical($ical) not yet implemented # got: '20010822T201551Z' # expected: '20201231Z' L<Test::More> doesn't say "Looks like you failed 1 tests of 1". That '# TODO' tells L<Test::Harness> "this is supposed to fail" and it treats a failure as a successful test. You can write tests even before you've fixed the underlying code. If a TODO test passes, L<Test::Harness> will report it "UNEXPECTEDLY SUCCEEDED". When that happens, remove the TODO block with C<local $TODO> and turn it into a real test. =head2 Testing with taint mode. Taint mode is a funny thing. It's the globalest of all global features. Once you turn it on, it affects I<all> code in your program and I<all> modules used (and all the modules they use). 
If a single piece of code isn't taint clean, the whole thing explodes. With that in mind, it's very important to ensure your module works under taint mode. It's very simple to have your tests run under taint mode. Just throw a C<-T> into the C<#!> line. L<Test::Harness> will read the switches in C<#!> and use them to run your tests. #!/usr/bin/perl -Tw ...test normally here... When you say C<make test> it will run with taint mode on. =head1 FOOTNOTES =over 4 =item 1 The first number doesn't really mean anything, but it has to be 1. It's the second number that's important. =item 2 For those following along at home, I'm using version 1.31. It has some bugs, which is good -- we'll uncover them with our tests. =item 3 You can actually take this one step further and test the manual itself. Have a look at L<Test::Inline> (formerly L<Pod::Tests>). =item 4 Yes, there's a mistake in the test suite. What! Me, contrived? =item 5 We'll get to testing the contents of lists later. =item 6 But what happens if your test program dies halfway through?! Since we didn't say how many tests we're going to run, how can we know it failed? No problem, L<Test::More> employs some magic to catch that death and turn the test into a failure, even if every test passed up to that point. =item 7 I cleaned it up a little. =item 8 Most Operating Systems record time as the number of seconds since a certain date. This date is the beginning of the epoch. Unix's starts at midnight January 1st, 1970 GMT. =item 9 MacOS's epoch is midnight January 1st, 1904. VMS's is midnight, November 17th, 1858, but vmsperl emulates the Unix epoch so it's not a problem. =item 10 As long as the code inside the SKIP block at least compiles. Please don't ask how. No, it's not a filter. =item 11 Do NOT be tempted to use TODO tests as a way to avoid fixing simple bugs! =back =head1 AUTHORS Michael G Schwern E<lt>schwern@pobox.comE<gt> and the perl-qa dancers! 
=head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 COPYRIGHT Copyright 2001 by Michael G Schwern E<lt>schwern@pobox.comE<gt>. This documentation is free; you can redistribute it and/or modify it under the same terms as Perl itself. Irrespective of its distribution, all code examples in these files are hereby placed into the public domain. You are permitted and encouraged to use this code in your own programs for fun or for profit as you see fit. A simple comment in the code giving credit would be courteous but is not required. =cut Builder.pm 0000644 00000174125 15137540314 0006505 0 ustar 00 package Test::Builder; use 5.006; use strict; use warnings; our $VERSION = '1.302175'; BEGIN { if( $] < 5.008 ) { require Test::Builder::IO::Scalar; } } use Scalar::Util qw/blessed reftype weaken/; use Test2::Util qw/USE_THREADS try get_tid/; use Test2::API qw/context release/; # Make Test::Builder thread-safe for ithreads. BEGIN { warn "Test::Builder was loaded after Test2 initialization, this is not recommended." if Test2::API::test2_init_done() || Test2::API::test2_load_done(); if (USE_THREADS && ! Test2::API::test2_ipc_disabled()) { require Test2::IPC; require Test2::IPC::Driver::Files; Test2::IPC::Driver::Files->import; Test2::API::test2_ipc_enable_polling(); Test2::API::test2_no_wait(1); } } use Test2::Event::Subtest; use Test2::Hub::Subtest; use Test::Builder::Formatter; use Test::Builder::TodoDiag; our $Level = 1; our $Test = $ENV{TB_NO_EARLY_INIT} ? undef : Test::Builder->new; sub _add_ts_hooks { my $self = shift; my $hub = $self->{Stack}->top; # Take a reference to the hash key, we do this to avoid closing over $self # which is the singleton. We use a reference because the value could change # in rare cases. my $epkgr = \$self->{Exported_To}; #$hub->add_context_aquire(sub {$_[0]->{level} += $Level - 1}); $hub->pre_filter(sub { my ($active_hub, $e) = @_; my $epkg = $$epkgr; my $cpkg = $e->{trace} ? 
$e->{trace}->{frame}->[0] : undef; no strict 'refs'; no warnings 'once'; my $todo; $todo = ${"$cpkg\::TODO"} if $cpkg; $todo = ${"$epkg\::TODO"} if $epkg && !$todo; return $e unless defined($todo); return $e unless length($todo); # Turn a diag into a todo diag return Test::Builder::TodoDiag->new(%$e) if ref($e) eq 'Test2::Event::Diag'; $e->set_todo($todo) if $e->can('set_todo'); $e->add_amnesty({tag => 'TODO', details => $todo}); # Set todo on ok's if ($e->isa('Test2::Event::Ok')) { $e->set_effective_pass(1); if (my $result = $e->get_meta(__PACKAGE__)) { $result->{reason} ||= $todo; $result->{type} ||= 'todo'; $result->{ok} = 1; } } return $e; }, inherit => 1); } { no warnings; INIT { use warnings; Test2::API::test2_load() unless Test2::API::test2_in_preload(); } } sub new { my($class) = shift; unless($Test) { $Test = $class->create(singleton => 1); Test2::API::test2_add_callback_post_load( sub { $Test->{Original_Pid} = $$ if !$Test->{Original_Pid} || $Test->{Original_Pid} == 0; $Test->reset(singleton => 1); $Test->_add_ts_hooks; } ); # Non-TB tools normally expect 0 added to the level. $Level is normally 1. So # we only want the level to change if $Level != 1. # TB->ctx compensates for this later. Test2::API::test2_add_callback_context_aquire(sub { $_[0]->{level} += $Level - 1 }); Test2::API::test2_add_callback_exit(sub { $Test->_ending(@_) }); Test2::API::test2_ipc()->set_no_fatal(1) if Test2::API::test2_has_ipc(); } return $Test; } sub create { my $class = shift; my %params = @_; my $self = bless {}, $class; if ($params{singleton}) { $self->{Stack} = Test2::API::test2_stack(); } else { $self->{Stack} = Test2::API::Stack->new; $self->{Stack}->new_hub( formatter => Test::Builder::Formatter->new, ipc => Test2::API::test2_ipc(), ); $self->reset(%params); $self->_add_ts_hooks; } return $self; } sub ctx { my $self = shift; context( # 1 for our frame, another for the -1 off of $Level in our hook at the top. 
level => 2, fudge => 1, stack => $self->{Stack}, hub => $self->{Hub}, wrapped => 1, @_ ); } sub parent { my $self = shift; my $ctx = $self->ctx; my $chub = $self->{Hub} || $ctx->hub; $ctx->release; my $meta = $chub->meta(__PACKAGE__, {}); my $parent = $meta->{parent}; return undef unless $parent; return bless { Original_Pid => $$, Stack => $self->{Stack}, Hub => $parent, }, blessed($self); } sub child { my( $self, $name ) = @_; $name ||= "Child of " . $self->name; my $ctx = $self->ctx; my $parent = $ctx->hub; my $pmeta = $parent->meta(__PACKAGE__, {}); $self->croak("You already have a child named ($pmeta->{child}) running") if $pmeta->{child}; $pmeta->{child} = $name; # Clear $TODO for the child. my $orig_TODO = $self->find_TODO(undef, 1, undef); my $subevents = []; my $hub = $ctx->stack->new_hub( class => 'Test2::Hub::Subtest', ); $hub->pre_filter(sub { my ($active_hub, $e) = @_; # Turn a diag into a todo diag return Test::Builder::TodoDiag->new(%$e) if ref($e) eq 'Test2::Event::Diag'; return $e; }, inherit => 1) if $orig_TODO; $hub->listen(sub { push @$subevents => $_[1] }); $hub->set_nested( $parent->nested + 1 ); my $meta = $hub->meta(__PACKAGE__, {}); $meta->{Name} = $name; $meta->{TODO} = $orig_TODO; $meta->{TODO_PKG} = $ctx->trace->package; $meta->{parent} = $parent; $meta->{Test_Results} = []; $meta->{subevents} = $subevents; $meta->{subtest_id} = $hub->id; $meta->{subtest_uuid} = $hub->uuid; $meta->{subtest_buffered} = $parent->format ? 0 : 1; $self->_add_ts_hooks; $ctx->release; return bless { Original_Pid => $$, Stack => $self->{Stack}, Hub => $hub, no_log_results => $self->{no_log_results} }, blessed($self); } sub finalize { my $self = shift; my $ok = 1; ($ok) = @_ if @_; my $st_ctx = $self->ctx; my $chub = $self->{Hub} || return $st_ctx->release; my $meta = $chub->meta(__PACKAGE__, {}); if ($meta->{child}) { $self->croak("Can't call finalize() with child ($meta->{child}) active"); } local $? = 0; # don't fail if $subtests happened to set $? 
nonzero $self->{Stack}->pop($chub); $self->find_TODO($meta->{TODO_PKG}, 1, $meta->{TODO}); my $parent = $self->parent; my $ctx = $parent->ctx; my $trace = $ctx->trace; delete $ctx->hub->meta(__PACKAGE__, {})->{child}; $chub->finalize($trace->snapshot(hid => $chub->hid, nested => $chub->nested), 1) if $ok && $chub->count && !$chub->no_ending && !$chub->ended; my $plan = $chub->plan || 0; my $count = $chub->count; my $failed = $chub->failed; my $passed = $chub->is_passing; my $num_extra = $plan =~ m/\D/ ? 0 : $count - $plan; if ($count && $num_extra != 0) { my $s = $plan == 1 ? '' : 's'; $st_ctx->diag(<<"FAIL"); Looks like you planned $plan test$s but ran $count. FAIL } if ($failed) { my $s = $failed == 1 ? '' : 's'; my $qualifier = $num_extra == 0 ? '' : ' run'; $st_ctx->diag(<<"FAIL"); Looks like you failed $failed test$s of $count$qualifier. FAIL } if (!$passed && !$failed && $count && !$num_extra) { $st_ctx->diag(<<"FAIL"); All assertions inside the subtest passed, but errors were encountered. FAIL } $st_ctx->release; unless ($chub->bailed_out) { my $plan = $chub->plan; if ( $plan && $plan eq 'SKIP' ) { $parent->skip($chub->skip_reason, $meta->{Name}); } elsif ( !$chub->count ) { $parent->ok( 0, sprintf q[No tests run for subtest "%s"], $meta->{Name} ); } else { $parent->{subevents} = $meta->{subevents}; $parent->{subtest_id} = $meta->{subtest_id}; $parent->{subtest_uuid} = $meta->{subtest_uuid}; $parent->{subtest_buffered} = $meta->{subtest_buffered}; $parent->ok( $chub->is_passing, $meta->{Name} ); } } $ctx->release; return $chub->is_passing; } sub subtest { my $self = shift; my ($name, $code, @args) = @_; my $ctx = $self->ctx; $ctx->throw("subtest()'s second argument must be a code ref") unless $code && reftype($code) eq 'CODE'; $name ||= "Child of " . 
$self->name; $_->($name,$code,@args) for Test2::API::test2_list_pre_subtest_callbacks(); $ctx->note("Subtest: $name"); my $child = $self->child($name); my $start_pid = $$; my $st_ctx; my ($ok, $err, $finished, $child_error); T2_SUBTEST_WRAPPER: { my $ctx = $self->ctx; $st_ctx = $ctx->snapshot; $ctx->release; $ok = eval { local $Level = 1; $code->(@args); 1 }; ($err, $child_error) = ($@, $?); # They might have done 'BEGIN { skip_all => "whatever" }' if (!$ok && $err =~ m/Label not found for "last T2_SUBTEST_WRAPPER"/ || (blessed($err) && blessed($err) eq 'Test::Builder::Exception')) { $ok = undef; $err = undef; } else { $finished = 1; } } if ($start_pid != $$ && !$INC{'Test2/IPC.pm'}) { warn $ok ? "Forked inside subtest, but subtest never finished!\n" : $err; exit 255; } my $trace = $ctx->trace; if (!$finished) { if(my $bailed = $st_ctx->hub->bailed_out) { my $chub = $child->{Hub}; $self->{Stack}->pop($chub); $ctx->bail($bailed->reason); } my $code = $st_ctx->hub->exit_code; $ok = !$code; $err = "Subtest ended with exit code $code" if $code; } my $st_hub = $st_ctx->hub; my $plan = $st_hub->plan; my $count = $st_hub->count; if (!$count && (!defined($plan) || "$plan" ne 'SKIP')) { $st_ctx->plan(0) unless defined $plan; $st_ctx->diag('No tests run!'); } $child->finalize($st_ctx->trace); $ctx->release; die $err unless $ok; $? = $child_error if defined $child_error; return $st_hub->is_passing; } sub name { my $self = shift; my $ctx = $self->ctx; release $ctx, $ctx->hub->meta(__PACKAGE__, {})->{Name}; } sub reset { ## no critic (Subroutines::ProhibitBuiltinHomonyms) my ($self, %params) = @_; Test2::API::test2_unset_is_end(); # We leave this a global because it has to be localized and localizing # hash keys is just asking for pain. Also, it was documented. $Level = 1; $self->{no_log_results} = $ENV{TEST_NO_LOG_RESULTS} ? 1 : 0 unless $params{singleton}; $self->{Original_Pid} = Test2::API::test2_in_preload() ? 
-1 : $$; my $ctx = $self->ctx; my $hub = $ctx->hub; $ctx->release; unless ($params{singleton}) { $hub->reset_state(); $hub->_tb_reset(); } $ctx = $self->ctx; my $meta = $ctx->hub->meta(__PACKAGE__, {}); %$meta = ( Name => $0, Ending => 0, Done_Testing => undef, Skip_All => 0, Test_Results => [], parent => $meta->{parent}, ); $self->{Exported_To} = undef unless $params{singleton}; $self->{Orig_Handles} ||= do { my $format = $ctx->hub->format; my $out; if ($format && $format->isa('Test2::Formatter::TAP')) { $out = $format->handles; } $out ? [@$out] : []; }; $self->use_numbers(1); $self->no_header(0) unless $params{singleton}; $self->no_ending(0) unless $params{singleton}; $self->reset_outputs; $ctx->release; return; } my %plan_cmds = ( no_plan => \&no_plan, skip_all => \&skip_all, tests => \&_plan_tests, ); sub plan { my( $self, $cmd, $arg ) = @_; return unless $cmd; my $ctx = $self->ctx; my $hub = $ctx->hub; $ctx->throw("You tried to plan twice") if $hub->plan; local $Level = $Level + 1; if( my $method = $plan_cmds{$cmd} ) { local $Level = $Level + 1; $self->$method($arg); } else { my @args = grep { defined } ( $cmd, $arg ); $ctx->throw("plan() doesn't understand @args"); } release $ctx, 1; } sub _plan_tests { my($self, $arg) = @_; my $ctx = $self->ctx; if($arg) { local $Level = $Level + 1; $self->expected_tests($arg); } elsif( !defined $arg ) { $ctx->throw("Got an undefined number of tests"); } else { $ctx->throw("You said to run 0 tests"); } $ctx->release; } sub expected_tests { my $self = shift; my($max) = @_; my $ctx = $self->ctx; if(@_) { $self->croak("Number of tests must be a positive integer. 
You gave it '$max'") unless $max =~ /^\+?\d+$/; $ctx->plan($max); } my $hub = $ctx->hub; $ctx->release; my $plan = $hub->plan; return 0 unless $plan; return 0 if $plan =~ m/\D/; return $plan; } sub no_plan { my($self, $arg) = @_; my $ctx = $self->ctx; if (defined $ctx->hub->plan) { warn "Plan already set, no_plan() is a no-op, this will change to a hard failure in the future."; $ctx->release; return; } $ctx->alert("no_plan takes no arguments") if $arg; $ctx->hub->plan('NO PLAN'); release $ctx, 1; } sub done_testing { my($self, $num_tests) = @_; my $ctx = $self->ctx; my $meta = $ctx->hub->meta(__PACKAGE__, {}); if ($meta->{Done_Testing}) { my ($file, $line) = @{$meta->{Done_Testing}}[1,2]; local $ctx->hub->{ended}; # OMG This is awful. $self->ok(0, "done_testing() was already called at $file line $line"); $ctx->release; return; } $meta->{Done_Testing} = [$ctx->trace->call]; my $plan = $ctx->hub->plan; my $count = $ctx->hub->count; # If done_testing() specified the number of tests, shut off no_plan if( defined $num_tests ) { $ctx->plan($num_tests) if !$plan || $plan eq 'NO PLAN'; } elsif ($count && defined $num_tests && $count != $num_tests) { $self->ok(0, "planned to run @{[ $self->expected_tests ]} but done_testing() expects $num_tests"); } else { $num_tests = $self->current_test; } if( $self->expected_tests && $num_tests != $self->expected_tests ) { $self->ok(0, "planned to run @{[ $self->expected_tests ]} ". 
"but done_testing() expects $num_tests"); } $ctx->plan($num_tests) if $ctx->hub->plan && $ctx->hub->plan eq 'NO PLAN'; $ctx->hub->finalize($ctx->trace, 1); release $ctx, 1; } sub has_plan { my $self = shift; my $ctx = $self->ctx; my $plan = $ctx->hub->plan; $ctx->release; return( $plan ) if $plan && $plan !~ m/\D/; return('no_plan') if $plan && $plan eq 'NO PLAN'; return(undef); } sub skip_all { my( $self, $reason ) = @_; my $ctx = $self->ctx; $ctx->hub->meta(__PACKAGE__, {})->{Skip_All} = $reason || 1; # Work around old perl bug if ($] < 5.020000) { my $begin = 0; my $level = 0; while (my @call = caller($level++)) { last unless @call && $call[0]; next unless $call[3] =~ m/::BEGIN$/; $begin++; last; } # HACK! die 'Label not found for "last T2_SUBTEST_WRAPPER"' if $begin && $ctx->hub->meta(__PACKAGE__, {})->{parent}; } $ctx->plan(0, SKIP => $reason); } sub exported_to { my( $self, $pack ) = @_; if( defined $pack ) { $self->{Exported_To} = $pack; } return $self->{Exported_To}; } sub ok { my( $self, $test, $name ) = @_; my $ctx = $self->ctx; # $test might contain an object which we don't want to accidentally # store, so we turn it into a boolean. $test = $test ? 1 : 0; # In case $name is a string overloaded object, force it to stringify. no warnings qw/uninitialized numeric/; $name = "$name" if defined $name; # Profiling showed that the regex here was a huge time waster, doing the # numeric addition first cuts our profile time from ~300ms to ~50ms $self->diag(<<" ERR") if 0 + $name && $name =~ /^[\d\s]+$/; You named your test '$name'. You shouldn't use numbers for your test names. Very confusing. ERR use warnings qw/uninitialized numeric/; my $trace = $ctx->{trace}; my $hub = $ctx->{hub}; my $result = { ok => $test, actual_ok => $test, reason => '', type => '', (name => defined($name) ? 
$name : ''), }; $hub->{_meta}->{+__PACKAGE__}->{Test_Results}[ $hub->{count} ] = $result unless $self->{no_log_results}; my $orig_name = $name; my @attrs; my $subevents = delete $self->{subevents}; my $subtest_id = delete $self->{subtest_id}; my $subtest_uuid = delete $self->{subtest_uuid}; my $subtest_buffered = delete $self->{subtest_buffered}; my $epkg = 'Test2::Event::Ok'; if ($subevents) { $epkg = 'Test2::Event::Subtest'; push @attrs => (subevents => $subevents, subtest_id => $subtest_id, subtest_uuid => $subtest_uuid, buffered => $subtest_buffered); } my $e = bless { trace => bless( {%$trace}, 'Test2::EventFacet::Trace'), pass => $test, name => $name, _meta => {'Test::Builder' => $result}, effective_pass => $test, @attrs, }, $epkg; $hub->send($e); $self->_ok_debug($trace, $orig_name) unless($test); $ctx->release; return $test; } sub _ok_debug { my $self = shift; my ($trace, $orig_name) = @_; my $is_todo = $self->in_todo; my $msg = $is_todo ? "Failed (TODO)" : "Failed"; my (undef, $file, $line) = $trace->call; if (defined $orig_name) { $self->diag(qq[ $msg test '$orig_name'\n at $file line $line.\n]); } else { $self->diag(qq[ $msg test at $file line $line.\n]); } } sub _diag_fh { my $self = shift; local $Level = $Level + 1; return $self->in_todo ? $self->todo_output : $self->failure_output; } sub _unoverload { my ($self, $type, $thing) = @_; return unless ref $$thing; return unless blessed($$thing) || scalar $self->_try(sub{ $$thing->isa('UNIVERSAL') }); { local ($!, $@); require overload; } my $string_meth = overload::Method( $$thing, $type ) || return; $$thing = $$thing->$string_meth(); } sub _unoverload_str { my $self = shift; $self->_unoverload( q[""], $_ ) for @_; } sub _unoverload_num { my $self = shift; $self->_unoverload( '0+', $_ ) for @_; for my $val (@_) { next unless $self->_is_dualvar($$val); $$val = $$val + 0; } } # This is a hack to detect a dualvar such as $! sub _is_dualvar { my( $self, $val ) = @_; # Objects are not dualvars. 
return 0 if ref $val; no warnings 'numeric'; my $numval = $val + 0; return ($numval != 0 and $numval ne $val ? 1 : 0); } sub is_eq { my( $self, $got, $expect, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; if( !defined $got || !defined $expect ) { # undef only matches undef and nothing else my $test = !defined $got && !defined $expect; $self->ok( $test, $name ); $self->_is_diag( $got, 'eq', $expect ) unless $test; $ctx->release; return $test; } release $ctx, $self->cmp_ok( $got, 'eq', $expect, $name ); } sub is_num { my( $self, $got, $expect, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; if( !defined $got || !defined $expect ) { # undef only matches undef and nothing else my $test = !defined $got && !defined $expect; $self->ok( $test, $name ); $self->_is_diag( $got, '==', $expect ) unless $test; $ctx->release; return $test; } release $ctx, $self->cmp_ok( $got, '==', $expect, $name ); } sub _diag_fmt { my( $self, $type, $val ) = @_; if( defined $$val ) { if( $type eq 'eq' or $type eq 'ne' ) { # quote and force string context $$val = "'$$val'"; } else { # force numeric context $self->_unoverload_num($val); } } else { $$val = 'undef'; } return; } sub _is_diag { my( $self, $got, $type, $expect ) = @_; $self->_diag_fmt( $type, $_ ) for \$got, \$expect; local $Level = $Level + 1; return $self->diag(<<"DIAGNOSTIC"); got: $got expected: $expect DIAGNOSTIC } sub _isnt_diag { my( $self, $got, $type ) = @_; $self->_diag_fmt( $type, \$got ); local $Level = $Level + 1; return $self->diag(<<"DIAGNOSTIC"); got: $got expected: anything else DIAGNOSTIC } sub isnt_eq { my( $self, $got, $dont_expect, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; if( !defined $got || !defined $dont_expect ) { # undef only matches undef and nothing else my $test = defined $got || defined $dont_expect; $self->ok( $test, $name ); $self->_isnt_diag( $got, 'ne' ) unless $test; $ctx->release; return $test; } release $ctx, $self->cmp_ok( $got, 'ne', 
$dont_expect, $name ); } sub isnt_num { my( $self, $got, $dont_expect, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; if( !defined $got || !defined $dont_expect ) { # undef only matches undef and nothing else my $test = defined $got || defined $dont_expect; $self->ok( $test, $name ); $self->_isnt_diag( $got, '!=' ) unless $test; $ctx->release; return $test; } release $ctx, $self->cmp_ok( $got, '!=', $dont_expect, $name ); } sub like { my( $self, $thing, $regex, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; release $ctx, $self->_regex_ok( $thing, $regex, '=~', $name ); } sub unlike { my( $self, $thing, $regex, $name ) = @_; my $ctx = $self->ctx; local $Level = $Level + 1; release $ctx, $self->_regex_ok( $thing, $regex, '!~', $name ); } my %numeric_cmps = map { ( $_, 1 ) } ( "<", "<=", ">", ">=", "==", "!=", "<=>" ); # Bad, these are not comparison operators. Should we include more? my %cmp_ok_bl = map { ( $_, 1 ) } ( "=", "+=", ".=", "x=", "^=", "|=", "||=", "&&=", "..."); sub cmp_ok { my( $self, $got, $type, $expect, $name ) = @_; my $ctx = $self->ctx; if ($cmp_ok_bl{$type}) { $ctx->throw("$type is not a valid comparison operator in cmp_ok()"); } my ($test, $succ); my $error; { ## no critic (BuiltinFunctions::ProhibitStringyEval) local( $@, $!, $SIG{__DIE__} ); # isolate eval my($pack, $file, $line) = $ctx->trace->call(); # This is so that warnings come out at the caller's level $succ = eval qq[ #line $line "(eval in cmp_ok) $file" \$test = (\$got $type \$expect); 1; ]; $error = $@; } local $Level = $Level + 1; my $ok = $self->ok( $test, $name ); # Treat overloaded objects as numbers if we're asked to do a # numeric comparison. my $unoverload = $numeric_cmps{$type} ? 
'_unoverload_num' : '_unoverload_str'; $self->diag(<<"END") unless $succ; An error occurred while using $type: ------------------------------------ $error ------------------------------------ END unless($ok) { $self->$unoverload( \$got, \$expect ); if( $type =~ /^(eq|==)$/ ) { $self->_is_diag( $got, $type, $expect ); } elsif( $type =~ /^(ne|!=)$/ ) { no warnings; my $eq = ($got eq $expect || $got == $expect) && ( (defined($got) xor defined($expect)) || (length($got) != length($expect)) ); use warnings; if ($eq) { $self->_cmp_diag( $got, $type, $expect ); } else { $self->_isnt_diag( $got, $type ); } } else { $self->_cmp_diag( $got, $type, $expect ); } } return release $ctx, $ok; } sub _cmp_diag { my( $self, $got, $type, $expect ) = @_; $got = defined $got ? "'$got'" : 'undef'; $expect = defined $expect ? "'$expect'" : 'undef'; local $Level = $Level + 1; return $self->diag(<<"DIAGNOSTIC"); $got $type $expect DIAGNOSTIC } sub _caller_context { my $self = shift; my( $pack, $file, $line ) = $self->caller(1); my $code = ''; $code .= "#line $line $file\n" if defined $file and defined $line; return $code; } sub BAIL_OUT { my( $self, $reason ) = @_; my $ctx = $self->ctx; $self->{Bailed_Out} = 1; $ctx->bail($reason); } { no warnings 'once'; *BAILOUT = \&BAIL_OUT; } sub skip { my( $self, $why, $name ) = @_; $why ||= ''; $name = '' unless defined $name; $self->_unoverload_str( \$why ); my $ctx = $self->ctx; $ctx->hub->meta(__PACKAGE__, {})->{Test_Results}[ $ctx->hub->count ] = { 'ok' => 1, actual_ok => 1, name => $name, type => 'skip', reason => $why, } unless $self->{no_log_results}; $name =~ s|#|\\#|g; # # in a name can confuse Test::Harness. 
$name =~ s{\n}{\n# }sg; $why =~ s{\n}{\n# }sg; my $tctx = $ctx->snapshot; $tctx->skip('', $why); return release $ctx, 1; } sub todo_skip { my( $self, $why ) = @_; $why ||= ''; my $ctx = $self->ctx; $ctx->hub->meta(__PACKAGE__, {})->{Test_Results}[ $ctx->hub->count ] = { 'ok' => 1, actual_ok => 0, name => '', type => 'todo_skip', reason => $why, } unless $self->{no_log_results}; $why =~ s{\n}{\n# }sg; my $tctx = $ctx->snapshot; $tctx->send_event( 'Skip', todo => $why, todo_diag => 1, reason => $why, pass => 0); return release $ctx, 1; } sub maybe_regex { my( $self, $regex ) = @_; my $usable_regex = undef; return $usable_regex unless defined $regex; my( $re, $opts ); # Check for qr/foo/ if( _is_qr($regex) ) { $usable_regex = $regex; } # Check for '/foo/' or 'm,foo,' elsif(( $re, $opts ) = $regex =~ m{^ /(.*)/ (\w*) $ }sx or ( undef, $re, $opts ) = $regex =~ m,^ m([^\w\s]) (.+) \1 (\w*) $,sx ) { $usable_regex = length $opts ? "(?$opts)$re" : $re; } return $usable_regex; } sub _is_qr { my $regex = shift; # is_regexp() checks for regexes in a robust manner, say if they're # blessed. return re::is_regexp($regex) if defined &re::is_regexp; return ref $regex eq 'Regexp'; } sub _regex_ok { my( $self, $thing, $regex, $cmp, $name ) = @_; my $ok = 0; my $usable_regex = $self->maybe_regex($regex); unless( defined $usable_regex ) { local $Level = $Level + 1; $ok = $self->ok( 0, $name ); $self->diag(" '$regex' doesn't look much like a regex to me."); return $ok; } { my $test; my $context = $self->_caller_context; { ## no critic (BuiltinFunctions::ProhibitStringyEval) local( $@, $!, $SIG{__DIE__} ); # isolate eval # No point in issuing an uninit warning, they'll see it in the diagnostics no warnings 'uninitialized'; $test = eval $context . q{$test = $thing =~ /$usable_regex/ ? 1 : 0}; } $test = !$test if $cmp eq '!~'; local $Level = $Level + 1; $ok = $self->ok( $test, $name ); } unless($ok) { $thing = defined $thing ? "'$thing'" : 'undef'; my $match = $cmp eq '=~' ? 
"doesn't match" : "matches"; local $Level = $Level + 1; $self->diag( sprintf <<'DIAGNOSTIC', $thing, $match, $regex ); %s %13s '%s' DIAGNOSTIC } return $ok; } sub is_fh { my $self = shift; my $maybe_fh = shift; return 0 unless defined $maybe_fh; return 1 if ref $maybe_fh eq 'GLOB'; # its a glob ref return 1 if ref \$maybe_fh eq 'GLOB'; # its a glob return eval { $maybe_fh->isa("IO::Handle") } || eval { tied($maybe_fh)->can('TIEHANDLE') }; } sub level { my( $self, $level ) = @_; if( defined $level ) { $Level = $level; } return $Level; } sub use_numbers { my( $self, $use_nums ) = @_; my $ctx = $self->ctx; my $format = $ctx->hub->format; unless ($format && $format->can('no_numbers') && $format->can('set_no_numbers')) { warn "The current formatter does not support 'use_numbers'" if $format; return release $ctx, 0; } $format->set_no_numbers(!$use_nums) if defined $use_nums; return release $ctx, $format->no_numbers ? 0 : 1; } BEGIN { for my $method (qw(no_header no_diag)) { my $set = "set_$method"; my $code = sub { my( $self, $no ) = @_; my $ctx = $self->ctx; my $format = $ctx->hub->format; unless ($format && $format->can($set)) { warn "The current formatter does not support '$method'" if $format; $ctx->release; return } $format->$set($no) if defined $no; return release $ctx, $format->$method ? 1 : 0; }; no strict 'refs'; ## no critic *$method = $code; } } sub no_ending { my( $self, $no ) = @_; my $ctx = $self->ctx; $ctx->hub->set_no_ending($no) if defined $no; return release $ctx, $ctx->hub->no_ending; } sub diag { my $self = shift; return unless @_; my $text = join '' => map {defined($_) ? $_ : 'undef'} @_; if (Test2::API::test2_in_preload()) { chomp($text); $text =~ s/^/# /msg; print STDERR $text, "\n"; return 0; } my $ctx = $self->ctx; $ctx->diag($text); $ctx->release; return 0; } sub note { my $self = shift; return unless @_; my $text = join '' => map {defined($_) ? 
$_ : 'undef'} @_; if (Test2::API::test2_in_preload()) { chomp($text); $text =~ s/^/# /msg; print STDOUT $text, "\n"; return 0; } my $ctx = $self->ctx; $ctx->note($text); $ctx->release; return 0; } sub explain { my $self = shift; local ($@, $!); require Data::Dumper; return map { ref $_ ? do { my $dumper = Data::Dumper->new( [$_] ); $dumper->Indent(1)->Terse(1); $dumper->Sortkeys(1) if $dumper->can("Sortkeys"); $dumper->Dump; } : $_ } @_; } sub output { my( $self, $fh ) = @_; my $ctx = $self->ctx; my $format = $ctx->hub->format; $ctx->release; return unless $format && $format->isa('Test2::Formatter::TAP'); $format->handles->[Test2::Formatter::TAP::OUT_STD()] = $self->_new_fh($fh) if defined $fh; return $format->handles->[Test2::Formatter::TAP::OUT_STD()]; } sub failure_output { my( $self, $fh ) = @_; my $ctx = $self->ctx; my $format = $ctx->hub->format; $ctx->release; return unless $format && $format->isa('Test2::Formatter::TAP'); $format->handles->[Test2::Formatter::TAP::OUT_ERR()] = $self->_new_fh($fh) if defined $fh; return $format->handles->[Test2::Formatter::TAP::OUT_ERR()]; } sub todo_output { my( $self, $fh ) = @_; my $ctx = $self->ctx; my $format = $ctx->hub->format; $ctx->release; return unless $format && $format->isa('Test::Builder::Formatter'); $format->handles->[Test::Builder::Formatter::OUT_TODO()] = $self->_new_fh($fh) if defined $fh; return $format->handles->[Test::Builder::Formatter::OUT_TODO()]; } sub _new_fh { my $self = shift; my($file_or_fh) = shift; my $fh; if( $self->is_fh($file_or_fh) ) { $fh = $file_or_fh; } elsif( ref $file_or_fh eq 'SCALAR' ) { # Scalar refs as filehandles was added in 5.8. if( $] >= 5.008 ) { open $fh, ">>", $file_or_fh or $self->croak("Can't open scalar ref $file_or_fh: $!"); } # Emulate scalar ref filehandles with a tie. 
else { $fh = Test::Builder::IO::Scalar->new($file_or_fh) or $self->croak("Can't tie scalar ref $file_or_fh"); } } else { open $fh, ">", $file_or_fh or $self->croak("Can't open test output log $file_or_fh: $!"); _autoflush($fh); } return $fh; } sub _autoflush { my($fh) = shift; my $old_fh = select $fh; $| = 1; select $old_fh; return; } sub reset_outputs { my $self = shift; my $ctx = $self->ctx; my $format = $ctx->hub->format; $ctx->release; return unless $format && $format->isa('Test2::Formatter::TAP'); $format->set_handles([@{$self->{Orig_Handles}}]) if $self->{Orig_Handles}; return; } sub carp { my $self = shift; my $ctx = $self->ctx; $ctx->alert(join "", @_); $ctx->release; } sub croak { my $self = shift; my $ctx = $self->ctx; $ctx->throw(join "", @_); $ctx->release; } sub current_test { my( $self, $num ) = @_; my $ctx = $self->ctx; my $hub = $ctx->hub; if( defined $num ) { $hub->set_count($num); unless ($self->{no_log_results}) { # If the test counter is being pushed forward fill in the details. my $test_results = $ctx->hub->meta(__PACKAGE__, {})->{Test_Results}; if ($num > @$test_results) { my $start = @$test_results ? @$test_results : 0; for ($start .. $num - 1) { $test_results->[$_] = { 'ok' => 1, actual_ok => undef, reason => 'incrementing test number', type => 'unknown', name => undef }; } } # If backward, wipe history. Its their funeral. elsif ($num < @$test_results) { $#{$test_results} = $num - 1; } } } return release $ctx, $hub->count; } sub is_passing { my $self = shift; my $ctx = $self->ctx; my $hub = $ctx->hub; if( @_ ) { my ($bool) = @_; $hub->set_failed(0) if $bool; $hub->is_passing($bool); } return release $ctx, $hub->is_passing; } sub summary { my($self) = shift; return if $self->{no_log_results}; my $ctx = $self->ctx; my $data = $ctx->hub->meta(__PACKAGE__, {})->{Test_Results}; $ctx->release; return map { $_ ? 
$_->{'ok'} : () } @$data; } sub details { my $self = shift; return if $self->{no_log_results}; my $ctx = $self->ctx; my $data = $ctx->hub->meta(__PACKAGE__, {})->{Test_Results}; $ctx->release; return @$data; } sub find_TODO { my( $self, $pack, $set, $new_value ) = @_; my $ctx = $self->ctx; $pack ||= $ctx->trace->package || $self->exported_to; $ctx->release; return unless $pack; no strict 'refs'; ## no critic no warnings 'once'; my $old_value = ${ $pack . '::TODO' }; $set and ${ $pack . '::TODO' } = $new_value; return $old_value; } sub todo { my( $self, $pack ) = @_; local $Level = $Level + 1; my $ctx = $self->ctx; $ctx->release; my $meta = $ctx->hub->meta(__PACKAGE__, {todo => []})->{todo}; return $meta->[-1]->[1] if $meta && @$meta; $pack ||= $ctx->trace->package; return unless $pack; no strict 'refs'; ## no critic no warnings 'once'; return ${ $pack . '::TODO' }; } sub in_todo { my $self = shift; local $Level = $Level + 1; my $ctx = $self->ctx; $ctx->release; my $meta = $ctx->hub->meta(__PACKAGE__, {todo => []})->{todo}; return 1 if $meta && @$meta; my $pack = $ctx->trace->package || return 0; no strict 'refs'; ## no critic no warnings 'once'; my $todo = ${ $pack . '::TODO' }; return 0 unless defined $todo; return 0 if "$todo" eq ''; return 1; } sub todo_start { my $self = shift; my $message = @_ ? 
shift : ''; my $ctx = $self->ctx; my $hub = $ctx->hub; my $filter = $hub->pre_filter(sub { my ($active_hub, $e) = @_; # Turn a diag into a todo diag return Test::Builder::TodoDiag->new(%$e) if ref($e) eq 'Test2::Event::Diag'; # Set todo on ok's if ($hub == $active_hub && $e->isa('Test2::Event::Ok')) { $e->set_todo($message); $e->set_effective_pass(1); if (my $result = $e->get_meta(__PACKAGE__)) { $result->{reason} ||= $message; $result->{type} ||= 'todo'; $result->{ok} = 1; } } return $e; }, inherit => 1); push @{$ctx->hub->meta(__PACKAGE__, {todo => []})->{todo}} => [$filter, $message]; $ctx->release; return; } sub todo_end { my $self = shift; my $ctx = $self->ctx; my $set = pop @{$ctx->hub->meta(__PACKAGE__, {todo => []})->{todo}}; $ctx->throw('todo_end() called without todo_start()') unless $set; $ctx->hub->pre_unfilter($set->[0]); $ctx->release; return; } sub caller { ## no critic (Subroutines::ProhibitBuiltinHomonyms) my( $self ) = @_; my $ctx = $self->ctx; my $trace = $ctx->trace; $ctx->release; return wantarray ? $trace->call : $trace->package; } sub _try { my( $self, $code, %opts ) = @_; my $error; my $return; { local $!; # eval can mess up $! local $@; # don't set $@ in the test local $SIG{__DIE__}; # don't trip an outside DIE handler. $return = eval { $code->() }; $error = $@; } die $error if $error and $opts{die_on_fail}; return wantarray ? ( $return, $error ) : $return; } sub _ending { my $self = shift; my ($ctx, $real_exit_code, $new) = @_; unless ($ctx) { my $octx = $self->ctx; $ctx = $octx->snapshot; $octx->release; } return if $ctx->hub->no_ending; return if $ctx->hub->meta(__PACKAGE__, {})->{Ending}++; # Don't bother with an ending if this is a forked copy. Only the parent # should do the ending. 
return unless $self->{Original_Pid} == $$; my $hub = $ctx->hub; return if $hub->bailed_out; my $plan = $hub->plan; my $count = $hub->count; my $failed = $hub->failed; my $passed = $hub->is_passing; return unless $plan || $count || $failed; # Ran tests but never declared a plan or hit done_testing if( !$hub->plan and $hub->count ) { $self->diag("Tests were run but no plan was declared and done_testing() was not seen."); if($real_exit_code) { $self->diag(<<"FAIL"); Looks like your test exited with $real_exit_code just after $count. FAIL $$new ||= $real_exit_code; return; } # But if the tests ran, handle exit code. if($failed > 0) { my $exit_code = $failed <= 254 ? $failed : 254; $$new ||= $exit_code; return; } $$new ||= 254; return; } if ($real_exit_code && !$count) { $self->diag("Looks like your test exited with $real_exit_code before it could output anything."); $$new ||= $real_exit_code; return; } return if $plan && "$plan" eq 'SKIP'; if (!$count) { $self->diag('No tests run!'); $$new ||= 255; return; } if ($real_exit_code) { $self->diag(<<"FAIL"); Looks like your test exited with $real_exit_code just after $count. FAIL $$new ||= $real_exit_code; return; } if ($plan eq 'NO PLAN') { $ctx->plan( $count ); $plan = $hub->plan; } # Figure out if we passed or failed and print helpful messages. my $num_extra = $count - $plan; if ($num_extra != 0) { my $s = $plan == 1 ? '' : 's'; $self->diag(<<"FAIL"); Looks like you planned $plan test$s but ran $count. FAIL } if ($failed) { my $s = $failed == 1 ? '' : 's'; my $qualifier = $num_extra == 0 ? '' : ' run'; $self->diag(<<"FAIL"); Looks like you failed $failed test$s of $count$qualifier. FAIL } if (!$passed && !$failed && $count && !$num_extra) { $ctx->diag(<<"FAIL"); All assertions passed, but errors were encountered. FAIL } my $exit_code = 0; if ($failed) { $exit_code = $failed <= 254 ? 
$failed : 254; } elsif ($num_extra != 0) { $exit_code = 255; } elsif (!$passed) { $exit_code = 255; } $$new ||= $exit_code; return; } # Some things used this even though it was private... I am looking at you # Test::Builder::Prefix... sub _print_comment { my( $self, $fh, @msgs ) = @_; return if $self->no_diag; return unless @msgs; # Prevent printing headers when compiling (i.e. -c) return if $^C; # Smash args together like print does. # Convert undef to 'undef' so its readable. my $msg = join '', map { defined($_) ? $_ : 'undef' } @msgs; # Escape the beginning, _print will take care of the rest. $msg =~ s/^/# /; local( $\, $", $, ) = ( undef, ' ', '' ); print $fh $msg; return 0; } # This is used by Test::SharedFork to turn on IPC after the fact. Not # documenting because I do not want it used. The method name is borrowed from # Test::Builder 2 # Once Test2 stuff goes stable this method will be removed and Test::SharedFork # will be made smarter. sub coordinate_forks { my $self = shift; { local ($@, $!); require Test2::IPC; } Test2::IPC->import; Test2::API::test2_ipc_enable_polling(); Test2::API::test2_load(); my $ipc = Test2::IPC::apply_ipc($self->{Stack}); $ipc->set_no_fatal(1); Test2::API::test2_no_wait(1); } sub no_log_results { $_[0]->{no_log_results} = 1 } 1; __END__ =head1 NAME Test::Builder - Backend for building test libraries =head1 SYNOPSIS package My::Test::Module; use base 'Test::Builder::Module'; my $CLASS = __PACKAGE__; sub ok { my($test, $name) = @_; my $tb = $CLASS->builder; $tb->ok($test, $name); } =head1 DESCRIPTION L<Test::Simple> and L<Test::More> have proven to be popular testing modules, but they're not always flexible enough. Test::Builder provides a building block upon which to write your own test libraries I<which can work together>. =head2 Construction =over 4 =item B<new> my $Test = Test::Builder->new; Returns a Test::Builder object representing the current state of the test. 
Since you only run one test per program C<new> always returns the same Test::Builder object. No matter how many times you call C<new()>, you're getting the same object. This is called a singleton. This is done so that multiple modules share such global information as the test counter and where test output is going. If you want a completely new Test::Builder object different from the singleton, use C<create>. =item B<create> my $Test = Test::Builder->create; Ok, so there can be more than one Test::Builder object and this is how you get it. You might use this instead of C<new()> if you're testing a Test::Builder based module, but otherwise you probably want C<new>. B<NOTE>: the implementation is not complete. C<level>, for example, is still shared by B<all> Test::Builder objects, even ones created using this method. Also, the method name may change in the future. =item B<subtest> $builder->subtest($name, \&subtests, @args); See documentation of C<subtest> in Test::More. C<subtest> also, and optionally, accepts arguments which will be passed to the subtests reference. =item B<name> diag $builder->name; Returns the name of the current builder. Top level builders default to C<$0> (the name of the executable). Child builders are named via the C<child> method. If no name is supplied, will be named "Child of $parent->name". =item B<reset> $Test->reset; Reinitializes the Test::Builder singleton to its original state. Mostly useful for tests run in persistent environments where the same test might be run multiple times in the same process. =back =head2 Setting up tests These methods are for setting up tests and declaring how many there are. You usually only want to call one of these methods. =over 4 =item B<plan> $Test->plan('no_plan'); $Test->plan( skip_all => $reason ); $Test->plan( tests => $num_tests ); A convenient way to set up your tests. Call this and Test::Builder will print the appropriate headers and take the appropriate actions. 
If you call C<plan()>, don't call any of the other methods below. =item B<expected_tests> my $max = $Test->expected_tests; $Test->expected_tests($max); Gets/sets the number of tests we expect this test to run and prints out the appropriate headers. =item B<no_plan> $Test->no_plan; Declares that this test will run an indeterminate number of tests. =item B<done_testing> $Test->done_testing(); $Test->done_testing($num_tests); Declares that you are done testing, no more tests will be run after this point. If a plan has not yet been output, it will do so. $num_tests is the number of tests you planned to run. If a numbered plan was already declared, and if this contradicts, a failing test will be run to reflect the planning mistake. If C<no_plan> was declared, this will override. If C<done_testing()> is called twice, the second call will issue a failing test. If C<$num_tests> is omitted, the number of tests run will be used, like no_plan. C<done_testing()> is, in effect, used when you'd want to use C<no_plan>, but safer. You'd use it like so: $Test->ok($a == $b); $Test->done_testing(); Or to plan a variable number of tests: for my $test (@tests) { $Test->ok($test); } $Test->done_testing(scalar @tests); =item B<has_plan> $plan = $Test->has_plan Find out whether a plan has been defined. C<$plan> is either C<undef> (no plan has been set), C<no_plan> (indeterminate # of tests) or an integer (the number of expected tests). =item B<skip_all> $Test->skip_all; $Test->skip_all($reason); Skips all the tests, using the given C<$reason>. Exits immediately with 0. =item B<exported_to> my $pack = $Test->exported_to; $Test->exported_to($pack); Tells Test::Builder what package you exported your functions to. This method isn't terribly useful since modules which share the same Test::Builder object might get exported to different packages and only the last one will be honored. =back =head2 Running tests These actually run the tests, analogous to the functions in Test::More. 
They all return true if the test passed, false if the test failed. C<$name> is always optional. =over 4 =item B<ok> $Test->ok($test, $name); Your basic test. Pass if C<$test> is true, fail if $test is false. Just like Test::Simple's C<ok()>. =item B<is_eq> $Test->is_eq($got, $expected, $name); Like Test::More's C<is()>. Checks if C<$got eq $expected>. This is the string version. C<undef> only ever matches another C<undef>. =item B<is_num> $Test->is_num($got, $expected, $name); Like Test::More's C<is()>. Checks if C<$got == $expected>. This is the numeric version. C<undef> only ever matches another C<undef>. =item B<isnt_eq> $Test->isnt_eq($got, $dont_expect, $name); Like L<Test::More>'s C<isnt()>. Checks if C<$got ne $dont_expect>. This is the string version. =item B<isnt_num> $Test->isnt_num($got, $dont_expect, $name); Like L<Test::More>'s C<isnt()>. Checks if C<$got ne $dont_expect>. This is the numeric version. =item B<like> $Test->like($thing, qr/$regex/, $name); $Test->like($thing, '/$regex/', $name); Like L<Test::More>'s C<like()>. Checks if $thing matches the given C<$regex>. =item B<unlike> $Test->unlike($thing, qr/$regex/, $name); $Test->unlike($thing, '/$regex/', $name); Like L<Test::More>'s C<unlike()>. Checks if $thing B<does not match> the given C<$regex>. =item B<cmp_ok> $Test->cmp_ok($thing, $type, $that, $name); Works just like L<Test::More>'s C<cmp_ok()>. $Test->cmp_ok($big_num, '!=', $other_big_num); =back =head2 Other Testing Methods These are methods which are used in the course of writing a test but are not themselves tests. =over 4 =item B<BAIL_OUT> $Test->BAIL_OUT($reason); Indicates to the L<Test::Harness> that things are going so badly all testing should terminate. This includes running any additional test scripts. It will exit with 255. =for deprecated BAIL_OUT() used to be BAILOUT() =item B<skip> $Test->skip; $Test->skip($why); Skips the current test, reporting C<$why>. 
=item B<todo_skip> $Test->todo_skip; $Test->todo_skip($why); Like C<skip()>, only it will declare the test as failing and TODO. Similar to print "not ok $tnum # TODO $why\n"; =begin _unimplemented =item B<skip_rest> $Test->skip_rest; $Test->skip_rest($reason); Like C<skip()>, only it skips all the rest of the tests you plan to run and terminates the test. If you're running under C<no_plan>, it skips once and terminates the test. =end _unimplemented =back =head2 Test building utility methods These methods are useful when writing your own test methods. =over 4 =item B<maybe_regex> $Test->maybe_regex(qr/$regex/); $Test->maybe_regex('/$regex/'); This method used to be useful back when Test::Builder worked on Perls before 5.6 which didn't have qr//. Now its pretty useless. Convenience method for building testing functions that take regular expressions as arguments. Takes a quoted regular expression produced by C<qr//>, or a string representing a regular expression. Returns a Perl value which may be used instead of the corresponding regular expression, or C<undef> if its argument is not recognized. For example, a version of C<like()>, sans the useful diagnostic messages, could be written as: sub laconic_like { my ($self, $thing, $regex, $name) = @_; my $usable_regex = $self->maybe_regex($regex); die "expecting regex, found '$regex'\n" unless $usable_regex; $self->ok($thing =~ m/$usable_regex/, $name); } =item B<is_fh> my $is_fh = $Test->is_fh($thing); Determines if the given C<$thing> can be used as a filehandle. =cut =back =head2 Test style =over 4 =item B<level> $Test->level($how_high); How far up the call stack should C<$Test> look when reporting where the test failed. Defaults to 1. Setting C<$Test::Builder::Level> overrides. 
This is typically useful localized: sub my_ok { my $test = shift; local $Test::Builder::Level = $Test::Builder::Level + 1; $TB->ok($test); } To be polite to other functions wrapping your own you usually want to increment C<$Level> rather than set it to a constant. =item B<use_numbers> $Test->use_numbers($on_or_off); Whether or not the test should output numbers. That is, this if true: ok 1 ok 2 ok 3 or this if false ok ok ok Most useful when you can't depend on the test output order, such as when threads or forking is involved. Defaults to on. =item B<no_diag> $Test->no_diag($no_diag); If set true no diagnostics will be printed. This includes calls to C<diag()>. =item B<no_ending> $Test->no_ending($no_ending); Normally, Test::Builder does some extra diagnostics when the test ends. It also changes the exit code as described below. If this is true, none of that will be done. =item B<no_header> $Test->no_header($no_header); If set to true, no "1..N" header will be printed. =back =head2 Output Controlling where the test output goes. It's ok for your test to change where STDOUT and STDERR point to, Test::Builder's default output settings will not be affected. =over 4 =item B<diag> $Test->diag(@msgs); Prints out the given C<@msgs>. Like C<print>, arguments are simply appended together. Normally, it uses the C<failure_output()> handle, but if this is for a TODO test, the C<todo_output()> handle is used. Output will be indented and marked with a # so as not to interfere with test output. A newline will be put on the end if there isn't one already. We encourage using this rather than calling print directly. Returns false. Why? Because C<diag()> is often used in conjunction with a failing test (C<ok() || diag()>) it "passes through" the failure. return ok(...) 
|| diag(...); =for blame transfer Mark Fowler <mark@twoshortplanks.com> =item B<note> $Test->note(@msgs); Like C<diag()>, but it prints to the C<output()> handle so it will not normally be seen by the user except in verbose mode. =item B<explain> my @dump = $Test->explain(@msgs); Will dump the contents of any references in a human readable format. Handy for things like... is_deeply($have, $want) || diag explain $have; or is_deeply($have, $want) || note explain $have; =item B<output> =item B<failure_output> =item B<todo_output> my $filehandle = $Test->output; $Test->output($filehandle); $Test->output($filename); $Test->output(\$scalar); These methods control where Test::Builder will print its output. They take either an open C<$filehandle>, a C<$filename> to open and write to or a C<$scalar> reference to append to. It will always return a C<$filehandle>. B<output> is where normal "ok/not ok" test output goes. Defaults to STDOUT. B<failure_output> is where diagnostic output on test failures and C<diag()> goes. It is normally not read by Test::Harness and instead is displayed to the user. Defaults to STDERR. C<todo_output> is used instead of C<failure_output()> for the diagnostics of a failing TODO test. These will not be seen by the user. Defaults to STDOUT. =item reset_outputs $tb->reset_outputs; Resets all the output filehandles back to their defaults. =item carp $tb->carp(@message); Warns with C<@message> but the message will appear to come from the point where the original test function was called (C<< $tb->caller >>). =item croak $tb->croak(@message); Dies with C<@message> but the message will appear to come from the point where the original test function was called (C<< $tb->caller >>). =back =head2 Test Status and Info =over 4 =item B<no_log_results> This will turn off result long-term storage. Calling this method will make C<details> and C<summary> useless. You may want to use this if you are running enough tests to fill up all available memory. 
Test::Builder->new->no_log_results(); There is no way to turn it back on. =item B<current_test> my $curr_test = $Test->current_test; $Test->current_test($num); Gets/sets the current test number we're on. You usually shouldn't have to set this. If set forward, the details of the missing tests are filled in as 'unknown'. if set backward, the details of the intervening tests are deleted. You can erase history if you really want to. =item B<is_passing> my $ok = $builder->is_passing; Indicates if the test suite is currently passing. More formally, it will be false if anything has happened which makes it impossible for the test suite to pass. True otherwise. For example, if no tests have run C<is_passing()> will be true because even though a suite with no tests is a failure you can add a passing test to it and start passing. Don't think about it too much. =item B<summary> my @tests = $Test->summary; A simple summary of the tests so far. True for pass, false for fail. This is a logical pass/fail, so todos are passes. Of course, test #1 is $tests[0], etc... =item B<details> my @tests = $Test->details; Like C<summary()>, but with a lot more detail. $tests[$test_num - 1] = { 'ok' => is the test considered a pass? actual_ok => did it literally say 'ok'? name => name of the test (if any) type => type of test (if any, see below). reason => reason for the above (if any) }; 'ok' is true if Test::Harness will consider the test to be a pass. 'actual_ok' is a reflection of whether or not the test literally printed 'ok' or 'not ok'. This is for examining the result of 'todo' tests. 'name' is the name of the test. 'type' indicates if it was a special test. Normal tests have a type of ''. Type can be one of the following: skip see skip() todo see todo() todo_skip see todo_skip() unknown see below Sometimes the Test::Builder test counter is incremented without it printing any test output, for example, when C<current_test()> is changed. 
In these cases, Test::Builder doesn't know the result of the test, so its type is 'unknown'. These details for these tests are filled in. They are considered ok, but the name and actual_ok is left C<undef>. For example "not ok 23 - hole count # TODO insufficient donuts" would result in this structure: $tests[22] = # 23 - 1, since arrays start from 0. { ok => 1, # logically, the test passed since its todo actual_ok => 0, # in absolute terms, it failed name => 'hole count', type => 'todo', reason => 'insufficient donuts' }; =item B<todo> my $todo_reason = $Test->todo; my $todo_reason = $Test->todo($pack); If the current tests are considered "TODO" it will return the reason, if any. This reason can come from a C<$TODO> variable or the last call to C<todo_start()>. Since a TODO test does not need a reason, this function can return an empty string even when inside a TODO block. Use C<< $Test->in_todo >> to determine if you are currently inside a TODO block. C<todo()> is about finding the right package to look for C<$TODO> in. It's pretty good at guessing the right package to look at. It first looks for the caller based on C<$Level + 1>, since C<todo()> is usually called inside a test function. As a last resort it will use C<exported_to()>. Sometimes there is some confusion about where C<todo()> should be looking for the C<$TODO> variable. If you want to be sure, tell it explicitly what $pack to use. =item B<find_TODO> my $todo_reason = $Test->find_TODO(); my $todo_reason = $Test->find_TODO($pack); Like C<todo()> but only returns the value of C<$TODO> ignoring C<todo_start()>. Can also be used to set C<$TODO> to a new value while returning the old value: my $old_reason = $Test->find_TODO($pack, 1, $new_reason); =item B<in_todo> my $in_todo = $Test->in_todo; Returns true if the test is currently inside a TODO block. 
=item B<todo_start> $Test->todo_start(); $Test->todo_start($message); This method allows you declare all subsequent tests as TODO tests, up until the C<todo_end> method has been called. The C<TODO:> and C<$TODO> syntax is generally pretty good about figuring out whether or not we're in a TODO test. However, often we find that this is not possible to determine (such as when we want to use C<$TODO> but the tests are being executed in other packages which can't be inferred beforehand). Note that you can use this to nest "todo" tests $Test->todo_start('working on this'); # lots of code $Test->todo_start('working on that'); # more code $Test->todo_end; $Test->todo_end; This is generally not recommended, but large testing systems often have weird internal needs. We've tried to make this also work with the TODO: syntax, but it's not guaranteed and its use is also discouraged: TODO: { local $TODO = 'We have work to do!'; $Test->todo_start('working on this'); # lots of code $Test->todo_start('working on that'); # more code $Test->todo_end; $Test->todo_end; } Pick one style or another of "TODO" to be on the safe side. =item C<todo_end> $Test->todo_end; Stops running tests as "TODO" tests. This method is fatal if called without a preceding C<todo_start> method call. =item B<caller> my $package = $Test->caller; my($pack, $file, $line) = $Test->caller; my($pack, $file, $line) = $Test->caller($height); Like the normal C<caller()>, except it reports according to your C<level()>. C<$height> will be added to the C<level()>. If C<caller()> winds up off the top of the stack it report the highest context. =back =head1 EXIT CODES If all your tests passed, Test::Builder will exit with zero (which is normal). If anything failed it will exit with how many failed. If you run less (or more) tests than you planned, the missing (or extras) will be considered failures. If no tests were ever run Test::Builder will throw a warning and exit with 255. 
If the test died, even after having successfully completed all its tests, it will still be considered a failure and will exit with 255. So the exit codes are... 0 all tests successful 255 test died or all passed but wrong # of tests run any other number how many failed (including missing or extras) If you fail more than 254 tests, it will be reported as 254. =head1 THREADS In perl 5.8.1 and later, Test::Builder is thread-safe. The test number is shared by all threads. This means if one thread sets the test number using C<current_test()> they will all be effected. While versions earlier than 5.8.1 had threads they contain too many bugs to support. Test::Builder is only thread-aware if threads.pm is loaded I<before> Test::Builder. You can directly disable thread support with one of the following: $ENV{T2_NO_IPC} = 1 or no Test2::IPC; or Test2::API::test2_ipc_disable() =head1 MEMORY An informative hash, accessible via C<details()>, is stored for each test you perform. So memory usage will scale linearly with each test run. Although this is not a problem for most test suites, it can become an issue if you do large (hundred thousands to million) combinatorics tests in the same run. In such cases, you are advised to either split the test file into smaller ones, or use a reverse approach, doing "normal" (code) compares and triggering C<fail()> should anything go unexpected. Future versions of Test::Builder will have a way to turn history off. =head1 EXAMPLES CPAN can provide the best examples. L<Test::Simple>, L<Test::More>, L<Test::Exception> and L<Test::Differences> all use Test::Builder. 
=head1 SEE ALSO =head2 INTERNALS L<Test2>, L<Test2::API> =head2 LEGACY L<Test::Simple>, L<Test::More> =head2 EXTERNAL L<Test::Harness> =head1 AUTHORS Original code by chromatic, maintained by Michael G Schwern E<lt>schwern@pobox.comE<gt> =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 COPYRIGHT Copyright 2002-2008 by chromatic E<lt>chromatic@wgz.orgE<gt> and Michael G Schwern E<lt>schwern@pobox.comE<gt>. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See F<http://www.perl.com/perl/misc/Artistic.html> Simple.pm 0000644 00000014534 15137540314 0006345 0 ustar 00 package Test::Simple; use 5.006; use strict; our $VERSION = '1.302175'; use Test::Builder::Module; our @ISA = qw(Test::Builder::Module); our @EXPORT = qw(ok); my $CLASS = __PACKAGE__; =head1 NAME Test::Simple - Basic utilities for writing tests. =head1 SYNOPSIS use Test::Simple tests => 1; ok( $foo eq $bar, 'foo is bar' ); =head1 DESCRIPTION ** If you are unfamiliar with testing B<read L<Test::Tutorial> first!> ** This is an extremely simple, extremely basic module for writing tests suitable for CPAN modules and other pursuits. If you wish to do more complicated testing, use the Test::More module (a drop-in replacement for this one). The basic unit of Perl testing is the ok. For each thing you want to test your program will print out an "ok" or "not ok" to indicate pass or fail. You do this with the C<ok()> function (see below). The only other constraint is you must pre-declare how many tests you plan to run. This is in case something goes horribly wrong during the test and your test program aborts, or skips a test or whatever. You do this like so: use Test::Simple tests => 23; You must have a plan. =over 4 =item B<ok> ok( $foo eq $bar, $name ); ok( $foo eq $bar ); C<ok()> is given an expression (in this case C<$foo eq $bar>). If it's true, the test passed. If it's false, it didn't. That's about it. 
C<ok()> prints out either "ok" or "not ok" along with a test number (it keeps track of that for you). # This produces "ok 1 - Hell not yet frozen over" (or not ok) ok( get_temperature($hell) > 0, 'Hell not yet frozen over' ); If you provide a $name, that will be printed along with the "ok/not ok" to make it easier to find your test when if fails (just search for the name). It also makes it easier for the next guy to understand what your test is for. It's highly recommended you use test names. All tests are run in scalar context. So this: ok( @stuff, 'I have some stuff' ); will do what you mean (fail if stuff is empty) =cut sub ok ($;$) { ## no critic (Subroutines::ProhibitSubroutinePrototypes) return $CLASS->builder->ok(@_); } =back Test::Simple will start by printing number of tests run in the form "1..M" (so "1..5" means you're going to run 5 tests). This strange format lets L<Test::Harness> know how many tests you plan on running in case something goes horribly wrong. If all your tests passed, Test::Simple will exit with zero (which is normal). If anything failed it will exit with how many failed. If you run less (or more) tests than you planned, the missing (or extras) will be considered failures. If no tests were ever run Test::Simple will throw a warning and exit with 255. If the test died, even after having successfully completed all its tests, it will still be considered a failure and will exit with 255. So the exit codes are... 0 all tests successful 255 test died or all passed but wrong # of tests run any other number how many failed (including missing or extras) If you fail more than 254 tests, it will be reported as 254. This module is by no means trying to be a complete testing system. It's just to get you started. Once you're off the ground its recommended you look at L<Test::More>. =head1 EXAMPLE Here's an example of a simple .t file for the fictional Film module. use Test::Simple tests => 5; use Film; # What you're testing. 
my $btaste = Film->new({ Title => 'Bad Taste', Director => 'Peter Jackson', Rating => 'R', NumExplodingSheep => 1 }); ok( defined($btaste) && ref $btaste eq 'Film', 'new() works' ); ok( $btaste->Title eq 'Bad Taste', 'Title() get' ); ok( $btaste->Director eq 'Peter Jackson', 'Director() get' ); ok( $btaste->Rating eq 'R', 'Rating() get' ); ok( $btaste->NumExplodingSheep == 1, 'NumExplodingSheep() get' ); It will produce output like this: 1..5 ok 1 - new() works ok 2 - Title() get ok 3 - Director() get not ok 4 - Rating() get # Failed test 'Rating() get' # in t/film.t at line 14. ok 5 - NumExplodingSheep() get # Looks like you failed 1 tests of 5 Indicating the Film::Rating() method is broken. =head1 CAVEATS Test::Simple will only report a maximum of 254 failures in its exit code. If this is a problem, you probably have a huge test script. Split it into multiple files. (Otherwise blame the Unix folks for using an unsigned short integer as the exit status). Because VMS's exit codes are much, much different than the rest of the universe, and perl does horrible mangling to them that gets in my way, it works like this on VMS. 0 SS$_NORMAL all tests successful 4 SS$_ABORT something went wrong Unfortunately, I can't differentiate any further. =head1 NOTES Test::Simple is B<explicitly> tested all the way back to perl 5.6.0. Test::Simple is thread-safe in perl 5.8.1 and up. =head1 HISTORY This module was conceived while talking with Tony Bowden in his kitchen one night about the problems I was having writing some really complicated feature into the new Testing module. He observed that the main problem is not dealing with these edge cases but that people hate to write tests B<at all>. What was needed was a dead simple module that took all the hard work out of testing and was really, really easy to learn. Paul Johnson simultaneously had this idea (unfortunately, he wasn't in Tony's kitchen). This is it. =head1 SEE ALSO =over 4 =item L<Test::More> More testing functions! 
Once you outgrow Test::Simple, look at L<Test::More>. Test::Simple is 100% forward compatible with L<Test::More> (i.e. you can just use L<Test::More> instead of Test::Simple in your programs and things will still work). =back Look in L<Test::More>'s SEE ALSO for more testing modules. =head1 AUTHORS Idea by Tony Bowden and Paul Johnson, code by Michael G Schwern E<lt>schwern@pobox.comE<gt>, wardrobe by Calvin Klein. =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 COPYRIGHT Copyright 2001-2008 by Michael G Schwern E<lt>schwern@pobox.comE<gt>. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See F<http://www.perl.com/perl/misc/Artistic.html> =cut 1; Tester.pm 0000644 00000043635 15137540314 0006366 0 ustar 00 use strict; package Test::Tester; BEGIN { if (*Test::Builder::new{CODE}) { warn "You should load Test::Tester before Test::Builder (or anything that loads Test::Builder)" } } use Test::Builder; use Test::Tester::CaptureRunner; use Test::Tester::Delegate; require Exporter; use vars qw( @ISA @EXPORT ); our $VERSION = '1.302175'; @EXPORT = qw( run_tests check_tests check_test cmp_results show_space ); @ISA = qw( Exporter ); my $Test = Test::Builder->new; my $Capture = Test::Tester::Capture->new; my $Delegator = Test::Tester::Delegate->new; $Delegator->{Object} = $Test; my $runner = Test::Tester::CaptureRunner->new; my $want_space = $ENV{TESTTESTERSPACE}; sub show_space { $want_space = 1; } my $colour = ''; my $reset = ''; if (my $want_colour = $ENV{TESTTESTERCOLOUR} || $ENV{TESTTESTERCOLOR}) { if (eval { require Term::ANSIColor; 1 }) { eval { require Win32::Console::ANSI } if 'MSWin32' eq $^O; # support color on windows platforms my ($f, $b) = split(",", $want_colour); $colour = Term::ANSIColor::color($f).Term::ANSIColor::color("on_$b"); $reset = Term::ANSIColor::color("reset"); } } sub new_new { return $Delegator; } sub capture { return Test::Tester::Capture->new; } sub 
fh { # experiment with capturing output, I don't like it $runner = Test::Tester::FHRunner->new; return $Test; } sub find_run_tests { my $d = 1; my $found = 0; while ((not $found) and (my ($sub) = (caller($d))[3]) ) { # print "$d: $sub\n"; $found = ($sub eq "Test::Tester::run_tests"); $d++; } # die "Didn't find 'run_tests' in caller stack" unless $found; return $d; } sub run_tests { local($Delegator->{Object}) = $Capture; $runner->run_tests(@_); return ($runner->get_premature, $runner->get_results); } sub check_test { my $test = shift; my $expect = shift; my $name = shift; $name = "" unless defined($name); @_ = ($test, [$expect], $name); goto &check_tests; } sub check_tests { my $test = shift; my $expects = shift; my $name = shift; $name = "" unless defined($name); my ($prem, @results) = eval { run_tests($test, $name) }; $Test->ok(! $@, "Test '$name' completed") || $Test->diag($@); $Test->ok(! length($prem), "Test '$name' no premature diagnostication") || $Test->diag("Before any testing anything, your tests said\n$prem"); local $Test::Builder::Level = $Test::Builder::Level + 1; cmp_results(\@results, $expects, $name); return ($prem, @results); } sub cmp_field { my ($result, $expect, $field, $desc) = @_; if (defined $expect->{$field}) { $Test->is_eq($result->{$field}, $expect->{$field}, "$desc compare $field"); } } sub cmp_result { my ($result, $expect, $name) = @_; my $sub_name = $result->{name}; $sub_name = "" unless defined($name); my $desc = "subtest '$sub_name' of '$name'"; { local $Test::Builder::Level = $Test::Builder::Level + 1; cmp_field($result, $expect, "ok", $desc); cmp_field($result, $expect, "actual_ok", $desc); cmp_field($result, $expect, "type", $desc); cmp_field($result, $expect, "reason", $desc); cmp_field($result, $expect, "name", $desc); } # if we got no depth then default to 1 my $depth = 1; if (exists $expect->{depth}) { $depth = $expect->{depth}; } # if depth was explicitly undef then don't test it if (defined $depth) { 
$Test->is_eq($result->{depth}, $depth, "checking depth") || $Test->diag('You need to change $Test::Builder::Level'); } if (defined(my $exp = $expect->{diag})) { my $got = ''; if (ref $exp eq 'Regexp') { if (not $Test->like($result->{diag}, $exp, "subtest '$sub_name' of '$name' compare diag")) { $got = $result->{diag}; } } else { # if there actually is some diag then put a \n on the end if it's not # there already $exp .= "\n" if (length($exp) and $exp !~ /\n$/); if (not $Test->ok($result->{diag} eq $exp, "subtest '$sub_name' of '$name' compare diag")) { $got = $result->{diag}; } } if ($got) { my $glen = length($got); my $elen = length($exp); for ($got, $exp) { my @lines = split("\n", $_); $_ = join("\n", map { if ($want_space) { $_ = $colour.escape($_).$reset; } else { "'$colour$_$reset'" } } @lines); } $Test->diag(<<EOM); Got diag ($glen bytes): $got Expected diag ($elen bytes): $exp EOM } } } sub escape { my $str = shift; my $res = ''; for my $char (split("", $str)) { my $c = ord($char); if(($c>32 and $c<125) or $c == 10) { $res .= $char; } else { $res .= sprintf('\x{%x}', $c) } } return $res; } sub cmp_results { my ($results, $expects, $name) = @_; $Test->is_num(scalar @$results, scalar @$expects, "Test '$name' result count"); for (my $i = 0; $i < @$expects; $i++) { my $expect = $expects->[$i]; my $result = $results->[$i]; local $Test::Builder::Level = $Test::Builder::Level + 1; cmp_result($result, $expect, $name); } } ######## nicked from Test::More sub plan { my(@plan) = @_; my $caller = caller; $Test->exported_to($caller); my @imports = (); foreach my $idx (0..$#plan) { if( $plan[$idx] eq 'import' ) { my($tag, $imports) = splice @plan, $idx, 2; @imports = @$imports; last; } } $Test->plan(@plan); __PACKAGE__->_export_to_level(1, __PACKAGE__, @imports); } sub import { my($class) = shift; { no warnings 'redefine'; *Test::Builder::new = \&new_new; } goto &plan; } sub _export_to_level { my $pkg = shift; my $level = shift; (undef) = shift; # redundant arg my 
$callpkg = caller($level); $pkg->export($callpkg, @_); } ############ 1; __END__ =head1 NAME Test::Tester - Ease testing test modules built with Test::Builder =head1 SYNOPSIS use Test::Tester tests => 6; use Test::MyStyle; check_test( sub { is_mystyle_eq("this", "that", "not eq"); }, { ok => 0, # expect this to fail name => "not eq", diag => "Expected: 'this'\nGot: 'that'", } ); or use Test::Tester tests => 6; use Test::MyStyle; check_test( sub { is_mystyle_qr("this", "that", "not matching"); }, { ok => 0, # expect this to fail name => "not matching", diag => qr/Expected: 'this'\s+Got: 'that'/, } ); or use Test::Tester; use Test::More tests => 3; use Test::MyStyle; my ($premature, @results) = run_tests( sub { is_database_alive("dbname"); } ); # now use Test::More::like to check the diagnostic output like($results[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag"); =head1 DESCRIPTION If you have written a test module based on Test::Builder then Test::Tester allows you to test it with the minimum of effort. =head1 HOW TO USE (THE EASY WAY) From version 0.08 Test::Tester no longer requires you to included anything special in your test modules. All you need to do is use Test::Tester; in your test script B<before> any other Test::Builder based modules and away you go. Other modules based on Test::Builder can be used to help with the testing. In fact you can even use functions from your module to test other functions from the same module (while this is possible it is probably not a good idea, if your module has bugs, then using it to test itself may give the wrong answers). The easiest way to test is to do something like check_test( sub { is_mystyle_eq("this", "that", "not eq") }, { ok => 0, # we expect the test to fail name => "not eq", diag => "Expected: 'this'\nGot: 'that'", } ); this will execute the is_mystyle_eq test, capturing its results and checking that they are what was expected. 
You may need to examine the test results in a more flexible way, for example, the diagnostic output may be quite long or complex or it may involve something that you cannot predict in advance like a timestamp. In this case you can get direct access to the test results: my ($premature, @results) = run_tests( sub { is_database_alive("dbname"); } ); like($result[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag"); or check_test( sub { is_mystyle_qr("this", "that", "not matching") }, { ok => 0, # we expect the test to fail name => "not matching", diag => qr/Expected: 'this'\s+Got: 'that'/, } ); We cannot predict how long the database ping will take so we use Test::More's like() test to check that the diagnostic string is of the right form. =head1 HOW TO USE (THE HARD WAY) I<This is here for backwards compatibility only> Make your module use the Test::Tester::Capture object instead of the Test::Builder one. How to do this depends on your module but assuming that your module holds the Test::Builder object in $Test and that all your test routines access it through $Test then providing a function something like this sub set_builder { $Test = shift; } should allow your test scripts to do Test::YourModule::set_builder(Test::Tester->capture); and after that any tests inside your module will captured. =head1 TEST RESULTS The result of each test is captured in a hash. These hashes are the same as the hashes returned by Test::Builder->details but with a couple of extra fields. These fields are documented in L<Test::Builder> in the details() function =over 2 =item ok Did the test pass? =item actual_ok Did the test really pass? That is, did the pass come from Test::Builder->ok() or did it pass because it was a TODO test? =item name The name supplied for the test. =item type What kind of test? Possibilities include, skip, todo etc. See L<Test::Builder> for more details. =item reason The reason for the skip, todo etc. See L<Test::Builder> for more details. 
=back These fields are exclusive to Test::Tester. =over 2 =item diag Any diagnostics that were output for the test. This only includes diagnostics output B<after> the test result is declared. Note that Test::Builder ensures that any diagnostics end in a \n and it in earlier versions of Test::Tester it was essential that you have the final \n in your expected diagnostics. From version 0.10 onward, Test::Tester will add the \n if you forgot it. It will not add a \n if you are expecting no diagnostics. See below for help tracking down hard to find space and tab related problems. =item depth This allows you to check that your test module is setting the correct value for $Test::Builder::Level and thus giving the correct file and line number when a test fails. It is calculated by looking at caller() and $Test::Builder::Level. It should count how many subroutines there are before jumping into the function you are testing. So for example in run_tests( sub { my_test_function("a", "b") } ); the depth should be 1 and in sub deeper { my_test_function("a", "b") } run_tests(sub { deeper() }); depth should be 2, that is 1 for the sub {} and one for deeper(). This might seem a little complex but if your tests look like the simple examples in this doc then you don't need to worry as the depth will always be 1 and that's what Test::Tester expects by default. B<Note>: if you do not specify a value for depth in check_test() then it automatically compares it against 1, if you really want to skip the depth test then pass in undef. B<Note>: depth will not be correctly calculated for tests that run from a signal handler or an END block or anywhere else that hides the call stack. =back Some of Test::Tester's functions return arrays of these hashes, just like Test::Builder->details. That is, the hash for the first test will be array element 1 (not 0). Element 0 will not be a hash it will be a string which contains any diagnostic output that came before the first test. 
This should usually be empty, if it's not, it means something output diagnostics before any test results showed up. =head1 SPACES AND TABS Appearances can be deceptive, especially when it comes to emptiness. If you are scratching your head trying to work out why Test::Tester is saying that your diagnostics are wrong when they look perfectly right then the answer is probably whitespace. From version 0.10 on, Test::Tester surrounds the expected and got diag values with single quotes to make it easier to spot trailing whitespace. So in this example # Got diag (5 bytes): # 'abcd ' # Expected diag (4 bytes): # 'abcd' it is quite clear that there is a space at the end of the first string. Another way to solve this problem is to use colour and inverse video on an ANSI terminal, see below COLOUR below if you want this. Unfortunately this is sometimes not enough, neither colour nor quotes will help you with problems involving tabs, other non-printing characters and certain kinds of problems inherent in Unicode. To deal with this, you can switch Test::Tester into a mode whereby all "tricky" characters are shown as \{xx}. Tricky characters are those with ASCII code less than 33 or higher than 126. This makes the output more difficult to read but much easier to find subtle differences between strings. To turn on this mode either call C<show_space()> in your test script or set the C<TESTTESTERSPACE> environment variable to be a true value. The example above would then look like # Got diag (5 bytes): # abcd\x{20} # Expected diag (4 bytes): # abcd =head1 COLOUR If you prefer to use colour as a means of finding tricky whitespace characters then you can set the C<TESTTESTCOLOUR> environment variable to a comma separated pair of colours, the first for the foreground, the second for the background. For example "white,red" will print white text on a red background. This requires the Term::ANSIColor module. 
You can specify any colour that would be acceptable to the Term::ANSIColor::color function. If you spell colour differently, that's no problem. The C<TESTTESTERCOLOR> variable also works (if both are set then the British spelling wins out). =head1 EXPORTED FUNCTIONS =head3 ($premature, @results) = run_tests(\&test_sub) \&test_sub is a reference to a subroutine. run_tests runs the subroutine in $test_sub and captures the results of any tests inside it. You can run more than 1 test inside this subroutine if you like. $premature is a string containing any diagnostic output from before the first test. @results is an array of test result hashes. =head3 cmp_result(\%result, \%expect, $name) \%result is a ref to a test result hash. \%expect is a ref to a hash of expected values for the test result. cmp_result compares the result with the expected values. If any differences are found it outputs diagnostics. You may leave out any field from the expected result and cmp_result will not do the comparison of that field. =head3 cmp_results(\@results, \@expects, $name) \@results is a ref to an array of test results. \@expects is a ref to an array of hash refs. cmp_results checks that the results match the expected results and if any differences are found it outputs diagnostics. It first checks that the number of elements in \@results and \@expects is the same. Then it goes through each result checking it against the expected result as in cmp_result() above. =head3 ($premature, @results) = check_tests(\&test_sub, \@expects, $name) \&test_sub is a reference to a subroutine. \@expect is a ref to an array of hash refs which are expected test results. check_tests combines run_tests and cmp_tests into a single call. It also checks if the tests died at any stage. It returns the same values as run_tests, so you can further examine the test results if you need to. =head3 ($premature, @results) = check_test(\&test_sub, \%expect, $name) \&test_sub is a reference to a subroutine. 
\%expect is a ref to an hash of expected values for the test result. check_test is a wrapper around check_tests. It combines run_tests and cmp_tests into a single call, checking if the test died. It assumes that only a single test is run inside \&test_sub and include a test to make sure this is true. It returns the same values as run_tests, so you can further examine the test results if you need to. =head3 show_space() Turn on the escaping of characters as described in the SPACES AND TABS section. =head1 HOW IT WORKS Normally, a test module (let's call it Test:MyStyle) calls Test::Builder->new to get the Test::Builder object. Test::MyStyle calls methods on this object to record information about test results. When Test::Tester is loaded, it replaces Test::Builder's new() method with one which returns a Test::Tester::Delegate object. Most of the time this object behaves as the real Test::Builder object. Any methods that are called are delegated to the real Test::Builder object so everything works perfectly. However once we go into test mode, the method calls are no longer passed to the real Test::Builder object, instead they go to the Test::Tester::Capture object. This object seems exactly like the real Test::Builder object, except, instead of outputting test results and diagnostics, it just records all the information for later analysis. =head1 CAVEATS Support for calling Test::Builder->note is minimal. It's implemented as an empty stub, so modules that use it will not crash but the calls are not recorded for testing purposes like the others. Patches welcome. =head1 SEE ALSO L<Test::Builder> the source of testing goodness. L<Test::Builder::Tester> for an alternative approach to the problem tackled by Test::Tester - captures the strings output by Test::Builder. This means you cannot get separate access to the individual pieces of information and you must predict B<exactly> what your test will output. 
=head1 AUTHOR This module is copyright 2005 Fergal Daly <fergal@esatclear.ie>, some parts are based on other people's work. Plan handling lifted from Test::More. written by Michael G Schwern <schwern@pobox.com>. Test::Tester::Capture is a cut down and hacked up version of Test::Builder. Test::Builder was written by chromatic <chromatic@wgz.org> and Michael G Schwern <schwern@pobox.com>. =head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut use/ok.pm 0000644 00000002520 15137540314 0006311 0 ustar 00 package Test::use::ok; use 5.005; our $VERSION = '1.302175'; __END__ =head1 NAME Test::use::ok - Alternative to Test::More::use_ok =head1 SYNOPSIS use ok 'Some::Module'; =head1 DESCRIPTION According to the B<Test::More> documentation, it is recommended to run C<use_ok()> inside a C<BEGIN> block, so functions are exported at compile-time and prototypes are properly honored. That is, instead of writing this: use_ok( 'Some::Module' ); use_ok( 'Other::Module' ); One should write this: BEGIN { use_ok( 'Some::Module' ); } BEGIN { use_ok( 'Other::Module' ); } However, people often either forget to add C<BEGIN>, or mistakenly group C<use_ok> with other tests in a single C<BEGIN> block, which can create subtle differences in execution order. With this module, simply change all C<use_ok> in test scripts to C<use ok>, and they will be executed at C<BEGIN> time. The explicit space after C<use> makes it clear that this is a single compile-time action. =head1 SEE ALSO L<Test::More> =head1 MAINTAINER =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =encoding utf8 =head1 CC0 1.0 Universal To the extent possible under law, 唐鳳 has waived all copyright and related or neighboring rights to L<Test-use-ok>. This work is published from Taiwan. 
L<http://creativecommons.org/publicdomain/zero/1.0> =cut Harness.pm 0000644 00000040411 15137540314 0006510 0 ustar 00 package Test::Harness; use 5.006; use strict; use warnings; use constant IS_WIN32 => ( $^O =~ /^(MS)?Win32$/ ); use constant IS_VMS => ( $^O eq 'VMS' ); use TAP::Harness (); use TAP::Parser::Aggregator (); use TAP::Parser::Source (); use TAP::Parser::SourceHandler::Perl (); use Text::ParseWords qw(shellwords); use Config; use base 'Exporter'; # $ML $Last_ML_Print BEGIN { eval q{use Time::HiRes 'time'}; our $has_time_hires = !$@; } =head1 NAME Test::Harness - Run Perl standard test scripts with statistics =head1 VERSION Version 3.42 =cut our $VERSION = '3.42'; # Backwards compatibility for exportable variable names. *verbose = *Verbose; *switches = *Switches; *debug = *Debug; $ENV{HARNESS_ACTIVE} = 1; $ENV{HARNESS_VERSION} = $VERSION; END { # For VMS. delete $ENV{HARNESS_ACTIVE}; delete $ENV{HARNESS_VERSION}; } our @EXPORT = qw(&runtests); our @EXPORT_OK = qw(&execute_tests $verbose $switches); our $Verbose = $ENV{HARNESS_VERBOSE} || 0; our $Debug = $ENV{HARNESS_DEBUG} || 0; our $Switches = '-w'; our $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80; $Columns--; # Some shells have trouble with a full line of text. our $Timer = $ENV{HARNESS_TIMER} || 0; our $Color = $ENV{HARNESS_COLOR} || 0; our $IgnoreExit = $ENV{HARNESS_IGNORE_EXIT} || 0; =head1 SYNOPSIS use Test::Harness; runtests(@test_files); =head1 DESCRIPTION Although, for historical reasons, the L<Test::Harness> distribution takes its name from this module it now exists only to provide L<TAP::Harness> with an interface that is somewhat backwards compatible with L<Test::Harness> 2.xx. If you're writing new code consider using L<TAP::Harness> directly instead. Emulation is provided for C<runtests> and C<execute_tests> but the pluggable 'Straps' interface that previous versions of L<Test::Harness> supported is not reproduced here. 
Straps is now available as a stand alone module: L<Test::Harness::Straps>. See L<TAP::Parser>, L<TAP::Harness> for the main documentation for this distribution. =head1 FUNCTIONS The following functions are available. =head2 runtests( @test_files ) This runs all the given I<@test_files> and divines whether they passed or failed based on their output to STDOUT (details above). It prints out each individual test which failed along with a summary report and a how long it all took. It returns true if everything was ok. Otherwise it will C<die()> with one of the messages in the DIAGNOSTICS section. =cut sub _has_taint { my $test = shift; return TAP::Parser::SourceHandler::Perl->get_taint( TAP::Parser::Source->shebang($test) ); } sub _aggregate { my ( $harness, $aggregate, @tests ) = @_; # Don't propagate to our children local $ENV{HARNESS_OPTIONS}; _apply_extra_INC($harness); _aggregate_tests( $harness, $aggregate, @tests ); } # Make sure the child sees all the extra junk in @INC sub _apply_extra_INC { my $harness = shift; $harness->callback( parser_args => sub { my ( $args, $test ) = @_; push @{ $args->{switches} }, map {"-I$_"} _filtered_inc(); } ); } sub _aggregate_tests { my ( $harness, $aggregate, @tests ) = @_; $aggregate->start(); $harness->aggregate_tests( $aggregate, @tests ); $aggregate->stop(); } sub runtests { my @tests = @_; # shield against -l local ( $\, $, ); my $harness = _new_harness(); my $aggregate = TAP::Parser::Aggregator->new(); local $ENV{PERL_USE_UNSAFE_INC} = 1 if not exists $ENV{PERL_USE_UNSAFE_INC}; _aggregate( $harness, $aggregate, @tests ); $harness->formatter->summary($aggregate); my $total = $aggregate->total; my $passed = $aggregate->passed; my $failed = $aggregate->failed; my @parsers = $aggregate->parsers; my $num_bad = 0; for my $parser (@parsers) { $num_bad++ if $parser->has_problems; } die(sprintf( "Failed %d/%d test programs. 
%d/%d subtests failed.\n", $num_bad, scalar @parsers, $failed, $total ) ) if $num_bad; return $total && $total == $passed; } sub _canon { my @list = sort { $a <=> $b } @_; my @ranges = (); my $count = scalar @list; my $pos = 0; while ( $pos < $count ) { my $end = $pos + 1; $end++ while $end < $count && $list[$end] <= $list[ $end - 1 ] + 1; push @ranges, ( $end == $pos + 1 ) ? $list[$pos] : join( '-', $list[$pos], $list[ $end - 1 ] ); $pos = $end; } return join( ' ', @ranges ); } sub _new_harness { my $sub_args = shift || {}; my ( @lib, @switches ); my @opt = map { shellwords($_) } grep { defined } $Switches, $ENV{HARNESS_PERL_SWITCHES}; while ( my $opt = shift @opt ) { if ( $opt =~ /^ -I (.*) $ /x ) { push @lib, length($1) ? $1 : shift @opt; } else { push @switches, $opt; } } # Do things the old way on VMS... push @lib, _filtered_inc() if IS_VMS; # If $Verbose isn't numeric default to 1. This helps core. my $verbosity = ( $Verbose ? ( $Verbose !~ /\d/ ) ? 1 : $Verbose : 0 ); my $args = { timer => $Timer, directives => our $Directives, lib => \@lib, switches => \@switches, color => $Color, verbosity => $verbosity, ignore_exit => $IgnoreExit, }; $args->{stdout} = $sub_args->{out} if exists $sub_args->{out}; my $class = $ENV{HARNESS_SUBCLASS} || 'TAP::Harness'; if ( defined( my $env_opt = $ENV{HARNESS_OPTIONS} ) ) { for my $opt ( split /:/, $env_opt ) { if ( $opt =~ /^j(\d*)$/ ) { $args->{jobs} = $1 || 9; } elsif ( $opt eq 'c' ) { $args->{color} = 1; } elsif ( $opt =~ m/^f(.*)$/ ) { my $fmt = $1; $fmt =~ s/-/::/g; $args->{formatter_class} = $fmt; } elsif ( $opt =~ m/^a(.*)$/ ) { my $archive = $1; $class = "TAP::Harness::Archive"; $args->{archive} = $archive; } else { die "Unknown HARNESS_OPTIONS item: $opt\n"; } } } return TAP::Harness->_construct( $class, $args ); } # Get the parts of @INC which are changed from the stock list AND # preserve reordering of stock directories. 
sub _filtered_inc { my @inc = grep { !ref } @INC; #28567 if (IS_VMS) { # VMS has a 255-byte limit on the length of %ENV entries, so # toss the ones that involve perl_root, the install location @inc = grep !/perl_root/i, @inc; } elsif (IS_WIN32) { # Lose any trailing backslashes in the Win32 paths s/[\\\/]+$// for @inc; } my @default_inc = _default_inc(); my @new_inc; my %seen; for my $dir (@inc) { next if $seen{$dir}++; if ( $dir eq ( $default_inc[0] || '' ) ) { shift @default_inc; } else { push @new_inc, $dir; } shift @default_inc while @default_inc and $seen{ $default_inc[0] }; } return @new_inc; } { # Cache this to avoid repeatedly shelling out to Perl. my @inc; sub _default_inc { return @inc if @inc; local $ENV{PERL5LIB}; local $ENV{PERLLIB}; my $perl = $ENV{HARNESS_PERL} || $^X; # Avoid using -l for the benefit of Perl 6 chomp( @inc = `"$perl" -e "print join qq[\\n], \@INC, q[]"` ); return @inc; } } sub _check_sequence { my @list = @_; my $prev; while ( my $next = shift @list ) { return if defined $prev && $next <= $prev; $prev = $next; } return 1; } sub execute_tests { my %args = @_; my $harness = _new_harness( \%args ); my $aggregate = TAP::Parser::Aggregator->new(); my %tot = ( bonus => 0, max => 0, ok => 0, bad => 0, good => 0, files => 0, tests => 0, sub_skipped => 0, todo => 0, skipped => 0, bench => undef, ); # Install a callback so we get to see any plans the # harness executes. $harness->callback( made_parser => sub { my $parser = shift; $parser->callback( plan => sub { my $plan = shift; if ( $plan->directive eq 'SKIP' ) { $tot{skipped}++; } } ); } ); local $ENV{PERL_USE_UNSAFE_INC} = 1 if not exists $ENV{PERL_USE_UNSAFE_INC}; _aggregate( $harness, $aggregate, @{ $args{tests} } ); $tot{bench} = $aggregate->elapsed; my @tests = $aggregate->descriptions; # TODO: Work out the circumstances under which the files # and tests totals can differ. 
$tot{files} = $tot{tests} = scalar @tests; my %failedtests = (); my %todo_passed = (); for my $test (@tests) { my ($parser) = $aggregate->parsers($test); my @failed = $parser->failed; my $wstat = $parser->wait; my $estat = $parser->exit; my $planned = $parser->tests_planned; my @errors = $parser->parse_errors; my $passed = $parser->passed; my $actual_passed = $parser->actual_passed; my $ok_seq = _check_sequence( $parser->actual_passed ); # Duplicate exit, wait status semantics of old version $estat ||= '' unless $wstat; $wstat ||= ''; $tot{max} += ( $planned || 0 ); $tot{bonus} += $parser->todo_passed; $tot{ok} += $passed > $actual_passed ? $passed : $actual_passed; $tot{sub_skipped} += $parser->skipped; $tot{todo} += $parser->todo; if ( @failed || $estat || @errors ) { $tot{bad}++; my $huh_planned = $planned ? undef : '??'; my $huh_errors = $ok_seq ? undef : '??'; $failedtests{$test} = { 'canon' => $huh_planned || $huh_errors || _canon(@failed) || '??', 'estat' => $estat, 'failed' => $huh_planned || $huh_errors || scalar @failed, 'max' => $huh_planned || $planned, 'name' => $test, 'wstat' => $wstat }; } else { $tot{good}++; } my @todo = $parser->todo_passed; if (@todo) { $todo_passed{$test} = { 'canon' => _canon(@todo), 'estat' => $estat, 'failed' => scalar @todo, 'max' => scalar $parser->todo, 'name' => $test, 'wstat' => $wstat }; } } return ( \%tot, \%failedtests, \%todo_passed ); } =head2 execute_tests( tests => \@test_files, out => \*FH ) Runs all the given C<@test_files> (just like C<runtests()>) but doesn't generate the final report. During testing, progress information will be written to the currently selected output filehandle (usually C<STDOUT>), or to the filehandle given by the C<out> parameter. The I<out> is optional. Returns a list of two values, C<$total> and C<$failed>, describing the results. C<$total> is a hash ref summary of all the tests run. 
Its keys and values are this: bonus Number of individual todo tests unexpectedly passed max Number of individual tests ran ok Number of individual tests passed sub_skipped Number of individual tests skipped todo Number of individual todo tests files Number of test files ran good Number of test files passed bad Number of test files failed tests Number of test files originally given skipped Number of test files skipped If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've got a successful test. C<$failed> is a hash ref of all the test scripts that failed. Each key is the name of a test script, each value is another hash representing how that script failed. Its keys are these: name Name of the test which failed estat Script's exit value wstat Script's wait status max Number of individual tests failed Number which failed canon List of tests which failed (as string). C<$failed> should be empty if everything passed. =cut 1; __END__ =head1 EXPORT C<&runtests> is exported by C<Test::Harness> by default. C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are exported upon request. =head1 ENVIRONMENT VARIABLES THAT TAP::HARNESS::COMPATIBLE SETS C<Test::Harness> sets these before executing the individual tests. =over 4 =item C<HARNESS_ACTIVE> This is set to a true value. It allows the tests to determine if they are being executed through the harness or by any other means. =item C<HARNESS_VERSION> This is the version of C<Test::Harness>. =back =head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS =over 4 =item C<HARNESS_PERL_SWITCHES> Setting this adds perl command line switches to each test file run. For example, C<HARNESS_PERL_SWITCHES=-T> will turn on taint mode. C<HARNESS_PERL_SWITCHES=-MDevel::Cover> will run C<Devel::Cover> for each test. C<-w> is always set. You can turn this off in the test with C<BEGIN { $^W = 0 }>. =item C<HARNESS_TIMER> Setting this to true will make the harness display the number of milliseconds each test took. 
You can also use F<prove>'s C<--timer> switch. =item C<HARNESS_VERBOSE> If true, C<Test::Harness> will output the verbose results of running its tests. Setting C<$Test::Harness::verbose> will override this, or you can use the C<-v> switch in the F<prove> utility. =item C<HARNESS_OPTIONS> Provide additional options to the harness. Currently supported options are: =over =item C<< j<n> >> Run <n> (default 9) parallel jobs. =item C<< c >> Try to color output. See L<TAP::Formatter::Base/"new">. =item C<< a<file.tgz> >> Will use L<TAP::Harness::Archive> as the harness class, and save the TAP to C<file.tgz> =item C<< fPackage-With-Dashes >> Set the formatter_class of the harness being run. Since the C<HARNESS_OPTIONS> is seperated by C<:>, we use C<-> instead. =back Multiple options may be separated by colons: HARNESS_OPTIONS=j9:c make test =item C<HARNESS_SUBCLASS> Specifies a TAP::Harness subclass to be used in place of TAP::Harness. =item C<HARNESS_SUMMARY_COLOR_SUCCESS> Determines the L<Term::ANSIColor> for the summary in case it is successful. This color defaults to C<'green'>. =item C<HARNESS_SUMMARY_COLOR_FAIL> Determines the L<Term::ANSIColor> for the failure in case it is successful. This color defaults to C<'red'>. =back =head1 Taint Mode Normally when a Perl program is run in taint mode the contents of the C<PERL5LIB> environment variable do not appear in C<@INC>. Because C<PERL5LIB> is often used during testing to add build directories to C<@INC> C<Test::Harness> passes the names of any directories found in C<PERL5LIB> as -I switches. The net effect of this is that C<PERL5LIB> is honoured even in taint mode. =head1 SEE ALSO L<TAP::Harness> =head1 BUGS Please report any bugs or feature requests to C<bug-test-harness at rt.cpan.org>, or through the web interface at L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>. I will be notified, and then you'll automatically be notified of progress on your bug as I make changes. 
=head1 AUTHORS Andy Armstrong C<< <andy@hexten.net> >> L<Test::Harness> 2.64 (maintained by Andy Lester and on which this module is based) has this attribution: Either Tim Bunce or Andreas Koenig, we don't know. What we know for sure is, that it was inspired by Larry Wall's F<TEST> script that came with perl distributions for ages. Numerous anonymous contributors exist. Andreas Koenig held the torch for many years, and then Michael G Schwern. =head1 LICENCE AND COPYRIGHT Copyright (c) 2007-2011, Andy Armstrong C<< <andy@hexten.net> >>. All rights reserved. This module is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See L<perlartistic>. Builder/Tester/Color.pm 0000644 00000001715 15137540314 0011023 0 ustar 00 package Test::Builder::Tester::Color; use strict; our $VERSION = '1.302175'; require Test::Builder::Tester; =head1 NAME Test::Builder::Tester::Color - turn on colour in Test::Builder::Tester =head1 SYNOPSIS When running a test script perl -MTest::Builder::Tester::Color test.t =head1 DESCRIPTION Importing this module causes the subroutine color in Test::Builder::Tester to be called with a true value causing colour highlighting to be turned on in debug output. The sole purpose of this module is to enable colour highlighting from the command line. =cut sub import { Test::Builder::Tester::color(1); } =head1 AUTHOR Copyright Mark Fowler E<lt>mark@twoshortplanks.comE<gt> 2002. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =head1 BUGS This module will have no effect unless Term::ANSIColor is installed. 
=head1 SEE ALSO L<Test::Builder::Tester>, L<Term::ANSIColor> =cut 1; Builder/TodoDiag.pm 0000644 00000002071 15137540314 0010165 0 ustar 00 package Test::Builder::TodoDiag; use strict; use warnings; our $VERSION = '1.302175'; BEGIN { require Test2::Event::Diag; our @ISA = qw(Test2::Event::Diag) } sub diagnostics { 0 } sub facet_data { my $self = shift; my $out = $self->SUPER::facet_data(); $out->{info}->[0]->{debug} = 0; return $out; } 1; __END__ =pod =encoding UTF-8 =head1 NAME Test::Builder::TodoDiag - Test::Builder subclass of Test2::Event::Diag =head1 DESCRIPTION This is used to encapsulate diag messages created inside TODO. =head1 SYNOPSIS You do not need to use this directly. =head1 SOURCE The source code repository for Test2 can be found at F<http://github.com/Test-More/test-more/>. =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 AUTHORS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 COPYRIGHT Copyright 2019 Chad Granum E<lt>exodist@cpan.orgE<gt>. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See F<http://dev.perl.org/licenses/> =cut Builder/Tester.pm 0000644 00000043163 15137540314 0007750 0 ustar 00 package Test::Builder::Tester; use strict; our $VERSION = '1.302175'; use Test::Builder; use Symbol; use Carp; =head1 NAME Test::Builder::Tester - test testsuites that have been built with Test::Builder =head1 SYNOPSIS use Test::Builder::Tester tests => 1; use Test::More; test_out("not ok 1 - foo"); test_fail(+1); fail("foo"); test_test("fail works"); =head1 DESCRIPTION A module that helps you test testing modules that are built with L<Test::Builder>. The testing system is designed to be used by performing a three step process for each test you wish to test. This process starts with using C<test_out> and C<test_err> in advance to declare what the testsuite you are testing will output with L<Test::Builder> to stdout and stderr. 
You then can run the test(s) from your test suite that call L<Test::Builder>. At this point the output of L<Test::Builder> is safely captured by L<Test::Builder::Tester> rather than being interpreted as real test output. The final stage is to call C<test_test> that will simply compare what you predeclared to what L<Test::Builder> actually outputted, and report the results back with a "ok" or "not ok" (with debugging) to the normal output. =cut #### # set up testing #### my $t = Test::Builder->new; ### # make us an exporter ### use Exporter; our @ISA = qw(Exporter); our @EXPORT = qw(test_out test_err test_fail test_diag test_test line_num); sub import { my $class = shift; my(@plan) = @_; my $caller = caller; $t->exported_to($caller); $t->plan(@plan); my @imports = (); foreach my $idx ( 0 .. $#plan ) { if( $plan[$idx] eq 'import' ) { @imports = @{ $plan[ $idx + 1 ] }; last; } } __PACKAGE__->export_to_level( 1, __PACKAGE__, @imports ); } ### # set up file handles ### # create some private file handles my $output_handle = gensym; my $error_handle = gensym; # and tie them to this package my $out = tie *$output_handle, "Test::Builder::Tester::Tie", "STDOUT"; my $err = tie *$error_handle, "Test::Builder::Tester::Tie", "STDERR"; #### # exported functions #### # for remembering that we're testing and where we're testing at my $testing = 0; my $testing_num; my $original_is_passing; # remembering where the file handles were originally connected my $original_output_handle; my $original_failure_handle; my $original_todo_handle; my $original_formatter; my $original_harness_env; # function that starts testing and redirects the filehandles for now sub _start_testing { # Hack for things that conditioned on Test-Stream being loaded $INC{'Test/Stream.pm'} ||= 'fake' if $INC{'Test/Moose/More.pm'}; # even if we're running under Test::Harness pretend we're not # for now. 
This needed so Test::Builder doesn't add extra spaces $original_harness_env = $ENV{HARNESS_ACTIVE} || 0; $ENV{HARNESS_ACTIVE} = 0; my $hub = $t->{Hub} || ($t->{Stack} ? $t->{Stack}->top : Test2::API::test2_stack->top); $original_formatter = $hub->format; unless ($original_formatter && $original_formatter->isa('Test::Builder::Formatter')) { my $fmt = Test::Builder::Formatter->new; $hub->format($fmt); } # remember what the handles were set to $original_output_handle = $t->output(); $original_failure_handle = $t->failure_output(); $original_todo_handle = $t->todo_output(); # switch out to our own handles $t->output($output_handle); $t->failure_output($error_handle); $t->todo_output($output_handle); # clear the expected list $out->reset(); $err->reset(); # remember that we're testing $testing = 1; $testing_num = $t->current_test; $t->current_test(0); $original_is_passing = $t->is_passing; $t->is_passing(1); # look, we shouldn't do the ending stuff $t->no_ending(1); } =head2 Functions These are the six methods that are exported as default. =over 4 =item test_out =item test_err Procedures for predeclaring the output that your test suite is expected to produce until C<test_test> is called. These procedures automatically assume that each line terminates with "\n". So test_out("ok 1","ok 2"); is the same as test_out("ok 1\nok 2"); which is even the same as test_out("ok 1"); test_out("ok 2"); Once C<test_out> or C<test_err> (or C<test_fail> or C<test_diag>) have been called, all further output from L<Test::Builder> will be captured by L<Test::Builder::Tester>. This means that you will not be able perform further tests to the normal output in the normal way until you call C<test_test> (well, unless you manually meddle with the output filehandles) =cut sub test_out { # do we need to do any setup? _start_testing() unless $testing; $out->expect(@_); } sub test_err { # do we need to do any setup? 
_start_testing() unless $testing; $err->expect(@_); } =item test_fail Because the standard failure message that L<Test::Builder> produces whenever a test fails will be a common occurrence in your test error output, and because it has changed between Test::Builder versions, rather than forcing you to call C<test_err> with the string all the time like so test_err("# Failed test ($0 at line ".line_num(+1).")"); C<test_fail> exists as a convenience function that can be called instead. It takes one argument, the offset from the current line that the line that causes the fail is on. test_fail(+1); This means that the example in the synopsis could be rewritten more simply as: test_out("not ok 1 - foo"); test_fail(+1); fail("foo"); test_test("fail works"); =cut sub test_fail { # do we need to do any setup? _start_testing() unless $testing; # work out what line we should be on my( $package, $filename, $line ) = caller; $line = $line + ( shift() || 0 ); # prevent warnings # expect that on stderr $err->expect("# Failed test ($filename at line $line)"); } =item test_diag As most of the remaining expected output to the error stream will be created by L<Test::Builder>'s C<diag> function, L<Test::Builder::Tester> provides a convenience function C<test_diag> that you can use instead of C<test_err>. The C<test_diag> function prepends comment hashes and spacing to the start and newlines to the end of the expected output passed to it and adds it to the list of expected error output. So, instead of writing test_err("# Couldn't open file"); you can write test_diag("Couldn't open file"); Remember that L<Test::Builder>'s diag function will not add newlines to the end of output and test_diag will. So to check Test::Builder->new->diag("foo\n","bar\n"); You would do test_diag("foo","bar") without the newlines. =cut sub test_diag { # do we need to do any setup? 
_start_testing() unless $testing; # expect the same thing, but prepended with "# " local $_; $err->expect( map { "# $_" } @_ ); } =item test_test Actually performs the output check testing the tests, comparing the data (with C<eq>) that we have captured from L<Test::Builder> against what was declared with C<test_out> and C<test_err>. This takes name/value pairs that effect how the test is run. =over =item title (synonym 'name', 'label') The name of the test that will be displayed after the C<ok> or C<not ok>. =item skip_out Setting this to a true value will cause the test to ignore if the output sent by the test to the output stream does not match that declared with C<test_out>. =item skip_err Setting this to a true value will cause the test to ignore if the output sent by the test to the error stream does not match that declared with C<test_err>. =back As a convenience, if only one argument is passed then this argument is assumed to be the name of the test (as in the above examples.) Once C<test_test> has been run test output will be redirected back to the original filehandles that L<Test::Builder> was connected to (probably STDOUT and STDERR,) meaning any further tests you run will function normally and cause success/errors for L<Test::Harness>. =cut sub test_test { # END the hack delete $INC{'Test/Stream.pm'} if $INC{'Test/Stream.pm'} && $INC{'Test/Stream.pm'} eq 'fake'; # decode the arguments as described in the pod my $mess; my %args; if( @_ == 1 ) { $mess = shift } else { %args = @_; $mess = $args{name} if exists( $args{name} ); $mess = $args{title} if exists( $args{title} ); $mess = $args{label} if exists( $args{label} ); } # er, are we testing? croak "Not testing. You must declare output with a test function first." 
unless $testing; my $hub = $t->{Hub} || Test2::API::test2_stack->top; $hub->format($original_formatter); # okay, reconnect the test suite back to the saved handles $t->output($original_output_handle); $t->failure_output($original_failure_handle); $t->todo_output($original_todo_handle); # restore the test no, etc, back to the original point $t->current_test($testing_num); $testing = 0; $t->is_passing($original_is_passing); # re-enable the original setting of the harness $ENV{HARNESS_ACTIVE} = $original_harness_env; # check the output we've stashed unless( $t->ok( ( $args{skip_out} || $out->check ) && ( $args{skip_err} || $err->check ), $mess ) ) { # print out the diagnostic information about why this # test failed local $_; $t->diag( map { "$_\n" } $out->complaint ) unless $args{skip_out} || $out->check; $t->diag( map { "$_\n" } $err->complaint ) unless $args{skip_err} || $err->check; } } =item line_num A utility function that returns the line number that the function was called on. You can pass it an offset which will be added to the result. This is very useful for working out the correct text of diagnostic functions that contain line numbers. Essentially this is the same as the C<__LINE__> macro, but the C<line_num(+3)> idiom is arguably nicer. =cut sub line_num { my( $package, $filename, $line ) = caller; return $line + ( shift() || 0 ); # prevent warnings } =back In addition to the six exported functions there exists one function that can only be accessed with a fully qualified function call. =over 4 =item color When C<test_test> is called and the output that your tests generate does not match that which you declared, C<test_test> will print out debug information showing the two conflicting versions. As this output itself is debug information it can be confusing which part of the output is from C<test_test> and which was the original output from your original tests. 
Also, it may be hard to spot things like extraneous whitespace at the end of lines that may cause your test to fail even though the output looks similar. To assist you C<test_test> can colour the background of the debug information to disambiguate the different types of output. The debug output will have its background coloured green and red. The green part represents the text which is the same between the executed and actual output, the red shows which part differs. The C<color> function determines if colouring should occur or not. Passing it a true or false value will enable or disable colouring respectively, and the function called with no argument will return the current setting. To enable colouring from the command line, you can use the L<Text::Builder::Tester::Color> module like so: perl -Mlib=Text::Builder::Tester::Color test.t Or by including the L<Test::Builder::Tester::Color> module directly in the PERL5LIB. =cut my $color; sub color { $color = shift if @_; $color; } =back =head1 BUGS Test::Builder::Tester does not handle plans well. It has never done anything special with plans. This means that plans from outside Test::Builder::Tester will effect Test::Builder::Tester, worse plans when using Test::Builder::Tester will effect overall testing. At this point there are no plans to fix this bug as people have come to depend on it, and Test::Builder::Tester is now discouraged in favor of C<Test2::API::intercept()>. See L<https://github.com/Test-More/test-more/issues/667> Calls C<< Test::Builder->no_ending >> turning off the ending tests. This is needed as otherwise it will trip out because we've run more tests than we strictly should have and it'll register any failures we had that we were testing for as real failures. The color function doesn't work unless L<Term::ANSIColor> is compatible with your terminal. Additionally, L<Win32::Console::ANSI> must be installed on windows platforms for color output. 
Bugs (and requests for new features) can be reported to the author though GitHub: L<https://github.com/Test-More/test-more/issues> =head1 AUTHOR Copyright Mark Fowler E<lt>mark@twoshortplanks.comE<gt> 2002, 2004. Some code taken from L<Test::More> and L<Test::Catch>, written by Michael G Schwern E<lt>schwern@pobox.comE<gt>. Hence, those parts Copyright Micheal G Schwern 2001. Used and distributed with permission. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 NOTES Thanks to Richard Clamp E<lt>richardc@unixbeard.netE<gt> for letting me use his testing system to try this module out on. =head1 SEE ALSO L<Test::Builder>, L<Test::Builder::Tester::Color>, L<Test::More>. =cut 1; #################################################################### # Helper class that is used to remember expected and received data package Test::Builder::Tester::Tie; ## # add line(s) to be expected sub expect { my $self = shift; my @checks = @_; foreach my $check (@checks) { $check = $self->_account_for_subtest($check); $check = $self->_translate_Failed_check($check); push @{ $self->{wanted} }, ref $check ? $check : "$check\n"; } } sub _account_for_subtest { my( $self, $check ) = @_; my $hub = $t->{Stack}->top; my $nesting = $hub->isa('Test2::Hub::Subtest') ? $hub->nested : 0; return ref($check) ? $check : (' ' x $nesting) . $check; } sub _translate_Failed_check { my( $self, $check ) = @_; if( $check =~ /\A(.*)# (Failed .*test) \((.*?) 
at line (\d+)\)\Z(?!\n)/ ) { $check = "/\Q$1\E#\\s+\Q$2\E.*?\\n?.*?\Qat $3\E line \Q$4\E.*\\n?/"; } return $check; } ## # return true iff the expected data matches the got data sub check { my $self = shift; # turn off warnings as these might be undef local $^W = 0; my @checks = @{ $self->{wanted} }; my $got = $self->{got}; foreach my $check (@checks) { $check = "\Q$check\E" unless( $check =~ s,^/(.*)/$,$1, or ref $check ); return 0 unless $got =~ s/^$check//; } return length $got == 0; } ## # a complaint message about the inputs not matching (to be # used for debugging messages) sub complaint { my $self = shift; my $type = $self->type; my $got = $self->got; my $wanted = join '', @{ $self->wanted }; # are we running in colour mode? if(Test::Builder::Tester::color) { # get color eval { require Term::ANSIColor }; unless($@) { eval { require Win32::Console::ANSI } if 'MSWin32' eq $^O; # support color on windows platforms # colours my $green = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_green"); my $red = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_red"); my $reset = Term::ANSIColor::color("reset"); # work out where the two strings start to differ my $char = 0; $char++ while substr( $got, $char, 1 ) eq substr( $wanted, $char, 1 ); # get the start string and the two end strings my $start = $green . substr( $wanted, 0, $char ); my $gotend = $red . substr( $got, $char ) . $reset; my $wantedend = $red . substr( $wanted, $char ) . $reset; # make the start turn green on and off $start =~ s/\n/$reset\n$green/g; # make the ends turn red on and off $gotend =~ s/\n/$reset\n$red/g; $wantedend =~ s/\n/$reset\n$red/g; # rebuild the strings $got = $start . $gotend; $wanted = $start . $wantedend; } } my @got = split "\n", $got; my @wanted = split "\n", $wanted; $got = ""; $wanted = ""; while (@got || @wanted) { my $g = shift @got || ""; my $w = shift @wanted || ""; if ($g ne $w) { if($g =~ s/(\s+)$/ |> /g) { $g .= ($_ eq ' ' ? 
'_' : '\t') for split '', $1; } if($w =~ s/(\s+)$/ |> /g) { $w .= ($_ eq ' ' ? '_' : '\t') for split '', $1; } $g = "> $g"; $w = "> $w"; } else { $g = " $g"; $w = " $w"; } $got = $got ? "$got\n$g" : $g; $wanted = $wanted ? "$wanted\n$w" : $w; } return "$type is:\n" . "$got\nnot:\n$wanted\nas expected"; } ## # forget all expected and got data sub reset { my $self = shift; %$self = ( type => $self->{type}, got => '', wanted => [], ); } sub got { my $self = shift; return $self->{got}; } sub wanted { my $self = shift; return $self->{wanted}; } sub type { my $self = shift; return $self->{type}; } ### # tie interface ### sub PRINT { my $self = shift; $self->{got} .= join '', @_; } sub TIEHANDLE { my( $class, $type ) = @_; my $self = bless { type => $type }, $class; $self->reset; return $self; } sub READ { } sub READLINE { } sub GETC { } sub FILENO { } 1; Builder/IO/Scalar.pm 0000644 00000032510 15137540314 0010210 0 ustar 00 package Test::Builder::IO::Scalar; =head1 NAME Test::Builder::IO::Scalar - A copy of IO::Scalar for Test::Builder =head1 DESCRIPTION This is a copy of L<IO::Scalar> which ships with L<Test::Builder> to support scalar references as filehandles on Perl 5.6. Newer versions of Perl simply use C<open()>'s built in support. L<Test::Builder> can not have dependencies on other modules without careful consideration, so its simply been copied into the distribution. =head1 COPYRIGHT and LICENSE This file came from the "IO-stringy" Perl5 toolkit. Copyright (c) 1996 by Eryq. All rights reserved. Copyright (c) 1999,2001 by ZeeGee Software Inc. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =cut # This is copied code, I don't care. 
##no critic use Carp; use strict; use vars qw($VERSION @ISA); use IO::Handle; use 5.005; ### The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.114"; ### Inheritance: @ISA = qw(IO::Handle); #============================== =head2 Construction =over 4 =cut #------------------------------ =item new [ARGS...] I<Class method.> Return a new, unattached scalar handle. If any arguments are given, they're sent to open(). =cut sub new { my $proto = shift; my $class = ref($proto) || $proto; my $self = bless \do { local *FH }, $class; tie *$self, $class, $self; $self->open(@_); ### open on anonymous by default $self; } sub DESTROY { shift->close; } #------------------------------ =item open [SCALARREF] I<Instance method.> Open the scalar handle on a new scalar, pointed to by SCALARREF. If no SCALARREF is given, a "private" scalar is created to hold the file data. Returns the self object on success, undefined on error. =cut sub open { my ($self, $sref) = @_; ### Sanity: defined($sref) or do {my $s = ''; $sref = \$s}; (ref($sref) eq "SCALAR") or croak "open() needs a ref to a scalar"; ### Setup: *$self->{Pos} = 0; ### seek position *$self->{SR} = $sref; ### scalar reference $self; } #------------------------------ =item opened I<Instance method.> Is the scalar handle opened on something? =cut sub opened { *{shift()}->{SR}; } #------------------------------ =item close I<Instance method.> Disassociate the scalar handle from its underlying scalar. Done automatically on destroy. =cut sub close { my $self = shift; %{*$self} = (); 1; } =back =cut #============================== =head2 Input and output =over 4 =cut #------------------------------ =item flush I<Instance method.> No-op, provided for OO compatibility. =cut sub flush { "0 but true" } #------------------------------ =item getc I<Instance method.> Return the next character, or undef if none remain. 
=cut sub getc { my $self = shift; ### Return undef right away if at EOF; else, move pos forward: return undef if $self->eof; substr(${*$self->{SR}}, *$self->{Pos}++, 1); } #------------------------------ =item getline I<Instance method.> Return the next line, or undef on end of string. Can safely be called in an array context. Currently, lines are delimited by "\n". =cut sub getline { my $self = shift; ### Return undef right away if at EOF: return undef if $self->eof; ### Get next line: my $sr = *$self->{SR}; my $i = *$self->{Pos}; ### Start matching at this point. ### Minimal impact implementation! ### We do the fast fast thing (no regexps) if using the ### classic input record separator. ### Case 1: $/ is undef: slurp all... if (!defined($/)) { *$self->{Pos} = length $$sr; return substr($$sr, $i); } ### Case 2: $/ is "\n": zoom zoom zoom... elsif ($/ eq "\012") { ### Seek ahead for "\n"... yes, this really is faster than regexps. my $len = length($$sr); for (; $i < $len; ++$i) { last if ord (substr ($$sr, $i, 1)) == 10; } ### Extract the line: my $line; if ($i < $len) { ### We found a "\n": $line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos} + 1); *$self->{Pos} = $i+1; ### Remember where we finished up. } else { ### No "\n"; slurp the remainder: $line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos}); *$self->{Pos} = $len; } return $line; } ### Case 3: $/ is ref to int. Do fixed-size records. ### (Thanks to Dominique Quatravaux.) elsif (ref($/)) { my $len = length($$sr); my $i = ${$/} + 0; my $line = substr ($$sr, *$self->{Pos}, $i); *$self->{Pos} += $i; *$self->{Pos} = $len if (*$self->{Pos} > $len); return $line; } ### Case 4: $/ is either "" (paragraphs) or something weird... ### This is Graham's general-purpose stuff, which might be ### a tad slower than Case 2 for typical data, because ### of the regexps. 
else { pos($$sr) = $i; ### If in paragraph mode, skip leading lines (and update i!): length($/) or (($$sr =~ m/\G\n*/g) and ($i = pos($$sr))); ### If we see the separator in the buffer ahead... if (length($/) ? $$sr =~ m,\Q$/\E,g ### (ordinary sep) TBD: precomp! : $$sr =~ m,\n\n,g ### (a paragraph) ) { *$self->{Pos} = pos $$sr; return substr($$sr, $i, *$self->{Pos}-$i); } ### Else if no separator remains, just slurp the rest: else { *$self->{Pos} = length $$sr; return substr($$sr, $i); } } } #------------------------------ =item getlines I<Instance method.> Get all remaining lines. It will croak() if accidentally called in a scalar context. =cut sub getlines { my $self = shift; wantarray or croak("can't call getlines in scalar context!"); my ($line, @lines); push @lines, $line while (defined($line = $self->getline)); @lines; } #------------------------------ =item print ARGS... I<Instance method.> Print ARGS to the underlying scalar. B<Warning:> this continues to always cause a seek to the end of the string, but if you perform seek()s and tell()s, it is still safer to explicitly seek-to-end before subsequent print()s. =cut sub print { my $self = shift; *$self->{Pos} = length(${*$self->{SR}} .= join('', @_) . (defined($\) ? $\ : "")); 1; } sub _unsafe_print { my $self = shift; my $append = join('', @_) . $\; ${*$self->{SR}} .= $append; *$self->{Pos} += length($append); 1; } sub _old_print { my $self = shift; ${*$self->{SR}} .= join('', @_) . $\; *$self->{Pos} = length(${*$self->{SR}}); 1; } #------------------------------ =item read BUF, NBYTES, [OFFSET] I<Instance method.> Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error. =cut sub read { my $self = $_[0]; my $n = $_[2]; my $off = $_[3] || 0; my $read = substr(${*$self->{SR}}, *$self->{Pos}, $n); $n = length($read); *$self->{Pos} += $n; ($off ? 
substr($_[1], $off) : $_[1]) = $read; return $n; } #------------------------------ =item write BUF, NBYTES, [OFFSET] I<Instance method.> Write some bytes to the scalar. =cut sub write { my $self = $_[0]; my $n = $_[2]; my $off = $_[3] || 0; my $data = substr($_[1], $off, $n); $n = length($data); $self->print($data); return $n; } #------------------------------ =item sysread BUF, LEN, [OFFSET] I<Instance method.> Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error. =cut sub sysread { my $self = shift; $self->read(@_); } #------------------------------ =item syswrite BUF, NBYTES, [OFFSET] I<Instance method.> Write some bytes to the scalar. =cut sub syswrite { my $self = shift; $self->write(@_); } =back =cut #============================== =head2 Seeking/telling and other attributes =over 4 =cut #------------------------------ =item autoflush I<Instance method.> No-op, provided for OO compatibility. =cut sub autoflush {} #------------------------------ =item binmode I<Instance method.> No-op, provided for OO compatibility. =cut sub binmode {} #------------------------------ =item clearerr I<Instance method.> Clear the error and EOF flags. A no-op. =cut sub clearerr { 1 } #------------------------------ =item eof I<Instance method.> Are we at end of file? =cut sub eof { my $self = shift; (*$self->{Pos} >= length(${*$self->{SR}})); } #------------------------------ =item seek OFFSET, WHENCE I<Instance method.> Seek to a given position in the stream. 
=cut sub seek { my ($self, $pos, $whence) = @_; my $eofpos = length(${*$self->{SR}}); ### Seek: if ($whence == 0) { *$self->{Pos} = $pos } ### SEEK_SET elsif ($whence == 1) { *$self->{Pos} += $pos } ### SEEK_CUR elsif ($whence == 2) { *$self->{Pos} = $eofpos + $pos} ### SEEK_END else { croak "bad seek whence ($whence)" } ### Fixup: if (*$self->{Pos} < 0) { *$self->{Pos} = 0 } if (*$self->{Pos} > $eofpos) { *$self->{Pos} = $eofpos } return 1; } #------------------------------ =item sysseek OFFSET, WHENCE I<Instance method.> Identical to C<seek OFFSET, WHENCE>, I<q.v.> =cut sub sysseek { my $self = shift; $self->seek (@_); } #------------------------------ =item tell I<Instance method.> Return the current position in the stream, as a numeric offset. =cut sub tell { *{shift()}->{Pos} } #------------------------------ =item use_RS [YESNO] I<Instance method.> B<Deprecated and ignored.> Obey the current setting of $/, like IO::Handle does? Default is false in 1.x, but cold-welded true in 2.x and later. =cut sub use_RS { my ($self, $yesno) = @_; carp "use_RS is deprecated and ignored; \$/ is always consulted\n"; } #------------------------------ =item setpos POS I<Instance method.> Set the current position, using the opaque value returned by C<getpos()>. =cut sub setpos { shift->seek($_[0],0) } #------------------------------ =item getpos I<Instance method.> Return the current position in the string, as an opaque object. =cut *getpos = \&tell; #------------------------------ =item sref I<Instance method.> Return a reference to the underlying scalar. =cut sub sref { *{shift()}->{SR} } #------------------------------ # Tied handle methods... #------------------------------ # Conventional tiehandle interface: sub TIEHANDLE { ((defined($_[1]) && UNIVERSAL::isa($_[1], __PACKAGE__)) ? $_[1] : shift->new(@_)); } sub GETC { shift->getc(@_) } sub PRINT { shift->print(@_) } sub PRINTF { shift->print(sprintf(shift, @_)) } sub READ { shift->read(@_) } sub READLINE { wantarray ? 
shift->getlines(@_) : shift->getline(@_) } sub WRITE { shift->write(@_); } sub CLOSE { shift->close(@_); } sub SEEK { shift->seek(@_); } sub TELL { shift->tell(@_); } sub EOF { shift->eof(@_); } sub FILENO { -1 } #------------------------------------------------------------ 1; __END__ =back =cut =head1 WARNINGS Perl's TIEHANDLE spec was incomplete prior to 5.005_57; it was missing support for C<seek()>, C<tell()>, and C<eof()>. Attempting to use these functions with an IO::Scalar will not work prior to 5.005_57. IO::Scalar will not have the relevant methods invoked; and even worse, this kind of bug can lie dormant for a while. If you turn warnings on (via C<$^W> or C<perl -w>), and you see something like this... attempt to seek on unopened filehandle ...then you are probably trying to use one of these functions on an IO::Scalar with an old Perl. The remedy is to simply use the OO version; e.g.: $SH->seek(0,0); ### GOOD: will work on any 5.005 seek($SH,0,0); ### WARNING: will only work on 5.005_57 and beyond =head1 VERSION $Id: Scalar.pm,v 1.6 2005/02/10 21:21:53 dfs Exp $ =head1 AUTHORS =head2 Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =head2 Principal author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). =head2 Other contributors The full set of contributors always includes the folks mentioned in L<IO::Stringy/"CHANGE LOG">. But just the same, special thanks to the following individuals for their invaluable contributions (if I've forgotten or misspelled your name, please email me!): I<Andy Glew,> for contributing C<getc()>. I<Brandon Browning,> for suggesting C<opened()>. I<David Richter,> for finding and fixing the bug in C<PRINTF()>. I<Eric L. Brine,> for his offset-using read() and write() implementations. I<Richard Jones,> for his patches to massively improve the performance of C<getline()> and add C<sysread> and C<syswrite>. I<B. K. 
Oxley (binkley),> for stringification and inheritance improvements, and sundry good ideas. I<Doug Wilson,> for the IO::Handle inheritance and automatic tie-ing. =head1 SEE ALSO L<IO::String>, which is quite similar but which was designed more-recently and with an IO::Handle-like interface in mind, so you could mix OO- and native-filehandle usage without using tied(). I<Note:> as of version 2.x, these classes all work like their IO::Handle counterparts, so we have comparable functionality to IO::String. =cut Builder/Module.pm 0000644 00000007757 15137540314 0007740 0 ustar 00 package Test::Builder::Module; use strict; use Test::Builder; require Exporter; our @ISA = qw(Exporter); our $VERSION = '1.302175'; =head1 NAME Test::Builder::Module - Base class for test modules =head1 SYNOPSIS # Emulates Test::Simple package Your::Module; my $CLASS = __PACKAGE__; use parent 'Test::Builder::Module'; @EXPORT = qw(ok); sub ok ($;$) { my $tb = $CLASS->builder; return $tb->ok(@_); } 1; =head1 DESCRIPTION This is a superclass for L<Test::Builder>-based modules. It provides a handful of common functionality and a method of getting at the underlying L<Test::Builder> object. =head2 Importing Test::Builder::Module is a subclass of L<Exporter> which means your module is also a subclass of Exporter. @EXPORT, @EXPORT_OK, etc... all act normally. A few methods are provided to do the C<< use Your::Module tests => 23 >> part for you. =head3 import Test::Builder::Module provides an C<import()> method which acts in the same basic way as L<Test::More>'s, setting the plan and controlling exporting of functions and variables. This allows your module to set the plan independent of L<Test::More>. All arguments passed to C<import()> are passed onto C<< Your::Module->builder->plan() >> with the exception of C<< import =>[qw(things to import)] >>. use Your::Module import => [qw(this that)], tests => 23; says to import the functions C<this()> and C<that()> as well as set the plan to be 23 tests. 
C<import()> also sets the C<exported_to()> attribute of your builder to be the caller of the C<import()> function. Additional behaviors can be added to your C<import()> method by overriding C<import_extra()>. =cut sub import { my($class) = shift; Test2::API::test2_load() unless Test2::API::test2_in_preload(); # Don't run all this when loading ourself. return 1 if $class eq 'Test::Builder::Module'; my $test = $class->builder; my $caller = caller; $test->exported_to($caller); $class->import_extra( \@_ ); my(@imports) = $class->_strip_imports( \@_ ); $test->plan(@_); local $Exporter::ExportLevel = $Exporter::ExportLevel + 1; $class->Exporter::import(@imports); } sub _strip_imports { my $class = shift; my $list = shift; my @imports = (); my @other = (); my $idx = 0; while( $idx <= $#{$list} ) { my $item = $list->[$idx]; if( defined $item and $item eq 'import' ) { push @imports, @{ $list->[ $idx + 1 ] }; $idx++; } else { push @other, $item; } $idx++; } @$list = @other; return @imports; } =head3 import_extra Your::Module->import_extra(\@import_args); C<import_extra()> is called by C<import()>. It provides an opportunity for you to add behaviors to your module based on its import list. Any extra arguments which shouldn't be passed on to C<plan()> should be stripped off by this method. See L<Test::More> for an example of its use. B<NOTE> This mechanism is I<VERY ALPHA AND LIKELY TO CHANGE> as it feels like a bit of an ugly hack in its current form. =cut sub import_extra { } =head2 Builder Test::Builder::Module provides some methods of getting at the underlying Test::Builder object. =head3 builder my $builder = Your::Class->builder; This method returns the L<Test::Builder> object associated with Your::Class. It is not a constructor so you can call it as often as you like. This is the preferred way to get the L<Test::Builder> object. You should I<not> get it via C<< Test::Builder->new >> as was previously recommended. 
The object returned by C<builder()> may change at runtime so you should call C<builder()> inside each function rather than store it in a global. sub ok { my $builder = Your::Class->builder; return $builder->ok(@_); } =cut sub builder { return Test::Builder->new; } =head1 SEE ALSO L<< Test2::Manual::Tooling::TestBuilder >> describes the improved options for writing testing modules provided by L<< Test2 >>. =cut 1; Builder/Formatter.pm 0000644 00000004112 15137540314 0010434 0 ustar 00 package Test::Builder::Formatter; use strict; use warnings; our $VERSION = '1.302175'; BEGIN { require Test2::Formatter::TAP; our @ISA = qw(Test2::Formatter::TAP) } use Test2::Util::HashBase qw/no_header no_diag/; BEGIN { *OUT_STD = Test2::Formatter::TAP->can('OUT_STD'); *OUT_ERR = Test2::Formatter::TAP->can('OUT_ERR'); my $todo = OUT_ERR() + 1; *OUT_TODO = sub() { $todo }; } sub init { my $self = shift; $self->SUPER::init(@_); $self->{+HANDLES}->[OUT_TODO] = $self->{+HANDLES}->[OUT_STD]; } sub plan_tap { my ($self, $f) = @_; return if $self->{+NO_HEADER}; return $self->SUPER::plan_tap($f); } sub debug_tap { my ($self, $f, $num) = @_; return if $self->{+NO_DIAG}; my @out = $self->SUPER::debug_tap($f, $num); $self->redirect(\@out) if @out && ref $f->{about} && defined $f->{about}->{package} && $f->{about}->{package} eq 'Test::Builder::TodoDiag'; return @out; } sub info_tap { my ($self, $f) = @_; return if $self->{+NO_DIAG}; my @out = $self->SUPER::info_tap($f); $self->redirect(\@out) if @out && ref $f->{about} && defined $f->{about}->{package} && $f->{about}->{package} eq 'Test::Builder::TodoDiag'; return @out; } sub redirect { my ($self, $out) = @_; $_->[0] = OUT_TODO for @$out; } sub no_subtest_space { 1 } 1; __END__ =pod =encoding UTF-8 =head1 NAME Test::Builder::Formatter - Test::Builder subclass of Test2::Formatter::TAP =head1 DESCRIPTION This is what takes events and turns them into TAP. 
=head1 SYNOPSIS use Test::Builder; # Loads Test::Builder::Formatter for you =head1 SOURCE The source code repository for Test2 can be found at F<http://github.com/Test-More/test-more/>. =head1 MAINTAINERS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 AUTHORS =over 4 =item Chad Granum E<lt>exodist@cpan.orgE<gt> =back =head1 COPYRIGHT Copyright 2019 Chad Granum E<lt>exodist@cpan.orgE<gt>. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See F<http://dev.perl.org/licenses/> =cut DummyTest.php 0000644 00000000373 15154120510 0007205 0 ustar 00 <?php namespace Psr\Log\Test; /** * This class is internal and does not follow the BC promise. * * Do NOT use this class in any way. * * @internal */ class DummyTest { public function __toString() { return 'DummyTest'; } } TestLogger.php 0000644 00000010657 15154120510 0007337 0 ustar 00 <?php namespace Psr\Log\Test; use Psr\Log\AbstractLogger; /** * Used for testing purposes. * * It records all records and gives you access to them for verification. 
* * @method bool hasEmergency($record) * @method bool hasAlert($record) * @method bool hasCritical($record) * @method bool hasError($record) * @method bool hasWarning($record) * @method bool hasNotice($record) * @method bool hasInfo($record) * @method bool hasDebug($record) * * @method bool hasEmergencyRecords() * @method bool hasAlertRecords() * @method bool hasCriticalRecords() * @method bool hasErrorRecords() * @method bool hasWarningRecords() * @method bool hasNoticeRecords() * @method bool hasInfoRecords() * @method bool hasDebugRecords() * * @method bool hasEmergencyThatContains($message) * @method bool hasAlertThatContains($message) * @method bool hasCriticalThatContains($message) * @method bool hasErrorThatContains($message) * @method bool hasWarningThatContains($message) * @method bool hasNoticeThatContains($message) * @method bool hasInfoThatContains($message) * @method bool hasDebugThatContains($message) * * @method bool hasEmergencyThatMatches($message) * @method bool hasAlertThatMatches($message) * @method bool hasCriticalThatMatches($message) * @method bool hasErrorThatMatches($message) * @method bool hasWarningThatMatches($message) * @method bool hasNoticeThatMatches($message) * @method bool hasInfoThatMatches($message) * @method bool hasDebugThatMatches($message) * * @method bool hasEmergencyThatPasses($message) * @method bool hasAlertThatPasses($message) * @method bool hasCriticalThatPasses($message) * @method bool hasErrorThatPasses($message) * @method bool hasWarningThatPasses($message) * @method bool hasNoticeThatPasses($message) * @method bool hasInfoThatPasses($message) * @method bool hasDebugThatPasses($message) */ class TestLogger extends AbstractLogger { /** * @var array */ public $records = []; public $recordsByLevel = []; /** * @inheritdoc */ public function log($level, $message, array $context = []) { $record = [ 'level' => $level, 'message' => $message, 'context' => $context, ]; $this->recordsByLevel[$record['level']][] = $record; 
$this->records[] = $record; } public function hasRecords($level) { return isset($this->recordsByLevel[$level]); } public function hasRecord($record, $level) { if (is_string($record)) { $record = ['message' => $record]; } return $this->hasRecordThatPasses(function ($rec) use ($record) { if ($rec['message'] !== $record['message']) { return false; } if (isset($record['context']) && $rec['context'] !== $record['context']) { return false; } return true; }, $level); } public function hasRecordThatContains($message, $level) { return $this->hasRecordThatPasses(function ($rec) use ($message) { return strpos($rec['message'], $message) !== false; }, $level); } public function hasRecordThatMatches($regex, $level) { return $this->hasRecordThatPasses(function ($rec) use ($regex) { return preg_match($regex, $rec['message']) > 0; }, $level); } public function hasRecordThatPasses(callable $predicate, $level) { if (!isset($this->recordsByLevel[$level])) { return false; } foreach ($this->recordsByLevel[$level] as $i => $rec) { if (call_user_func($predicate, $rec, $i)) { return true; } } return false; } public function __call($method, $args) { if (preg_match('/(.*)(Debug|Info|Notice|Warning|Error|Critical|Alert|Emergency)(.*)/', $method, $matches) > 0) { $genericMethod = $matches[1] . ('Records' !== $matches[3] ? 'Record' : '') . $matches[3]; $level = strtolower($matches[2]); if (method_exists($this, $genericMethod)) { $args[] = $level; return call_user_func_array([$this, $genericMethod], $args); } } throw new \BadMethodCallException('Call to undefined method ' . get_class($this) . '::' . $method . '()'); } public function reset() { $this->records = []; $this->recordsByLevel = []; } } LoggerInterfaceTest.php 0000644 00000011051 15154120510 0011145 0 ustar 00 <?php namespace Psr\Log\Test; use Psr\Log\LoggerInterface; use Psr\Log\LogLevel; use PHPUnit\Framework\TestCase; /** * Provides a base test class for ensuring compliance with the LoggerInterface. 
* * Implementors can extend the class and implement abstract methods to run this * as part of their test suite. */ abstract class LoggerInterfaceTest extends TestCase { /** * @return LoggerInterface */ abstract public function getLogger(); /** * This must return the log messages in order. * * The simple formatting of the messages is: "<LOG LEVEL> <MESSAGE>". * * Example ->error('Foo') would yield "error Foo". * * @return string[] */ abstract public function getLogs(); public function testImplements() { $this->assertInstanceOf('Psr\Log\LoggerInterface', $this->getLogger()); } /** * @dataProvider provideLevelsAndMessages */ public function testLogsAtAllLevels($level, $message) { $logger = $this->getLogger(); $logger->{$level}($message, array('user' => 'Bob')); $logger->log($level, $message, array('user' => 'Bob')); $expected = array( $level.' message of level '.$level.' with context: Bob', $level.' message of level '.$level.' with context: Bob', ); $this->assertEquals($expected, $this->getLogs()); } public function provideLevelsAndMessages() { return array( LogLevel::EMERGENCY => array(LogLevel::EMERGENCY, 'message of level emergency with context: {user}'), LogLevel::ALERT => array(LogLevel::ALERT, 'message of level alert with context: {user}'), LogLevel::CRITICAL => array(LogLevel::CRITICAL, 'message of level critical with context: {user}'), LogLevel::ERROR => array(LogLevel::ERROR, 'message of level error with context: {user}'), LogLevel::WARNING => array(LogLevel::WARNING, 'message of level warning with context: {user}'), LogLevel::NOTICE => array(LogLevel::NOTICE, 'message of level notice with context: {user}'), LogLevel::INFO => array(LogLevel::INFO, 'message of level info with context: {user}'), LogLevel::DEBUG => array(LogLevel::DEBUG, 'message of level debug with context: {user}'), ); } /** * @expectedException \Psr\Log\InvalidArgumentException */ public function testThrowsOnInvalidLevel() { $logger = $this->getLogger(); $logger->log('invalid level', 'Foo'); 
} public function testContextReplacement() { $logger = $this->getLogger(); $logger->info('{Message {nothing} {user} {foo.bar} a}', array('user' => 'Bob', 'foo.bar' => 'Bar')); $expected = array('info {Message {nothing} Bob Bar a}'); $this->assertEquals($expected, $this->getLogs()); } public function testObjectCastToString() { if (method_exists($this, 'createPartialMock')) { $dummy = $this->createPartialMock('Psr\Log\Test\DummyTest', array('__toString')); } else { $dummy = $this->getMock('Psr\Log\Test\DummyTest', array('__toString')); } $dummy->expects($this->once()) ->method('__toString') ->will($this->returnValue('DUMMY')); $this->getLogger()->warning($dummy); $expected = array('warning DUMMY'); $this->assertEquals($expected, $this->getLogs()); } public function testContextCanContainAnything() { $closed = fopen('php://memory', 'r'); fclose($closed); $context = array( 'bool' => true, 'null' => null, 'string' => 'Foo', 'int' => 0, 'float' => 0.5, 'nested' => array('with object' => new DummyTest), 'object' => new \DateTime, 'resource' => fopen('php://memory', 'r'), 'closed' => $closed, ); $this->getLogger()->warning('Crazy context data', $context); $expected = array('warning Crazy context data'); $this->assertEquals($expected, $this->getLogs()); } public function testContextExceptionKeyCanBeExceptionOrOtherValues() { $logger = $this->getLogger(); $logger->warning('Random message', array('exception' => 'oops')); $logger->critical('Uncaught Exception!', array('exception' => new \LogicException('Fail'))); $expected = array( 'warning Random message', 'critical Uncaught Exception!' ); $this->assertEquals($expected, $this->getLogs()); } }
| ver. 1.4 |
Github
|
.
| PHP 7.4.33 | Generation time: 0.28 |
proxy
|
phpinfo
|
Settings