Test-Tester-0.109/0000755000175000017500000000000012150267565013201 5ustar fergalfergalTest-Tester-0.109/lib/0000755000175000017500000000000012150267565013747 5ustar fergalfergalTest-Tester-0.109/lib/Test/0000755000175000017500000000000012150267565014666 5ustar fergalfergalTest-Tester-0.109/lib/Test/Tester.pm0000644000175000017500000004162012150267352016467 0ustar fergalfergal
use strict;

package Test::Tester;

BEGIN
{
    if (*Test::Builder::new{CODE})
    {
        warn "You should load Test::Tester before Test::Builder (or anything that loads Test::Builder)"
    }
}

use Test::Builder;
use Test::Tester::CaptureRunner;
use Test::Tester::Delegate;

require Exporter;

use vars qw( @ISA @EXPORT $VERSION );

$VERSION = "0.109";
@EXPORT = qw( run_tests check_tests check_test cmp_results show_space );
@ISA = qw( Exporter );

my $Test = Test::Builder->new;
my $Capture = Test::Tester::Capture->new;
my $Delegator = Test::Tester::Delegate->new;
$Delegator->{Object} = $Test;

my $runner = Test::Tester::CaptureRunner->new;

my $want_space = $ENV{TESTTESTERSPACE};

sub show_space
{
    $want_space = 1;
}

my $colour = '';
my $reset = '';

if (my $want_colour = $ENV{TESTTESTERCOLOUR} || $ENV{TESTTESTERCOLOR})
{
    if (eval "require Term::ANSIColor")
    {
        my ($f, $b) = split(",", $want_colour);
        $colour = Term::ANSIColor::color($f).Term::ANSIColor::color("on_$b");
        $reset = Term::ANSIColor::color("reset");
    }
}

sub new_new
{
    return $Delegator;
}

sub capture
{
    return Test::Tester::Capture->new;
}

sub fh
{
    # experiment with capturing output, I don't like it
    $runner = Test::Tester::FHRunner->new;

    return $Test;
}

sub find_run_tests
{
    my $d = 1;
    my $found = 0;
    while ((not $found) and (my ($sub) = (caller($d))[3]) )
    {
        # print "$d: $sub\n";
        $found = ($sub eq "Test::Tester::run_tests");
        $d++;
    }

    # die "Didn't find 'run_tests' in caller stack" unless $found;
    return $d;
}

sub run_tests
{
    local($Delegator->{Object}) = $Capture;

    $runner->run_tests(@_);

    return ($runner->get_premature, $runner->get_results);
}

sub check_test
{
    my $test = shift;
    my $expect = shift;
    my $name = shift;
    $name = "" unless defined($name);

    @_ = ($test, [$expect], $name);
    goto &check_tests;
}

sub check_tests
{
    my $test = shift;
    my $expects = shift;
    my $name = shift;
    $name = "" unless defined($name);

    my ($prem, @results) = eval { run_tests($test, $name) };

    $Test->ok(! $@, "Test '$name' completed") || $Test->diag($@);
    $Test->ok(!
length($prem), "Test '$name' no premature diagnostication")
        || $Test->diag("Before testing anything, your tests said\n$prem");

    local $Test::Builder::Level = $Test::Builder::Level + 1;
    cmp_results(\@results, $expects, $name);

    return ($prem, @results);
}

sub cmp_field
{
    my ($result, $expect, $field, $desc) = @_;

    if (defined $expect->{$field})
    {
        $Test->is_eq($result->{$field}, $expect->{$field},
            "$desc compare $field");
    }
}

sub cmp_result
{
    my ($result, $expect, $name) = @_;

    my $sub_name = $result->{name};
    $sub_name = "" unless defined($sub_name);

    my $desc = "subtest '$sub_name' of '$name'";

    {
        local $Test::Builder::Level = $Test::Builder::Level + 1;

        cmp_field($result, $expect, "ok", $desc);
        cmp_field($result, $expect, "actual_ok", $desc);
        cmp_field($result, $expect, "type", $desc);
        cmp_field($result, $expect, "reason", $desc);
        cmp_field($result, $expect, "name", $desc);
    }

    # if we got no depth then default to 1
    my $depth = 1;
    if (exists $expect->{depth})
    {
        $depth = $expect->{depth};
    }

    # if depth was explicitly undef then don't test it
    if (defined $depth)
    {
        $Test->is_eq($result->{depth}, $depth, "checking depth")
            || $Test->diag('You need to change $Test::Builder::Level');
    }

    if (defined(my $exp = $expect->{diag}))
    {
        # if there actually is some diag then put a \n on the end if it's not
        # there already

        $exp .= "\n" if (length($exp) and $exp !~ /\n$/);
        if (not $Test->ok($result->{diag} eq $exp,
            "subtest '$sub_name' of '$name' compare diag")
        )
        {
            my $got = $result->{diag};
            my $glen = length($got);
            my $elen = length($exp);
            for ($got, $exp)
            {
                my @lines = split("\n", $_);
                $_ = join("\n", map {
                    if ($want_space)
                    {
                        $_ = $colour.escape($_).$reset;
                    }
                    else
                    {
                        "'$colour$_$reset'"
                    }
                } @lines);
            }

            $Test->diag(<<EOM);
Got diag ($glen bytes):
$got
Expected diag ($elen bytes):
$exp
EOM
        }
    }
}

sub escape
{
    my $str = shift;
    my $res = '';
    for my $char (split("", $str))
    {
        my $c = ord($char);
        if (($c > 32 and $c < 125) or $c == 10)
        {
            $res .= $char;
        }
        else
        {
            $res .= sprintf('\x{%x}', $c);
        }
    }
    return $res;
}

sub cmp_results
{
    my ($results, $expects, $name) = @_;

    $Test->is_num(scalar @$results, scalar @$expects, "Test '$name' result count");

    for (my $i = 0; $i < @$expects; $i++)
    {
        my $expect = $expects->[$i];
        my $result = $results->[$i];

        local $Test::Builder::Level = $Test::Builder::Level + 1;
        cmp_result($result, $expect, $name);
    }
}

######## nicked from Test::More

sub plan
{
    my (@plan) = @_;

    my $caller = caller;

    $Test->exported_to($caller);

    my @imports = ();
    foreach my $idx (0..$#plan)
    {
        if ( $plan[$idx] eq 'import' )
        {
            my ($tag, $imports) = splice @plan, $idx, 2;
            @imports = @$imports;
            last;
        }
    }

    $Test->plan(@plan);

    __PACKAGE__->_export_to_level(1, __PACKAGE__, @imports);
}

sub import
{
    my ($class) = shift;
    {
        no warnings 'redefine';
        *Test::Builder::new = \&new_new;
    }
    goto &plan;
}

sub _export_to_level
{
    my $pkg = shift;
    my $level = shift;
    (undef) = shift;    # redundant arg
    my $callpkg = caller($level);
    $pkg->export($callpkg, @_);
}

############

1;

__END__

=head1 NAME

Test::Tester - Ease testing test modules built with Test::Builder

=head1 SYNOPSIS

  use Test::Tester tests => 6;

  use Test::MyStyle;

  check_test(
    sub {
      is_mystyle_eq("this", "that", "not eq");
    },
    {
      ok => 0, # expect this to fail
      name => "not eq",
      diag => "Expected: 'this'\nGot: 'that'",
    }
  );

or

  use Test::Tester;

  use Test::More tests => 3;

  use Test::MyStyle;

  my ($premature, @results) = run_tests(
    sub {
      is_database_alive("dbname");
    }
  );

  # now use Test::More::like to check the diagnostic output

  like($results[0]->{diag}, "/^Database ping took \\d+ seconds$/", "diag");

=head1 DESCRIPTION

If you have written a test module based on Test::Builder then Test::Tester
allows you to test it with the minimum of effort.
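For readers who have not written such a module yet: the kind of module
Test::Tester is designed to exercise is simply one that reports its results
through Test::Builder. A minimal sketch of the Test::MyStyle module assumed
by the SYNOPSIS (is_mystyle_eq and its diagnostic format are illustrative
only, not part of any real distribution) might look roughly like this:

  package Test::MyStyle;

  use strict;
  use Test::Builder;
  use base 'Exporter';

  our @EXPORT = qw( is_mystyle_eq );

  my $Test = Test::Builder->new;

  # report a pass/fail through Test::Builder and emit diagnostics
  # in the format the SYNOPSIS example expects
  sub is_mystyle_eq
  {
    my ($got, $expect, $name) = @_;

    my $ok = $Test->ok($got eq $expect, $name);
    $Test->diag("Expected: '$expect'\nGot: '$got'") unless $ok;

    return $ok;
  }

  1;

Any module written along these lines can be exercised with check_test() and
run_tests() exactly as shown above.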
=head1 HOW TO USE (THE EASY WAY)

From version 0.08 Test::Tester no longer requires you to include anything
special in your test modules. All you need to do is

  use Test::Tester;

in your test script B<before> any other Test::Builder based modules and away
you go.

Other modules based on Test::Builder can be used to help with the testing.
In fact you can even use functions from your module to test other functions
from the same module (while this is possible it is probably not a good idea:
if your module has bugs, then using it to test itself may give the wrong
answers).

The easiest way to test is to do something like

  check_test(
    sub { is_mystyle_eq("this", "that", "not eq") },
    {
      ok => 0, # we expect the test to fail
      name => "not eq",
      diag => "Expected: 'this'\nGot: 'that'",
    }
  );

this will execute the is_mystyle_eq test, capturing its results and checking
that they are what was expected.

You may need to examine the test results in a more flexible way, for example,
the diagnostic output may be quite long or complex or it may involve something
that you cannot predict in advance like a timestamp. In this case you can get
direct access to the test results:

  my ($premature, @results) = run_tests(
    sub {
      is_database_alive("dbname");
    }
  );

  like($results[0]->{diag}, "/^Database ping took \\d+ seconds$/", "diag");

We cannot predict how long the database ping will take so we use Test::More's
like() test to check that the diagnostic string is of the right form.

=head1 HOW TO USE (THE HARD WAY)

I<This is here for backwards compatibility only>

Make your module use the Test::Tester::Capture object instead of the
Test::Builder one. How to do this depends on your module but assuming that
your module holds the Test::Builder object in $Test and that all your test
routines access it through $Test then providing a function something like this

  sub set_builder
  {
    $Test = shift;
  }

should allow your test scripts to do

  Test::YourModule::set_builder(Test::Tester->capture);

and after that any tests inside your module will be captured.

=head1 TEST RESULTS

The result of each test is captured in a hash. These hashes are the same as
the hashes returned by Test::Builder->details but with a couple of extra
fields.

These fields are documented in L<Test::Builder> in the details() function

=over 2

=item ok

Did the test pass?

=item actual_ok

Did the test really pass? That is, did the pass come from
Test::Builder->ok() or did it pass because it was a TODO test?

=item name

The name supplied for the test.

=item type

What kind of test? Possibilities include skip, todo etc. See
L<Test::Builder> for more details.

=item reason

The reason for the skip, todo etc. See L<Test::Builder> for more details.

=back

These fields are exclusive to Test::Tester.

=over 2

=item diag

Any diagnostics that were output for the test. This only includes diagnostics
output B<after> the test result is declared.

Note that Test::Builder ensures that any diagnostics end in a \n and in
earlier versions of Test::Tester it was essential that you have the final \n
in your expected diagnostics. From version 0.10 onwards, Test::Tester will add
the \n if you forgot it. It will not add a \n if you are expecting no
diagnostics. See below for help tracking down hard to find space and tab
related problems.

=item depth

This allows you to check that your test module is setting the correct value
for $Test::Builder::Level and thus giving the correct file and line number
when a test fails. It is calculated by looking at caller() and
$Test::Builder::Level. It should count how many subroutines there are before
jumping into the function you are testing.
So for example in

  run_tests( sub { my_test_function("a", "b") } );

the depth should be 1 and in

  sub deeper { my_test_function("a", "b") }

  run_tests(sub { deeper() });

depth should be 2, that is 1 for the sub {} and one for deeper().

This might seem a little complex but if your tests look like the simple
examples in this doc then you don't need to worry as the depth will always be
1 and that's what Test::Tester expects by default.

B<Note>: if you do not specify a value for depth in check_test() then it
automatically compares it against 1; if you really want to skip the depth
test then pass in undef.

B<Note>: depth will not be correctly calculated for tests that run from a
signal handler or an END block or anywhere else that hides the call stack.

=back

Some of Test::Tester's functions return arrays of these hashes, just like
Test::Builder->details. That is, the hash for the first test will be array
element 1 (not 0). Element 0 will not be a hash, it will be a string which
contains any diagnostic output that came before the first test. This should
usually be empty; if it's not, it means something output diagnostics before
any test results showed up.

=head1 SPACES AND TABS

Appearances can be deceptive, especially when it comes to emptiness. If you
are scratching your head trying to work out why Test::Tester is saying that
your diagnostics are wrong when they look perfectly right then the answer is
probably whitespace. From version 0.10 on, Test::Tester surrounds the
expected and got diag values with single quotes to make it easier to spot
trailing whitespace. So in this example

  # Got diag (5 bytes):
  # 'abcd '
  # Expected diag (4 bytes):
  # 'abcd'

it is quite clear that there is a space at the end of the first string.
Another way to solve this problem is to use colour and inverse video on an
ANSI terminal, see COLOUR below if you want this.

Unfortunately this is sometimes not enough, neither colour nor quotes will
help you with problems involving tabs, other non-printing characters and
certain kinds of problems inherent in Unicode. To deal with this, you can
switch Test::Tester into a mode whereby all "tricky" characters are shown as
\x{xx}. Tricky characters are those with ASCII code less than 33 or higher
than 126. This makes the output more difficult to read but much easier to
find subtle differences between strings. To turn on this mode either call
show_space() in your test script or set the TESTTESTERSPACE environment
variable to be a true value. The example above would then look like

  # Got diag (5 bytes):
  # abcd\x{20}
  # Expected diag (4 bytes):
  # abcd

=head1 COLOUR

If you prefer to use colour as a means of finding tricky whitespace
characters then you can set the TESTTESTERCOLOUR environment variable to a
comma separated pair of colours, the first for the foreground, the second for
the background. For example "white,red" will print white text on a red
background. This requires the Term::ANSIColor module. You can specify any
colour that would be acceptable to the Term::ANSIColor::color function.

If you spell colour differently, that's no problem. The TESTTESTERCOLOR
variable also works (if both are set then the British spelling wins out).

=head1 EXPORTED FUNCTIONS

=head3 ($premature, @results) = run_tests(\&test_sub)

\&test_sub is a reference to a subroutine.

run_tests runs the subroutine in $test_sub and captures the results of any
tests inside it. You can run more than 1 test inside this subroutine if you
like.

$premature is a string containing any diagnostic output from before the
first test.
@results is an array of test result hashes.

=head3 cmp_result(\%result, \%expect, $name)

\%result is a ref to a test result hash.

\%expect is a ref to a hash of expected values for the test result.

cmp_result compares the result with the expected values. If any differences
are found it outputs diagnostics. You may leave out any field from the
expected result and cmp_result will not do the comparison of that field.

=head3 cmp_results(\@results, \@expects, $name)

\@results is a ref to an array of test results.

\@expects is a ref to an array of hash refs.

cmp_results checks that the results match the expected results and if any
differences are found it outputs diagnostics. It first checks that the number
of elements in \@results and \@expects is the same. Then it goes through each
result checking it against the expected result as in cmp_result() above.

=head3 ($premature, @results) = check_tests(\&test_sub, \@expects, $name)

\&test_sub is a reference to a subroutine.

\@expects is a ref to an array of hash refs which are expected test results.

check_tests combines run_tests and cmp_results into a single call. It also
checks if the tests died at any stage.

It returns the same values as run_tests, so you can further examine the test
results if you need to.

=head3 ($premature, @results) = check_test(\&test_sub, \%expect, $name)

\&test_sub is a reference to a subroutine.

\%expect is a ref to a hash of expected values for the test result.

check_test is a wrapper around check_tests. It combines run_tests and
cmp_results into a single call, checking if the test died. It assumes that
only a single test is run inside \&test_sub and includes a test to make sure
this is true.

It returns the same values as run_tests, so you can further examine the test
results if you need to.

=head3 show_space()

Turn on the escaping of characters as described in the SPACES AND TABS
section.

=head1 HOW IT WORKS

Normally, a test module (let's call it Test::MyStyle) calls
Test::Builder->new to get the Test::Builder object. Test::MyStyle calls
methods on this object to record information about test results. When
Test::Tester is loaded, it replaces Test::Builder's new() method with one
which returns a Test::Tester::Delegate object. Most of the time this object
behaves as the real Test::Builder object. Any methods that are called are
delegated to the real Test::Builder object so everything works perfectly.
However once we go into test mode, the method calls are no longer passed to
the real Test::Builder object, instead they go to the Test::Tester::Capture
object. This object seems exactly like the real Test::Builder object, except,
instead of outputting test results and diagnostics, it just records all the
information for later analysis.

=head1 CAVEATS

Support for calling Test::Builder->note is minimal. It's implemented as an
empty stub, so modules that use it will not crash but the calls are not
recorded for testing purposes like the others. Patches welcome.

=head1 SEE ALSO

L<Test::Builder> the source of testing goodness.

L<Test::Builder::Tester> for an alternative approach to the problem tackled
by Test::Tester - captures the strings output by Test::Builder. This means
you cannot get separate access to the individual pieces of information and
you must predict B<exactly> what your test will output.

=head1 AUTHOR

This module is copyright 2005 Fergal Daly, some parts are based on other
people's work.

Plan handling lifted from Test::More, written by Michael G Schwern.

Test::Tester::Capture is a cut down and hacked up version of Test::Builder.
Test::Builder was written by chromatic and Michael G Schwern . =head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut Test-Tester-0.109/lib/Test/Tester/0000755000175000017500000000000012150267565016134 5ustar fergalfergalTest-Tester-0.109/lib/Test/Tester/Capture.pm0000644000175000017500000001040412150267276020073 0ustar fergalfergaluse strict; package Test::Tester::Capture; use Test::Builder; use vars qw( @ISA ); @ISA = qw( Test::Builder ); # Make Test::Tester::Capture thread-safe for ithreads. BEGIN { use Config; if( $] >= 5.008 && $Config{useithreads} ) { require threads::shared; threads::shared->import; } else { *share = sub { 0 }; *lock = sub { 0 }; } } my $Curr_Test = 0; share($Curr_Test); my @Test_Results = (); share(@Test_Results); my $Prem_Diag = {diag => ""}; share($Curr_Test); sub new { # Test::Tester::Capgture::new used to just return __PACKAGE__ # because Test::Builder::new enforced it's singleton nature by # return __PACKAGE__. That has since changed, Test::Builder::new now # returns a blessed has and around version 0.78, Test::Builder::todo # started wanting to modify $self. To cope with this, we now return # a blessed hash. This is a short-term hack, the correct thing to do # is to detect which style of Test::Builder we're dealing with and # act appropriately. my $class = shift; return bless {}, $class; } sub ok { my($self, $test, $name) = @_; # $test might contain an object which we don't want to accidentally # store, so we turn it into a boolean. $test = $test ? 1 : 0; lock $Curr_Test; $Curr_Test++; my($pack, $file, $line) = $self->caller; my $todo = $self->todo($pack); my $result = {}; share($result); unless( $test ) { @$result{ 'ok', 'actual_ok' } = ( ( $todo ? 1 : 0 ), 0 ); } else { @$result{ 'ok', 'actual_ok' } = ( 1, $test ); } if( defined $name ) { $name =~ s|#|\\#|g; # # in a name can confuse Test::Harness. $result->{name} = $name; } else { $result->{name} = ''; } if( $todo ) { my $what_todo = $todo; $result->{reason} = $what_todo; $result->{type} = 'todo'; } else { $result->{reason} = ''; $result->{type} = ''; } $Test_Results[$Curr_Test-1] = $result; unless( $test ) { my $msg = $todo ? "Failed (TODO)" : "Failed"; $result->{fail_diag} = (" $msg test ($file at line $line)\n"); } $result->{diag} = ""; $result->{_level} = $Test::Builder::Level; $result->{_depth} = Test::Tester::find_run_tests(); return $test ? 1 : 0; } sub skip { my($self, $why) = @_; $why ||= ''; lock($Curr_Test); $Curr_Test++; my %result; share(%result); %result = ( 'ok' => 1, actual_ok => 1, name => '', type => 'skip', reason => $why, diag => "", _level => $Test::Builder::Level, _depth => Test::Tester::find_run_tests(), ); $Test_Results[$Curr_Test-1] = \%result; return 1; } sub todo_skip { my($self, $why) = @_; $why ||= ''; lock($Curr_Test); $Curr_Test++; my %result; share(%result); %result = ( 'ok' => 1, actual_ok => 0, name => '', type => 'todo_skip', reason => $why, diag => "", _level => $Test::Builder::Level, _depth => Test::Tester::find_run_tests(), ); $Test_Results[$Curr_Test-1] = \%result; return 1; } sub diag { my($self, @msgs) = @_; return unless @msgs; # Prevent printing headers when compiling (i.e. -c) return if $^C; # Escape each line with a #. foreach (@msgs) { $_ = 'undef' unless defined; } push @msgs, "\n" unless $msgs[-1] =~ /\n\Z/; my $result = $Curr_Test ? $Test_Results[$Curr_Test - 1] : $Prem_Diag; $result->{diag} .= join("", @msgs); return 0; } sub details { return @Test_Results; } # Stub. 
Feel free to send me a patch to implement this. sub note { } sub explain { return Test::Builder::explain(@_); } sub premature { return $Prem_Diag->{diag}; } sub current_test { if (@_ > 1) { die "Don't try to change the test number!"; } else { return $Curr_Test; } } sub reset { $Curr_Test = 0; @Test_Results = (); $Prem_Diag = {diag => ""}; } 1; __END__ =head1 NAME Test::Tester::Capture - Help testing test modules built with Test::Builder =head1 DESCRIPTION This is a subclass of Test::Builder that overrides many of the methods so that they don't output anything. It also keeps track of it's own set of test results so that you can use Test::Builder based modules to perform tests on other Test::Builder based modules. =head1 AUTHOR Most of the code here was lifted straight from Test::Builder and then had chunks removed by Fergal Daly . =head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut Test-Tester-0.109/lib/Test/Tester/CaptureRunner.pm0000644000175000017500000000237111665433545021275 0ustar fergalfergal# $Header: /home/fergal/my/cvs/Test-Tester/lib/Test/Tester/CaptureRunner.pm,v 1.3 2003/03/05 01:07:55 fergal Exp $ use strict; package Test::Tester::CaptureRunner; use Test::Tester::Capture; require Exporter; sub new { my $pkg = shift; my $self = bless {}, $pkg; return $self; } sub run_tests { my $self = shift; my $test = shift; capture()->reset; $self->{StartLevel} = $Test::Builder::Level; &$test(); } sub get_results { my $self = shift; my @results = capture()->details; my $start = $self->{StartLevel}; foreach my $res (@results) { next if defined $res->{depth}; my $depth = $res->{_depth} - $res->{_level} - $start - 3; # print "my $depth = $res->{_depth} - $res->{_level} - $start - 1\n"; $res->{depth} = $depth; } return @results; } sub get_premature { return capture()->premature; } sub capture { return Test::Tester::Capture->new; } __END__ =head1 NAME Test::Tester::CaptureRunner - Help testing test modules built with Test::Builder =head1 DESCRIPTION This stuff if needed to allow me to play with other ways of monitoring the test results. =head1 AUTHOR Copyright 2003 by Fergal Daly . =head1 LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html =cut Test-Tester-0.109/lib/Test/Tester/Delegate.pm0000644000175000017500000000056410262334336020202 0ustar fergalfergaluse strict; use warnings; package Test::Tester::Delegate; use vars '$AUTOLOAD'; sub new { my $pkg = shift; my $obj = shift; my $self = bless {}, $pkg; return $self; } sub AUTOLOAD { my ($sub) = $AUTOLOAD =~ /.*::(.*?)$/; return if $sub eq "DESTROY"; my $obj = $_[0]->{Object}; my $ref = $obj->can($sub); shift(@_); unshift(@_, $obj); goto &$ref; } 1; Test-Tester-0.109/README0000644000175000017500000003062612150267566014071 0ustar fergalfergalNAME Test::Tester - Ease testing test modules built with Test::Builder SYNOPSIS use Test::Tester tests => 6; use Test::MyStyle; check_test( sub { is_mystyle_eq("this", "that", "not eq"); }, { ok => 0, # expect this to fail name => "not eq", diag => "Expected: 'this'\nGot: 'that'", } ); or use Test::Tester; use Test::More tests => 3; use Test::MyStyle; my ($premature, @results) = run_tests( sub { is_database_alive("dbname"); } ); # now use Test::More::like to check the diagnostic output like($results[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag"); DESCRIPTION If you have written a test module based on Test::Builder then Test::Tester allows you to test it with the minimum of effort. 
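    As a sketch of what a complete test script can look like when checking
    more than one result at a time (again using the illustrative Test::MyStyle
    module from the SYNOPSIS, so the exact names and values are examples
    only), you might write something roughly like this:

        use Test::Tester;
        use Test::More 'no_plan';
        use Test::MyStyle;

        check_tests(
          sub {
            is_mystyle_eq("apple", "apple", "same");
            is_mystyle_eq("apple", "orange", "different");
          },
          [
            { ok => 1, name => "same" },
            { ok => 0, name => "different" },
          ],
          "two results at once"
        );

    Each hash in the second argument describes one expected result, in the
    order the tests are run. Any field you leave out (diag, for example) is
    simply not compared, although depth is checked against a default of 1 as
    described under TEST RESULTS below.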
HOW TO USE (THE EASY WAY) From version 0.08 Test::Tester no longer requires you to included anything special in your test modules. All you need to do is use Test::Tester; in your test script before any other Test::Builder based modules and away you go. Other modules based on Test::Builder can be used to help with the testing. In fact you can even use functions from your module to test other functions from the same module (while this is possible it is probably not a good idea, if your module has bugs, then using it to test itself may give the wrong answers). The easiest way to test is to do something like check_test( sub { is_mystyle_eq("this", "that", "not eq") }, { ok => 0, # we expect the test to fail name => "not eq", diag => "Expected: 'this'\nGot: 'that'", } ); this will execute the is_mystyle_eq test, capturing it's results and checking that they are what was expected. You may need to examine the test results in a more flexible way, for example, the diagnostic output may be quite long or complex or it may involve something that you cannot predict in advance like a timestamp. In this case you can get direct access to the test results: my ($premature, @results) = run_tests( sub { is_database_alive("dbname"); } ); like($result[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag"); We cannot predict how long the database ping will take so we use Test::More's like() test to check that the diagnostic string is of the right form. HOW TO USE (THE HARD WAY) *This is here for backwards compatibility only* Make your module use the Test::Tester::Capture object instead of the Test::Builder one. How to do this depends on your module but assuming that your module holds the Test::Builder object in $Test and that all your test routines access it through $Test then providing a function something like this sub set_builder { $Test = shift; } should allow your test scripts to do Test::YourModule::set_builder(Test::Tester->capture); and after that any tests inside your module will captured. TEST RESULTS The result of each test is captured in a hash. These hashes are the same as the hashes returned by Test::Builder->details but with a couple of extra fields. These fields are documented in Test::Builder in the details() function ok Did the test pass? actual_ok Did the test really pass? That is, did the pass come from Test::Builder->ok() or did it pass because it was a TODO test? name The name supplied for the test. type What kind of test? Possibilities include, skip, todo etc. See Test::Builder for more details. reason The reason for the skip, todo etc. See Test::Builder for more details. These fields are exclusive to Test::Tester. diag Any diagnostics that were output for the test. This only includes diagnostics output after the test result is declared. Note that Test::Builder ensures that any diagnostics end in a \n and it in earlier versions of Test::Tester it was essential that you have the final \n in your expected diagnostics. From version 0.10 onwards, Test::Tester will add the \n if you forgot it. It will not add a \n if you are expecting no diagnostics. See below for help tracking down hard to find space and tab related problems. depth This allows you to check that your test module is setting the correct value for $Test::Builder::Level and thus giving the correct file and line number when a test fails. It is calculated by looking at caller() and $Test::Builder::Level. It should count how many subroutines there are before jumping into the function you are testing. 
So for example in run_tests( sub { my_test_function("a", "b") } ); the depth should be 1 and in sub deeper { my_test_function("a", "b") } run_tests(sub { deeper() }); depth should be 2, that is 1 for the sub {} and one for deeper(). This might seem a little complex but if your tests look like the simple examples in this doc then you don't need to worry as the depth will always be 1 and that's what Test::Tester expects by default. Note: if you do not specify a value for depth in check_test() then it automatically compares it against 1, if you really want to skip the depth test then pass in undef. Note: depth will not be correctly calculated for tests that run from a signal handler or an END block or anywhere else that hides the call stack. Some of Test::Tester's functions return arrays of these hashes, just like Test::Builder->details. That is, the hash for the first test will be array element 1 (not 0). Element 0 will not be a hash it will be a string which contains any diagnostic output that came before the first test. This should usually be empty, if it's not, it means something output diagnostics before any test results showed up. SPACES AND TABS Appearances can be deceptive, especially when it comes to emptiness. If you are scratching your head trying to work out why Test::Tester is saying that your diagnostics are wrong when they look perfectly right then the answer is probably whitespace. From version 0.10 on, Test::Tester surrounds the expected and got diag values with single quotes to make it easier to spot trailing whitesapce. So in this example # Got diag (5 bytes): # 'abcd ' # Expected diag (4 bytes): # 'abcd' it is quite clear that there is a space at the end of the first string. Another way to solve this problem is to use colour and inverse video on an ANSI terminal, see below COLOUR below if you want this. Unfortunately this is sometimes not enough, neither colour nor quotes will help you with problems involving tabs, other non-printing characters and certain kinds of problems inherent in Unicode. To deal with this, you can switch Test::Tester into a mode whereby all "tricky" characters are shown as \{xx}. Tricky characters are those with ASCII code less than 33 or higher than 126. This makes the output more difficult to read but much easier to find subtle differences between strings. To turn on this mode either call show_space() in your test script or set the TESTTESTERSPACE environment variable to be a true value. The example above would then look like # Got diag (5 bytes): # abcd\x{20} # Expected diag (4 bytes): # abcd COLOUR If you prefer to use colour as a means of finding tricky whitespace characters then you can set the TESTTESTCOLOUR environment variable to a comma separated pair of colours, the first for the foreground, the second for the background. For example "white,red" will print white text on a red background. This requires the Term::ANSIColor module. You can specify any colour that would be acceptable to the Term::ANSIColor::color function. If you spell colour differently, that's no problem. The TESTTESTERCOLOR variable also works (if both are set then the British spelling wins out). EXPORTED FUNCTIONS ($premature, @results) = run_tests(\&test_sub) \&test_sub is a reference to a subroutine. run_tests runs the subroutine in $test_sub and captures the results of any tests inside it. You can run more than 1 test inside this subroutine if you like. $premature is a string containing any diagnostic output from before the first test. 
@results is an array of test result hashes. cmp_result(\%result, \%expect, $name) \%result is a ref to a test result hash. \%expect is a ref to a hash of expected values for the test result. cmp_result compares the result with the expected values. If any differences are found it outputs diagnostics. You may leave out any field from the expected result and cmp_result will not do the comparison of that field. cmp_results(\@results, \@expects, $name) \@results is a ref to an array of test results. \@expects is a ref to an array of hash refs. cmp_results checks that the results match the expected results and if any differences are found it outputs diagnostics. It first checks that the number of elements in \@results and \@expects is the same. Then it goes through each result checking it against the expected result as in cmp_result() above. ($premature, @results) = check_tests(\&test_sub, \@expects, $name) \&test_sub is a reference to a subroutine. \@expect is a ref to an array of hash refs which are expected test results. check_tests combines run_tests and cmp_tests into a single call. It also checks if the tests died at any stage. It returns the same values as run_tests, so you can further examine the test results if you need to. ($premature, @results) = check_test(\&test_sub, \%expect, $name) \&test_sub is a reference to a subroutine. \%expect is a ref to an hash of expected values for the test result. check_test is a wrapper around check_tests. It combines run_tests and cmp_tests into a single call, checking if the test died. It assumes that only a single test is run inside \&test_sub and include a test to make sure this is true. It returns the same values as run_tests, so you can further examine the test results if you need to. show_space() Turn on the escaping of characters as described in the SPACES AND TABS section. HOW IT WORKS Normally, a test module (let's call it Test:MyStyle) calls Test::Builder->new to get the Test::Builder object. Test::MyStyle calls methods on this object to record information about test results. When Test::Tester is loaded, it replaces Test::Builder's new() method with one which returns a Test::Tester::Delegate object. Most of the time this object behaves as the real Test::Builder object. Any methods that are called are delegated to the real Test::Builder object so everything works perfectly. However once we go into test mode, the method calls are no longer passed to the real Test::Builder object, instead they go to the Test::Tester::Capture object. This object seems exactly like the real Test::Builder object, except, instead of outputting test results and diagnostics, it just records all the information for later analysis. CAVEATS Support for calling Test::Builder->note is minimal. It's implemented as an empty stub, so modules that use it will not crash but the calls are not recorded for testing purposes like the others. Patches welcome. SEE ALSO Test::Builder the source of testing goodness. Test::Builder::Tester for an alternative approach to the problem tackled by Test::Tester - captures the strings output by Test::Builder. This means you cannot get separate access to the individual pieces of information and you must predict exactly what your test will output. AUTHOR This module is copyright 2005 Fergal Daly , some parts are based on other people's work. Plan handling lifted from Test::More. written by Michael G Schwern . Test::Tester::Capture is a cut down and hacked up version of Test::Builder. Test::Builder was written by chromatic and Michael G Schwern . 
LICENSE Under the same license as Perl itself See http://www.perl.com/perl/misc/Artistic.html Test-Tester-0.109/TODO0000644000175000017500000000002307631260237013661 0ustar fergalfergal* write fail tests Test-Tester-0.109/t/0000755000175000017500000000000012150267565013444 5ustar fergalfergalTest-Tester-0.109/t/MyTest.pm0000644000175000017500000000017710072357514015227 0ustar fergalfergaluse strict; use warnings; package MyTest; use Test::Builder; my $Test = Test::Builder->new; sub ok { $Test->ok(@_); } 1; Test-Tester-0.109/t/run_test.t0000644000175000017500000001020711665433623015474 0ustar fergalfergaluse strict; use Test::Tester; use Data::Dumper qw(Dumper); my $test = Test::Builder->new; $test->plan(tests => 54); my $cap; { $cap = Test::Tester->capture; my ($prem, @results) = run_tests( sub {$cap->ok(1, "run pass")} ); local $Test::Builder::Level = 0; $test->is_eq($prem, "", "run pass no prem"); $test->is_num(scalar (@results), 1, "run pass result count"); my $res = $results[0]; $test->is_eq($res->{name}, "run pass", "run pass name"); $test->is_eq($res->{ok}, 1, "run pass ok"); $test->is_eq($res->{actual_ok}, 1, "run pass actual_ok"); $test->is_eq($res->{reason}, "", "run pass reason"); $test->is_eq($res->{type}, "", "run pass type"); $test->is_eq($res->{diag}, "", "run pass diag"); $test->is_num($res->{depth}, 0, "run pass depth"); } { my ($prem, @results) = run_tests( sub {$cap->ok(0, "run fail")} ); local $Test::Builder::Level = 0; $test->is_eq($prem, "", "run fail no prem"); $test->is_num(scalar (@results), 1, "run fail result count"); my $res = $results[0]; $test->is_eq($res->{name}, "run fail", "run fail name"); $test->is_eq($res->{actual_ok}, 0, "run fail actual_ok"); $test->is_eq($res->{ok}, 0, "run fail ok"); $test->is_eq($res->{reason}, "", "run fail reason"); $test->is_eq($res->{type}, "", "run fail type"); $test->is_eq($res->{diag}, "", "run fail diag"); $test->is_num($res->{depth}, 0, "run fail depth"); } { my ($prem, @results) = run_tests( sub {$cap->skip("just because")} ); local $Test::Builder::Level = 0; $test->is_eq($prem, "", "skip no prem"); $test->is_num(scalar (@results), 1, "skip result count"); my $res = $results[0]; $test->is_eq($res->{name}, "", "skip name"); $test->is_eq($res->{actual_ok}, 1, "skip actual_ok"); $test->is_eq($res->{ok}, 1, "skip ok"); $test->is_eq($res->{reason}, "just because", "skip reason"); $test->is_eq($res->{type}, "skip", "skip type"); $test->is_eq($res->{diag}, "", "skip diag"); $test->is_num($res->{depth}, 0, "skip depth"); } { my ($prem, @results) = run_tests( sub {$cap->todo_skip("just because")} ); local $Test::Builder::Level = 0; $test->is_eq($prem, "", "todo_skip no prem"); $test->is_num(scalar (@results), 1, "todo_skip result count"); my $res = $results[0]; $test->is_eq($res->{name}, "", "todo_skip name"); $test->is_eq($res->{actual_ok}, 0, "todo_skip actual_ok"); $test->is_eq($res->{ok}, 1, "todo_skip ok"); $test->is_eq($res->{reason}, "just because", "todo_skip reason"); $test->is_eq($res->{type}, "todo_skip", "todo_skip type"); $test->is_eq($res->{diag}, "", "todo_skip diag"); $test->is_num($res->{depth}, 0, "todo_skip depth"); } { my ($prem, @results) = run_tests( sub {$cap->diag("run diag")} ); local $Test::Builder::Level = 0; $test->is_eq($prem, "run diag\n", "run diag prem"); $test->is_num(scalar (@results), 0, "run diag result count"); } { my ($prem, @results) = run_tests( sub { $cap->ok(1, "multi pass"); $cap->diag("multi pass diag1"); $cap->diag("multi pass diag2"); $cap->ok(0, "multi fail"); $cap->diag("multi fail 
diag"); } ); local $Test::Builder::Level = 0; $test->is_eq($prem, "", "run multi no prem"); $test->is_num(scalar (@results), 2, "run multi result count"); my $res_pass = $results[0]; $test->is_eq($res_pass->{name}, "multi pass", "run multi pass name"); $test->is_eq($res_pass->{actual_ok}, 1, "run multi pass actual_ok"); $test->is_eq($res_pass->{ok}, 1, "run multi pass ok"); $test->is_eq($res_pass->{reason}, "", "run multi pass reason"); $test->is_eq($res_pass->{type}, "", "run multi pass type"); $test->is_eq($res_pass->{diag}, "multi pass diag1\nmulti pass diag2\n", "run multi pass diag"); $test->is_num($res_pass->{depth}, 0, "run multi pass depth"); my $res_fail = $results[1]; $test->is_eq($res_fail->{name}, "multi fail", "run multi fail name"); $test->is_eq($res_pass->{actual_ok}, 1, "run multi fail actual_ok"); $test->is_eq($res_fail->{ok}, 0, "run multi fail ok"); $test->is_eq($res_pass->{reason}, "", "run multi fail reason"); $test->is_eq($res_pass->{type}, "", "run multi fail type"); $test->is_eq($res_fail->{diag}, "multi fail diag\n", "run multi fail diag"); $test->is_num($res_pass->{depth}, 0, "run multi fail depth"); } Test-Tester-0.109/t/SmallTest.pm0000644000175000017500000000052010262335574015705 0ustar fergalfergaluse strict; use warnings; package SmallTest; require Exporter; use vars qw( @ISA @EXPORT ); @ISA = qw( Exporter ); @EXPORT = qw( ok is_eq is_num ); use Test::Builder; my $Test = Test::Builder->new; sub ok { $Test->ok(@_); } sub is_eq { $Test->is_eq(@_); } sub is_num { $Test->is_num(@_); } sub getTest { return $Test; } 1; Test-Tester-0.109/t/check_tests.t0000644000175000017500000000335710321722471016126 0ustar fergalfergaluse strict; use Test::Tester; use Data::Dumper qw(Dumper); my $test = Test::Builder->new; $test->plan(tests => 105); my $cap; $cap = Test::Tester->capture; my @tests = ( [ 'pass', '$cap->ok(1, "pass");', { name => "pass", ok => 1, actual_ok => 1, reason => "", type => "", diag => "", depth => 0, }, ], [ 'pass diag', '$cap->ok(1, "pass diag"); $cap->diag("pass diag1"); $cap->diag("pass diag2");', { name => "pass diag", ok => 1, actual_ok => 1, reason => "", type => "", diag => "pass diag1\npass diag2\n", depth => 0, }, ], [ 'pass diag no \\n', '$cap->ok(1, "pass diag"); $cap->diag("pass diag1"); $cap->diag("pass diag2");', { name => "pass diag", ok => 1, actual_ok => 1, reason => "", type => "", diag => "pass diag1\npass diag2", depth => 0, }, ], [ 'fail', '$cap->ok(0, "fail"); $cap->diag("fail diag");', { name => "fail", ok => 0, actual_ok => 0, reason => "", type => "", diag => "fail diag\n", depth => 0, }, ], [ 'skip', '$cap->skip("just because");', { name => "", ok => 1, actual_ok => 1, reason => "just because", type => "skip", diag => "", depth => 0, }, ], [ 'todo_skip', '$cap->todo_skip("why not");', { name => "", ok => 1, actual_ok => 0, reason => "why not", type => "todo_skip", diag => "", depth => 0, }, ], ); my $big_code = ""; my @big_expect; foreach my $test (@tests) { my ($name, $code, $expect) = @$test; $big_code .= "$code\n"; push(@big_expect, $expect); my $test_sub = eval "sub {$code}"; check_test($test_sub, $expect, $name); } my $big_test_sub = eval "sub {$big_code}"; check_tests($big_test_sub, \@big_expect, "run all"); Test-Tester-0.109/t/depth.t0000644000175000017500000000057311665433623014742 0ustar fergalfergaluse strict; use warnings; use lib 't'; use Test::Tester; use MyTest; my $test = Test::Builder->new; $test->plan(tests => 2); sub deeper { MyTest::ok(1); } { my @results = run_tests( sub { MyTest::ok(1); deeper(); } ); local 
$Test::Builder::Level = 0; $test->is_num($results[1]->{depth}, 1, "depth 1"); $test->is_num($results[2]->{depth}, 2, "deeper"); } Test-Tester-0.109/t/auto.t0000644000175000017500000000072011665433623014600 0ustar fergalfergaluse strict; use warnings; use lib 't'; use Test::Tester tests => 5; use SmallTest; use MyTest; { my ($prem, @results) = run_tests( sub { MyTest::ok(1, "run pass")} ); is_eq($results[0]->{name}, "run pass"); is_num($results[0]->{ok}, 1); } { my ($prem, @results) = run_tests( sub { MyTest::ok(0, "run fail")} ); is_eq($results[0]->{name}, "run fail"); is_num($results[0]->{ok}, 0); } is_eq(ref(SmallTest::getTest()), "Test::Tester::Delegate"); Test-Tester-0.109/t/capture.t0000644000175000017500000000075010262340666015272 0ustar fergalfergaluse strict; use Test::Tester; my $Test = Test::Builder->new; $Test->plan(tests => 3); my $cap; $cap = Test::Tester->capture; { no warnings 'redefine'; sub Test::Tester::find_run_tests { return 0}; } local $Test::Builder::Level = 0; { my $cur = $cap->current_test; $Test->is_num($cur, 0, "current test"); eval {$cap->current_test(2)}; $Test->ok($@, "can't set test_num"); } { $cap->ok(1, "a test"); my @res = $cap->details; $Test->is_num(scalar @res, 1, "res count"); } Test-Tester-0.109/t/fail/0000755000175000017500000000000012150267565014357 5ustar fergalfergalTest-Tester-0.109/t/fail/fail.t0000644000175000017500000000027310203341757015452 0ustar fergalfergaluse strict; use warnings; use Test::More 'no_plan'; use Test::Tester; Test::Tester::cmp_result( {diag => "abcd \nabcd", name => ''}, {diag => "abcd\t\nabcd", name => ''}, "diag"); Test-Tester-0.109/CHANGES0000644000175000017500000000652312150267521014172 0ustar fergalfergal0.109 Add minimal support for note and explain (i.e. don't crash). 0.108 At the request Michael Schwern, do not "require threads" any longer as it can upset Test::Builder. Anyone using threads should already have done that themselves. Anyway not, doesn't need it. 0.107 Test::Tester::Capgture::new used to just return __PACKAGE__ because Test::Builder::new enforced it's singleton nature by return __PACKAGE__. That has since changed, Test::Builder::new now returns a blessed has and around version 0.78, Test::Builder::todo started wanting to modify $self. To cope with this, we now return a blessed hash. This is a short-term hack, the correct thing to do is to detect which style of Test::Builder we're dealing with and act appropriately. 0.106 Some doc clarifications and improvements. 0.105 Fixed documentation for run_tests - looks like I copied code for check_test and didn't alter it correctly. Ricardo Signes pointed out the errors. Removed the code and docs related to the $name parameter of run_tests, it had no effect and a name makes no sense (again a hang over from factoring this out of check_tests I guess). 0.104 Removed a bogus entry for Exporter in @ISA in Test::Tester::Capture and add a "use Test::Builder". This can cause problems that I can't reproduce. Thanks to Andres Koenig. 0.103 Default to checking depth (check against 1 if none specified). This means that by default we are checking that the line number/file in failure messages will be correct. 0.102 Fixed a problem due to subroutine argument aliasing which meant the delegate object could be replaced by the real Test::Builder object. Test::Tester must be loaded before Test::Builder. Check for this and warn if it's not true. 0.101 Added colourisation support. 
0.10 Automatically add \n to the expected diagnostics if it's not already there, because Test::Builder::diag does the same. Thanks to James FitzGibbon for the suggestion. Now it's much easier to spot trailing spaces in diagnostics and also possible to escape all "tricky" characters to make them easy to troubleshoot. 0.09 The depth calculation would die if it couldn't find the correct things in the call stack. This seemed like a good idea but it breaks tests which are triggered in signal handlers. So now the calculation will fail silently. Using local rather than set/unset for the delegator so that if something dies, we will drop back to the correct test object. 0.08 check_test was getting the Level wrong Made it all very nice so that don't have to worry about providing capture objects or any of that messiness. Things are automatically captured once you're inside one of Test::Tester's testing routines. Added depth to allow testing $Test::Builder::Level. Updated the POD a bit. 0.07 Spellchecked the POD 0.06 Allow getting current_test but still die if someone tries to set it. Needed for testing Test::Warn::None 0.05 Added string length length to the diags diags Corrected level problem cmp_results 0.04 Got rid of undef warning when no name supplied got rid of unneeded result counting in hceck_test 0.03 Can now capture test output in a veriety of ways, although at the moment I'm only including one way as I think the other way is quite sucky. added capturing of skip, and todo_skip big changes resulting in better handling of everything 0.02 made the diag comparison output nicer 0.01 First release Test-Tester-0.109/MANIFEST0000644000175000017500000000060712150267565014335 0ustar fergalfergalARTISTIC CHANGES lib/Test/Tester.pm lib/Test/Tester/Capture.pm lib/Test/Tester/CaptureRunner.pm lib/Test/Tester/Delegate.pm Makefile.PL MANIFEST META.yml Module meta-data (added by MakeMaker) README t/auto.t t/capture.t t/check_tests.t t/depth.t t/MyTest.pm t/run_test.t t/SmallTest.pm t/fail/fail.t TODO META.json Module JSON meta-data (added by MakeMaker) Test-Tester-0.109/Makefile.PL0000644000175000017500000000113310072352261015136 0ustar fergalfergal# $Header: $ use strict; use ExtUtils::MakeMaker; my $module = "Test::Tester"; my $file_base = $module; $file_base =~ s/::/\//g; $file_base = "./lib/$file_base"; my $pm_file = "$file_base.pm"; my $pod_file = "$file_base.pod"; my $doc_file = -e $pod_file ? 
$pod_file : $pm_file; (my $dir = $module) =~ s/::/-/g; WriteMakefile( AUTHOR => 'Fergal Daly ', NAME => $module, VERSION_FROM => $pm_file, INSTALLDIRS => 'perl', PREREQ_PM => { 'Test::Builder' => 0, }, dist => { PREOP=> "rm -f README; pod2text -80 < $doc_file > README; cp -f README $dir-\$(VERSION); " } ); Test-Tester-0.109/META.json0000644000175000017500000000152612150267565014626 0ustar fergalfergal{ "abstract" : "unknown", "author" : [ "Fergal Daly " ], "dynamic_config" : 1, "generated_by" : "ExtUtils::MakeMaker version 6.62, CPAN::Meta::Converter version 2.112150", "license" : [ "unknown" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : "2" }, "name" : "Test-Tester", "no_index" : { "directory" : [ "t", "inc" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : 0 } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : 0 } }, "runtime" : { "requires" : { "Test::Builder" : 0 } } }, "release_status" : "stable", "version" : "0.109" } Test-Tester-0.109/ARTISTIC0000644000175000017500000001446310074204727014350 0ustar fergalfergalThe "Artistic License" Preamble The intent of this document is to state the conditions under which a Package may be copied, such that the Copyright Holder maintains some semblance of artistic control over the development of the package, while giving the users of the package the right to use and distribute the Package in a more-or-less customary fashion, plus the right to make reasonable modifications. Definitions "Package" refers to the collection of files distributed by the Copyright Holder, and derivatives of that collection of files created through textual modification. "Standard Version" refers to such a Package if it has not been modified, or has been modified in accordance with the wishes of the Copyright Holder as specified below. "Copyright Holder" is whoever is named in the copyright or copyrights for the package. "You" is you, if you're thinking about copying or distributing this Package. "Reasonable copying fee" is whatever you can justify on the basis of media cost, duplication charges, time of people involved, and so on. (You will not be required to justify it to the Copyright Holder, but only to the computing community at large as a market that must bear the fee.) "Freely Available" means that no fee is charged for the item itself, though there may be fees involved in handling the item. It also means that recipients of the item may redistribute it under the same conditions they received it. 1. You may make and give away verbatim copies of the source form of the Standard Version of this Package without restriction, provided that you duplicate all of the original copyright notices and associated disclaimers. 2. You may apply bug fixes, portability fixes and other modifications derived from the Public Domain or from the Copyright Holder. A Package modified in such a way shall still be considered the Standard Version. 3. You may otherwise modify your copy of this Package in any way, provided that you insert a prominent notice in each changed file stating how and when you changed that file, and provided that you do at least ONE of the following: a. place your modifications in the Public Domain or otherwise make them Freely Available, such as by posting said modifications to Usenet or an equivalent medium, or placing the modifications on a major archive site such as uunet.uu.net, or by allowing the Copyright Holder to include your modifications in the Standard Version of the Package. b. 
use the modified Package only within your corporation or organization. c. rename any non-standard executables so the names do not conflict with standard executables, which must also be provided, and provide a separate manual page for each non-standard executable that clearly documents how it differs from the Standard Version. d. make other distribution arrangements with the Copyright Holder. You may distribute the programs of this Package in object code or executable form, provided that you do at least ONE of the following: a. distribute a Standard Version of the executables and library files, together with instructions (in the manual page or equivalent) on where to get the Standard Version. b. accompany the distribution with the machine-readable source of the Package with your modifications. c. give non-standard executables non-standard names, and clearly document the differences in manual pages (or equivalent), together with instructions on where to get the Standard Version. d. make other distribution arrangements with the Copyright Holder. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own. You may embed this Package's interpreter within an executable of yours (by linking); this shall be construed as a mere form of aggregation, provided that the complete Standard Version of the interpreter is so embedded. The scripts and library files supplied as input to or produced as output from the programs of this Package do not automatically fall under the copyright of this Package, but belong to whomever generated them, and may be sold commercially, and may be aggregated with this Package. If such scripts or library files are aggregated with this Package via the so-called "undump" or "unexec" methods of producing a binary executable image, then distribution of such an image shall neither be construed as a distribution of this Package nor shall it fall under the restrictions of Paragraphs 3 and 4, provided that you do not represent such an executable image as a Standard Version of this Package. C subroutines (or comparably compiled subroutines in other languages) supplied by you and linked into this Package in order to emulate subroutines and variables of the language defined by this Package shall not be considered part of this Package, but are the equivalent of input as in Paragraph 6, provided these subroutines do not change the language in any way that would cause it to fail the regression tests for the language. Aggregation of this Package with a commercial distribution is always permitted provided that the use of this Package is embedded; that is, when no overt attempt is made to make this Package's interfaces visible to the end user of the commercial distribution. Such use shall not be construed as a distribution of this Package. The name of the Copyright Holder may not be used to endorse or promote products derived from this software without specific prior written permission. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
The End Test-Tester-0.109/META.yml0000644000175000017500000000073012150267565014452 0ustar fergalfergal--- abstract: unknown author: - 'Fergal Daly ' build_requires: ExtUtils::MakeMaker: 0 configure_requires: ExtUtils::MakeMaker: 0 dynamic_config: 1 generated_by: 'ExtUtils::MakeMaker version 6.62, CPAN::Meta::Converter version 2.112150' license: unknown meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: 1.4 name: Test-Tester no_index: directory: - t - inc requires: Test::Builder: 0 version: 0.109