use strict;

package Test;

use 5.005_64;
use Test::Harness 1.1601 ();
use Carp;

# Package globals.  The first group is (semi-)public; the second is
# internal bookkeeping for the current test run.
our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, $ntest, $TestLevel);      #public-ish
our ($TESTOUT, $ONFAIL, %todo, %history, $planned, @FAILDETAIL);    #private-ish

$VERSION = '1.13';

require Exporter;
@ISA       = ('Exporter');
@EXPORT    = qw(&plan &ok &skip);
@EXPORT_OK = qw($ntest $TESTOUT);

$TestLevel = 0;            # how many extra stack frames to skip
$| = 1;                    # autoflush so the harness sees results promptly
#$^W=1; ?
$ntest   = 1;              # number of the next test to run
$TESTOUT = *STDOUT{IO};    # filehandle all test output is printed to

# Use of this variable is strongly discouraged.  It is set mainly to
# help test coverage analyzers know which test is running.
$ENV{REGRESSION_TEST} = $0;
# plan(tests => $count [, todo => \@testnums] [, onfail => \&hook])
#
# Declare the test plan.  Must be called exactly once, before any test
# runs, and prints the "1..N" header (plus a todo list, if any) that
# Test::Harness expects.  'failok' is accepted as a synonym for 'todo';
# unknown directives are warned about and skipped.
sub plan {
    croak "Test::plan(%args): odd number of arguments" if @_ & 1;
    croak "Test::plan(): should not be called more than once" if $planned;

    my $max = 0;
    while (@_) {
        # Consume the argument list two at a time as key/value pairs.
        my ($key, $val) = splice @_, 0, 2;
        if ($key =~ /^test(s)?$/) {
            $max = $val;
        }
        elsif ($key eq 'todo' or $key eq 'failok') {
            $todo{$_} = 1 for @$val;
        }
        elsif ($key eq 'onfail') {
            ref $val eq 'CODE' or croak "Test::plan(onfail => $val): must be CODE";
            $ONFAIL = $val;
        }
        else {
            carp "Test::plan(): skipping unrecognized directive '$key'"
        }
    }

    # Numeric sort so the todo list is announced in test order.
    my @todo = sort { $a <=> $b } keys %todo;
    if (@todo) {
        print $TESTOUT "1..$max todo ".join(' ', @todo).";\n";
    }
    else {
        print $TESTOUT "1..$max\n";
    }
    ++$planned;
}
# to_value($v)
#
# Resolve a test argument: a CODE ref is invoked and its return value
# used; anything else is passed through untouched.  This is what lets
# ok()/skip() accept lazily-evaluated arguments.
sub to_value {
    my ($val) = @_;
    if ((ref($val) || '') eq 'CODE') {
        return $val->();
    }
    return $val;
}
# ok($result)                    -- pass if $result is true
# ok($result, $expected)         -- pass if $result matches $expected
# ok($result, $expected, $diag)  -- as above, with an extra diagnostic
#                                   string shown on failure
#
# Either argument may be a CODE ref, in which case to_value() calls it
# and uses its return value.  $expected may be a qr// Regexp object, or
# a string of the form '/re/' or 'm<delim>re<delim>', either of which
# triggers a pattern match instead of a string 'eq' comparison.  Prints
# one "ok N"/"not ok N" line on $TESTOUT, records failures in
# @FAILDETAIL for the optional onfail hook, and returns the boolean
# outcome.
sub ok ($;$$) {
    croak "ok: plan before you test!" if !$planned;

    # Identify the calling test line; $TestLevel lets wrappers such as
    # skip() report their caller's location instead of their own.
    my ($pkg,$file,$line) = caller($TestLevel);
    # Count repeated executions of the same source line (tests in loops)
    # so each failure can be distinguished in the output.
    my $repetition = ++$history{"$file:$line"};
    my $context = ("$file at line $line".
                   ($repetition > 1 ? " fail \#$repetition" : ''));

    my $ok=0;
    my $result = to_value(shift);
    my ($expected,$diag);
    if (@_ == 0) {
        # One-argument form: plain boolean test.
        $ok = $result;
    } else {
        $expected = to_value(shift);
        my ($regex,$ignore);
        if (!defined $expected) {
            # undef is only matched by undef.
            $ok = !defined $result;
        } elsif (!defined $result) {
            # Defined expectation but undefined result: fail.
            $ok = 0;
        } elsif ((ref($expected)||'') eq 'Regexp') {
            # qr// object: pattern match.
            $ok = $result =~ /$expected/;
        } elsif (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
            ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
            # A string that looks like '/re/' or 'm|re|': extract the
            # pattern and match against it.
            $ok = $result =~ /$regex/;
        } else {
            # Everything else: exact string comparison.
            $ok = $result eq $expected;
        }
    }

    my $todo = $todo{$ntest};
    if ($todo and $ok) {
        # A TODO test unexpectedly passed -- flag it loudly so the
        # author promotes it to a real test.
        $context .= ' TODO?!' if $todo;
        print $TESTOUT "ok $ntest # ($context)\n";
    } else {
        print $TESTOUT "not " if !$ok;
        print $TESTOUT "ok $ntest\n";

        if (!$ok) {
            # Record the failure for the onfail hook and emit
            # human-readable diagnostics as harness comment lines.
            my $detail = { 'repetition' => $repetition, 'package' => $pkg,
                           'result' => $result, 'todo' => $todo };
            $$detail{expected} = $expected if defined $expected;
            # The optional diagnostic (third argument) is evaluated
            # only when the test actually fails.
            $diag = $$detail{diagnostic} = to_value(shift) if @_;
            $context .= ' *TODO*' if $todo;
            if (!defined $expected) {
                if (!$diag) {
                    print $TESTOUT "# Failed test $ntest in $context\n";
                } else {
                    print $TESTOUT "# Failed test $ntest in $context: $diag\n";
                }
            } else {
                my $prefix = "Test $ntest";
                print $TESTOUT "# $prefix got: ".
                    (defined $result? "'$result'":'<UNDEF>')." ($context)\n";
                # Indent "Expected:" to line up under "got:" -- strip
                # the 5 characters of "Test " and keep the digits' width.
                $prefix = ' ' x (length($prefix) - 5);
                if ((ref($expected)||'') eq 'Regexp') {
                    $expected = 'qr/'.$expected.'/'
                } else {
                    $expected = "'$expected'";
                }
                if (!$diag) {
                    print $TESTOUT "# $prefix Expected: $expected\n";
                } else {
                    print $TESTOUT "# $prefix Expected: $expected ($diag)\n";
                }
            }
            push @FAILDETAIL, $detail;
        }
    }
    ++ $ntest;
    $ok;
}
# skip($should_skip, @ok_args)
#
# If $should_skip (possibly a CODE ref, resolved via to_value) is true,
# emit a skip line for the current test and return 1; otherwise run the
# remaining arguments through ok() exactly as if ok() had been called
# directly.
sub skip ($$;$$) {
    my $reason = to_value(shift);

    unless ($reason) {
        # Not skipping: run the real test, hiding this stack frame so
        # ok() reports the caller's file and line.
        local($TestLevel) = $TestLevel + 1;
        return &ok;    # forward the remaining @_ unchanged
    }

    # A purely numeric reason (e.g. a bare 1) carries no information;
    # print the generic word instead.
    $reason = 'skip' if $reason =~ m/^\d+$/;
    print $TESTOUT "ok $ntest # $reason\n";
    ++$ntest;
    return 1;
}
# At interpreter shutdown, hand the accumulated failure records to the
# optional onfail hook registered via plan(onfail => ...).  Nothing
# happens unless both a hook and at least one failure exist.
END {
    if (@FAILDETAIL && $ONFAIL) {
        $ONFAIL->(\@FAILDETAIL);
    }
}

1;
__END__

=head1 NAME

Test - provides a simple framework for writing test scripts

=head1 SYNOPSIS

  use strict;
  use Test;

  # use a BEGIN block so we print our plan before MyModule is loaded
  BEGIN { plan tests => 14, todo => [3,4] }

  # load your module...
  use MyModule;

  ok(0); # failure
  ok(1); # success

  ok(0); # ok, expected failure (see todo list, above)
  ok(1); # surprise success!

  ok(0,1);             # failure: '0' ne '1'
  ok('broke','fixed'); # failure: 'broke' ne 'fixed'
  ok('fixed','fixed'); # success: 'fixed' eq 'fixed'
  ok('fixed',qr/x/);   # success: 'fixed' =~ qr/x/

  ok(sub { 1+1 }, 2);  # success: '2' eq '2'
  ok(sub { 1+1 }, 3);  # failure: '2' ne '3'
  ok(0, int(rand(2))); # (just kidding :-)

  my @list = (0,0);
  ok @list, 3, "\@list=".join(',',@list); #extra diagnostics
  ok 'segmentation fault', '/(?i)success/'; #regex match

  skip($feature_is_missing, ...); #do platform specific test

=head1 DESCRIPTION

L<Test::Harness> expects to see particular output when it executes
tests.  This module aims to make writing proper test scripts just a
little bit easier (and less error prone :-).

=head1 TEST TYPES

=over 4

=item * NORMAL TESTS

These tests are expected to succeed.  If they don't, something's
screwed up!

=item * SKIPPED TESTS

Skip is for tests that might or might not be possible to run depending
on the availability of platform specific features.  The first argument
should evaluate to true (think "yes, please skip") if the required
feature is not available.  After the first argument, skip works
exactly the same way as do normal tests.

=item * TODO TESTS

TODO tests are designed for maintaining an B<executable TODO list>.
These tests are expected NOT to succeed.  If a TODO test does succeed,
the feature in question should not be on the TODO list, now should it?

Packages should NOT be released with succeeding TODO tests.  As soon
as a TODO test starts working, it should be promoted to a normal test
and the newly working feature should be documented in the release
notes or change log.

=back

=head1 RETURN VALUE

Both C<ok> and C<skip> return true if their test succeeds and false
otherwise in a scalar context.

=head1 ONFAIL

  BEGIN { plan tests => 4, onfail => sub { warn "CALL 911!" } }

While test failures should be enough, extra diagnostics can be
triggered at the end of a test run.  C<onfail> is passed an array ref
of hash refs that describe each test failure.  Each hash will contain
at least the following fields: C<package>, C<repetition>, and
C<result>.  (The file, line, and test number are not included because
their correspondence to a particular test is tenuous.)  If the test
had an expected value or a diagnostic string, these will also be
included.

The B<optional> C<onfail> hook might be used simply to print out the
version of your package and/or how to report problems.  It might also
be used to generate extremely sophisticated diagnostics for a
particularly bizarre test failure.  However it's not a panacea.  Core
dumps or other unrecoverable errors prevent the C<onfail> hook from
running.  (It is run inside an C<END> block.)  Besides, C<onfail> is
probably over-kill in most cases.  (Your test code should be simpler
than the code it is testing, yes?)

=head1 SEE ALSO

L<Test::Harness> and, perhaps, test coverage analysis tools.

=head1 AUTHOR

Copyright (c) 1998-1999 Joshua Nathaniel Pritikin.  All rights reserved.

This package is free software and is provided "as is" without express
or implied warranty.  It may be used, redistributed and/or modified
under the terms of the Perl Artistic License (see
http://www.perl.com/perl/misc/Artistic.html)

=cut