-
Notifications
You must be signed in to change notification settings - Fork 0
/
testsuite.tcl
1181 lines (968 loc) · 32 KB
/
testsuite.tcl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# -*- tcl -*- Copyright (c) 2012-2024 Andreas Kupries
# # ## ### ##### ######## ############# #####################
## Handle a tcltest-based testsuite
namespace eval ::kettle { namespace export testsuite }

# Command line options understood by the testsuite recipes defined
# below. All are simple values or Tcl lists; none affect the build
# artifacts themselves.
kettle option define --constraints {
    Tcl list of constraints to activate.
} {} listsimple
kettle option define --file {
    Tcl list of glob patterns for test files to be run exclusively.
} {} listsimple
kettle option define --limitconstraints {
    Constraint handling. When set run only tests with the active
    constraints (see -constraints).
} 0 boolean
kettle option define --tmatch {
    Tcl list of glob patterns.
    Run only the tests matching at least one of the patterns.
    Default is the * (match all), disabling the filter.
} {} listsimple
kettle option define --notfile {
    Tcl list of glob patterns for test files to be skipped.
} {} listsimple
kettle option define --single {
    Run each test case completely independent.
} 0 boolean
kettle option define --valgrind {
    Run the tests under valgrind
} 0 boolean
kettle option define --tskip {
    Tcl list of glob patterns for tests to be skipped.
} {} listsimple

# The test options do not influence generated artifacts, thus they are
# all excluded from the work-directory key.
kettle option no-work-key --constraints
kettle option no-work-key --file
kettle option no-work-key --limitconstraints
kettle option no-work-key --tmatch
kettle option no-work-key --notfile
kettle option no-work-key --single
kettle option no-work-key --valgrind
kettle option no-work-key --tskip
# # ## ### ##### ######## ############# #####################
## API.
proc ::kettle::testsuite {{testsrcdir tests}} {
    ## API entry point. Locates a tcltest-based testsuite under
    ## `testsrcdir` (default "tests") and registers the recipes
    ## "test", "testcases", and "testcheck" operating on it.
    # Overwrite self, we run only once for effect.
    proc ::kettle::testsuite args {}
    # Heuristic search for testsuite
    # Aborts caller when nothing is found.
    lassign [path scan \
	{tcltest testsuite} \
	$testsrcdir \
	{path tcltest-file}] \
	root testsuite
    # Put the testsuite into recipes. Each recipe builds a local test
    # installation first (Test::SetupAnd), then dispatches to the
    # matching worker (Run/Scan/Check).
    recipe define test {
	Run the testsuite
    } {testsrcdir testsuite} {
	Test::SetupAnd Run $testsrcdir $testsuite
    } $root $testsuite
    recipe define testcases {
	Report the names of all test cases found in the testsuites.
    } {testsrcdir testsuite} {
	Test::SetupAnd Scan $testsrcdir $testsuite
    } $root $testsuite
    recipe define testcheck {
	Report all duplicate test case names.
    } {testsrcdir testsuite} {
	Test::SetupAnd Check $testsrcdir $testsuite
    } $root $testsuite
    return
}
# # ## ### ##### ######## ############# #####################
## Support code for the recipe.
namespace eval ::kettle::Test {
    ## Internal support namespace for the testsuite recipes.
    # Shorthands for the kettle utility commands used below.
    namespace import ::kettle::path
    namespace import ::kettle::io
    namespace import ::kettle::status
    namespace import ::kettle::option
    namespace import ::kettle::strutil
    namespace import ::kettle::stream
    namespace import ::kettle::invoke
    # Map from testsuite states to readable labels. These include
    # trailing whitespace to align the following text vertically.
    variable statelabel {
	ok {OK }
	none {None }
	aborted {Skip }
	error {ERR }
	fail {FAILS}
    }
}
proc ::kettle::Test::SetupAnd {args} {
    ## Build/install the package under test into a local prefix, then
    ## invoke the given worker command (Run/Scan/Check plus arguments)
    ## with that prefix appended as last argument.
    #
    # Note: We build and install the package under test (and its
    # dependencies) into a local directory (in the current working
    # directory). We try to install a debug variant first, and if that
    # fails a regular one.
    #
    # Note 2: If the user explicitly specified a location to build to
    # we use that, and do not clean it up after the test. This makes
    # it easy to investigate a core dump generated during test.
    if {[option userdefined --prefix]} {
	set tmp [option get --prefix]
	set cleanup 0
    } else {
	set tmp [path norm [path tmpfile test_install_]]
	path ensure-cleanup $tmp
	set cleanup 1
    }
    try {
	# Debug build first; regular install as the fallback.
	if {![invoke self debug --prefix $tmp] &&
	    ![invoke self install --prefix $tmp]
	} {
	    status fail "Unable to generate local test installation"
	}
	{*}$args $tmp
    } finally {
	# Only auto-generated prefixes are removed; user-specified
	# ones are kept for post-mortem inspection.
	if {$cleanup} {
	    file delete -force $tmp
	}
    }
    return
}
proc ::kettle::Test::Skip {skiplist testfiles} {
    ## Filter `testfiles`, dropping every file whose tail matches one
    ## of the glob patterns in `skiplist` (see Skip1).
    set kept {}
    foreach candidate $testfiles {
	if {[Skip1 $skiplist $candidate]} continue
	lappend kept $candidate
    }
    return $kept
}
proc ::kettle::Test::Skip1 {skiplist path} {
    ## Test whether the tail of `path` matches any glob pattern in
    ## `skiplist`. Returns 1 on the first match, 0 otherwise.
    set tail [file tail $path]
    foreach glob $skiplist {
	if {![string match $glob $tail]} continue
	return 1
    }
    return 0
}
proc ::kettle::Test::Match {matchlist testfiles} {
    ## Filter `testfiles`, keeping only files whose tail matches at
    ## least one glob pattern in `matchlist` (see Match1).
    set kept {}
    foreach candidate $testfiles {
	if {![Match1 $matchlist $candidate]} continue
	lappend kept $candidate
    }
    return $kept
}
proc ::kettle::Test::Match1 {matchlist path} {
    ## Test whether the tail of `path` matches any glob pattern in
    ## `matchlist`. Returns 1 on the first match, 0 otherwise.
    set tail [file tail $path]
    foreach glob $matchlist {
	if {![string match $glob $tail]} continue
	return 1
    }
    return 0
}
proc ::kettle::Test::Run {srcdir testfiles localprefix} {
    ## Run the selected test files and aggregate/report their results.
    #
    # We are running each test file in a separate sub process, to
    # catch crashes, etc. ... We assume that the test file is self
    # contained in terms of loading all its dependencies, like
    # tcltest itself, utility commands it may need, etc. This
    # assumption allows us to run it directly, using our own
    # tcl executable as interpreter.

    # Translate kettle test options into tcltest options. Note that
    # the file matching options are handled here, and not by tcltest.
    # Further note that the --tmatch default is added here also, if
    # necessary.
    set options {}
    foreach {o v} {
	constraints      constraints
	limitconstraints limitconstraints
	tskip            skip
    } {
	lappend options -$v [option get --$o]
    }
    # BUG FIX: was [option get --match]; no option of that name is
    # defined, the test-name filter option is --tmatch.
    set t [option get --tmatch]  ; if {$t eq {}} { lappend t * }
    set s [option get --notfile] ; if {$s eq {}} { lappend s l.*.test }
    set m [option get --file]    ; if {$m eq {}} { lappend m * }
    # BUG FIX: was "lappend option" (singular), which appended the
    # -match filter to an unused variable and never passed it on.
    lappend options -match $t
    set testfiles [Skip $s [Match $m $testfiles]]
    stream to log ============================================================
    set main [path norm [option get @kettledir]/testmain.tcl]
    InitState
    # Generate map of padded test file names (keyed by both full path
    # and tail) to ensure vertical alignment of output across them.
    set short {}
    foreach t $testfiles {
	lappend short [file tail $t]
    }
    foreach t $testfiles pt [strutil padr $short] {
	dict set state fmap $t $pt
	dict set state fmap [file tail $t] $pt
    }
    path in $srcdir {
	set valgrind [option get --valgrind]
	if {$valgrind} {
	    lappend cmd {*}[auto_execok valgrind]
	}
	lappend cmd [option get --with-shell] $main $localprefix
	if {$valgrind} {
	    lappend options --valgrind
	}
	if {[option get --single]} {
	    # One sub process per test case: scan each file for its
	    # cases first, then run them one by one.
	    dict set state singled 1 ;# Test::Summary
	    foreach test $testfiles {
		set cases [ScanFile $main $localprefix $test]
		# Per file initialization...
		dict set state suite/status ok
		dict set state numcases [llength $cases]
		dict set state xtotal   0
		dict set state xpassed  0
		dict set state xskipped 0
		dict set state xfailed  0
		foreach testcase $cases {
		    dict set state summary 0
		    dict incr state numcases -1
		    stream aopen
		    path pipe line {
			io trace {TEST: $line}
			ProcessLine $line
		    } {*}$cmd $test run {*}$options -match $testcase
		}
	    }
	} else {
	    # One sub process per test file.
	    dict set state singled 0 ;# Test::Summary
	    foreach test $testfiles {
		stream aopen
		# Per file initialization...
		dict set state summary 0
		dict set state suite/status ok
		path pipe line {
		    io trace {TEST: $line}
		    ProcessLine $line
		} {*}$cmd $test run {*}$options
	    }
	}
    }
    # Summary results...
    # ... the numbers
    set fn [dict get $state cfailed]
    set en [dict get $state cerrors]
    set tn [dict get $state ctotal]
    set pn [dict get $state cpassed]
    set sn [dict get $state cskipped]
    # ... formatted (total deliberately unpadded, used in "of $t")
    set t $tn
    set p [format %6d $pn]
    set s [format %6d $sn]
    set f [format %6d $fn]
    set e [format %6d $en]
    # ... and colorized where needed.
    if {$pn} { set p [io mgreen $p] }
    if {$sn} { set s [io mblue $s] }
    if {$fn} { set f [io mred $f] }
    if {$en} { set e [io mmagenta $e] }
    # Show in terminal, always...
    stream term always "\nPassed $p of $t"
    stream term always "Skipped $s of $t"
    stream term always "Failed $f of $t"
    stream term always "#Errors $e"
    # And in the main stream...
    stream to log {Passed $p of $t}
    stream to log {Skipped $s of $t}
    stream to log {Failed $f of $t}
    stream to log {#Errors $e}
    stream to summary {[FormatTimings $state]}
    # Report ok/fail
    status [dict get $state status]
    return
}
proc ::kettle::Test::Scan {srcdir testfiles localprefix} {
    ## Worker for the "testcases" recipe: scan all test files and
    ## report the names of the test cases they declare.
    stream to log ============================================================
    set main [path norm [option get @kettledir]/testmain.tcl]
    # Generate map of padded test file names to ensure vertical
    # alignment of output across them.
    set short {}
    foreach t $testfiles {
	lappend short [file tail $t]
    }
    foreach t $testfiles pt [strutil padr $short] {
	dict set state fmap $t $pt
    }
    dict set state suite/status ok ;# for aclose
    set testcases {}
    path in $srcdir {
	foreach test $testfiles {
	    # Scan the file, then report a per-file case count.
	    set cases [ScanFile $main $localprefix $test]
	    dict set state file $test ;# for aclose
	    set msg "~~ [llength $cases]"
	    set test [dict get $state fmap $test]
	    stream aopen
	    stream aextend "$test "
	    stream aclose $msg
	    stream to log {$test $msg}
	    lappend testcases {*}$cases
	}
    }
    set tn [llength $testcases]
    stream to log {\#Testcases $tn}
    # Report the found tests.
    set testcases [join [lsort -dict $testcases] \n]
    # Terminal output only when no stream sinks are active.
    if {![stream active]} {
	stream term always \n$testcases
    }
    stream to log {$testcases}
    stream to testcases {$testcases}
    status ok
    return
}
proc ::kettle::Test::Check {srcdir testfiles localprefix} {
    ## Worker for the "testcheck" recipe: scan all test files and
    ## report test case names occurring in more than one file.
    stream to log ============================================================
    set main [path norm [option get @kettledir]/testmain.tcl]
    # Generate map of padded test file names to ensure vertical
    # alignment of output across them.
    set short {}
    foreach t $testfiles {
	lappend short [file tail $t]
    }
    foreach t $testfiles pt [strutil padr $short] {
	dict set state fmap $t $pt
    }
    dict set state suite/status ok ;# for aclose
    set testcases {}
    path in $srcdir {
	foreach test $testfiles {
	    set cases [ScanFile $main $localprefix $test]
	    dict set state file $test ;# for aclose
	    set msg "~~ [llength $cases]"
	    set test [dict get $state fmap $test]
	    stream aopen
	    stream aextend "$test "
	    stream aclose $msg
	    stream to log {$test $msg}
	    # Record, per case name, all files declaring it.
	    foreach c $cases {
		dict lappend testcases $c $test
	    }
	}
    }
    # Drop unique names, compress files recorded for duplicates
    dict for {c files} $testcases {
	if {[llength $files] < 2} {
	    dict unset testcases $c
	} else {
	    dict set testcases $c [lsort -unique $files]
	}
    }
    # Show the duplicates
    if {![stream active]} {
	stream term always "Duplicates: [dict size $testcases]"
    }
    stream to log {Duplicates: [dict size $testcases]}
    if {[dict size $testcases]} {
	dict for {c files} $testcases {
	    if {![stream active]} { stream term always ${c}: }
	    stream to duplicates {$c}
	    stream to dupmap {$c}
	    foreach f $files {
		if {![stream active]} { stream term always "\t$f" }
		stream to dupmap { $f}
	    }
	}
    }
    status ok
    return
}
proc ::kettle::Test::ScanFile {main localprefix testfile} {
    ## Run `testfile` in "scan" mode through the test-main helper and
    ## collect the declared test case names from its output.
    set tests {}
    path pipe line {
	set line [string trimright $line]
	io trace {TEST: $line}
	# Declarations are reported as "---- <name> DECL"; strip the
	# 5-character prefix and suffix to recover the name.
	if {![string match {---- * DECL} $line]} continue
	set testname [string range $line 5 end-5]
	lappend tests $testname
    } [option get --with-shell] $main $localprefix $testfile scan
    return $tests
}
proc ::kettle::Test::FormatTimings {state} {
    ## Format the collected per-testsuite timing data as an aligned
    ## table (shell, suite, #tests, seconds, usec/test), slowest first.
    # Extract data ...
    set times [dict get $state times]
    # Sort by shell and testsuite, re-package into tuples.
    # NOTE(review): End fills `times` via dict lappend, so a key run
    # twice stores multiple tuples while this lassign reads the value
    # as a single tuple -- confirm keys cannot repeat.
    set tmp {}
    foreach k [lsort -dict [dict keys $times]] {
	lassign $k shell suite
	lassign [dict get $times $k] ntests sec usec
	lappend tmp [list $shell $suite $ntests $sec $usec]
    }
    # Sort tuples by time per test (longest taken at the top), and
    # transpose into columns. Add the header and footer lines.
    lappend sh Shell =====
    lappend ts Testsuite =========
    lappend nt Tests =====
    lappend ns Seconds =======
    lappend us uSec/Test =========
    foreach item [lsort -index 4 -integer -decreasing $tmp] {
	lassign $item shell suite ntests sec usec
	lappend sh $shell
	lappend ts $suite
	lappend nt $ntests
	lappend ns $sec
	lappend us $usec
    }
    lappend sh =====
    lappend ts =========
    lappend nt =====
    lappend ns =======
    lappend us =========
    # Print the columns, each padded for vertical alignment.
    lappend lines \nTimings...
    foreach \
	shell [strutil padr $sh] \
	suite [strutil padr $ts] \
	ntests [strutil padr $nt] \
	sec [strutil padr $ns] \
	usec [strutil padr $us] {
	lappend lines "$shell $suite $ntests $sec $usec"
    }
    return [join $lines \n]
}
proc ::kettle::Test::ProcessLine {line} {
    ## Central dispatcher for one line of sub-process output. Handlers
    ## below use "return -code return" to stop processing once a line
    ## is recognized; their ORDER is significant.
    stream to rawlog {[string range $line 0 end-1]}
    # Counters and other state in the calling environment.
    upvar 1 state state
    # Capture of test failure in progress.
    # Take all lines, unprocessed.
    CaptureFailureSync ; # cap/state: sync => body
    CaptureFailureCollectBody ; # cap/state: body => actual|error|setup|cleanup|normal
    CaptureFailureCollectSetup ; # cap/state: setup => none
    CaptureFailureCollectCleanup ; # cap/state: cleanup => none
    CaptureFailureCollectActual ; # cap/state: actual => expected
    CaptureFailureCollectExpected ; # cap/state: expected => none
    CaptureFailureCollectError ; # cap/state: error => expected
    CaptureFailureCollectNormal ; # cap/state: normal => none
    # Capture of Tcl stack trace in progress.
    # Take all lines, unprocessed.
    CaptureStack
    # Start processing the input line for easier matching, and to
    # reduce the log.
    set line [string trimright $line]
    stream term full $line
    stream to log {$line}
    set line [string trim $line]
    if {[string equal $line ""]} return
    # Recognize various parts written by the sub-shell and act on
    # them. If a line is recognized and acted upon the remaining
    # matchers are _not_ executed.
    Host;Platform;Cwd;Shell;Tcl
    Start;End
    Testsuite;NoTestsuite
    Support;Testing
    Summary
    TestStart;TestSkipped;TestPassed
    TestFailed ; # cap/state => sync, see CaptureFailure* above
    CaptureStackStart ; # cap/stack => on, see CaptureStack above
    Aborted
    AbortCause
    Match||Skip||Sourced
    # Unknown lines are simply shown (disturbing the animation, good
    # for this situation, actually), also saved for review.
    stream term compact !$line
    stream to unprocessed {$line}
    return
}
# # ## ### ##### ######## ############# #####################
proc ::kettle::Test::InitState {} {
    ## (Re)initialize the dict variable `state` in the caller's scope.
    ## The counters are all updated in ProcessLine, and the status may
    ## change to 'fail' there as well.
    upvar 1 state state
    set state [dict create \
	ctotal   0 \
	cpassed  0 \
	cskipped 0 \
	cfailed  0 \
	cerrors  0 \
	status   ok \
	host     {} \
	platform {} \
	cwd      {} \
	shell    {} \
	file     {} \
	test     {} \
	start    {} \
	times    {} \
	suite/status ok \
	cap/state    none \
	cap/stack    off]
    return
}
proc ::kettle::Test::Host {} {
    ## Recognize the "@@ Host ..." marker emitted by the test sub
    ## process and record the host name in the state.
    upvar 1 line line state state
    if {[regexp {^@@ Host (.*)$} $line -> hostname]} {
	dict set state host $hostname
	# FUTURE: Write tests results to a storage back end for analysis.
	return -code return
    }
    return
}
proc ::kettle::Test::Platform {} {
    ## Recognize the "@@ Platform ..." marker and record the platform
    ## identifier of the test sub process.
    upvar 1 line line state state
    if {[regexp {^@@ Platform (.*)$} $line -> platformid]} {
	dict set state platform $platformid
	return -code return
    }
    return
}
proc ::kettle::Test::Cwd {} {
    ## Recognize the "@@ TestCWD ..." marker and record the working
    ## directory of the test sub process.
    upvar 1 line line state state
    if {[regexp {^@@ TestCWD (.*)$} $line -> directory]} {
	dict set state cwd $directory
	return -code return
    }
    return
}
proc ::kettle::Test::Shell {} {
    ## Recognize the "@@ Shell ..." marker and record the path of the
    ## shell running the tests.
    upvar 1 line line state state
    if {[regexp {^@@ Shell (.*)$} $line -> shellpath]} {
	dict set state shell $shellpath
	return -code return
    }
    return
}
proc ::kettle::Test::Tcl {} {
    ## Recognize the "@@ Tcl ..." marker, record the Tcl version of the
    ## test shell, and show it in the animated status line.
    upvar 1 line line state state
    if {[regexp {^@@ Tcl (.*)$} $line -> tclversion]} {
	dict set state tcl $tclversion
	stream aextend "\[$tclversion\] "
	return -code return
    }
    return
}
proc ::kettle::Test::Match||Skip||Sourced {} {
    ## Swallow assorted informational/noise lines from tcltest and the
    ## test-main helper which need no processing of their own.
    upvar 1 line line state state
    foreach pattern [list \
	{@@ TestDir*} \
	{@@ LocalDir*} \
	{@@ Skip*} \
	{@@ Match*} \
	{Sourced * Test Files.} \
	{Files with failing tests*} \
	{Number of tests skipped*} \
	{[0-9]*} \
	{*error: test failed:*}] {
	if {[string match $pattern $line]} { return -code return }
    }
    return
}
proc ::kettle::Test::Start {} {
    ## Recognize the "@@ Start <epoch>" marker. Records the start time
    ## and resets the per-file counters used by End to fake reasonable
    ## summary information if the test file itself does not provide it.
    upvar 1 line line state state
    if {![regexp {^@@ Start (.*)$} $line -> begin]} return
    dict set state start $begin
    foreach counter {testnum testskip testpass testfail} {
	dict set state $counter 0
    }
    return -code return
}
proc ::kettle::Test::End {} {
    ## Recognize the "@@ End <epoch>" marker. Computes per-file timing
    ## data, and if the file never printed a tcltest summary, fakes one
    ## by rewriting the caller's `line` so Summary still processes it.
    upvar 1 line line state state
    if {![regexp "^@@ End (.*)$" $line -> end]} return
    set start [dict get $state start]
    set shell [dict get $state shell]
    set file [dict get $state file]
    set num [dict get $state testnum]
    set delta [expr {$end - $start}]
    if {$num == 0} {
	# No tests ran; use the raw elapsed time as the score.
	set score $delta
    } else {
	# Get average number of microseconds per test.
	set score [expr {int(($delta/double($num))*1000000)}]
    }
    set key [list $shell $file]
    # NOTE(review): dict lappend stores a tuple per run under the key,
    # while FormatTimings lassigns the value as a single tuple --
    # confirm a (shell,file) key cannot occur twice.
    dict lappend state times $key [list $num $delta $score]
    stream to timings {[list TIME $key $num $delta $score]}
    if {![dict get $state summary]} {
	# We have to fake a summary, as the test file did not
	# generate one. We use our own per-file counters to make a
	# reasonable guess of the values. The code below works
	# because the Summary processing in the caller, ProcessLine,
	# is done after this procedure. We manipulate the current line
	# and then proceed as if we had not captured the current line,
	# letting the Summary processing capture it.
	set t [dict get $state testnum]
	set s [dict get $state testskip]
	set p [dict get $state testpass]
	set f [dict get $state testfail]
	set line "Total $t Passed $p Skipped $s Failed $f"
	return
    }
    return -code return
}
proc ::kettle::Test::Testsuite {} {
    ## Recognize the "@@ Testsuite <path>" marker naming the test file
    ## now executing. Records the file and extends the status line with
    ## its padded short name.
    # Removed the unused "variable xfile" declaration (never read).
    upvar 1 line line state state
    if {![regexp "^@@ Testsuite (.*)$" $line -> file]} return
    dict set state file $file
    # map from full path to short, and padded for alignment.
    set padded [dict get $state fmap $file]
    stream aextend "$padded "
    return -code return
}
proc ::kettle::Test::NoTestsuite {} {
    ## Recognize tcltest's complaint that no test files remain after
    ## filtering; mark the suite as empty and close the status line.
    upvar 1 line line state state
    if {[string match "Error: No test files remain after*" $line]} {
	dict set state suite/status none
	stream aclose {No tests}
	return -code return
    }
    return
}
proc ::kettle::Test::Support {} {
    ## Swallow "SYSTEM - <pkg>" / "LOCAL - <pkg>" lines reporting the
    ## support packages loaded by the testsuite.
    upvar 1 line line state state
    foreach re {{^SYSTEM - (.*)$} {^LOCAL - (.*)$}} {
	if {[regexp $re $line -> package]} { return -code return }
    }
    return
}
proc ::kettle::Test::Testing {} {
    ## Swallow "SYSTEM % <pkg>" / "LOCAL % <pkg>" lines reporting the
    ## packages under test loaded by the testsuite.
    upvar 1 line line state state
    foreach re {{^SYSTEM % (.*)$} {^LOCAL % (.*)$}} {
	if {[regexp $re $line -> package]} { return -code return }
    }
    return
}
proc ::kettle::Test::Summary {} {
    ## Recognize and process the tcltest per-file summary line
    ## "Total N Passed N Skipped N Failed N": fold it into the global
    ## counters and close the animated status line with a colored label.
    upvar 1 line line state state
    variable statelabel
    # Keep only the counter part of the line.
    if {![regexp "(Total(.*)Passed(.*)Skipped(.*)Failed(.*))$" $line -> line]} return
    lassign [string trim $line] _ total _ passed _ skipped _ failed
    dict set state summary 1
    if {[dict get $state singled]} {
	# --single mode: each sub process ran exactly one case. Fold
	# its result into the per-file x* counters and defer output
	# until the file's last case has been seen.
	set skipped 0
	if {!$passed && !$failed} { set skipped 1 }
	set total 1
	dict incr state xtotal $total
	dict incr state xpassed $passed
	dict incr state xskipped $skipped
	dict incr state xfailed $failed
	set last [expr {[dict get $state numcases] == 0}]
	if {!$last} {
	    return -code return
	}
	set total [dict get $state xtotal]
	set passed [dict get $state xpassed]
	set skipped [dict get $state xskipped]
	set failed [dict get $state xfailed]
    }
    # Aggregate over all files.
    dict incr state ctotal $total
    dict incr state cpassed $passed
    dict incr state cskipped $skipped
    dict incr state cfailed $failed
    # Format for aligned display.
    set total [format %5d $total]
    set passed [format %5d $passed]
    set skipped [format %5d $skipped]
    set failed [format %5d $failed]
    set thestate [dict get $state suite/status]
    if {!$total && ($thestate eq "ok")} {
	# Nothing ran; report the file as empty instead of ok.
	dict set state suite/status none
	set thestate none
    }
    set st [dict get $statelabel $thestate]
    if {$thestate eq "ok"} {
	# Quick return for ok suite.
	stream aclose "~~ [io mgreen $st] T $total P $passed S $skipped F $failed"
	return -code return
    }
    # Clean out progress display using a non-highlighted string.
    # Prevents the char count from being off. This is followed by
    # construction and display of the highlighted version.
    switch -exact -- $thestate {
	none { stream aclose "~~ [io myellow "$st T $total"] P $passed S $skipped F $failed" }
	aborted { stream aclose "~~ [io mwhite $st] T $total P $passed S $skipped F $failed" }
	error { stream aclose "~~ [io mmagenta $st] T $total P $passed S $skipped F $failed" }
	fail { stream aclose "~~ [io mred $st] T $total P $passed S $skipped [io mred "F $failed"]" }
    }
    if {$thestate eq "error"} { dict incr state cerrors }
    return -code return
}
proc ::kettle::Test::TestStart {} {
    ## Recognize "---- <name> start": remember the running test case
    ## and count it.
    upvar 1 line line state state
    if {![string match {---- * start} $line]} return
    # Strip the "---- " prefix and " start" suffix.
    set name [string range $line 5 end-6]
    stream awrite "---- $name"
    dict set state test $name
    dict incr state testnum
    return -code return
}
proc ::kettle::Test::TestSkipped {} {
    ## Recognize "++++ <name> SKIPPED: ...": count the skip and clear
    ## the current test case.
    upvar 1 line line state state
    if {![string match {++++ * SKIPPED:*} $line]} return
    # Pull the test name out from between marker and reason.
    regexp {^[^ ]* (.*)SKIPPED:.*$} $line -> name
    set name [string trim $name]
    stream awrite "SKIP $name"
    dict set state test {}
    dict incr state testskip
    return -code return
}
proc ::kettle::Test::TestPassed {} {
    ## Recognize "++++ <name> PASSED": count the pass and clear the
    ## current test case.
    upvar 1 line line state state
    if {![string match {++++ * PASSED} $line]} return
    # Strip the "++++ " prefix and " PASSED" suffix.
    set name [string range $line 5 end-7]
    stream awrite "PASS $name"
    dict set state test {}
    dict incr state testpass
    return -code return
}
proc ::kettle::Test::TestFailed {} {
    ## Recognize "==== <name> FAILED": mark the suite as failed, count
    ## the failure, and start capturing the failure report
    ## (cap/state => sync, see the CaptureFailure* handlers).
    upvar 1 line line state state
    if {![string match {==== * FAILED} $line]} return
    set testname [lindex [split [string range $line 5 end-7]] 0]
    stream awrite "FAIL $testname"
    dict set state suite/status fail
    dict incr state testfail
    if {![dict exists $state test] ||
	([dict get $state test] eq {})} {
	# Required for tests which fail during -setup. These are not
	# reported as started, and TestStart above is never run for
	# them.
	dict set state test $testname
    }
    CaptureInit
    return -code return
}
proc ::kettle::Test::CaptureFailureSync {} {
    ## cap/state: sync => body. Wait for the "==== Contents" line which
    ## opens the body section of a failed test's report.
    upvar 1 state state
    if {[dict get $state cap/state] ne "sync"} return
    upvar 1 line line
    if {![string match {==== Contents*} $line]} return
    CaptureNext body
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectBody {} {
    ## cap/state: body => actual|error|setup|cleanup|normal. Collect
    ## the failed test's body until one of the result-section markers
    ## is seen, then switch to the matching capture state.
    upvar 1 state state
    if {[dict get $state cap/state] ne "body"} return
    upvar 1 line line
    if {[string match {---- Result was*} $line]} {
	CaptureNext actual
	return -code return
    } elseif {[string match {---- Test setup failed:*} $line]} {
	CaptureNext setup
	return -code return
    } elseif {[string match {---- Test cleanup failed:*} $line]} {
	CaptureNext cleanup
	return -code return
    } elseif {[string match {---- Test generated error*} $line]} {
	CaptureNext error
	return -code return
    } elseif {[string match {---- Test completed normally*} $line]} {
	CaptureNext normal
	return -code return
    }
    # Still inside the body; record non-blank lines.
    if {[string trim $line] ne {}} {
	dict update state cap c {
	    dict append c body $line
	}
    }
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectSetup {} {
    ## cap/state: setup => none. Collect the setup-failure text until
    ## the next "==== " line closes the report.
    upvar 1 state state
    if {[dict get $state cap/state] ne "setup"} return
    upvar 1 line line
    if {![string match {==== *} $line]} {
	dict update state cap c {
	    dict append c setup $line
	}
	return -code return
    }
    CaptureStop
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectCleanup {} {
    ## cap/state: cleanup => none. Collect the cleanup-failure text
    ## until the next "==== " line closes the report.
    upvar 1 state state
    if {[dict get $state cap/state] ne "cleanup"} return
    upvar 1 line line
    if {![string match {==== *} $line]} {
	dict update state cap c {
	    dict append c cleanup $line
	}
	return -code return
    }
    CaptureStop
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectActual {} {
    ## cap/state: actual => expected. Collect the actual-result text
    ## until "---- Result should" starts the expected-result section.
    upvar 1 state state
    if {[dict get $state cap/state] ne "actual"} return
    upvar 1 line line
    if {[string match {---- Result should*} $line]} {
	CaptureNext expected
	return -code return
    }
    dict update state cap c {
	dict append c actual $line
    }
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectExpected {} {
    ## cap/state: expected => none. Collect the expected-result text
    ## until the next "==== " line closes the report.
    upvar 1 state state
    if {[dict get $state cap/state] ne "expected"} return
    upvar 1 line line
    if {![string match {==== *} $line]} {
	dict update state cap c {
	    dict append c expected $line
	}
	return -code return
    }
    CaptureStop
    return -code return
}
proc ::kettle::Test::CaptureFailureCollectNormal {} {
upvar 1 state state
if {[dict get $state cap/state] ne "normal"} return