1
1
{-# language ExtendedDefaultRules, ScopedTypeVariables, QuasiQuotes #-}
2
2
3
3
import Test.Tasty
4
- import Test.Tasty.Options
5
4
import Test.Tasty.Runners
6
- import Test.Tasty.Providers
7
5
import Test.Tasty.HUnit
8
6
import Test.Tasty.ExpectedFailure
9
7
import System.IO.Unsafe
@@ -14,11 +12,16 @@ import Text.RawString.QQ
14
12
import Data.List
15
13
import Data.List.Split
16
14
import Control.Monad
15
+ import Test.Tasty.Golden.Advanced
16
+ import qualified Data.ByteString as BS
17
+ import System.Process
18
+ import System.IO
17
19
18
20
-- * Random values for testing
19
21
20
22
-- | An infinite stream of uniformly-distributed values drawn from a
-- fixed seed, so every test run sees exactly the same data.
uniforms :: (Random a, Num a) => [a]
uniforms = randoms (mkStdGen 42)
24
+
22
25
-- | An infinite stream of uniformly-distributed values in @[lo, hi]@,
-- using the same fixed seed as 'uniforms' for reproducibility.
uniforms' :: Random a => a -> a -> [a]
uniforms' lo hi = randomRs (lo, hi) (mkStdGen 42)
23
26
24
27
-- * Not so random values to enable some fully-reproducible tests
@@ -92,15 +95,50 @@ normals' (mean, sigma) g = map (\x -> x * sigma + mean) $ normals
92
95
93
96
-- * Tests
94
97
95
- main = defaultMain tests
96
-
97
- tests :: TestTree
98
- tests = testGroup " All tests" [basicTests, wrapTest (liftM (\ x -> x { resultOutcome = Success
99
- , resultShortDescription =
100
- case resultOutcome x of
101
- Success -> resultShortDescription x
102
- _ -> " Expected: " ++ resultShortDescription x
103
- })) $ fragileTests, ignoreTest failingTests]
98
-- | Default entry point: run every plot through 'testPlot', which only
-- checks that the plot executes without an error.
main :: IO ()
main = defaultMain $ tests "All tests" testPlot
99
+
100
-- | Alternate entry point: run only the golden tests, comparing each
-- rendered plot against its reference image via 'testPlotGolden'.
main' :: IO ()
main' = defaultMain $ tests "Golden tests" testPlotGolden
101
+
102
-- | Combined entry point: execution tests plus golden-image tests.
-- Golden failures are demoted to annotated successes because rendering
-- is not byte-identical across machines.
main'' :: IO ()
main'' = defaultMain $ testGroup "All tests"
  [ tests "Execution tests" testPlot
  , toneDownTests "Unreliable across machines" $ tests "Golden tests" testPlotGolden
  ]
104
+
105
-- | Build the full test tree, running every plot through the supplied
-- checker @f@ (e.g. 'testPlot' or 'testPlotGolden'). Fragile tests are
-- demoted to annotated successes; known-failing tests are ignored.
tests name f = testGroup name
  [ basicTests f
  , toneDownTests "Can fail with old matplotlib" $ fragileTests f
  , ignoreTest $ failingTests f
  ]
108
+
109
-- | Demote every failure in the given tree to a success, prefixing the
-- original short description with @reason@ so the failure is still
-- visible in the test log without failing the suite.
toneDownTests reason tests =
  wrapTest (liftM (\x ->
    x { resultOutcome = Success
      , resultShortDescription =
          case resultOutcome x of
            Success -> resultShortDescription x
            _       -> reason ++ ": " ++ resultShortDescription x
      })) tests
115
+
116
-- | Golden test for one plot: render it to a temporary file and compare
-- it against the stored reference image in @imgs/@ using ImageMagick's
-- @compare@ with the PSNR metric. Identical images report @inf@ and
-- pass; a PSNR below 30 counts as a mismatch. When tasty is asked to
-- regenerate golden files, the freshly-rendered image is written back
-- to the reference path.
--
-- NOTE(review): assumes ImageMagick is installed at /usr/bin/compare —
-- TODO confirm this holds on all test machines.
testPlotGolden name fn =
  unsafePerformIO $ tmp (\filename ->
    return $ goldenTest
      name
      (BS.readFile ref)
      (file filename fn >> BS.readFile filename)
      (\g n ->
        tmp (\gfile ->
          tmp (\nfile -> do
            BS.writeFile gfile g
            BS.writeFile nfile n
            -- 'compare -metric PSNR' prints the metric on stderr.
            -- (renamed from 'stderr' to avoid shadowing System.IO.stderr)
            (_code, _stdout, errOut) <-
              readProcessWithExitCode "/usr/bin/compare"
                ["-metric", "PSNR", gfile, nfile, "null"] ""
            case (errOut, reads errOut) of
              ("inf", _) -> return Nothing
              (_, [(x :: Double, _)]) ->
                if x < 30
                then return $ Just $ "Images very different; PSNR too low " ++ show x
                else return Nothing
              -- Previously unparseable output crashed with a pattern
              -- match failure; report it as a test failure instead.
              _ -> return $ Just $ "Could not parse compare output: " ++ errOut)))
      (BS.writeFile ref))
  where ref = "imgs/" ++ name ++ ".png"
        -- Allocate a temp file name, close its handle immediately so the
        -- renderer can write to it, and hand the path to the action.
        tmp f = withSystemTempFile "a.png" (\filename h -> hClose h >> f filename)
104
142
105
143
-- | Test one plot; right now we just test that the command executed without
106
144
-- errors. We should visually compare plots somehow.
@@ -114,40 +152,40 @@ testPlot' name fn = testCase name $ tryit fn name @?= Right ""
114
152
print c
115
153
file (" /tmp/imgs/" ++ name ++ " .png" ) fn
116
154
117
-- | Plots expected to work on any matplotlib installation; each entry
-- runs the supplied checker @f@ on a named plot.
basicTests f = testGroup "Basic tests"
  [ f "histogram" m1
  , f "cumulative" m2
  , f "scatter" m3
  , f "contour" m4
  , f "labelled-histogram" m5
  , f "density-bandwidth" m7
  , f "density" m8
  , f "line-function" m9
  , f "quadratic" m10
  , f "projections" m11
  , f "line-options" m12
  , f "corr" mxcorr
  , f "show-matrix" mmat
  , f "legend" mlegend
  , f "hist2DLog" mhist2DLog
  , f "eventplot" meventplot
  , f "errorbar" merrorbar
  , f "scatterhist" mscatterHist
  ]
137
175
138
-- | Plots that depend on the environment (LaTeX, a recent matplotlib);
-- wrapped with 'toneDownTests' by the caller so failures are reported
-- but tolerated.
fragileTests f = testGroup "Fragile tests"
  [ -- TODO Fails on circle ci (with latex)
    f "tex" mtex
    -- TODO Fails on circle ci (labels is not valid; matplotlib too old)
  , f "boxplot" mboxplot
    -- TODO Fails on circle ci (no violin plots; matplotlib too old)
  , f "violinplot" mviolinplot
  ]
146
184
147
-- | Known-broken plots; ignored by the caller until fixed.
failingTests f = testGroup "Failing tests"
  [ -- TODO This test case is broken
    f "sub-bars" m6
  ]
152
190
153
191
-- * These tests are fully-reproducible, the output must be identical every time
0 commit comments