diff --git a/test/Project.toml b/test/Project.toml
index 6f40ee61a..657d4d390 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -53,7 +53,7 @@ LinearAlgebra = "1"
 LogDensityProblems = "2"
 LogDensityProblemsAD = "1.4"
 MCMCChains = "5, 6"
-Mooncake = "0.4.19"
+Mooncake = "0.4.61"
 NamedArrays = "0.9.4, 0.10"
 Optim = "1"
 Optimization = "3, 4"
diff --git a/test/mcmc/Inference.jl b/test/mcmc/Inference.jl
index b98f1daf2..87779d0f0 100644
--- a/test/mcmc/Inference.jl
+++ b/test/mcmc/Inference.jl
@@ -12,11 +12,15 @@ using LinearAlgebra: I
 import MCMCChains
 import Random
 import ReverseDiff
+using StableRNGs: StableRNG
 import Mooncake
 using Test: @test, @test_throws, @testset
 using Turing

 @testset "Testing inference.jl with $adbackend" for adbackend in ADUtils.adbackends
+    @info "Starting Inference.jl tests with $adbackend"
+    seed = 23
+
     @testset "threaded sampling" begin
         # Test that chains with the same seed will sample identically.
         @testset "rng" begin
@@ -32,10 +36,10 @@
             )
             for sampler in samplers
                 Random.seed!(5)
-                chain1 = sample(model, sampler, MCMCThreads(), 1000, 4)
+                chain1 = sample(model, sampler, MCMCThreads(), 10, 4)
                 Random.seed!(5)
-                chain2 = sample(model, sampler, MCMCThreads(), 1000, 4)
+                chain2 = sample(model, sampler, MCMCThreads(), 10, 4)
                 @test chain1.value == chain2.value
             end
@@ -45,55 +49,76 @@
             rng = Random.MersenneTwister(seed)
             for sampler in samplers
                 Random.seed!(rng, seed)
-                chain1 = sample(rng, model, sampler, MCMCThreads(), 1000, 4)
+                chain1 = sample(rng, model, sampler, MCMCThreads(), 10, 4)
                 Random.seed!(rng, seed)
-                chain2 = sample(rng, model, sampler, MCMCThreads(), 1000, 4)
+                chain2 = sample(rng, model, sampler, MCMCThreads(), 10, 4)
                 @test chain1.value == chain2.value
             end
         end
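The two `rng` testsets above pin down the invariant the rest of this file leans on: reseeding (either the task-local RNG or an explicit `rng` object) and re-running `sample` with `MCMCThreads()` must reproduce the chains exactly. A minimal self-contained sketch of that invariant follows; the `coinflip` model and the draw counts are illustrative, not taken from the test file:

    using Random, Test, Turing

    @model function coinflip(y)
        p ~ Beta(1, 1)
        for i in eachindex(y)
            y[i] ~ Bernoulli(p)
        end
    end

    Random.seed!(5)
    c1 = sample(coinflip([1, 0, 1]), MH(), MCMCThreads(), 10, 4)
    Random.seed!(5)
    c2 = sample(coinflip([1, 0, 1]), MH(), MCMCThreads(), 10, 4)
    @test c1.value == c2.value  # identical draws in all four threaded chains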
         # Smoke test for default sample call.
-        Random.seed!(100)
-        chain = sample(gdemo_default, HMC(0.1, 7; adtype=adbackend), MCMCThreads(), 1000, 4)
-        check_gdemo(chain)
-
-        # run sampler: progress logging should be disabled and
-        # it should return a Chains object
-        sampler = Sampler(HMC(0.1, 7; adtype=adbackend), gdemo_default)
-        chains = sample(gdemo_default, sampler, MCMCThreads(), 1000, 4)
-        @test chains isa MCMCChains.Chains
+        @testset "gdemo_default" begin
+            chain = sample(
+                StableRNG(seed),
+                gdemo_default,
+                HMC(0.1, 7; adtype=adbackend),
+                MCMCThreads(),
+                1_000,
+                4,
+            )
+            check_gdemo(chain)
+
+            # run sampler: progress logging should be disabled and
+            # it should return a Chains object
+            sampler = Sampler(HMC(0.1, 7; adtype=adbackend), gdemo_default)
+            chains = sample(StableRNG(seed), gdemo_default, sampler, MCMCThreads(), 10, 4)
+            @test chains isa MCMCChains.Chains
+        end
     end

     @testset "chain save/resume" begin
-        Random.seed!(1234)
-
         alg1 = HMCDA(1000, 0.65, 0.15; adtype=adbackend)
         alg2 = PG(20)
         alg3 = Gibbs(PG(30, :s), HMC(0.2, 4, :m; adtype=adbackend))

-        chn1 = sample(gdemo_default, alg1, 5000; save_state=true)
+        chn1 = sample(StableRNG(seed), gdemo_default, alg1, 2_000; save_state=true)
         check_gdemo(chn1)

-        chn1_contd = sample(gdemo_default, alg1, 5000; resume_from=chn1)
+        chn1_contd = sample(StableRNG(seed), gdemo_default, alg1, 2_000; resume_from=chn1)
         check_gdemo(chn1_contd)

-        chn1_contd2 = sample(gdemo_default, alg1, 5000; resume_from=chn1)
+        chn1_contd2 = sample(StableRNG(seed), gdemo_default, alg1, 2_000; resume_from=chn1)
         check_gdemo(chn1_contd2)

-        chn2 = sample(gdemo_default, alg2, 5000; discard_initial=2000, save_state=true)
+        chn2 = sample(
+            StableRNG(seed),
+            gdemo_default,
+            alg2,
+            2_000;
+            discard_initial=100,
+            save_state=true,
+        )
         check_gdemo(chn2)

-        chn2_contd = sample(gdemo_default, alg2, 2000; resume_from=chn2)
+        chn2_contd = sample(StableRNG(seed), gdemo_default, alg2, 2_000; resume_from=chn2)
         check_gdemo(chn2_contd)

-        chn3 = sample(gdemo_default, alg3, 5_000; discard_initial=2000, save_state=true)
+        chn3 = sample(
+            StableRNG(seed),
+            gdemo_default,
+            alg3,
+            2_000;
+            discard_initial=100,
+            save_state=true,
+        )
         check_gdemo(chn3)

-        chn3_contd = sample(gdemo_default, alg3, 5_000; resume_from=chn3)
+        chn3_contd = sample(StableRNG(seed), gdemo_default, alg3, 2_000; resume_from=chn3)
         check_gdemo(chn3_contd)
     end
+
     @testset "Contexts" begin
         # Test LikelihoodContext
         @model function testmodel1(x)
@@ -123,32 +148,38 @@
         )
         @test isapprox(getlogp(varinfo2) / getlogp(varinfo1), 10)
     end
+
     @testset "Prior" begin
         N = 10_000

         # Note that all chains contain 3 values per sample: 2 variables + log probability
-        Random.seed!(100)
-        chains = sample(gdemo_d(), Prior(), N)
-        @test chains isa MCMCChains.Chains
-        @test size(chains) == (N, 3, 1)
-        @test mean(chains, :s) ≈ 3 atol = 0.1
-        @test mean(chains, :m) ≈ 0 atol = 0.1
-
-        Random.seed!(100)
-        chains = sample(gdemo_d(), Prior(), MCMCThreads(), N, 4)
-        @test chains isa MCMCChains.Chains
-        @test size(chains) == (N, 3, 4)
-        @test mean(chains, :s) ≈ 3 atol = 0.1
-        @test mean(chains, :m) ≈ 0 atol = 0.1
-
-        Random.seed!(100)
-        chains = sample(gdemo_d(), Prior(), N; chain_type=Vector{NamedTuple})
-        @test chains isa Vector{<:NamedTuple}
-        @test length(chains) == N
-        @test all(length(x) == 3 for x in chains)
-        @test all(haskey(x, :lp) for x in chains)
-        @test mean(x[:s][1] for x in chains) ≈ 3 atol = 0.1
-        @test mean(x[:m][1] for x in chains) ≈ 0 atol = 0.1
+        @testset "Single-threaded vanilla" begin
+            chains = sample(StableRNG(seed), gdemo_d(), Prior(), N)
+            @test chains isa MCMCChains.Chains
+            @test size(chains) == (N, 3, 1)
+            @test mean(chains, :s) ≈ 3 atol = 0.11
+            @test mean(chains, :m) ≈ 0 atol = 0.1
+        end
+
+        @testset "Multi-threaded" begin
+            chains = sample(StableRNG(seed), gdemo_d(), Prior(), MCMCThreads(), N, 4)
+            @test chains isa MCMCChains.Chains
+            @test size(chains) == (N, 3, 4)
+            @test mean(chains, :s) ≈ 3 atol = 0.11
+            @test mean(chains, :m) ≈ 0 atol = 0.1
+        end
+
+        @testset "Vector chain_type" begin
+            chains = sample(
+                StableRNG(seed), gdemo_d(), Prior(), N; chain_type=Vector{NamedTuple}
+            )
+            @test chains isa Vector{<:NamedTuple}
+            @test length(chains) == N
+            @test all(length(x) == 3 for x in chains)
+            @test all(haskey(x, :lp) for x in chains)
+            @test mean(x[:s][1] for x in chains) ≈ 3 atol = 0.11
+            @test mean(x[:m][1] for x in chains) ≈ 0 atol = 0.1
+        end

         @testset "#2169" begin
             # Not exactly the same as the issue, but similar.
@@ -161,27 +192,31 @@
             end

             model = issue2169_model()
-            chain = sample(model, Prior(), 10)
+            chain = sample(StableRNG(seed), model, Prior(), 10)
             @test all(mean(chain[:x]) .< 5)
         end
     end

     @testset "chain ordering" begin
         for alg in (Prior(), Emcee(10, 2.0))
-            chain_sorted = sample(gdemo_default, alg, 1; sort_chain=true)
+            chain_sorted = sample(StableRNG(seed), gdemo_default, alg, 1; sort_chain=true)
             @test names(MCMCChains.get_sections(chain_sorted, :parameters)) == [:m, :s]

-            chain_unsorted = sample(gdemo_default, alg, 1; sort_chain=false)
+            chain_unsorted = sample(
+                StableRNG(seed), gdemo_default, alg, 1; sort_chain=false
+            )
             @test names(MCMCChains.get_sections(chain_unsorted, :parameters)) == [:s, :m]
         end
     end

     @testset "chain iteration numbers" begin
         for alg in (Prior(), Emcee(10, 2.0))
-            chain = sample(gdemo_default, alg, 10)
+            chain = sample(StableRNG(seed), gdemo_default, alg, 10)
             @test range(chain) == 1:10

-            chain = sample(gdemo_default, alg, 10; discard_initial=5, thinning=2)
+            chain = sample(
+                StableRNG(seed), gdemo_default, alg, 10; discard_initial=5, thinning=2
+            )
             @test range(chain) == range(6; step=2, length=10)
         end
     end
@@ -197,8 +232,8 @@
         smc = SMC()
         pg = PG(10)

-        res1 = sample(test_assume(), smc, 1000)
-        res2 = sample(test_assume(), pg, 1000)
+        res1 = sample(StableRNG(seed), test_assume(), smc, 1_000)
+        res2 = sample(StableRNG(seed), test_assume(), pg, 1_000)

         check_numerical(res1, [:y], [0.5]; atol=0.1)
         check_numerical(res2, [:y], [0.5]; atol=0.1)
@@ -207,6 +242,7 @@
         @test all(isone, res1[:x])
         @test all(isone, res2[:x])
     end
+
     @testset "beta binomial" begin
         prior = Beta(2, 2)
         obs = [0, 1, 0, 1, 1, 1, 1, 1, 1, 1]
@@ -226,14 +262,15 @@
         pg = PG(10)
         gibbs = Gibbs(HMC(0.2, 3, :p; adtype=adbackend), PG(10, :x))

-        chn_s = sample(testbb(obs), smc, 1000)
-        chn_p = sample(testbb(obs), pg, 2000)
-        chn_g = sample(testbb(obs), gibbs, 1500)
+        chn_s = sample(StableRNG(seed), testbb(obs), smc, 200)
+        chn_p = sample(StableRNG(seed), testbb(obs), pg, 200)
+        chn_g = sample(StableRNG(seed), testbb(obs), gibbs, 200)

         check_numerical(chn_s, [:p], [meanp]; atol=0.05)
         check_numerical(chn_p, [:x], [meanp]; atol=0.1)
         check_numerical(chn_g, [:x], [meanp]; atol=0.1)
     end
+
     @testset "forbid global" begin
         xs = [1.5 2.0]
         # xx = 1
@@ -252,8 +289,9 @@
         end

         gibbs = Gibbs(PG(10, :s), HMC(0.4, 8, :m; adtype=adbackend))
-        chain = sample(fggibbstest(xs), gibbs, 2)
+        chain = sample(StableRNG(seed), fggibbstest(xs), gibbs, 2)
     end
+
     @testset "new grammar" begin
         x = Float64[1 2]
@@ -267,8 +305,8 @@
             return priors
         end

-        chain = sample(gauss(x), PG(10), 10)
-        chain = sample(gauss(x), SMC(), 10)
+        chain = sample(StableRNG(seed), gauss(x), PG(10), 10)
+        chain = sample(StableRNG(seed), gauss(x), SMC(), 10)

         @model function gauss2(::Type{TV}=Vector{Float64}; x) where {TV}
             priors = TV(undef, 2)
@@ -280,14 +318,18 @@
             return priors
         end

-        @test_throws ErrorException chain = sample(gauss2(; x=x), PG(10), 10)
-        @test_throws ErrorException chain = sample(gauss2(; x=x), SMC(), 10)
+        @test_throws ErrorException chain = sample(
+            StableRNG(seed), gauss2(; x=x), PG(10), 10
+        )
+        @test_throws ErrorException chain = sample(
+            StableRNG(seed), gauss2(; x=x), SMC(), 10
+        )

         @test_throws ErrorException chain = sample(
-            gauss2(DynamicPPL.TypeWrap{Vector{Float64}}(); x=x), PG(10), 10
+            StableRNG(seed), gauss2(DynamicPPL.TypeWrap{Vector{Float64}}(); x=x), PG(10), 10
         )
         @test_throws ErrorException chain = sample(
-            gauss2(DynamicPPL.TypeWrap{Vector{Float64}}(); x=x), SMC(), 10
+            StableRNG(seed), gauss2(DynamicPPL.TypeWrap{Vector{Float64}}(); x=x), SMC(), 10
         )

         @model function gauss3(x, ::Type{TV}=Vector{Float64}) where {TV}
@@ -300,12 +342,18 @@
             return priors
         end

-        chain = sample(gauss3(x), PG(10), 10)
-        chain = sample(gauss3(x), SMC(), 10)
+        chain = sample(StableRNG(seed), gauss3(x), PG(10), 10)
+        chain = sample(StableRNG(seed), gauss3(x), SMC(), 10)

-        chain = sample(gauss3(x, DynamicPPL.TypeWrap{Vector{Real}}()), PG(10), 10)
-        chain = sample(gauss3(x, DynamicPPL.TypeWrap{Vector{Real}}()), SMC(), 10)
+        chain = sample(
+            StableRNG(seed), gauss3(x, DynamicPPL.TypeWrap{Vector{Real}}()), PG(10), 10
+        )
+        chain = sample(
+            StableRNG(seed), gauss3(x, DynamicPPL.TypeWrap{Vector{Real}}()), SMC(), 10
+        )
     end
+
+    # TODO(mhauru) What is this testing? Why does it not use the looped-over adbackend?
     @testset "new interface" begin
         obs = [0, 1, 0, 1, 1, 1, 1, 1, 1, 1]
@@ -318,13 +366,14 @@
         end

         sample(
+            StableRNG(seed),
             newinterface(obs),
             HMC(0.75, 3, :p, :x; adtype=Turing.AutoForwardDiff(; chunksize=2)),
             100,
         )
     end
+
     @testset "no return" begin
-        Random.seed!(5)
         @model function noreturn(x)
             s ~ InverseGamma(2, 3)
             m ~ Normal(0, sqrt(s))
@@ -333,11 +382,13 @@
             end
         end

-        chain = sample(noreturn([1.5 2.0]), HMC(0.1, 10; adtype=adbackend), 4000)
+        chain = sample(
+            StableRNG(seed), noreturn([1.5 2.0]), HMC(0.1, 10; adtype=adbackend), 4000
+        )
         check_numerical(chain, [:s, :m], [49 / 24, 7 / 6])
     end
+
     @testset "observe" begin
-        Random.seed!(5)
         @model function test()
             z ~ Normal(0, 1)
             x ~ Bernoulli(1)
@@ -350,9 +401,9 @@
         smc = SMC()
         pg = PG(10)

-        res_is = sample(test(), is, 10000)
-        res_smc = sample(test(), smc, 1000)
-        res_pg = sample(test(), pg, 100)
+        res_is = sample(StableRNG(seed), test(), is, 1_000)
+        res_smc = sample(StableRNG(seed), test(), smc, 1_000)
+        res_pg = sample(StableRNG(seed), test(), pg, 100)

         @test all(isone, res_is[:x])
         @test res_is.logevidence ≈ 2 * log(0.5)
@@ -362,10 +413,12 @@
         @test all(isone, res_pg[:x])
     end
+
     @testset "sample" begin
         alg = Gibbs(HMC(0.2, 3, :m; adtype=adbackend), PG(10, :s))
-        chn = sample(gdemo_default, alg, 1000)
+        chn = sample(StableRNG(seed), gdemo_default, alg, 10)
     end
+
     @testset "vectorization @." begin
         @model function vdemo1(x)
             s ~ InverseGamma(2, 3)
@@ -376,7 +429,7 @@
         alg = HMC(0.01, 5; adtype=adbackend)
         x = randn(100)
-        res = sample(vdemo1(x), alg, 250)
+        res = sample(StableRNG(seed), vdemo1(x), alg, 10)

         @model function vdemo1b(x)
             s ~ InverseGamma(2, 3)
@@ -385,7 +438,7 @@
             return s, m
         end

-        res = sample(vdemo1b(x), alg, 250)
+        res = sample(StableRNG(seed), vdemo1b(x), alg, 10)

         @model function vdemo2(x)
             μ ~ MvNormal(zeros(size(x, 1)), I)
@@ -394,7 +447,7 @@
         D = 2
         alg = HMC(0.01, 5; adtype=adbackend)
-        res = sample(vdemo2(randn(D, 100)), alg, 250)
+        res = sample(StableRNG(seed), vdemo2(randn(D, 100)), alg, 10)

         # Vector assumptions
         N = 10
@@ -407,7 +460,9 @@
             end
         end

-        t_loop = @elapsed res = sample(vdemo3(), alg, 1000)
+        # TODO(mhauru) What is the point of the below @elapsed stuff? It prints out some
+        # timings. Do we actually ever look at them?
+        t_loop = @elapsed res = sample(StableRNG(seed), vdemo3(), alg, 1000)

         # Test for vectorize UnivariateDistribution
         @model function vdemo4()
@@ -415,11 +470,11 @@
             @. x ~ Normal(0, 2)
         end

-        t_vec = @elapsed res = sample(vdemo4(), alg, 1000)
+        t_vec = @elapsed res = sample(StableRNG(seed), vdemo4(), alg, 1000)

         @model vdemo5() = x ~ MvNormal(zeros(N), 4 * I)

-        t_mv = @elapsed res = sample(vdemo5(), alg, 1000)
+        t_mv = @elapsed res = sample(StableRNG(seed), vdemo5(), alg, 1000)

         println("Time for")
         println("  Loop : ", t_loop)
@@ -432,7 +487,7 @@
             @. x ~ InverseGamma(2, 3)
         end

-        sample(vdemo6(), alg, 1000)
+        sample(StableRNG(seed), vdemo6(), alg, 10)

         N = 3
         @model function vdemo7()
@@ -440,8 +495,9 @@
             @. x ~ [InverseGamma(2, 3) for i in 1:N]
         end

-        sample(vdemo7(), alg, 1000)
+        sample(StableRNG(seed), vdemo7(), alg, 10)
     end
+
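The `@elapsed` lines that the TODO comments question time three encodings of the same prior (an explicit loop, a broadcasted `~`, and a single multivariate distribution), and the testset only prints the numbers. A standalone sketch of that comparison, with hypothetical model names:

    using LinearAlgebra: I
    using Turing

    N = 10
    @model function demo_loop()
        x = Vector{Real}(undef, N)
        for i in 1:N
            x[i] ~ Normal(0, 2)
        end
    end
    @model function demo_broadcast()
        x = Vector{Real}(undef, N)
        @. x ~ Normal(0, 2)
    end
    @model demo_mv() = x ~ MvNormal(zeros(N), 4 * I)  # same marginals, one distribution

    alg = HMC(0.01, 5)
    t_loop = @elapsed sample(demo_loop(), alg, 1_000)
    t_vec = @elapsed sample(demo_broadcast(), alg, 1_000)
    t_mv = @elapsed sample(demo_mv(), alg, 1_000)
    println((t_loop, t_vec, t_mv))  # printed only; nothing asserts on these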
     @testset "vectorization .~" begin
         @model function vdemo1(x)
             s ~ InverseGamma(2, 3)
@@ -452,7 +508,7 @@
         alg = HMC(0.01, 5; adtype=adbackend)
         x = randn(100)
-        res = sample(vdemo1(x), alg, 250)
+        res = sample(StableRNG(seed), vdemo1(x), alg, 10)

         @model function vdemo2(x)
             μ ~ MvNormal(zeros(size(x, 1)), I)
@@ -461,7 +517,7 @@
         D = 2
         alg = HMC(0.01, 5; adtype=adbackend)
-        res = sample(vdemo2(randn(D, 100)), alg, 250)
+        res = sample(StableRNG(seed), vdemo2(randn(D, 100)), alg, 10)

         # Vector assumptions
         N = 10
@@ -474,7 +530,8 @@
             end
         end

-        t_loop = @elapsed res = sample(vdemo3(), alg, 1000)
+        # TODO(mhauru) Same question as above about @elapsed.
+        t_loop = @elapsed res = sample(StableRNG(seed), vdemo3(), alg, 1_000)

         # Test for vectorize UnivariateDistribution
         @model function vdemo4()
@@ -482,11 +539,11 @@
             return x .~ Normal(0, 2)
         end

-        t_vec = @elapsed res = sample(vdemo4(), alg, 1000)
+        t_vec = @elapsed res = sample(StableRNG(seed), vdemo4(), alg, 1_000)

         @model vdemo5() = x ~ MvNormal(zeros(N), 4 * I)

-        t_mv = @elapsed res = sample(vdemo5(), alg, 1000)
+        t_mv = @elapsed res = sample(StableRNG(seed), vdemo5(), alg, 1_000)

         println("Time for")
         println("  Loop : ", t_loop)
@@ -499,15 +556,16 @@
             return x .~ InverseGamma(2, 3)
         end

-        sample(vdemo6(), alg, 1000)
+        sample(StableRNG(seed), vdemo6(), alg, 10)

         @model function vdemo7()
             x = Array{Real}(undef, N, N)
             return x .~ [InverseGamma(2, 3) for i in 1:N]
         end

-        sample(vdemo7(), alg, 1000)
+        sample(StableRNG(seed), vdemo7(), alg, 10)
     end
+
     @testset "Type parameters" begin
         N = 10
         alg = HMC(0.01, 5; adtype=adbackend)
@@ -519,37 +577,37 @@
             end
         end

-        t_loop = @elapsed res = sample(vdemo1(), alg, 250)
-        t_loop = @elapsed res = sample(vdemo1(DynamicPPL.TypeWrap{Float64}()), alg, 250)
+        # TODO(mhauru) What are we testing below? Just that using a type parameter doesn't
+        # crash?
+        sample(StableRNG(seed), vdemo1(), alg, 10)
+        sample(StableRNG(seed), vdemo1(DynamicPPL.TypeWrap{Float64}()), alg, 10)

         vdemo1kw(; T) = vdemo1(T)
-        t_loop = @elapsed res = sample(
-            vdemo1kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 250
-        )
+        sample(StableRNG(seed), vdemo1kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 10)

         @model function vdemo2(::Type{T}=Float64) where {T<:Real}
             x = Vector{T}(undef, N)
             @. x ~ Normal(0, 2)
         end

-        t_vec = @elapsed res = sample(vdemo2(), alg, 250)
-        t_vec = @elapsed res = sample(vdemo2(DynamicPPL.TypeWrap{Float64}()), alg, 250)
+        sample(StableRNG(seed), vdemo2(), alg, 10)
+        sample(StableRNG(seed), vdemo2(DynamicPPL.TypeWrap{Float64}()), alg, 10)

         vdemo2kw(; T) = vdemo2(T)
-        t_vec = @elapsed res = sample(
-            vdemo2kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 250
-        )
+        sample(StableRNG(seed), vdemo2kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 10)

         @model function vdemo3(::Type{TV}=Vector{Float64}) where {TV<:AbstractVector}
             x = TV(undef, N)
             @. x ~ InverseGamma(2, 3)
         end

-        sample(vdemo3(), alg, 250)
-        sample(vdemo3(DynamicPPL.TypeWrap{Vector{Float64}}()), alg, 250)
+        sample(StableRNG(seed), vdemo3(), alg, 10)
+        sample(StableRNG(seed), vdemo3(DynamicPPL.TypeWrap{Vector{Float64}}()), alg, 10)

         vdemo3kw(; T) = vdemo3(T)
-        sample(vdemo3kw(; T=DynamicPPL.TypeWrap{Vector{Float64}}()), alg, 250)
+        sample(
+            StableRNG(seed), vdemo3kw(; T=DynamicPPL.TypeWrap{Vector{Float64}}()), alg, 10
+        )
     end
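The TODO in the "Type parameters" testset asks what is being tested; the `::Type{T}=Float64` pattern itself has a concrete purpose: a pre-allocated container must be able to hold the dual/tracked numbers that AD backends push through the model, so the element type is taken as a model argument rather than hard-coded. A hedged sketch of the pattern, with an illustrative model name:

    using Turing

    # Hard-coding Vector{Float64}(undef, 3) would fail under ForwardDiff-based
    # HMC, because dual numbers cannot be stored in a Float64 vector.
    @model function typed_demo(::Type{T}=Float64) where {T<:Real}
        x = Vector{T}(undef, 3)
        for i in eachindex(x)
            x[i] ~ Normal(0, 2)
        end
    end

    sample(typed_demo(), HMC(0.01, 5), 10)  # runs without crashing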

     @testset "names_values" begin
@@ -565,17 +623,21 @@
         end

         @test_throws ErrorException sample(
-            demo_repeated_varname(), NUTS(), 1000; check_model=true
+            StableRNG(seed), demo_repeated_varname(), NUTS(), 10; check_model=true
         )

         # Make sure that disabling the check also works.
-        @test (sample(demo_repeated_varname(), Prior(), 10; check_model=false);
-        true)
+        @test (
+            sample(
+                StableRNG(seed), demo_repeated_varname(), Prior(), 10; check_model=false
+            );
+            true
+        )

         @model function demo_incorrect_missing(y)
             return y[1:1] ~ MvNormal(zeros(1), I)
         end
         @test_throws ErrorException sample(
-            demo_incorrect_missing([missing]), NUTS(), 1000; check_model=true
+            StableRNG(seed), demo_incorrect_missing([missing]), NUTS(), 10; check_model=true
         )
     end
 end
diff --git a/test/mcmc/abstractmcmc.jl b/test/mcmc/abstractmcmc.jl
index 9c7c58ce6..6486a8628 100644
--- a/test/mcmc/abstractmcmc.jl
+++ b/test/mcmc/abstractmcmc.jl
@@ -140,7 +140,7 @@
     DynamicPPL.TestUtils.test_sampler(
         [model],
         sampler_ext,
-        5_000;
+        2_000;
         rtol=0.2,
         sampler_name="AdvancedHMC",
         sample_kwargs...,
@@ -187,7 +187,7 @@
     DynamicPPL.TestUtils.test_sampler(
         [model],
         sampler_ext,
-        10_000;
+        2_000;
         discard_initial=1_000,
         thinning=10,
         rtol=0.2,
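Both hunks above only shrink the draw counts handed to `DynamicPPL.TestUtils.test_sampler`, which draws a chain per model and checks posterior means against known values within `rtol`. A hand-rolled sketch of the same style of check, using the `gdemo(1.5, 2.0)` posterior means (49/24 and 7/6) quoted elsewhere in this suite:

    using StableRNGs: StableRNG
    using Test, Turing

    chain = sample(StableRNG(23), gdemo(1.5, 2.0), NUTS(0.65), 2_000)
    @test isapprox(mean(chain[:s]), 49 / 24; rtol=0.2)
    @test isapprox(mean(chain[:m]), 7 / 6; rtol=0.2)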
diff --git a/test/mcmc/ess.jl b/test/mcmc/ess.jl
index 0a1c23a9e..23f4a11ae 100644
--- a/test/mcmc/ess.jl
+++ b/test/mcmc/ess.jl
@@ -6,10 +6,13 @@ using Distributions: Normal, sample
 using DynamicPPL: DynamicPPL
 using DynamicPPL: Sampler
 using Random: Random
+using StableRNGs: StableRNG
 using Test: @test, @testset
 using Turing

 @testset "ESS" begin
+    @info "Starting ESS tests"
+
     @model function demo(x)
         m ~ Normal()
         return x ~ Normal(m, 0.5)
@@ -24,8 +27,7 @@
     demodot_default = demodot(1.0)

     @testset "ESS constructor" begin
-        Random.seed!(0)
-        N = 500
+        N = 10

         s1 = ESS()
         s2 = ESS(:m)
@@ -43,41 +45,49 @@
     end

     @testset "ESS inference" begin
-        Random.seed!(1)
-        chain = sample(demo_default, ESS(), 5_000)
-        check_numerical(chain, [:m], [0.8]; atol=0.1)
-
-        Random.seed!(1)
-        chain = sample(demodot_default, ESS(), 5_000)
-        check_numerical(chain, ["m[1]", "m[2]"], [0.0, 0.8]; atol=0.1)
-
-        Random.seed!(100)
-        alg = Gibbs(CSMC(15, :s), ESS(:m))
-        chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-        check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1)
-
-        # MoGtest
-        Random.seed!(125)
-        alg = Gibbs(CSMC(15, :z1, :z2, :z3, :z4), ESS(:mu1), ESS(:mu2))
-        chain = sample(MoGtest_default, alg, 6000)
-        check_MoGtest_default(chain; atol=0.1)
-
-        # Different "equivalent" models.
-        # NOTE: Because `ESS` only supports "single" variables with
-        # Gaussian priors, we restrict ourselves to this subspace by conditioning
-        # on the non-Gaussian variables in `DEMO_MODELS`.
-        models_conditioned = map(DynamicPPL.TestUtils.DEMO_MODELS) do model
-            # Condition on the non-Gaussian random variables.
-            model | (s=DynamicPPL.TestUtils.posterior_mean(model).s,)
+        @info "Starting ESS inference tests"
+        seed = 23
+
+        @testset "demo_default" begin
+            chain = sample(StableRNG(seed), demo_default, ESS(), 5_000)
+            check_numerical(chain, [:m], [0.8]; atol=0.1)
+        end
+
+        @testset "demodot_default" begin
+            chain = sample(StableRNG(seed), demodot_default, ESS(), 5_000)
+            check_numerical(chain, ["m[1]", "m[2]"], [0.0, 0.8]; atol=0.1)
+        end
+
+        @testset "gdemo with CSMC + ESS" begin
+            alg = Gibbs(CSMC(15, :s), ESS(:m))
+            chain = sample(StableRNG(seed), gdemo(1.5, 2.0), alg, 2000)
+            check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1)
+        end
+
+        @testset "MoGtest_default with CSMC + ESS" begin
+            alg = Gibbs(CSMC(15, :z1, :z2, :z3, :z4), ESS(:mu1), ESS(:mu2))
+            chain = sample(StableRNG(seed), MoGtest_default, alg, 2000)
+            check_MoGtest_default(chain; atol=0.1)
         end

-        DynamicPPL.TestUtils.test_sampler(
-            models_conditioned,
-            DynamicPPL.Sampler(ESS()),
-            10_000;
-            # Filter out the varnames we've conditioned on.
-            varnames_filter=vn -> DynamicPPL.getsym(vn) != :s,
-        )
+        @testset "TestModels" begin
+            # Different "equivalent" models.
+            # NOTE: Because `ESS` only supports "single" variables with
+            # Gaussian priors, we restrict ourselves to this subspace by conditioning
+            # on the non-Gaussian variables in `DEMO_MODELS`.
+            models_conditioned = map(DynamicPPL.TestUtils.DEMO_MODELS) do model
+                # Condition on the non-Gaussian random variables.
+                model | (s=DynamicPPL.TestUtils.posterior_mean(model).s,)
+            end
+
+            DynamicPPL.TestUtils.test_sampler(
+                models_conditioned,
+                DynamicPPL.Sampler(ESS()),
+                2000;
+                # Filter out the varnames we've conditioned on.
+                varnames_filter=vn -> DynamicPPL.getsym(vn) != :s,
+            )
+        end
     end
 end
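The new `TestModels` testset works around the restriction spelled out in its NOTE, namely that ESS handles only Gaussian priors, by conditioning each demo model on its non-Gaussian variable `s` via the `|` operator. The same conditioning can be sketched on `gdemo`; the conditioning value below is illustrative:

    using StableRNGs: StableRNG
    using Turing

    # Fix the InverseGamma-distributed s, leaving only the Gaussian m for ESS.
    conditioned_model = gdemo(1.5, 2.0) | (s=49 / 24,)
    chain = sample(StableRNG(23), conditioned_model, ESS(), 2_000)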
+ @testset "matrix support" begin + dist = Wishart(7, [1 0.5; 0.5 1]) + @model hmcmatrixsup() = v ~ dist model_f = hmcmatrixsup() n_samples = 1_000 - vs = map(1:3) do _ - chain = sample(rng, model_f, HMC(0.15, 7; adtype=adbackend), n_samples) - r = reshape(Array(group(chain, :v)), n_samples, 2, 2) - reshape(mean(r; dims=1), 2, 2) - end - @test maximum(abs, mean(vs) - (7 * [1 0.5; 0.5 1])) <= 0.5 + chain = sample(StableRNG(24), model_f, HMC(0.15, 7; adtype=adbackend), n_samples) + # Reshape the chain into an array of 2x2 matrices, one per sample. Then compute + # the average of the samples, as a matrix + r = reshape(Array(chain), n_samples, 2, 2) + r_mean = dropdims(mean(r; dims=1); dims=1) + + @test isapprox(r_mean, mean(dist); atol=0.2) end + @testset "multivariate support" begin # Define NN flow function nn(x, b1, w11, w12, w13, bo, wo) @@ -124,58 +132,48 @@ using Turing end # Sampling - chain = sample(rng, bnn(ts), HMC(0.1, 5; adtype=adbackend), 10) + chain = sample(StableRNG(seed), bnn(ts), HMC(0.1, 5; adtype=adbackend), 10) end @testset "hmcda inference" begin alg1 = HMCDA(500, 0.8, 0.015; adtype=adbackend) - # alg2 = Gibbs(HMCDA(200, 0.8, 0.35, :m; adtype=adbackend), HMC(0.25, 3, :s; adtype=adbackend)) - - # alg3 = Gibbs(HMC(0.25, 3, :m; adtype=adbackend), PG(30, 3, :s)) - # alg3 = PG(50, 2000) - - res1 = sample(rng, gdemo_default, alg1, 3000) + res1 = sample(StableRNG(seed), gdemo_default, alg1, 3_000) check_gdemo(res1) - - # res2 = sample(gdemo([1.5, 2.0]), alg2) - # - # @test mean(res2[:s]) ≈ 49/24 atol=0.2 - # @test mean(res2[:m]) ≈ 7/6 atol=0.2 end + # TODO(mhauru) The below one is a) slow, b) flaky, in that changing the seed can + # easily make it fail, despite many more samples than taken by most other tests. Hence + # explicitly specifying the seeds here. 
@testset "hmcda+gibbs inference" begin - rng = StableRNG(123) - Random.seed!(12345) # particle samplers do not support user-provided `rng` yet - alg3 = Gibbs(PG(20, :s), HMCDA(500, 0.8, 0.25, :m; init_ϵ=0.05, adtype=adbackend)) - - res3 = sample(rng, gdemo_default, alg3, 3000; discard_initial=1000) - check_gdemo(res3) + Random.seed!(12345) + alg = Gibbs(PG(20, :s), HMCDA(500, 0.8, 0.25, :m; init_ϵ=0.05, adtype=adbackend)) + res = sample(StableRNG(123), gdemo_default, alg, 3000; discard_initial=1000) + check_gdemo(res) end @testset "hmcda constructor" begin alg = HMCDA(0.8, 0.75; adtype=adbackend) - println(alg) sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "HMCDA" alg = HMCDA(200, 0.8, 0.75; adtype=adbackend) - println(alg) sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "HMCDA" alg = HMCDA(200, 0.8, 0.75, :s; adtype=adbackend) - println(alg) sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "HMCDA" @test isa(alg, HMCDA) @test isa(sampler, Sampler{<:Turing.Hamiltonian}) end + @testset "nuts inference" begin alg = NUTS(1000, 0.8; adtype=adbackend) - res = sample(rng, gdemo_default, alg, 6000) + res = sample(StableRNG(seed), gdemo_default, alg, 500) check_gdemo(res) end + @testset "nuts constructor" begin alg = NUTS(200, 0.65; adtype=adbackend) sampler = Sampler(alg, gdemo_default) @@ -189,22 +187,24 @@ using Turing sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "NUTS" end + @testset "check discard" begin alg = NUTS(100, 0.8; adtype=adbackend) - c1 = sample(rng, gdemo_default, alg, 500; discard_adapt=true) - c2 = sample(rng, gdemo_default, alg, 500; discard_adapt=false) + c1 = sample(StableRNG(seed), gdemo_default, alg, 500; discard_adapt=true) + c2 = sample(StableRNG(seed), gdemo_default, alg, 500; discard_adapt=false) @test size(c1, 1) == 500 @test size(c2, 1) == 500 end + @testset "AHMC resize" begin alg1 = Gibbs(PG(10, :m), NUTS(100, 0.65, :s; adtype=adbackend)) alg2 = Gibbs(PG(10, :m), HMC(0.1, 3, :s; adtype=adbackend)) alg3 = Gibbs(PG(10, :m), HMCDA(100, 0.65, 0.3, :s; adtype=adbackend)) - @test sample(rng, gdemo_default, alg1, 300) isa Chains - @test sample(rng, gdemo_default, alg2, 300) isa Chains - @test sample(rng, gdemo_default, alg3, 300) isa Chains + @test sample(StableRNG(seed), gdemo_default, alg1, 10) isa Chains + @test sample(StableRNG(seed), gdemo_default, alg2, 10) isa Chains + @test sample(StableRNG(seed), gdemo_default, alg3, 10) isa Chains end @testset "Regression tests" begin @@ -213,28 +213,28 @@ using Turing m = Matrix{T}(undef, 2, 3) return m .~ MvNormal(zeros(2), I) end - @test sample(rng, mwe1(), HMC(0.2, 4; adtype=adbackend), 1_000) isa Chains + @test sample(StableRNG(seed), mwe1(), HMC(0.2, 4; adtype=adbackend), 100) isa Chains @model function mwe2(::Type{T}=Matrix{Float64}) where {T} m = T(undef, 2, 3) return m .~ MvNormal(zeros(2), I) end - @test sample(rng, mwe2(), HMC(0.2, 4; adtype=adbackend), 1_000) isa Chains + @test sample(StableRNG(seed), mwe2(), HMC(0.2, 4; adtype=adbackend), 100) isa Chains # https://github.com/TuringLang/Turing.jl/issues/1308 @model function mwe3(::Type{T}=Array{Float64}) where {T} m = T(undef, 2, 3) return m .~ MvNormal(zeros(2), I) end - @test sample(rng, mwe3(), HMC(0.2, 4; adtype=adbackend), 1_000) isa Chains + @test sample(StableRNG(seed), mwe3(), HMC(0.2, 4; adtype=adbackend), 100) isa Chains end # issue #1923 @testset "reproducibility" begin alg = NUTS(1000, 0.8; adtype=adbackend) - res1 = sample(StableRNG(123), 
gdemo_default, alg, 1000) - res2 = sample(StableRNG(123), gdemo_default, alg, 1000) - res3 = sample(StableRNG(123), gdemo_default, alg, 1000) + res1 = sample(StableRNG(seed), gdemo_default, alg, 10) + res2 = sample(StableRNG(seed), gdemo_default, alg, 10) + res3 = sample(StableRNG(seed), gdemo_default, alg, 10) @test Array(res1) == Array(res2) == Array(res3) end @@ -249,7 +249,7 @@ using Turing gdemo_default_prior = DynamicPPL.contextualize( demo_hmc_prior(), DynamicPPL.PriorContext() ) - chain = sample(gdemo_default_prior, alg, 10_000; initial_params=[3.0, 0.0]) + chain = sample(gdemo_default_prior, alg, 500; initial_params=[3.0, 0.0]) check_numerical( chain, [:s, :m], [mean(truncated(Normal(3, 1); lower=0)), 0]; atol=0.2 ) @@ -288,7 +288,7 @@ using Turing return xs[2] ~ Dirichlet(ones(5)) end model = vector_of_dirichlet() - chain = sample(model, NUTS(), 1000) + chain = sample(model, NUTS(), 1_000) @test mean(Array(chain)) ≈ 0.2 end @@ -335,7 +335,7 @@ using Turing gdemo_default, ADTypeCheckContext(adbackend, gdemo_default.context) ) # These will error if the adbackend being used is not the one set. - sample(rng, m, alg, 10) + sample(StableRNG(seed), m, alg, 10) end end diff --git a/test/mcmc/is.jl b/test/mcmc/is.jl index 47b20cc73..44fbe9201 100644 --- a/test/mcmc/is.jl +++ b/test/mcmc/is.jl @@ -68,7 +68,7 @@ using Turing return x end - chains = sample(test(), IS(), 10000) + chains = sample(test(), IS(), 1_000) @test all(isone, chains[:x]) @test chains.logevidence ≈ -2 * log(2) diff --git a/test/mcmc/mh.jl b/test/mcmc/mh.jl index 8813834ed..d71a5fbc6 100644 --- a/test/mcmc/mh.jl +++ b/test/mcmc/mh.jl @@ -18,9 +18,11 @@ using ..NumericalTests: check_MoGtest_default, check_gdemo, check_numerical GKernel(var) = (x) -> Normal(x, sqrt.(var)) @testset "mh.jl" begin + @info "Starting MH tests" + seed = 23 + @testset "mh constructor" begin - Random.seed!(10) - N = 500 + N = 10 s1 = MH((:s, InverseGamma(2, 3)), (:m, GKernel(3.0))) s2 = MH(:s, :m) s3 = MH() @@ -43,42 +45,51 @@ GKernel(var) = (x) -> Normal(x, sqrt.(var)) # s6 = externalsampler(MH(gdemo_default, proposal_type=AdvancedMH.StaticProposal)) # c6 = sample(gdemo_default, s6, N) end + @testset "mh inference" begin # Set the initial parameters, because if we get unlucky with the initial state, # these chains are too short to converge to reasonable numbers. 
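Several hunks in this file pass `initial_params` to `sample` to pin the first state of a chain; the prior test above starts at `[3.0, 0.0]`, and the MH tests below reuse the same idea. The keyword is AbstractMCMC's. A minimal sketch, with parameter order following the model definition (here `s` then `m`):

    using StableRNGs: StableRNG
    using Turing

    # Start the chain at s = 3.0, m = 0.0 instead of a random initial draw.
    chain = sample(
        StableRNG(123), gdemo_default, NUTS(0.65), 500; initial_params=[3.0, 0.0]
    )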
diff --git a/test/mcmc/is.jl b/test/mcmc/is.jl
index 47b20cc73..44fbe9201 100644
--- a/test/mcmc/is.jl
+++ b/test/mcmc/is.jl
@@ -68,7 +68,7 @@
         return x
     end

-    chains = sample(test(), IS(), 10000)
+    chains = sample(test(), IS(), 1_000)

     @test all(isone, chains[:x])
     @test chains.logevidence ≈ -2 * log(2)
diff --git a/test/mcmc/mh.jl b/test/mcmc/mh.jl
index 8813834ed..d71a5fbc6 100644
--- a/test/mcmc/mh.jl
+++ b/test/mcmc/mh.jl
@@ -18,9 +18,11 @@ using ..NumericalTests: check_MoGtest_default, check_gdemo, check_numerical
 GKernel(var) = (x) -> Normal(x, sqrt.(var))

 @testset "mh.jl" begin
+    @info "Starting MH tests"
+    seed = 23
+
     @testset "mh constructor" begin
-        Random.seed!(10)
-        N = 500
+        N = 10
         s1 = MH((:s, InverseGamma(2, 3)), (:m, GKernel(3.0)))
         s2 = MH(:s, :m)
         s3 = MH()
@@ -43,42 +45,51 @@
         # s6 = externalsampler(MH(gdemo_default, proposal_type=AdvancedMH.StaticProposal))
         # c6 = sample(gdemo_default, s6, N)
     end
+
     @testset "mh inference" begin
         # Set the initial parameters, because if we get unlucky with the initial state,
         # these chains are too short to converge to reasonable numbers.
         discard_initial = 1_000
         initial_params = [1.0, 1.0]

-        Random.seed!(125)
-        alg = MH()
-        chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
-        check_gdemo(chain; atol=0.1)
-
-        Random.seed!(125)
-        # MH with Gaussian proposal
-        alg = MH((:s, InverseGamma(2, 3)), (:m, GKernel(1.0)))
-        chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
-        check_gdemo(chain; atol=0.1)
-
-        Random.seed!(125)
-        # MH within Gibbs
-        alg = Gibbs(MH(:m), MH(:s))
-        chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
-        check_gdemo(chain; atol=0.1)
-
-        Random.seed!(125)
-        # MoGtest
-        gibbs = Gibbs(
-            CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1, GKernel(1)), (:mu2, GKernel(1)))
-        )
-        chain = sample(
-            MoGtest_default,
-            gibbs,
-            500;
-            discard_initial=100,
-            initial_params=[1.0, 1.0, 0.0, 0.0, 1.0, 4.0],
-        )
-        check_MoGtest_default(chain; atol=0.2)
+        @testset "gdemo_default" begin
+            alg = MH()
+            chain = sample(
+                StableRNG(seed), gdemo_default, alg, 10_000; discard_initial, initial_params
+            )
+            check_gdemo(chain; atol=0.1)
+        end
+
+        @testset "gdemo_default with custom proposals" begin
+            alg = MH((:s, InverseGamma(2, 3)), (:m, GKernel(1.0)))
+            chain = sample(
+                StableRNG(seed), gdemo_default, alg, 10_000; discard_initial, initial_params
+            )
+            check_gdemo(chain; atol=0.1)
+        end
+
+        @testset "gdemo_default with MH-within-Gibbs" begin
+            alg = Gibbs(MH(:m), MH(:s))
+            chain = sample(
+                StableRNG(seed), gdemo_default, alg, 10_000; discard_initial, initial_params
+            )
+            check_gdemo(chain; atol=0.1)
+        end
+
+        @testset "MoGtest_default with Gibbs" begin
+            gibbs = Gibbs(
+                CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1, GKernel(1)), (:mu2, GKernel(1)))
+            )
+            chain = sample(
+                StableRNG(seed),
+                MoGtest_default,
+                gibbs,
+                500;
+                discard_initial=100,
+                initial_params=[1.0, 1.0, 0.0, 0.0, 1.0, 4.0],
+            )
+            check_MoGtest_default(chain; atol=0.2)
+        end
     end

     # Test MH shape passing.
@@ -115,14 +126,12 @@
         @test vt[:m] isa Vector{Float64} && length(vt[:m]) == 2
         @test vt[:s] isa Float64

-        chain = sample(model, MH(), 100)
+        chain = sample(model, MH(), 10)

         @test chain isa MCMCChains.Chains
     end

     @testset "proposal matrix" begin
-        Random.seed!(100)
-
         mat = [1.0 -0.05; -0.05 1.0]

         prop1 = mat # Matrix only constructor
@@ -136,8 +145,8 @@
         @test spl1.proposals.proposal.Σ.mat == spl2.proposals.proposal.Σ.mat

         # Test inference.
-        chain1 = sample(gdemo_default, spl1, 10000)
-        chain2 = sample(gdemo_default, spl2, 10000)
+        chain1 = sample(StableRNG(seed), gdemo_default, spl1, 2_000)
+        chain2 = sample(StableRNG(seed), gdemo_default, spl2, 2_000)

         check_gdemo(chain1)
         check_gdemo(chain2)
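The `proposal matrix` testset relies on MH's covariance shorthand: passing a matrix builds the same Gaussian random-walk proposal as wrapping an `MvNormal` explicitly, which is what the `Σ.mat` equality assertion checks. Sketched standalone, assuming that documented shorthand:

    using AdvancedMH: RandomWalkProposal
    using StableRNGs: StableRNG
    using Turing

    mat = [1.0 -0.05; -0.05 1.0]
    spl_shorthand = MH(mat)                                      # covariance matrix
    spl_explicit = MH(RandomWalkProposal(MvNormal(zeros(2), mat)))  # explicit proposal
    chain = sample(StableRNG(23), gdemo_default, spl_shorthand, 2_000)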
@@ -166,23 +175,16 @@
         # with small-valued VC matrix to check if we only see very small steps
         vc_μ = convert(Array, 1e-4 * I(2))
         vc_σ = convert(Array, 1e-4 * I(2))
+        alg_small = Gibbs(MH((:μ, vc_μ)), MH((:σ, vc_σ)))
+        alg_big = MH()

-        alg = Gibbs(MH((:μ, vc_μ)), MH((:σ, vc_σ)))
-
-        chn = sample(
-            mod,
-            alg,
-            3_000, # draws
-        )
-
-        chn2 = sample(mod, MH(), 3_000)
+        chn_small = sample(StableRNG(seed), mod, alg_small, 1_000)
+        chn_big = sample(StableRNG(seed), mod, alg_big, 1_000)

         # Test that the small variance version is actually smaller.
-        v1 = var(diff(Array(chn["μ[1]"]); dims=1))
-        v2 = var(diff(Array(chn2["μ[1]"]); dims=1))
-
-        # FIXME: Do this properly. It sometimes fails.
-        # @test v1 < v2
+        variance_small = var(diff(Array(chn_small["μ[1]"]); dims=1))
+        variance_big = var(diff(Array(chn_big["μ[1]"]); dims=1))
+        @test variance_small < variance_big / 1_000.0
     end

     @testset "vector of multivariate distributions" begin
         @model function test(k)
             T = Vector{Vector{Float64}}(undef, k)
             for i in 1:k
                 T[i] ~ Dirichlet(5, 1.0)
             end
         end

-        Random.seed!(100)
-        chain = sample(test(1), MH(), 5_000)
+        chain = sample(StableRNG(seed), test(1), MH(), 5_000)
         for i in 1:5
             @test mean(chain, "T[1][$i]") ≈ 0.2 atol = 0.01
         end

-        Random.seed!(100)
-        chain = sample(test(10), MH(), 5_000)
+        chain = sample(StableRNG(seed), test(10), MH(), 5_000)
         for j in 1:10, i in 1:5
             @test mean(chain, "T[$j][$i]") ≈ 0.2 atol = 0.01
         end
     end
@@ -209,8 +209,7 @@
     @testset "LKJCholesky" begin
         for uplo in ['L', 'U']
             @model f() = x ~ LKJCholesky(2, 1, uplo)
-            Random.seed!(100)
-            chain = sample(f(), MH(), 5_000)
+            chain = sample(StableRNG(seed), f(), MH(), 5_000)
             indices = [(1, 1), (2, 1), (2, 2)]
             values = [1, 0, 0.785]
             for ((i, j), v) in zip(indices, values)
@@ -264,9 +263,6 @@
     end

     @testset "prior" begin
-        # HACK: MH can be so bad for this prior model for some reason that it's difficult to
-        # find a non-trivial `atol` where the tests will pass for all seeds. Hence we fix it :/
-        rng = StableRNG(10)
         alg = MH()
         gdemo_default_prior = DynamicPPL.contextualize(
             gdemo_default, DynamicPPL.PriorContext()
         )
         burnin = 10_000
         n = 10_000
         chain = sample(
-            rng, gdemo_default_prior, alg, n; discard_initial=burnin, thinning=10
+            StableRNG(seed),
+            gdemo_default_prior,
+            alg,
+            n;
+            discard_initial=burnin,
+            thinning=10,
         )
         check_numerical(chain, [:s, :m], [mean(InverseGamma(2, 3)), 0]; atol=0.3)
     end

     @testset "`filldist` proposal (issue #2180)" begin
         @model demo_filldist_issue2180() = x ~ MvNormal(zeros(3), I)
         chain = sample(
+            StableRNG(seed),
             demo_filldist_issue2180(),
             MH(AdvancedMH.RandomWalkProposal(filldist(Normal(), 3))),
             10_000,
diff --git a/test/mcmc/particle_mcmc.jl b/test/mcmc/particle_mcmc.jl
index 2e3744fef..3378fea32 100644
--- a/test/mcmc/particle_mcmc.jl
+++ b/test/mcmc/particle_mcmc.jl
@@ -192,62 +192,4 @@
     end
 end

-# @testset "pmmh.jl" begin
-#     @testset "pmmh constructor" begin
-#         N = 2000
-#         s1 = PMMH(N, SMC(10, :s), MH(1,(:m, s -> Normal(s, sqrt(1)))))
-#         s2 = PMMH(N, SMC(10, :s), MH(1, :m))
-#         s3 = PIMH(N, SMC())
-#
-#         c1 = sample(gdemo_default, s1)
-#         c2 = sample(gdemo_default, s2)
-#         c3 = sample(gdemo_default, s3)
-#     end
-#     @numerical_testset "pmmh inference" begin
-#         alg = PMMH(2000, SMC(20, :m), MH(1, (:s, GKernel(1))))
-#         chain = sample(gdemo_default, alg)
-#         check_gdemo(chain, atol = 0.1)
-#
-#         # PMMH with prior as proposal
-#         alg = PMMH(2000, SMC(20, :m), MH(1, :s))
-#         chain = sample(gdemo_default, alg)
-#         check_gdemo(chain, atol = 0.1)
-#
-#         # PIMH
-#         alg = PIMH(2000, SMC())
-#         chain = sample(gdemo_default, alg)
-#         check_gdemo(chain)
-#
-#         # MoGtest
-#         pmmh = PMMH(2000,
-#             SMC(10, :z1, :z2, :z3, :z4),
-#             MH(1, :mu1, :mu2))
-#         chain = sample(MoGtest_default, pmmh)
-#
-#         check_MoGtest_default(chain, atol = 0.1)
-#     end
-# end
-
-# @testset "ipmcmc.jl" begin
-#     @testset "ipmcmc constructor" begin
-#         Random.seed!(125)
-#
-#         N = 50
-#         s1 = IPMCMC(10, N, 4, 2)
-#         s2 = IPMCMC(10, N, 4)
-#
-#         c1 = sample(gdemo_default, s1)
-#         c2 = sample(gdemo_default, s2)
-#     end
-#     @numerical_testset "ipmcmc inference" begin
-#         alg = IPMCMC(30, 500, 4)
-#         chain = sample(gdemo_default, alg)
-#         check_gdemo(chain)
-#
-#         alg2 = IPMCMC(15, 100, 10)
-#         chain2 = sample(MoGtest_default, alg2)
-#         check_MoGtest_default(chain2, atol = 0.2)
-#     end
-# end
-
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index 530219c83..3bc08967d 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -30,7 +30,7 @@ macro timeit_include(path::AbstractString)
     end
 end

-@testset "Turing" begin
+@testset "Turing" verbose = true begin
     @testset "Test utils" begin
         @timeit_include("test_utils/test_utils.jl")
     end
@@ -39,12 +39,12 @@
         @timeit_include("Aqua.jl")
     end

-    @testset "essential" begin
+    @testset "essential" verbose = true begin
         @timeit_include("essential/ad.jl")
         @timeit_include("essential/container.jl")
     end

-    @testset "samplers (without AD)" begin
+    @testset "samplers (without AD)" verbose = true begin
         @timeit_include("mcmc/particle_mcmc.jl")
         @timeit_include("mcmc/emcee.jl")
         @timeit_include("mcmc/ess.jl")
@@ -52,7 +52,7 @@
     end

     @timeit TIMEROUTPUT "inference" begin
-        @testset "inference with samplers" begin
+        @testset "inference with samplers" verbose = true begin
             @timeit_include("mcmc/gibbs.jl")
             @timeit_include("mcmc/gibbs_conditional.jl")
             @timeit_include("mcmc/hmc.jl")
@@ -67,7 +67,7 @@
             @timeit_include("variational/advi.jl")
         end

-        @testset "mode estimation" begin
+        @testset "mode estimation" verbose = true begin
             @timeit_include("optimisation/Optimisation.jl")
             @timeit_include("ext/OptimInterface.jl")
         end
@@ -81,7 +81,7 @@
         @timeit_include("variational/optimisers.jl")
     end

-    @testset "stdlib" begin
+    @testset "stdlib" verbose = true begin
         @timeit_include("stdlib/distributions.jl")
         @timeit_include("stdlib/RandomMeasures.jl")
     end
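The `verbose = true` flag added throughout runtests.jl is plain `Test` stdlib behavior (Julia 1.6 and later): the end-of-run summary table gets one row per nested testset instead of a single aggregate line, which makes it easier to see where time and failures concentrate. A tiny demonstration:

    using Test

    @testset "outer" verbose = true begin
        @testset "inner A" begin
            @test 1 + 1 == 2
        end
        @testset "inner B" begin
            @test isempty([])
        end
    end
    # The printed summary now lists "inner A" and "inner B" as separate rows.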