From fca2fb0a195a64fb72492d1f5917aff9cff58b38 Mon Sep 17 00:00:00 2001 From: Hong Ge Date: Mon, 31 Jan 2022 16:00:07 +0000 Subject: [PATCH] Libtask Integration (#1766) * New Turing-libtask integration (#1757) * Update Project.toml * Update Project.toml * Update Project.toml * trace down into functions calling produce * trace into functions in testcases * update to the latest version * run tests against new libtask * temporarily disable 1.3 for testing * Update AdvancedSMC.jl * Update AdvancedSMC.jl * Update AdvancedSMC.jl * Update AdvancedSMC.jl * Update AdvancedSMC.jl * copy Trace on tape * Implement simplified evaluator for TracedModel * Remove some unnecessary trace functions. * Minor bugfix in TracedModel evaluator. * Update .github/workflows/TuringCI.yml * Minor bugfix in TracedModel evaluator. * Update container.jl * Update Project.toml * Commented out tests related to control flow. TuringLang/Libtask.jl/issues/96 * Commented out tests related to control flow. TuringLang/Libtask.jl/issues/96 * Update Project.toml * Update src/essential/container.jl * Update AdvancedSMC.jl Co-authored-by: KDr2 * CompatHelper: add new compat entry for Libtask at version 0.6 for package test, (keep existing compat) (#1765) Co-authored-by: CompatHelper Julia * Fix for HMCs `dot_assume` (#1758) * fixed dot_assume for hmc * copy-pasted tests from dynamicppl integration tests * inspecting what in the world is going on with tests * trying again * skip failing test for TrackerAD * bump patch version * fixed typo in tests * Rename `Turing.Core` to `Turing.Essential` * Deprecate Turing.Core Co-authored-by: Tor Erlend Fjelde * fixed a numerical test * version bump Co-authored-by: David Widmann Co-authored-by: David Widmann * Minor fixes. * Minor fixes. * Minor fix. * Update Julia version in CI * Merge branch 'libtask-integration' of github.com:TuringLang/Turing.jl into libtask-integration * Update Inference.jl * Minor fixes. * Add back `imm` test. * Minor tweaks to make single distribution tests more robust. * Update Project.toml Co-authored-by: David Widmann * Apply suggestions from code review Co-authored-by: David Widmann * Update Project.toml * Switch to StableRNGs for broken tests. * Apply suggestions from code review Co-authored-by: David Widmann * Minor tweaks. * Use StableRNG for GMM test. * Update Project.toml Co-authored-by: KDr2 Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: CompatHelper Julia Co-authored-by: David Widmann Co-authored-by: David Widmann --- .github/workflows/Documentation.yml | 2 +- .github/workflows/DynamicHMC.yml | 2 +- .github/workflows/Numerical.yml | 2 +- .github/workflows/StanCI.yml | 2 +- .github/workflows/TuringCI.yml | 2 +- Project.toml | 8 ++--- src/essential/container.jl | 37 +++++++++++++++++---- src/inference/AdvancedSMC.jl | 14 ++++++-- test/Project.toml | 4 ++- test/contrib/inference/sghmc.jl | 18 +++++----- test/inference/gibbs_conditional.jl | 21 ++++++------ test/inference/hmc.jl | 51 ++++++++++++++++------------- test/inference/mh.jl | 7 ++-- test/runtests.jl | 3 +- test/stdlib/RandomMeasures.jl | 18 +++++----- test/stdlib/distributions.jl | 13 ++++---- 16 files changed, 126 insertions(+), 78 deletions(-) diff --git a/.github/workflows/Documentation.yml b/.github/workflows/Documentation.yml index 4c01e75f8..212e821b0 100644 --- a/.github/workflows/Documentation.yml +++ b/.github/workflows/Documentation.yml @@ -15,7 +15,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v1 with: - version: '1.6' + version: '1' - name: Set up Ruby 2.6 uses: actions/setup-ruby@v1 with: diff --git a/.github/workflows/DynamicHMC.yml b/.github/workflows/DynamicHMC.yml index 48296e95c..88ea0aae0 100644 --- a/.github/workflows/DynamicHMC.yml +++ b/.github/workflows/DynamicHMC.yml @@ -12,8 +12,8 @@ jobs: strategy: matrix: version: - - '1.3' - '1.6' + - '1' os: - ubuntu-latest arch: diff --git a/.github/workflows/Numerical.yml b/.github/workflows/Numerical.yml index 06bb30b30..25ecf7e66 100644 --- a/.github/workflows/Numerical.yml +++ b/.github/workflows/Numerical.yml @@ -12,8 +12,8 @@ jobs: strategy: matrix: version: - - '1.3' - '1.6' + - '1' os: - ubuntu-latest arch: diff --git a/.github/workflows/StanCI.yml b/.github/workflows/StanCI.yml index 5a4c2da46..78cfb817a 100644 --- a/.github/workflows/StanCI.yml +++ b/.github/workflows/StanCI.yml @@ -12,8 +12,8 @@ jobs: strategy: matrix: version: - - '1.3' - '1.6' + - '1' os: - ubuntu-latest arch: diff --git a/.github/workflows/TuringCI.yml b/.github/workflows/TuringCI.yml index 56ca280cd..b54c35084 100644 --- a/.github/workflows/TuringCI.yml +++ b/.github/workflows/TuringCI.yml @@ -13,8 +13,8 @@ jobs: strategy: matrix: version: - - '1.3' - '1.6' + - '1' os: - ubuntu-latest arch: diff --git a/Project.toml b/Project.toml index 589169486..475ff250b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "Turing" uuid = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" -version = "0.19.5" +version = "0.20" [deps] AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001" @@ -37,7 +37,7 @@ ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" AbstractMCMC = "3.2" AdvancedHMC = "0.3.0" AdvancedMH = "0.6" -AdvancedPS = "0.2.4" +AdvancedPS = "0.3.3" AdvancedVI = "0.1" BangBang = "0.3" Bijectors = "0.8, 0.9, 0.10" @@ -48,7 +48,7 @@ DocStringExtensions = "0.8" DynamicPPL = "0.17.2" EllipticalSliceSampling = "0.4" ForwardDiff = "0.10.3" -Libtask = "0.4, 0.5.3" +Libtask = "0.6.6" MCMCChains = "5" NamedArrays = "0.9" Reexport = "0.2, 1" @@ -59,4 +59,4 @@ StatsBase = "0.32, 0.33" StatsFuns = "0.8, 0.9" Tracker = "0.2.3" ZygoteRules = "0.2" -julia = "1.3, 1.4, 1.5, 1.6" +julia = "1.6" diff --git a/src/essential/container.jl b/src/essential/container.jl index 1c135e9f2..74fe845fe 100644 --- a/src/essential/container.jl +++ b/src/essential/container.jl @@ -1,19 +1,43 @@ -struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model} +struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model,E<:Tuple} model::M sampler::S varinfo::V + evaluator::E end -# needed? -function TracedModel{SampleFromPrior}( +function TracedModel( model::Model, sampler::AbstractSampler, varinfo::AbstractVarInfo, -) - return TracedModel(model, SampleFromPrior(), varinfo) +) + # evaluate!!(m.model, varinfo, SamplingContext(Random.AbstractRNG, m.sampler, DefaultContext())) + context = SamplingContext(DynamicPPL.Random.GLOBAL_RNG, sampler, DefaultContext()) + evaluator = _get_evaluator(model, varinfo, context) + return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}(model, sampler, varinfo, evaluator) end -(f::TracedModel)() = f.model(f.varinfo, f.sampler) +# Smiliar to `evaluate!!` except that we return the evaluator signature without excutation. +# TODO: maybe move to DynamicPPL +@generated function _get_evaluator( + model::Model{_F,argnames}, varinfo, context +) where {_F,argnames} + unwrap_args = [ + :($DynamicPPL.matchingvalue(context_new, varinfo, model.args.$var)) for var in argnames + ] + # We want to give `context` precedence over `model.context` while also + # preserving the leaf context of `context`. We can do this by + # 1. Set the leaf context of `model.context` to `leafcontext(context)`. + # 2. Set leaf context of `context` to the context resulting from (1). + # The result is: + # `context` -> `childcontext(context)` -> ... -> `model.context` + # -> `childcontext(model.context)` -> ... -> `leafcontext(context)` + return quote + context_new = DynamicPPL.setleafcontext( + context, DynamicPPL.setleafcontext(model.context, DynamicPPL.leafcontext(context)) + ) + (model.f, model, DynamicPPL.resetlogp!!(varinfo), context_new, $(unwrap_args...)) + end +end function Base.copy(trace::AdvancedPS.Trace{<:TracedModel}) f = trace.f @@ -46,4 +70,3 @@ function AdvancedPS.reset_logprob!(f::TracedModel) DynamicPPL.resetlogp!!(f.varinfo) return end - diff --git a/src/inference/AdvancedSMC.jl b/src/inference/AdvancedSMC.jl index 3296d615c..ba8bef99b 100644 --- a/src/inference/AdvancedSMC.jl +++ b/src/inference/AdvancedSMC.jl @@ -322,9 +322,19 @@ function DynamicPPL.assume( spl::Sampler{<:Union{PG,SMC}}, dist::Distribution, vn::VarName, - ::Any + __vi__::AbstractVarInfo ) - vi = AdvancedPS.current_trace().f.varinfo + local vi + try + vi = AdvancedPS.current_trace().f.varinfo + catch e + # NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`. + if e == KeyError(:__trace) || current_task().storage isa Nothing + vi = __vi__ + else + rethrow(e) + end + end if inspace(vn, spl) if ~haskey(vi, vn) r = rand(rng, dist) diff --git a/test/Project.toml b/test/Project.toml index 1d4368968..1c1ffc16d 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -27,11 +27,12 @@ StatsFuns = "4c63d2b9-4356-54db-8cca-17b64c39e42c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" [compat] AbstractMCMC = "3.2.1" AdvancedMH = "0.6" -AdvancedPS = "0.2" +AdvancedPS = "0.3" AdvancedVI = "0.1" Clustering = "0.14" CmdStan = "6.0.8" @@ -53,4 +54,5 @@ StatsBase = "0.33" StatsFuns = "0.9.5" Tracker = "0.2.11" Zygote = "0.5.4, 0.6" +StableRNGs = "1" julia = "1.3" diff --git a/test/contrib/inference/sghmc.jl b/test/contrib/inference/sghmc.jl index 054a0ecd7..959f40008 100644 --- a/test/contrib/inference/sghmc.jl +++ b/test/contrib/inference/sghmc.jl @@ -1,11 +1,4 @@ @testset "sghmc.jl" begin - @numerical_testset "sghmc inference" begin - Random.seed!(125) - - alg = SGHMC(; learning_rate=0.02, momentum_decay=0.5) - chain = sample(gdemo_default, alg, 10_000) - check_gdemo(chain, atol = 0.1) - end @turing_testset "sghmc constructor" begin alg = SGHMC(; learning_rate=0.01, momentum_decay=0.1) @test alg isa SGHMC @@ -22,6 +15,13 @@ sampler = Turing.Sampler(alg) @test sampler isa Turing.Sampler{<:SGHMC} end + @numerical_testset "sghmc inference" begin + rng = StableRNG(123) + + alg = SGHMC(; learning_rate=0.02, momentum_decay=0.5) + chain = sample(rng, gdemo_default, alg, 10_000) + check_gdemo(chain, atol = 0.1) + end end @testset "sgld.jl" begin @@ -42,9 +42,9 @@ end @test sampler isa Turing.Sampler{<:SGLD} end @numerical_testset "sgld inference" begin - Random.seed!(125) + rng = StableRNG(1) - chain = sample(gdemo_default, SGLD(; stepsize = PolynomialStepsize(0.5)), 10_000) + chain = sample(rng, gdemo_default, SGLD(; stepsize = PolynomialStepsize(0.5)), 20_000) check_gdemo(chain, atol = 0.2) # Weight samples by step sizes (cf section 4.2 in the paper by Welling and Teh) diff --git a/test/inference/gibbs_conditional.jl b/test/inference/gibbs_conditional.jl index a11a7a40d..094c949aa 100644 --- a/test/inference/gibbs_conditional.jl +++ b/test/inference/gibbs_conditional.jl @@ -1,5 +1,5 @@ -@turing_testset "gibbs conditionals" begin - Random.seed!(100) +@turing_testset "gibbs conditionals.jl" begin + Random.seed!(1000); rng = StableRNG(123) @turing_testset "gdemo" begin # We consider the model @@ -40,7 +40,7 @@ GibbsConditional(:m, cond_m), GibbsConditional(:s, _ -> Normal(s_posterior_mean, 0)), ) - chain = sample(gdemo_default, sampler1, 10_000) + chain = sample(rng, gdemo_default, sampler1, 10_000) cond_m_mean = mean(cond_m((s = s_posterior_mean,))) check_numerical(chain, [:m, :s], [cond_m_mean, s_posterior_mean]) @test all(==(s_posterior_mean), chain[:s][2:end]) @@ -50,18 +50,19 @@ GibbsConditional(:m, _ -> Normal(m_posterior_mean, 0)), GibbsConditional(:s, cond_s), ) - chain = sample(gdemo_default, sampler2, 10_000) + chain = sample(rng, gdemo_default, sampler2, 10_000) cond_s_mean = mean(cond_s((m = m_posterior_mean,))) check_numerical(chain, [:m, :s], [m_posterior_mean, cond_s_mean]) @test all(==(m_posterior_mean), chain[:m][2:end]) # and one for both using the conditional sampler3 = Gibbs(GibbsConditional(:m, cond_m), GibbsConditional(:s, cond_s)) - chain = sample(gdemo_default, sampler3, 10_000) + chain = sample(rng, gdemo_default, sampler3, 10_000) check_gdemo(chain) end @turing_testset "GMM" begin + Random.seed!(1000); rng = StableRNG(123) # We consider the model # ```math # μₖ ~ Normal(m, σ_μ), k = 1, …, K, @@ -77,9 +78,9 @@ N = 20 # number of observations # We generate data - μ_data = rand(Normal(m, sqrt(σ²_μ)), K) - z_data = rand(Categorical(π), N) - x_data = rand(MvNormal(μ_data[z_data], σ²_x * I)) + μ_data = rand(rng, Normal(m, sqrt(σ²_μ)), K) + z_data = rand(rng, Categorical(π), N) + x_data = rand(rng, MvNormal(μ_data[z_data], σ²_x * I)) @model function mixture(x) μ ~ $(MvNormal(fill(m, K), σ²_μ * I)) @@ -132,14 +133,14 @@ sampler2 = Gibbs(GibbsConditional(:z, cond_z), MH(:μ)) sampler3 = Gibbs(GibbsConditional(:z, cond_z), HMC(0.01, 7, :μ)) for sampler in (sampler1, sampler2, sampler3) - chain = sample(model, sampler, 10_000) + chain = sample(rng, model, sampler, 10_000) μ_hat = estimate(chain, :μ) lμ_hat, uμ_hat = extrema(μ_hat) @test isapprox([lμ_data, uμ_data], [lμ_hat, uμ_hat], atol=0.1) z_hat = estimatez(chain, :z, 1:2) - ari, _, _, _ = randindex(z_data, Int.(z_hat)) + ari, _, _, _ = Clustering.randindex(z_data, Int.(z_hat)) @test isapprox(ari, 1, atol=0.1) end end diff --git a/test/inference/hmc.jl b/test/inference/hmc.jl index a8098bb10..249ef76cb 100644 --- a/test/inference/hmc.jl +++ b/test/inference/hmc.jl @@ -1,8 +1,7 @@ @testset "hmc.jl" begin + # Set a seed + rng = StableRNG(123) @numerical_testset "constrained bounded" begin - # Set a seed - Random.seed!(5) - obs = [0,1,0,1,1,1,1,1,1,1] @model constrained_test(obs) = begin @@ -14,6 +13,7 @@ end chain = sample( + rng, constrained_test(obs), HMC(1.5, 3),# using a large step size (1.5) 1000) @@ -33,6 +33,7 @@ end chain = sample( + rng, constrained_simplex_test(obs12), HMC(0.75, 2), 1000) @@ -40,9 +41,8 @@ check_numerical(chain, ["ps[1]", "ps[2]"], [5/16, 11/16], atol=0.015) end @numerical_testset "hmc reverse diff" begin - Random.seed!(1) alg = HMC(0.1, 10) - res = sample(gdemo_default, alg, 4000) + res = sample(rng, gdemo_default, alg, 4000) check_gdemo(res, rtol=0.1) end @turing_testset "matrix support" begin @@ -53,7 +53,7 @@ model_f = hmcmatrixsup() n_samples = 1_000 vs = map(1:3) do _ - chain = sample(model_f, HMC(0.15, 7), n_samples) + chain = sample(rng, model_f, HMC(0.15, 7), n_samples) r = reshape(Array(group(chain, :v)), n_samples, 2, 2) reshape(mean(r; dims = 1), 2, 2) end @@ -103,27 +103,34 @@ end # Sampling - chain = sample(bnn(ts), HMC(0.1, 5), 10) + chain = sample(rng, bnn(ts), HMC(0.1, 5), 10) end - Random.seed!(123) - @numerical_testset "hmcda inference" begin - alg1 = HMCDA(1000, 0.8, 0.015) + + @numerical_testset "hmcda inference" begin + alg1 = HMCDA(500, 0.8, 0.015) # alg2 = Gibbs(HMCDA(200, 0.8, 0.35, :m), HMC(0.25, 3, :s)) - alg3 = Gibbs(PG(10, :s), HMCDA(200, 0.8, 0.005, :m)) + # alg3 = Gibbs(HMC(0.25, 3, :m), PG(30, 3, :s)) # alg3 = PG(50, 2000) - res1 = sample(gdemo_default, alg1, 3000) + res1 = sample(rng, gdemo_default, alg1, 3000) check_gdemo(res1) # res2 = sample(gdemo([1.5, 2.0]), alg2) # # @test mean(res2[:s]) ≈ 49/24 atol=0.2 # @test mean(res2[:m]) ≈ 7/6 atol=0.2 + end + + @numerical_testset "hmcda+gibbs inference" begin + rng = StableRNG(123) + Random.seed!(12345) # particle samplers do not support user-provided `rng` yet + alg3 = Gibbs(PG(20, :s), HMCDA(500, 0.8, 0.25, init_ϵ = 0.05, :m)) - res3 = sample(gdemo_default, alg3, 1000) + res3 = sample(rng, gdemo_default, alg3, 3000, discard_initial=1000) check_gdemo(res3) end + @turing_testset "hmcda constructor" begin alg = HMCDA(0.8, 0.75) println(alg) @@ -145,7 +152,7 @@ end @numerical_testset "nuts inference" begin alg = NUTS(1000, 0.8) - res = sample(gdemo_default, alg, 6000) + res = sample(rng, gdemo_default, alg, 6000) check_gdemo(res) end @turing_testset "nuts constructor" begin @@ -164,8 +171,8 @@ @turing_testset "check discard" begin alg = NUTS(100, 0.8) - c1 = sample(gdemo_default, alg, 500, discard_adapt = true) - c2 = sample(gdemo_default, alg, 500, discard_adapt = false) + c1 = sample(rng, gdemo_default, alg, 500, discard_adapt = true) + c2 = sample(rng, gdemo_default, alg, 500, discard_adapt = false) @test size(c1, 1) == 500 @test size(c2, 1) == 500 @@ -174,9 +181,9 @@ alg1 = Gibbs(PG(10, :m), NUTS(100, 0.65, :s)) alg2 = Gibbs(PG(10, :m), HMC(0.1, 3, :s)) alg3 = Gibbs(PG(10, :m), HMCDA(100, 0.65, 0.3, :s)) - @test sample(gdemo_default, alg1, 300) isa Chains - @test sample(gdemo_default, alg2, 300) isa Chains - @test sample(gdemo_default, alg3, 300) isa Chains + @test sample(rng, gdemo_default, alg1, 300) isa Chains + @test sample(rng, gdemo_default, alg2, 300) isa Chains + @test sample(rng, gdemo_default, alg3, 300) isa Chains end @turing_testset "Regression tests" begin @@ -185,19 +192,19 @@ m = Matrix{T}(undef, 2, 3) m .~ MvNormal(zeros(2), I) end - @test sample(mwe1(), HMC(0.2, 4), 1_000) isa Chains + @test sample(rng, mwe1(), HMC(0.2, 4), 1_000) isa Chains @model function mwe2(::Type{T} = Matrix{Float64}) where T m = T(undef, 2, 3) m .~ MvNormal(zeros(2), I) end - @test sample(mwe2(), HMC(0.2, 4), 1_000) isa Chains + @test sample(rng, mwe2(), HMC(0.2, 4), 1_000) isa Chains # https://github.com/TuringLang/Turing.jl/issues/1308 @model function mwe3(::Type{T} = Array{Float64}) where T m = T(undef, 2, 3) m .~ MvNormal(zeros(2), I) end - @test sample(mwe3(), HMC(0.2, 4), 1_000) isa Chains + @test sample(rng, mwe3(), HMC(0.2, 4), 1_000) isa Chains end end diff --git a/test/inference/mh.jl b/test/inference/mh.jl index d92f3c819..f32202394 100644 --- a/test/inference/mh.jl +++ b/test/inference/mh.jl @@ -1,6 +1,6 @@ @testset "mh.jl" begin @turing_testset "mh constructor" begin - Random.seed!(0) + Random.seed!(10) N = 500 s1 = MH( (:s, InverseGamma(2,3)), @@ -24,6 +24,7 @@ chain = sample(gdemo_default, alg, 2000) check_gdemo(chain, atol = 0.1) + Random.seed!(125) # MH with Gaussian proposal alg = MH( (:s, InverseGamma(2,3)), @@ -31,17 +32,19 @@ chain = sample(gdemo_default, alg, 7000) check_gdemo(chain, atol = 0.1) + Random.seed!(125) # MH within Gibbs alg = Gibbs(MH(:m), MH(:s)) chain = sample(gdemo_default, alg, 2000) check_gdemo(chain, atol = 0.1) + Random.seed!(125) # MoGtest gibbs = Gibbs( CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1,GKernel(1)), (:mu2,GKernel(1))) ) - chain = sample(MoGtest_default, gibbs, 5000) + chain = sample(MoGtest_default, gibbs, 500) check_MoGtest_default(chain, atol = 0.15) end diff --git a/test/runtests.jl b/test/runtests.jl index ad1e59695..bdc64df8f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -26,6 +26,7 @@ using LinearAlgebra using Pkg using Random using Test +using StableRNGs using AdvancedPS: ResampleWithESSThreshold, resample_systematic, resample_multinomial using AdvancedVI: TruncatedADAGrad, DecayedADAGrad, apply! @@ -41,7 +42,7 @@ using Turing.Variational: TruncatedADAGrad, DecayedADAGrad, AdvancedVI setprogress!(false) -include("test_utils/AllUtils.jl") +include(pkgdir(Turing)*"/test/test_utils/AllUtils.jl") @testset "Turing" begin @testset "essential" begin diff --git a/test/stdlib/RandomMeasures.jl b/test/stdlib/RandomMeasures.jl index db18b8f20..ed91b7854 100644 --- a/test/stdlib/RandomMeasures.jl +++ b/test/stdlib/RandomMeasures.jl @@ -41,19 +41,21 @@ end # Generate some test data. - Random.seed!(1) - data = vcat(randn(10), randn(10) .- 5, randn(10) .+ 10) - data .-= mean(data) + Random.seed!(1); + data = vcat(randn(10), randn(10) .- 5, randn(10) .+ 10); + data .-= mean(data); data /= std(data); # MCMC sampling - Random.seed!(2) - iterations = 500 + Random.seed!(2); + iterations = 500; model_fun = infiniteGMM(data); - chain = sample(model_fun, SMC(), iterations) + # TODO: control flow not supported, see + # https://github.com/TuringLang/Libtask.jl/issues/96 + # chain = sample(model_fun, SMC(), iterations); - @test chain isa MCMCChains.Chains - @test eltype(chain.value) === Union{Float64, Missing} + # @test chain isa MCMCChains.Chains + # @test eltype(chain.value) === Union{Float64, Missing} end # partitions = [ # [[1, 2, 3, 4]], diff --git a/test/stdlib/distributions.jl b/test/stdlib/distributions.jl index 17a686034..6fed87f19 100644 --- a/test/stdlib/distributions.jl +++ b/test/stdlib/distributions.jl @@ -1,7 +1,8 @@ @testset "distributions.jl" begin + rng = StableRNG(12345) @turing_testset "distributions functions" begin ns = 10 - logitp = randn() + logitp = randn(rng) d1 = BinomialLogit(ns, logitp) d2 = Binomial(ns, logistic(logitp)) k = 3 @@ -9,12 +10,10 @@ end @turing_testset "distributions functions" begin - Random.seed!(1) - d = OrderedLogistic(-2, [-1, 1]) n = 1_000_000 - y = rand(d, n) + y = rand(rng, d, n) K = length(d.cutpoints) + 1 p = [mean(==(k), y) for k in 1:K] # empirical probs pmf = [exp(logpdf(d, k)) for k in 1:K] @@ -31,9 +30,9 @@ end @numerical_testset "single distribution correctness" begin - Random.seed!(12321) + rng = StableRNG(1) - n_samples = 50_000 + n_samples = 10_000 mean_tol = 0.1 var_atol = 1.0 var_tol = 0.5 @@ -113,7 +112,7 @@ @model m() = x ~ dist - chn = sample(m(), HMC(0.2, 1), n_samples) + chn = sample(rng, m(), HMC(0.05, 20), n_samples) # Numerical tests. check_dist_numerical(dist,