Skip to content

Commit

Permalink
Merge branch 'master' into torfjelde/gibbs-new-improv
Browse files Browse the repository at this point in the history
  • Loading branch information
yebai authored Jul 15, 2024
2 parents 30ab9e0 + 142dab3 commit d40d82b
Show file tree
Hide file tree
Showing 9 changed files with 91 additions and 28 deletions.
2 changes: 1 addition & 1 deletion .JuliaFormatter.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ ignore = [
"src/mcmc/abstractmcmc.jl",
"test/experimental/gibbs.jl",
"test/test_utils/numerical_tests.jl",
# https://github.com/TuringLang/Turing.jl/pull/2218/files
# https://github.com/TuringLang/Turing.jl/pull/2218/files
"src/mcmc/Inference.jl",
"test/mcmc/Inference.jl",
# https://github.com/TuringLang/Turing.jl/pull/1887 # Enzyme PR
Expand Down
46 changes: 34 additions & 12 deletions .github/workflows/Tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -67,21 +67,43 @@ jobs:
echo "Julia version: ${{ matrix.version }}"
echo "Number of threads: ${{ matrix.num_threads }}"
echo "Test arguments: ${{ matrix.test-args }}"
- name: (De)activate coverage analysis
run: echo "COVERAGE=${{ matrix.version == '1' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 2 }}" >> "$GITHUB_ENV"
shell: bash
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '${{ matrix.version }}'
arch: ${{ matrix.arch }}
- uses: actions/cache@v4
env:
cache-name: cache-artifacts
with:
path: ~/.julia/artifacts
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
restore-keys: |
${{ runner.os }}-test-${{ env.cache-name }}-
${{ runner.os }}-test-
${{ runner.os }}-
- uses: julia-actions/julia-buildpkg@latest
- uses: julia-actions/cache@v1
- uses: julia-actions/julia-buildpkg@v1
# TODO: Use julia-actions/julia-runtest when test_args are supported
# Custom calls of Pkg.test tend to miss features such as adjustments for CompatHelper PRs
# Ref https://github.com/julia-actions/julia-runtest/pull/73
- name: Call Pkg.test
run: julia --color=yes --depwarn=yes --check-bounds=yes --threads=${{ matrix.num_threads }} --project=@. -e 'import Pkg; Pkg.test(; test_args=ARGS)' -- ${{ matrix.test-args }}
run: julia --color=yes --inline=yes --depwarn=yes --check-bounds=yes --threads=${{ matrix.num_threads }} --project=@. -e 'import Pkg; Pkg.test(; coverage=parse(Bool, ENV["COVERAGE"]), test_args=ARGS)' -- ${{ matrix.test-args }}
- uses: julia-actions/julia-processcoverage@v1
if: ${{ env.COVERAGE }}
- uses: codecov/codecov-action@v4
if: ${{ env.COVERAGE }}
with:
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
file: lcov.info
- uses: coverallsapp/github-action@v2
if: ${{ env.COVERAGE }}
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: lcov.info
flag-name: run-${{ join(matrix.*, '-') }}
parallel: true

finish:
needs: test
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Coveralls Finished
uses: coverallsapp/github-action@v2
with:
parallel-finished: true
7 changes: 0 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,6 @@ https://turinglang.org/docs/

See [releases](https://github.com/TuringLang/Turing.jl/releases).

## Want to contribute?

Turing was originally created and is now managed by Hong Ge. Current and past Turing team members include [Hong Ge](http://mlg.eng.cam.ac.uk/hong/), [Kai Xu](http://mlg.eng.cam.ac.uk/?portfolio=kai-xu), [Martin Trapp](http://martint.blog), [Mohamed Tarek](https://github.com/mohamed82008), [Cameron Pfiffer](https://business.uoregon.edu/faculty/cameron-pfiffer), [Tor Fjelde](http://retiredparkingguard.com/about.html).
You can see the complete list on Github: https://github.com/TuringLang/Turing.jl/graphs/contributors.

Turing is an open source project so if you feel you have some relevant skills and are interested in contributing, please get in touch. See the [Contributing](https://turinglang.org/dev/docs/contributing/guide) page for details on the process. You can contribute by opening issues on Github, implementing things yourself, and making a pull request. We would also appreciate example models written using Turing.

## Issues and Discussions

Issues related to bugs and feature requests are welcome on the [issues page](https://github.com/TuringLang/Turing.jl/issues), while discussions and questions about statistical applications and theory should take place on the [Discussions page](https://github.com/TuringLang/Turing.jl/discussions) or [our channel](https://julialang.slack.com/messages/turing/) (`#turing`) in the Julia Slack chat. If you do not have an invitation to Julia's Slack, you can get one by going [here](https://julialang.org/slack/).
13 changes: 13 additions & 0 deletions src/mcmc/Inference.jl
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,15 @@ DynamicPPL.getlogp(t::Transition) = t.lp
# Metadata of VarInfo object
metadata(vi::AbstractVarInfo) = (lp = getlogp(vi),)

# TODO: Implement additional checks for certain samplers, e.g.
# HMC not supporting discrete parameters.
"""
    _check_model(model::DynamicPPL.Model)
    _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm)

Validate `model` with `DynamicPPL.check_model`, throwing an error (rather
than merely warning) when the model is invalid.

The two-argument method is a hook allowing sampler-specific validation to be
added via dispatch on `alg`; this fallback ignores the algorithm and simply
runs the generic model check.
"""
_check_model(model::DynamicPPL.Model) =
    DynamicPPL.check_model(model; error_on_failure=true)
_check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm) = _check_model(model)

#########################################
# Default definitions for the interface #
#########################################
Expand All @@ -258,8 +267,10 @@ function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg, model), N; kwargs...)
end

Expand All @@ -282,8 +293,10 @@ function AbstractMCMC.sample(
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg, model), ensemble, N, n_chains;
kwargs...)
end
Expand Down
22 changes: 22 additions & 0 deletions test/mcmc/Inference.jl
Original file line number Diff line number Diff line change
Expand Up @@ -559,6 +559,28 @@ using Turing
@test all(xs[:, 1] .=== [1, missing, 3])
@test all(xs[:, 2] .=== [missing, 2, 4])
end

# Tests for the `check_model` keyword of `sample`: with the check enabled an
# invalid model must throw, and with it disabled sampling must proceed.
@testset "check model" begin
# Invalid model: the same variable name `x` is assigned twice.
@model function demo_repeated_varname()
x ~ Normal(0, 1)
x ~ Normal(x, 1)
end

# With the check enabled, sampling the invalid model should error.
@test_throws ErrorException sample(
demo_repeated_varname(), NUTS(), 1000; check_model=true
)
# Make sure that disabling the check also works.
# (The `(expr; true)` idiom just asserts that the call completes without throwing.)
@test (sample(
demo_repeated_varname(), Prior(), 10; check_model=false
); true)

# Invalid model of a different kind: `y[1:1] ~ ...` on a `missing` argument
# does not mark `y` as a parameter, so the model check should fail.
@model function demo_incorrect_missing(y)
y[1:1] ~ MvNormal(zeros(1), 1)
end
@test_throws ErrorException sample(
demo_incorrect_missing([missing]), NUTS(), 1000; check_model=true
)
end
end

end
4 changes: 3 additions & 1 deletion test/mcmc/gibbs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,9 @@ using Turing.RandomMeasures: ChineseRestaurantProcess, DirichletProcess
Random.seed!(100)
alg = Gibbs(CSMC(15, :s), HMC(0.2, 4, :m; adtype=adbackend))
chain = sample(gdemo(1.5, 2.0), alg, 10_000)
check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.15)
check_numerical(chain, [:m], [7 / 6]; atol=0.15)
# Be more relaxed with the tolerance of the variance.
check_numerical(chain, [:s], [49 / 24]; atol=0.35)

Random.seed!(100)

Expand Down
2 changes: 1 addition & 1 deletion test/mcmc/hmc.jl
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ using Turing

# The discrepancies in the chains are in the tails, so we can't just compare the mean, etc.
# KS will compare the empirical CDFs, which seems like a reasonable thing to do here.
@test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.01
@test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.001
end
end

Expand Down
2 changes: 1 addition & 1 deletion test/mcmc/is.jl
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ using Turing
ref = reference(n)

Random.seed!(seed)
chain = sample(model, alg, n)
chain = sample(model, alg, n; check_model=false)
sampled = get(chain, [:a, :b, :lp])

@test vec(sampled.a) == ref.as
Expand Down
21 changes: 16 additions & 5 deletions test/mcmc/mh.jl
Original file line number Diff line number Diff line change
Expand Up @@ -44,30 +44,41 @@ GKernel(var) = (x) -> Normal(x, sqrt.(var))
# c6 = sample(gdemo_default, s6, N)
end
@testset "mh inference" begin
# Set the initial parameters, because if we get unlucky with the initial state,
# these chains are too short to converge to reasonable numbers.
discard_initial = 1000
initial_params = [1.0, 1.0]

Random.seed!(125)
alg = MH()
chain = sample(gdemo_default, alg, 10_000)
chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
check_gdemo(chain; atol=0.1)

Random.seed!(125)
# MH with Gaussian proposal
alg = MH((:s, InverseGamma(2, 3)), (:m, GKernel(1.0)))
chain = sample(gdemo_default, alg, 10_000)
chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
check_gdemo(chain; atol=0.1)

Random.seed!(125)
# MH within Gibbs
alg = Gibbs(MH(:m), MH(:s))
chain = sample(gdemo_default, alg, 10_000)
chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params)
check_gdemo(chain; atol=0.1)

Random.seed!(125)
# MoGtest
gibbs = Gibbs(
CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1, GKernel(1)), (:mu2, GKernel(1)))
)
chain = sample(MoGtest_default, gibbs, 500)
check_MoGtest_default(chain; atol=0.15)
chain = sample(
MoGtest_default,
gibbs,
500;
discard_initial=100,
initial_params=[1.0, 1.0, 0.0, 0.0, 1.0, 4.0],
)
check_MoGtest_default(chain; atol=0.2)
end

# Test MH shape passing.
Expand Down

0 comments on commit d40d82b

Please sign in to comment.