diff --git a/DESCRIPTION b/DESCRIPTION
index 2be34caa..eb1b244c 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: monty
 Title: Monte Carlo Models
-Version: 0.2.18
+Version: 0.2.19
 Authors@R: c(person("Rich", "FitzJohn", role = c("aut", "cre"),
                     email = "rich.fitzjohn@gmail.com"),
              person("Wes", "Hinsley", role = "aut"),
@@ -31,7 +31,6 @@ Suggests:
     coda,
     decor,
     knitr,
-    dust,
     mockery,
     mvtnorm,
     numDeriv,
@@ -43,5 +42,3 @@ Suggests:
 Config/testthat/edition: 3
 Language: en-GB
 VignetteBuilder: knitr
-Remotes:
-    mrc-ide/dust
diff --git a/R/cpp11.R b/R/cpp11.R
index 9c38a8cf..9b37f389 100644
--- a/R/cpp11.R
+++ b/R/cpp11.R
@@ -76,6 +76,18 @@ monty_rng_state <- function(ptr, is_float) {
   .Call(`_monty_monty_rng_state`, ptr, is_float)
 }
 
+cpp_monty_random_real <- function(ptr) {
+  .Call(`_monty_cpp_monty_random_real`, ptr)
+}
+
+cpp_monty_random_binomial <- function(r_size, r_prob, ptr) {
+  .Call(`_monty_cpp_monty_random_binomial`, r_size, r_prob, ptr)
+}
+
+cpp_monty_random_exponential_rate <- function(r_rate, ptr) {
+  .Call(`_monty_cpp_monty_random_exponential_rate`, r_rate, ptr)
+}
+
 monty_rng_pointer_init <- function(n_streams, seed, long_jump, algorithm) {
   .Call(`_monty_monty_rng_pointer_init`, n_streams, seed, long_jump, algorithm)
 }
diff --git a/R/rng2.R b/R/rng2.R
new file mode 100644
index 00000000..8cded76d
--- /dev/null
+++ b/R/rng2.R
@@ -0,0 +1,23 @@
+monty_random_alloc <- function(n_streams = 1L, seed = NULL,
+                               deterministic = FALSE) {
+  float <- FALSE
+  ptr <- monty_rng_alloc(seed, n_streams, deterministic, float)
+  ret <- list(ptr = ptr, n_streams = n_streams, deterministic = deterministic)
+  class(ret) <- "monty_random_state"
+  ret
+}
+
+
+monty_random_real <- function(state) {
+  cpp_monty_random_real(state$ptr)
+}
+
+
+monty_random_binomial <- function(size, prob, state) {
+  cpp_monty_random_binomial(size, prob, state$ptr)
+}
+
+
+monty_random_exponential_rate <- function(rate, state) {
+  cpp_monty_random_exponential_rate(rate, state$ptr)
+}
diff --git a/src/cpp11.cpp b/src/cpp11.cpp
index 7628b53a..d2dc2697 100644
--- a/src/cpp11.cpp
+++ b/src/cpp11.cpp
@@ -140,6 +140,27 @@ extern "C" SEXP _monty_monty_rng_state(SEXP ptr, SEXP is_float) {
     return cpp11::as_sexp(monty_rng_state(cpp11::as_cpp>(ptr), cpp11::as_cpp>(is_float)));
   END_CPP11
 }
+// random2.cpp
+cpp11::doubles cpp_monty_random_real(SEXP ptr);
+extern "C" SEXP _monty_cpp_monty_random_real(SEXP ptr) {
+  BEGIN_CPP11
+    return cpp11::as_sexp(cpp_monty_random_real(cpp11::as_cpp<cpp11::decay_t<SEXP>>(ptr)));
+  END_CPP11
+}
+// random2.cpp
+cpp11::doubles cpp_monty_random_binomial(cpp11::doubles r_size, cpp11::doubles r_prob, SEXP ptr);
+extern "C" SEXP _monty_cpp_monty_random_binomial(SEXP r_size, SEXP r_prob, SEXP ptr) {
+  BEGIN_CPP11
+    return cpp11::as_sexp(cpp_monty_random_binomial(cpp11::as_cpp<cpp11::decay_t<cpp11::doubles>>(r_size), cpp11::as_cpp<cpp11::decay_t<cpp11::doubles>>(r_prob), cpp11::as_cpp<cpp11::decay_t<SEXP>>(ptr)));
+  END_CPP11
+}
+// random2.cpp
+cpp11::doubles cpp_monty_random_exponential_rate(cpp11::doubles r_rate, SEXP ptr);
+extern "C" SEXP _monty_cpp_monty_random_exponential_rate(SEXP r_rate, SEXP ptr) {
+  BEGIN_CPP11
+    return cpp11::as_sexp(cpp_monty_random_exponential_rate(cpp11::as_cpp<cpp11::decay_t<cpp11::doubles>>(r_rate), cpp11::as_cpp<cpp11::decay_t<SEXP>>(ptr)));
+  END_CPP11
+}
 // rng_pointer.cpp
 cpp11::sexp monty_rng_pointer_init(int n_streams, cpp11::sexp seed, int long_jump, std::string algorithm);
 extern "C" SEXP _monty_monty_rng_pointer_init(SEXP n_streams, SEXP seed, SEXP long_jump, SEXP algorithm) {
@@ -172,29 +193,32 @@ extern "C" SEXP _monty_test_xoshiro_run(SEXP obj) {
 
 extern "C" {
 static const R_CallMethodDef CallEntries[] = {
-    {"_monty_monty_rng_alloc", (DL_FUNC) &_monty_monty_rng_alloc, 4},
-    {"_monty_monty_rng_beta", (DL_FUNC) &_monty_monty_rng_beta, 6},
-    {"_monty_monty_rng_binomial", (DL_FUNC) &_monty_monty_rng_binomial, 6},
-    {"_monty_monty_rng_cauchy", (DL_FUNC) &_monty_monty_rng_cauchy, 6},
-    {"_monty_monty_rng_exponential_mean", (DL_FUNC) &_monty_monty_rng_exponential_mean, 5},
-    {"_monty_monty_rng_exponential_rate", (DL_FUNC) &_monty_monty_rng_exponential_rate, 5},
-    {"_monty_monty_rng_gamma_rate", (DL_FUNC) &_monty_monty_rng_gamma_rate, 6},
-    {"_monty_monty_rng_gamma_scale", (DL_FUNC) &_monty_monty_rng_gamma_scale, 6},
-    {"_monty_monty_rng_hypergeometric", (DL_FUNC) &_monty_monty_rng_hypergeometric, 7},
-    {"_monty_monty_rng_jump", (DL_FUNC) &_monty_monty_rng_jump, 2},
-    {"_monty_monty_rng_long_jump", (DL_FUNC) &_monty_monty_rng_long_jump, 2},
-    {"_monty_monty_rng_multinomial", (DL_FUNC) &_monty_monty_rng_multinomial, 6},
-    {"_monty_monty_rng_nbinomial", (DL_FUNC) &_monty_monty_rng_nbinomial, 6},
-    {"_monty_monty_rng_normal", (DL_FUNC) &_monty_monty_rng_normal, 7},
-    {"_monty_monty_rng_pointer_init", (DL_FUNC) &_monty_monty_rng_pointer_init, 4},
-    {"_monty_monty_rng_pointer_sync", (DL_FUNC) &_monty_monty_rng_pointer_sync, 2},
-    {"_monty_monty_rng_poisson", (DL_FUNC) &_monty_monty_rng_poisson, 5},
-    {"_monty_monty_rng_random_normal", (DL_FUNC) &_monty_monty_rng_random_normal, 5},
-    {"_monty_monty_rng_random_real", (DL_FUNC) &_monty_monty_rng_random_real, 4},
-    {"_monty_monty_rng_state", (DL_FUNC) &_monty_monty_rng_state, 2},
-    {"_monty_monty_rng_uniform", (DL_FUNC) &_monty_monty_rng_uniform, 6},
-    {"_monty_test_rng_pointer_get", (DL_FUNC) &_monty_test_rng_pointer_get, 2},
-    {"_monty_test_xoshiro_run", (DL_FUNC) &_monty_test_xoshiro_run, 1},
+    {"_monty_cpp_monty_random_binomial", (DL_FUNC) &_monty_cpp_monty_random_binomial, 3},
+    {"_monty_cpp_monty_random_exponential_rate", (DL_FUNC) &_monty_cpp_monty_random_exponential_rate, 2},
+    {"_monty_cpp_monty_random_real", (DL_FUNC) &_monty_cpp_monty_random_real, 1},
+    {"_monty_monty_rng_alloc", (DL_FUNC) &_monty_monty_rng_alloc, 4},
+    {"_monty_monty_rng_beta", (DL_FUNC) &_monty_monty_rng_beta, 6},
+    {"_monty_monty_rng_binomial", (DL_FUNC) &_monty_monty_rng_binomial, 6},
+    {"_monty_monty_rng_cauchy", (DL_FUNC) &_monty_monty_rng_cauchy, 6},
+    {"_monty_monty_rng_exponential_mean", (DL_FUNC) &_monty_monty_rng_exponential_mean, 5},
+    {"_monty_monty_rng_exponential_rate", (DL_FUNC) &_monty_monty_rng_exponential_rate, 5},
+    {"_monty_monty_rng_gamma_rate", (DL_FUNC) &_monty_monty_rng_gamma_rate, 6},
+    {"_monty_monty_rng_gamma_scale", (DL_FUNC) &_monty_monty_rng_gamma_scale, 6},
+    {"_monty_monty_rng_hypergeometric", (DL_FUNC) &_monty_monty_rng_hypergeometric, 7},
+    {"_monty_monty_rng_jump", (DL_FUNC) &_monty_monty_rng_jump, 2},
+    {"_monty_monty_rng_long_jump", (DL_FUNC) &_monty_monty_rng_long_jump, 2},
+    {"_monty_monty_rng_multinomial", (DL_FUNC) &_monty_monty_rng_multinomial, 6},
+    {"_monty_monty_rng_nbinomial", (DL_FUNC) &_monty_monty_rng_nbinomial, 6},
+    {"_monty_monty_rng_normal", (DL_FUNC) &_monty_monty_rng_normal, 7},
+    {"_monty_monty_rng_pointer_init", (DL_FUNC) &_monty_monty_rng_pointer_init, 4},
+    {"_monty_monty_rng_pointer_sync", (DL_FUNC) &_monty_monty_rng_pointer_sync, 2},
+    {"_monty_monty_rng_poisson", (DL_FUNC) &_monty_monty_rng_poisson, 5},
+    {"_monty_monty_rng_random_normal", (DL_FUNC) &_monty_monty_rng_random_normal, 5},
+    {"_monty_monty_rng_random_real", (DL_FUNC) &_monty_monty_rng_random_real, 4},
+    {"_monty_monty_rng_state", (DL_FUNC) &_monty_monty_rng_state, 2},
+    {"_monty_monty_rng_uniform", (DL_FUNC) &_monty_monty_rng_uniform, 6},
+    {"_monty_test_rng_pointer_get", (DL_FUNC) &_monty_test_rng_pointer_get, 2},
+    {"_monty_test_xoshiro_run", (DL_FUNC) &_monty_test_xoshiro_run, 1},
     {NULL, NULL, 0}
 };
 }
diff --git a/src/random2.cpp b/src/random2.cpp
new file mode 100644
index 00000000..5786f0e4
--- /dev/null
+++ b/src/random2.cpp
@@ -0,0 +1,73 @@
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+using default_rng64 = monty::random::prng<monty::random::generator<double>>;
+
+template <typename T>
+void check_length(T x, size_t len, const char * name) {
+  const size_t len_given = Rf_length(x);
+  if (len_given != len) {
+    cpp11::stop("Expected '%s' to have length %d, not %d",
+                name, len, len_given);
+  }
+}
+
+
+[[cpp11::register]]
+cpp11::doubles cpp_monty_random_real(SEXP ptr) {
+  default_rng64 *rng = cpp11::as_cpp<cpp11::external_pointer<default_rng64>>(ptr).get();
+  const int n = rng->size();
+  cpp11::writable::doubles r_y = cpp11::writable::doubles(n);
+  double *y = REAL(r_y);
+  for (int i = 0; i < n; ++i) {
+    y[i] = monty::random::random_real<double>(rng->state(i));
+  }
+  return r_y;
+}
+
+
+[[cpp11::register]]
+cpp11::doubles cpp_monty_random_binomial(cpp11::doubles r_size,
+                                         cpp11::doubles r_prob,
+                                         SEXP ptr) {
+  default_rng64 *rng = cpp11::as_cpp<cpp11::external_pointer<default_rng64>>(ptr).get();
+  const int n = rng->size();
+  check_length(r_size, n, "size");
+  check_length(r_prob, n, "prob");
+  const double * size = REAL(r_size);
+  const double * prob = REAL(r_prob);
+
+  cpp11::writable::doubles r_y = cpp11::writable::doubles(n);
+  double *y = REAL(r_y);
+  for (int i = 0; i < n; ++i) {
+    auto &state = rng->state(i);
+    y[i] = monty::random::binomial(state, size[i], prob[i]);
+  }
+
+  return r_y;
+}
+
+
+[[cpp11::register]]
+cpp11::doubles cpp_monty_random_exponential_rate(cpp11::doubles r_rate,
+                                                 SEXP ptr) {
+  default_rng64 *rng = cpp11::as_cpp<cpp11::external_pointer<default_rng64>>(ptr).get();
+  const int n = rng->size();
+  check_length(r_rate, n, "rate");
+  const double * rate = REAL(r_rate);
+
+  cpp11::writable::doubles r_y = cpp11::writable::doubles(n);
+  double *y = REAL(r_y);
+  for (int i = 0; i < n; ++i) {
+    auto &state = rng->state(i);
+    y[i] = monty::random::exponential_rate(state, rate[i]);
+  }
+
+  return r_y;
+}
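The R/rng2.R and src/random2.cpp additions above give a vectorised R-level interface to the generators: each call draws one value per RNG stream, and check_length() requires the distribution parameters to supply one value per stream. A minimal usage sketch (the function names come from this diff; the stream count and seed are illustrative, and the one-draw-per-stream return shape is inferred from the C++ loop over rng->size()):

    state <- monty_random_alloc(n_streams = 4, seed = 42)
    monty_random_real(state)                               # 4 draws on [0, 1), one per stream
    monty_random_binomial(rep(10, 4), rep(0.3, 4), state)  # size and prob given per stream
    monty_random_exponential_rate(rep(0.5, 4), state)      # rate given per stream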
diff --git a/tests/testthat/helper-monty.R b/tests/testthat/helper-monty.R
index 1d98e076..5308e6da 100644
--- a/tests/testthat/helper-monty.R
+++ b/tests/testthat/helper-monty.R
@@ -81,112 +81,6 @@ ex_simple_nested_with_base <- function(n_groups) {
 }
 
 
-ex_dust_sir <- function(n_particles = 100, n_threads = 1,
-                        deterministic = FALSE, save_trajectories = FALSE) {
-  testthat::skip_if_not_installed("dust")
-  sir <- dust::dust_example("sir")
-
-  np <- 10
-  end <- 150 * 4
-  times <- seq(0, end, by = 4)
-  ans <- sir$new(list(), 0, np, seed = 1L)$simulate(times)
-  dat <- data.frame(time = times[-1], incidence = ans[5, 1, -1])
-
-  ## TODO: an upshot here is that our dust models are always going to
-  ## need to be initialisable; we might need to sample from the
-  ## statistical parameters, or set things up to allow two-phases of
-  ## initialsation (which is I think where we are heading, so that's
-  ## fine).
-  model <- sir$new(list(), 0, n_particles, seed = 1L, n_threads = n_threads,
-                   deterministic = deterministic)
-  model$set_data(dust::dust_data(dat))
-  model$set_index(c(2, 4))
-
-  prior_beta_shape <- 1
-  prior_beta_rate <- 1 / 0.5
-  prior_gamma_shape <- 1
-  prior_gamma_rate <- 1 / 0.5
-
-  trajectories <- NULL
-
-  density <- function(x) {
-    beta <- x[[1]]
-    gamma <- x[[2]]
-    prior <- dgamma(beta, prior_beta_shape, prior_beta_rate, log = TRUE) +
-      dgamma(gamma, prior_gamma_shape, prior_gamma_rate, log = TRUE)
-    if (is.finite(prior)) {
-      model$update_state(
-        pars = list(beta = x[[1]], gamma = x[[2]]),
-        time = 0,
-        set_initial_state = TRUE)
-      res <- model$filter(save_trajectories = save_trajectories)
-      if (save_trajectories) {
-        trajectories <<- res$trajectories
-      }
-      ll <- res$log_likelihood
-    } else {
-      ll <- -Inf
-    }
-    ll + prior
-  }
-
-  direct_sample <- function(rng) {
-    c(rng$gamma_scale(1, prior_beta_shape, 1 / prior_beta_rate),
-      rng$gamma_scale(1, prior_gamma_shape, 1 / prior_gamma_rate))
-  }
-
-  set_rng_state <- function(rng_state) {
-    n_streams <- n_particles + 1
-    if (length(rng_state) != 32 * n_streams) {
-      ## Expand the state by short jumps; we'll make this nicer once
-      ## we refactor the RNG interface and dust.
-      rng_state <- monty_rng$new(rng_state, n_streams)$state()
-    }
-    model$set_rng_state(rng_state)
-  }
-
-  get_rng_state <- function() {
-    model$rng_state()
-  }
-
-  if (save_trajectories) {
-    observer <- monty_observer(
-      function() {
-        ## TODO: It's not really clear to me (Rich) that we want the
-        ## rng coming in here. In dust2 we'll correctly use the
-        ## *filter* rng. however, even there we end up with the act
-        ## of observation changing the behaviour of the sampler and I
-        ## am not sure that is really desirable. We could probably
-        ## improve that in dust, but that would require that we do not
-        ## pass the rng here too. So for now we take the first
-        ## particle.
-        ## i <- floor(rng$random_real(1) * model$model$n_particles()) + 1L
-        i <- 1L
-        if (save_trajectories) {
-          traj <- trajectories[, i, , drop = FALSE]
-          dim(traj) <- dim(traj)[-2]
-        } else {
-          traj <- NULL
-        }
-        list(trajectories = traj, state = model$state()[, i])
-      })
-  } else {
-    observer <- NULL
-  }
-
-  monty_model(
-    list(model = model,
-         density = density,
-         direct_sample = direct_sample,
-         parameters = c("beta", "gamma"),
-         domain = cbind(c(0, 0), c(Inf, Inf)),
-         observer = observer,
-         set_rng_state = set_rng_state,
-         get_rng_state = get_rng_state),
-    monty_model_properties(is_stochastic = !deterministic))
-}
-
-
 random_array <- function(dim, named = FALSE) {
   if (named) {
     dn <- lapply(seq_along(dim), function(i) {
@@ -200,91 +94,21 @@ random_array <- function(dim, named = FALSE) {
 }
 
 
-ex_dust_sir_likelihood <- function(n_particles = 100, n_threads = 1,
-                                   deterministic = FALSE,
-                                   save_trajectories = FALSE) {
-  testthat::skip_if_not_installed("dust")
-  sir <- dust::dust_example("sir")
-
-  np <- 10
-  end <- 150 * 4
-  times <- seq(0, end, by = 4)
-  ans <- sir$new(list(), 0, np, seed = 1L)$simulate(times)
-  dat <- data.frame(time = times[-1], incidence = ans[5, 1, -1])
-
-  ## TODO: an upshot here is that our dust models are always going to
-  ## need to be initialisable; we might need to sample from the
-  ## statistical parameters, or set things up to allow two-phases of
-  ## initialsation (which is I think where we are heading, so that's
-  ## fine).
-  model <- sir$new(list(), 0, n_particles, seed = 1L, n_threads = n_threads,
-                   deterministic = deterministic)
-  model$set_data(dust::dust_data(dat))
-  model$set_index(c(2, 4))
-
-  trajectories <- NULL
-
-  density <- function(x) {
-    beta <- x[[1]]
-    gamma <- x[[2]]
-    model$update_state(
-      pars = list(beta = x[[1]], gamma = x[[2]]),
-      time = 0,
-      set_initial_state = TRUE)
-    res <- model$filter(save_trajectories = save_trajectories)
-    if (save_trajectories) {
-      trajectories <<- res$trajectories
-    }
-    res$log_likelihood
-  }
-
-  set_rng_state <- function(rng_state) {
-    n_streams <- n_particles + 1
-    if (length(rng_state) != 32 * n_streams) {
-      ## Expand the state by short jumps; we'll make this nicer once
-      ## we refactor the RNG interface and dust.
-      rng_state <- monty_rng$new(rng_state, n_streams)$state()
-    }
-    model$set_rng_state(rng_state)
-  }
-
-  get_rng_state <- function() {
-    model$rng_state()
-  }
+ex_sir_filter_likelihood <- function(n_particles = 100,
+                                     deterministic = FALSE,
+                                     save_trajectories = FALSE) {
+  data <- data.frame(time = c( 4,  8, 12, 16, 20, 24, 28, 32, 36),
+                     incidence = c( 1,  0,  3,  5,  2,  4,  3,  7,  2))
+  sir_filter_monty(data, n_particles, deterministic, save_trajectories)
+}
 
-  if (save_trajectories) {
-    observer <- monty_observer(
-      function() {
-        ## TODO: It's not really clear to me (Rich) that we want the
-        ## rng coming in here. In dust2 we'll correctly use the
-        ## *filter* rng. however, even there we end up with the act
-        ## of observation changing the behaviour of the sampler and I
-        ## am not sure that is really desirable. We could probably
-        ## improve that in dust, but that would require that we do not
-        ## pass the rng here too. So for now we take the first
-        ## particle.
-        ## i <- floor(rng$random_real(1) * model$model$n_particles()) + 1L
-        i <- 1L
-        if (save_trajectories) {
-          traj <- trajectories[, i, , drop = FALSE]
-          dim(traj) <- dim(traj)[-2]
-        } else {
-          traj <- NULL
-        }
-        list(trajectories = traj, state = model$state()[, i])
-      })
-  } else {
-    observer <- NULL
-  }
-
-  monty_model(
-    list(model = model,
-         density = density,
-         parameters = c("beta", "gamma"),
-         observer = observer,
-         set_rng_state = set_rng_state,
-         get_rng_state = get_rng_state),
-    monty_model_properties(is_stochastic = !deterministic))
+ex_sir_filter_posterior <- function(...) {
+  prior <- monty_dsl({
+    beta ~ Gamma(shape = 1, rate = 1 / 0.5)
+    gamma ~ Gamma(shape = 1, rate = 1 / 0.5)
+  })
+  ex_sir_filter_likelihood(...) + prior
 }
diff --git a/tests/testthat/helper-sir-filter.R b/tests/testthat/helper-sir-filter.R
new file mode 100644
index 00000000..c5cce7a5
--- /dev/null
+++ b/tests/testthat/helper-sir-filter.R
@@ -0,0 +1,123 @@
+sir_filter_monty <- function(data, n_particles, deterministic = FALSE,
+                             save_trajectories = FALSE, seed = NULL) {
+  parameters <- c("beta", "gamma")
+  env <- new.env()
+  base <- list(N = 1000, I0 = 10, beta = 0.2, gamma = 0.1, exp_noise = 1e6)
+
+  get_rng_state <- function() {
+    c(monty_rng_state(env$rng$filter$ptr, FALSE),
+      monty_rng_state(env$rng$system$ptr, FALSE))
+  }
+
+  set_rng_state <- function(rng_state) {
+    n_streams <- n_particles + 1
+    r <- matrix(
+      monty_rng$new(n_streams = n_streams, seed = rng_state)$state(),
+      ncol = n_streams)
+    env$rng <- list(
+      filter = monty_random_alloc(1, r[, 1], deterministic),
+      system = monty_random_alloc(n_particles, c(r[, -1]), deterministic))
+  }
+
+  set_rng_state(seed)
+
+  density <- function(x) {
+    pars <- base
+    pars[parameters] <- x
+    res <- sir_filter(pars, data, n_particles, env$rng, save_trajectories)
+    if (save_trajectories) {
+      env$trajectories <- res$trajectories
+    }
+    res$log_likelihood
+  }
+
+  if (save_trajectories) {
+    observer <- monty_observer(
+      function() {
+        i <- 1L
+        trajectories <- env$trajectories[c(2, 4), i, , drop = FALSE]
+        dim(trajectories) <- dim(trajectories)[-2]
+        list(trajectories = trajectories)
+      })
+  } else {
+    observer <- NULL
+  }
+
+  monty_model(
+    list(density = density,
+         parameters = parameters,
+         observer = observer,
+         set_rng_state = set_rng_state,
+         get_rng_state = get_rng_state),
+    monty_model_properties(is_stochastic = !deterministic))
+}
+
+
+sir_filter <- function(pars, data, n_particles, rng,
+                       save_trajectories = FALSE) {
+  y <- list(S = pars$N - pars$I0, I = pars$I0, R = 0, cases = 0)
+  packer <- monty_packer(names(y))
+  state <- matrix(packer$pack(y), 4, n_particles)
+  time <- 0
+  dt <- 1
+  ll <- 0
+  exp_noise <- rep_len(pars$exp_noise, n_particles)
+  i_cases <- packer$index()$cases
+  if (save_trajectories) {
+    trajectories <- array(NA_real_, c(length(y), n_particles, nrow(data)))
+  } else {
+    trajectories <- NULL
+  }
+
+  for (i in seq_len(nrow(data))) {
+    from <- time
+    to <- data$time[[i]]
+    state <- sir_run(from, to, dt, state, packer, pars, rng$system)
+    noise <- monty_random_exponential_rate(exp_noise, rng$system)
+    lambda <- state[i_cases, ] + noise
+    tmp <- dpois(data$incidence[[i]], lambda, log = TRUE)
+    w <- exp(tmp - max(tmp))
+    ll <- ll + log(mean(w)) + max(tmp)
+    u <- monty_random_real(rng$filter)
+    k <- dust_resample_weight(w, u)
+
+    if (save_trajectories) {
+      trajectories[, , i] <- state
+      trajectories <- trajectories[, k, , drop = FALSE] # slow but correct...
+    }
+
+    state <- state[, k, drop = FALSE]
+    time <- to
+  }
+
+  list(log_likelihood = ll, trajectories = trajectories)
+}
+
+
+sir_run <- function(from, to, dt, state, packer, pars, rng) {
+  state <- packer$unpack(state)
+  for (time in seq(from, to, by = dt)[-1]) {
+    state <- sir_step(time, dt, state, pars, rng)
+  }
+  packer$pack(state)
+}
+
+
+sir_step <- function(time, dt, state, pars, rng) {
+  p_SI <- 1 - exp(-pars$beta * state$I / pars$N * dt)
+  p_IR <- rep(1 - exp(-pars$gamma * dt), length(state$I))
+  n_SI <- monty_random_binomial(state$S, p_SI, rng)
+  n_IR <- monty_random_binomial(state$I, p_IR, rng)
+  cases <- if (time %% 1 == 0) 0 else state$cases
+  list(S = state$S - n_SI,
+       I = state$I + n_SI - n_IR,
+       R = state$R + n_IR,
+       cases = cases + n_SI)
+}
+
+
+dust_resample_weight <- function(w, u) {
+  n <- length(w)
+  uu <- u / n + seq(0, by = 1 / n, length.out = n)
+  findInterval(uu, cumsum(w / sum(w))) + 1L
+}
diff --git a/tests/testthat/test-combine.R b/tests/testthat/test-combine.R
index 525415c0..21fa709e 100644
--- a/tests/testthat/test-combine.R
+++ b/tests/testthat/test-combine.R
@@ -186,7 +186,7 @@ test_that("can combine gradients where parameters do not agree", {
 
 
 test_that("can combine a stochastic and deterministic model", {
-  ll <- ex_dust_sir_likelihood()
+  ll <- ex_sir_filter_likelihood()
   prior <- monty_dsl({
     beta ~ Gamma(shape = 1, rate = 1 / 0.5)
     gamma ~ Gamma(shape = 1, rate = 1 / 0.5)
@@ -199,7 +199,7 @@ test_that("can combine a stochastic and deterministic model", {
 
 
 test_that("can't disable stochastic model on combination", {
-  ll <- ex_dust_sir_likelihood()
+  ll <- ex_sir_filter_likelihood()
   prior <- monty_dsl({
     beta ~ Gamma(shape = 1, rate = 1 / 0.5)
     gamma ~ Gamma(shape = 1, rate = 1 / 0.5)
@@ -212,7 +212,7 @@ test_that("can't disable stochastic model on combination", {
 
 
 test_that("can't create model out of two stochastic halves", {
-  ll <- ex_dust_sir_likelihood()
+  ll <- ex_sir_filter_likelihood()
   expect_error(
     ll + ll,
     "Can't combine two stochastic models")
diff --git a/tests/testthat/test-sample.R b/tests/testthat/test-sample.R
index cdb381fe..82117b2e 100644
--- a/tests/testthat/test-sample.R
+++ b/tests/testthat/test-sample.R
@@ -269,7 +269,7 @@ test_that("error if initial conditions do not have finite density", {
 
 test_that("can continue a stochastic model identically", {
   set.seed(1)
-  model <- ex_dust_sir()
+  model <- ex_sir_filter_posterior()
   vcv <- matrix(c(0.0006405, 0.0005628, 0.0005628, 0.0006641), 2, 2)
   sampler <- monty_sampler_random_walk(vcv = vcv)
   initial <- c(0.2, 0.1)
diff --git a/tests/testthat/test-sampler-adaptive.R b/tests/testthat/test-sampler-adaptive.R
index 249348de..3b4bce4c 100644
--- a/tests/testthat/test-sampler-adaptive.R
+++ b/tests/testthat/test-sampler-adaptive.R
@@ -92,7 +92,7 @@ test_that("can continue adaptive sampler", {
 
 test_that("can't use adaptive sampler with stochastic models", {
   set.seed(1)
-  m <- ex_dust_sir()
+  m <- ex_sir_filter_posterior()
   sampler <- monty_sampler_adaptive(initial_vcv = diag(c(0.01, 0.01)))
   expect_error(
     monty_sample(m, sampler, 30, n_chains = 3),
diff --git a/tests/testthat/test-sampler-hmc.R b/tests/testthat/test-sampler-hmc.R
index b39b13f2..0383b41c 100644
--- a/tests/testthat/test-sampler-hmc.R
+++ b/tests/testthat/test-sampler-hmc.R
@@ -216,7 +216,7 @@ test_that("can't use hmc with models that lack gradients", {
 
 test_that("can't use hmc with stochastic models", {
   set.seed(1)
-  m <- ex_dust_sir()
+  m <- ex_sir_filter_posterior()
   sampler <- monty_sampler_hmc(epsilon = 0.1, n_integration_steps = 10)
   expect_error(
     monty_sample(m, sampler, 30, n_chains = 3),
diff --git a/tests/testthat/test-sampler-random-walk.R b/tests/testthat/test-sampler-random-walk.R
index d43339f7..9bba3c23 100644
--- a/tests/testthat/test-sampler-random-walk.R
+++ b/tests/testthat/test-sampler-random-walk.R
@@ -26,7 +26,7 @@ test_that("validate sampler against model on initialisation", {
 
 test_that("can draw samples from a random model", {
   set.seed(1)
-  m <- ex_dust_sir()
+  m <- ex_sir_filter_posterior()
   vcv <- matrix(c(0.0006405, 0.0005628, 0.0005628, 0.0006641), 2, 2)
   sampler <- monty_sampler_random_walk(vcv = vcv)
   res <- monty_sample(m, sampler, 20)
@@ -36,7 +36,7 @@ test_that("can draw samples from a random model", {
 
 
 test_that("can observe a model", {
-  m <- ex_dust_sir(save_trajectories = TRUE)
+  m <- ex_sir_filter_posterior(save_trajectories = TRUE)
   vcv <- matrix(c(0.0006405, 0.0005628, 0.0005628, 0.0006641), 2, 2)
   sampler <- monty_sampler_random_walk(vcv = vcv)
 
@@ -46,18 +46,14 @@ test_that("can observe a model", {
   expect_setequal(names(res),
                   c("pars", "density", "initial", "details", "observations"))
   expect_equal(names(res$observations),
-               c("trajectories", "state"))
+               "trajectories")
   expect_equal(dim(res$observations$trajectories),
-               c(2, 151, 20, 3)) # states, time steps, samples, chains
-  expect_equal(dim(res$observations$state),
-               c(5, 20, 3)) # states, samples, chains
-  expect_equal(res$observations$state[c(2, 4), , ],
-               res$observations$trajectories[, 151, , ])
+               c(2, 9, 20, 3)) # states, time steps, samples, chains
 })
 
 
 test_that("can continue observed models", {
-  m <- ex_dust_sir(save_trajectories = TRUE)
+  m <- ex_sir_filter_posterior(save_trajectories = TRUE)
   vcv <- matrix(c(0.0006405, 0.0005628, 0.0005628, 0.0006641), 2, 2)
   sampler <- monty_sampler_random_walk(vcv = vcv)
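Taken together, the new helpers replace the dust-based stochastic example with a self-contained SIR particle filter built on the monty_random_* API. A sketch of how the tests above exercise it (the vcv, chain and sample counts are copied from test-sampler-random-walk.R; the commented dimensions restate the expectation asserted there):

    set.seed(1)
    m <- ex_sir_filter_posterior(save_trajectories = TRUE)
    vcv <- matrix(c(0.0006405, 0.0005628, 0.0005628, 0.0006641), 2, 2)
    sampler <- monty_sampler_random_walk(vcv = vcv)
    res <- monty_sample(m, sampler, 20, n_chains = 3)
    dim(res$observations$trajectories)  # 2 states x 9 time points x 20 samples x 3 chains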