diff --git a/tests/testthat/_snaps/survival-tune-eval-time-arguments.md b/tests/testthat/_snaps/survival-tune-eval-time-arguments.md new file mode 100644 index 0000000..9d45c7b --- /dev/null +++ b/tests/testthat/_snaps/survival-tune-eval-time-arguments.md @@ -0,0 +1,795 @@ +# evaluation time argument to tune objects + + Code + spec %>% tune_grid(Surv(time, status) ~ ., resamples = rs, metrics = mtr) + Condition + Error in `tune_grid()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric". Only 0 unique times were given. + +--- + + Code + spec %>% tune_grid(Surv(time, status) ~ ., resamples = rs, metrics = reg_mtr) + Condition + Error in `tune_grid()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + linear_reg() %>% tune_grid(age ~ ., resamples = rs, metrics = reg_mtr, + eval_time = 1) + Condition + Warning: + Evaluation times are only required when the model mode is "censored regression" (and will be ignored). + Warning: + No tuning parameters have been detected, performance will be evaluated using the resamples with no tuning. Did you want to [tune()] parameters? + Output + # Tuning results + # 10-fold cross-validation using stratification + # A tibble: 10 x 4 + splits id .metrics .notes + + 1 Fold01 + 2 Fold02 + 3 Fold03 + 4 Fold04 + 5 Fold05 + 6 Fold06 + 7 Fold07 + 8 Fold08 + 9 Fold09 + 10 Fold10 + +--- + + Code + no_usable_times <- spec %>% tune_grid(Surv(time, status) ~ ., resamples = rs, + metrics = mtr, eval_time = c(-1, Inf)) + Condition + Error: + ! There were no usable evaluation times (finite, non-missing, and >= 0). + +# eval time inputs are checked for censored regression models + + Code + check_eval_time_arg(NULL, met_stc) + Output + NULL + +--- + + Code + check_eval_time_arg(NULL, met_dyn) + Condition + Error: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_stc_dyn) + Condition + Error: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_stc_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_dyn_stc) + Condition + Error: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_dyn_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(NULL, met_int_stc) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. 
+ +--- + + Code + check_eval_time_arg(NULL, met_int_dyn) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + check_eval_time_arg(2, met_stc) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + Output + NULL + +--- + + Code + check_eval_time_arg(2, met_dyn) + Output + [1] 2 + +--- + + Code + check_eval_time_arg(2, met_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + check_eval_time_arg(2, met_stc_dyn) + Output + [1] 2 + +--- + + Code + check_eval_time_arg(2, met_stc_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + check_eval_time_arg(2, met_dyn_stc) + Output + [1] 2 + +--- + + Code + check_eval_time_arg(2, met_dyn_int) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + check_eval_time_arg(2, met_int_stc) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + check_eval_time_arg(2, met_int_dyn) + Condition + Error: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + check_eval_time_arg(1:3, met_stc) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + Output + NULL + +--- + + Code + check_eval_time_arg(1:3, met_dyn) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_int) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_stc_dyn) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_stc_int) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_dyn_stc) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_dyn_int) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_int_stc) + Output + [1] 1 2 3 + +--- + + Code + check_eval_time_arg(1:3, met_int_dyn) + Output + [1] 1 2 3 + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc) + +--- + + Code + fit_resamples(wflow, rs, metrics = met_dyn) + Condition + Error in `fit_resamples()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_stc_dyn) + Condition + Error in `fit_resamples()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. 
+ +--- + + Code + fit_resamples(wflow, rs, metrics = met_stc_int) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_dyn_stc) + Condition + Error in `fit_resamples()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_dyn_int) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int_stc) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int_dyn) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc, eval_time = 2) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_dyn, eval_time = 2) + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int, eval_time = 2) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc_dyn, eval_time = 2) + +--- + + Code + fit_resamples(wflow, rs, metrics = met_stc_int, eval_time = 2) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_dyn_stc, eval_time = 2) + +--- + + Code + fit_resamples(wflow, rs, metrics = met_dyn_int, eval_time = 2) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int_stc, eval_time = 2) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_int_dyn, eval_time = 2) + Condition + Error in `fit_resamples()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc, eval_time = 1:3) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). 
+ +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_dyn, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_int, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc_dyn, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_stc_int, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_dyn_stc, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_dyn_int, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_int_stc, eval_time = 1:3) + +--- + + Code + res <- fit_resamples(wflow, rs, metrics = met_int_dyn, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc) + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_dyn) + Condition + Error in `tune_grid()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_stc_dyn) + Condition + Error in `tune_grid()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_stc_int) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_dyn_stc) + Condition + Error in `tune_grid()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_dyn_int) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int_stc) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int_dyn) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc, eval_time = 2) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_dyn, eval_time = 2) + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int, eval_time = 2) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 1 unique time was given. 
+ +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc_dyn, eval_time = 2) + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_stc_int, eval_time = 2) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_dyn_stc, eval_time = 2) + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_dyn_int, eval_time = 2) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int_stc, eval_time = 2) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_int_dyn, eval_time = 2) + Condition + Error in `tune_grid()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc, eval_time = 1:3) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_dyn, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_int, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc_dyn, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_stc_int, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_dyn_stc, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_dyn_int, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_int_stc, eval_time = 1:3) + +--- + + Code + res <- tune_grid(wflow_tune, rs, metrics = met_int_dyn, eval_time = 1:3) + +--- + + Code + last_fit(wflow, split, metrics = met_dyn) + Condition + Error in `last_fit()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_int) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_stc_dyn) + Condition + Error in `last_fit()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_stc_int) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_dyn_stc) + Condition + Error in `last_fit()`: + ! At least 1 evaluation time is required for the metric type(s) requested: "dynamic_survival_metric" and "static_survival_metric". Only 0 unique times were given. 
+ +--- + + Code + last_fit(wflow, split, metrics = met_dyn_int) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_int_stc) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 0 unique times were given. + +--- + + Code + last_fit(wflow, split, metrics = met_int_dyn) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 0 unique times were given. + +--- + + Code + res <- last_fit(wflow, split, metrics = met_stc, eval_time = 2) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). + +--- + + Code + res <- last_fit(wflow, split, metrics = met_dyn, eval_time = 2) + +--- + + Code + last_fit(wflow, split, metrics = met_int, eval_time = 2) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- last_fit(wflow, split, metrics = met_stc_dyn, eval_time = 2) + +--- + + Code + last_fit(wflow, split, metrics = met_stc_int, eval_time = 2) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- last_fit(wflow, split, metrics = met_dyn_stc, eval_time = 2) + +--- + + Code + last_fit(wflow, split, metrics = met_dyn_int, eval_time = 2) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + last_fit(wflow, split, metrics = met_int_stc, eval_time = 2) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "integrated_survival_metric" and "static_survival_metric". Only 1 unique time was given. + +--- + + Code + last_fit(wflow, split, metrics = met_int_dyn, eval_time = 2) + Condition + Error in `last_fit()`: + ! At least 2 evaluation times are required for the metric type(s) requested: "dynamic_survival_metric" and "integrated_survival_metric". Only 1 unique time was given. + +--- + + Code + res <- last_fit(wflow, split, metrics = met_stc, eval_time = 1:3) + Condition + Warning: + Evaluation times are only required when dynmanic or integrated metrics are used (and will be ignored here). 
+ +--- + + Code + res <- last_fit(wflow, split, metrics = met_dyn, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_int, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_stc_dyn, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_stc_int, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_dyn_stc, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_dyn_int, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_int_stc, eval_time = 1:3) + +--- + + Code + res <- last_fit(wflow, split, metrics = met_int_dyn, eval_time = 1:3) + diff --git a/tests/testthat/_snaps/survival-tune-metric-arguments.md b/tests/testthat/_snaps/survival-tune-metric-arguments.md new file mode 100644 index 0000000..bdd7f33 --- /dev/null +++ b/tests/testthat/_snaps/survival-tune-metric-arguments.md @@ -0,0 +1,96 @@ +# metric inputs are checked for censored regression models + + Code + check_metrics_arg(NULL, wflow) + Output + A metric set, consisting of: + - `brier_survival()`, a dynamic survival metric | direction: minimize + +--- + + Code + check_metrics_arg(met_reg, wflow) + Condition + Error: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + check_metrics_arg(met_cls, wflow) + Condition + Error: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + check_metrics_arg(met_srv, wflow) + Output + A metric set, consisting of: + - `concordance_survival()`, a static survival metric | direction: maximize + +--- + + Code + fit_resamples(wflow, rs, metrics = met_cls) + Condition + Error in `fit_resamples()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + fit_resamples(wflow, rs, metrics = met_reg) + Condition + Error in `fit_resamples()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_cls) + Condition + Error in `tune_grid()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + tune_grid(wflow_tune, rs, metrics = met_reg) + Condition + Error in `tune_grid()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + tune_bayes(wflow_tune, rs, metrics = met_cls) + Condition + Error in `tune_bayes()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + tune_bayes(wflow_tune, rs, metrics = met_reg) + Condition + Error in `tune_bayes()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + last_fit(wflow, split, metrics = met_cls) + Condition + Error in `last_fit()`: + ! The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + +--- + + Code + last_fit(wflow, split, metrics = met_reg) + Condition + Error in `last_fit()`: + ! 
The parsnip model has `mode` value of "censored regression", but the `metrics` is a metric set for a different model mode. + diff --git a/tests/testthat/test-survival-tune-eval-time-arguments.R b/tests/testthat/test-survival-tune-eval-time-arguments.R new file mode 100644 index 0000000..9d2e5dc --- /dev/null +++ b/tests/testthat/test-survival-tune-eval-time-arguments.R @@ -0,0 +1,260 @@ +test_that("evaluation time argument to tune objects", { + skip_if_not_installed("yardstick", minimum_version = "1.1.0.9000") + + suppressPackageStartupMessages(library(tune)) + suppressPackageStartupMessages(library(censored)) + suppressPackageStartupMessages(library(yardstick)) + suppressPackageStartupMessages(library(rsample)) + + spec <- survival_reg() + set.seed(1) + rs <- vfold_cv(stanford2, strata = status) + .time <- seq(1, 1000, length = 5) + mtr <- metric_set(brier_survival) + reg_mtr <- metric_set(rmse) + + expect_snapshot(error = TRUE, + spec %>% tune_grid(Surv(time, status) ~ ., resamples = rs, metrics = mtr) + ) + expect_snapshot(error = TRUE, + spec %>% tune_grid(Surv(time, status) ~ ., resamples = rs, metrics = reg_mtr) + ) + expect_snapshot( + linear_reg() %>% tune_grid(age ~ ., resamples = rs, metrics = reg_mtr, eval_time = 1) + ) + + expect_snapshot(error = TRUE, + no_usable_times <- + spec %>% + tune_grid(Surv(time, status) ~ ., resamples = rs, metrics = mtr, eval_time = c(-1, Inf)) + ) + + times <- 4:1 + expect_equal(get_metric_time(metric_set(brier_survival), times), 4) + expect_equal(get_metric_time(metric_set(concordance_survival), times), NULL) + expect_equal(get_metric_time(metric_set(brier_survival_integrated), times), NULL) + expect_equal( + get_metric_time( + metric_set(brier_survival, brier_survival_integrated, concordance_survival), + times + ), + 4 + ) + +}) + + +test_that("eval time inputs are checked for censored regression models", { + skip_if_not_installed("censored") + + library(parsnip) + library(workflows) + library(yardstick) + library(rsample) + suppressPackageStartupMessages(library(censored)) + + stanford2$event_time <- Surv(stanford2$time, stanford2$status) + stanford2 <- stanford2[, c("event_time", "age")] + + wflow <- workflow(event_time ~ age, survival_reg()) + sr_spec <- survival_reg(dist = tune()) + wflow_tune <- workflow(event_time ~ age, sr_spec) + + set.seed(1) + split <- initial_split(stanford2) + rs <- vfold_cv(stanford2) + + # ------------------------------------------------------------------------------ + # setup metric sets + + met_stc <- metric_set(concordance_survival) + met_dyn <- metric_set(brier_survival) + met_int <- metric_set(brier_survival_integrated) + met_stc_dyn <- metric_set(concordance_survival, brier_survival) + met_stc_int <- metric_set(concordance_survival, brier_survival_integrated) + met_dyn_stc <- metric_set(brier_survival, concordance_survival) + met_dyn_int <- metric_set(brier_survival, brier_survival_integrated) + met_int_stc <- metric_set(brier_survival_integrated, concordance_survival) + met_int_dyn <- metric_set(brier_survival_integrated, brier_survival) + + # ------------------------------------------------------------------------------ + # check inputs when eval_time left out + + expect_snapshot(check_eval_time_arg(NULL, met_stc)) + expect_snapshot(check_eval_time_arg(NULL, met_dyn), error = TRUE) + expect_snapshot(check_eval_time_arg(NULL, met_int), error = TRUE) + + expect_snapshot(check_eval_time_arg(NULL, met_stc_dyn), error = TRUE) + expect_snapshot(check_eval_time_arg(NULL, met_stc_int), error = TRUE) + 
expect_snapshot(check_eval_time_arg(NULL, met_dyn_stc), error = TRUE) + + expect_snapshot(check_eval_time_arg(NULL, met_dyn_int), error = TRUE) + expect_snapshot(check_eval_time_arg(NULL, met_int_stc), error = TRUE) + expect_snapshot(check_eval_time_arg(NULL, met_int_dyn), error = TRUE) + + # ------------------------------------------------------------------------------ + # check inputs with single eval times + + expect_snapshot(check_eval_time_arg(2, met_stc)) + expect_snapshot(check_eval_time_arg(2, met_dyn)) + expect_snapshot(check_eval_time_arg(2, met_int), error = TRUE) + + expect_snapshot(check_eval_time_arg(2, met_stc_dyn)) + expect_snapshot(check_eval_time_arg(2, met_stc_int), error = TRUE) + + expect_snapshot(check_eval_time_arg(2, met_dyn_stc)) + expect_snapshot(check_eval_time_arg(2, met_dyn_int), error = TRUE) + + expect_snapshot(check_eval_time_arg(2, met_int_stc), error = TRUE) + expect_snapshot(check_eval_time_arg(2, met_int_dyn), error = TRUE) + + # ------------------------------------------------------------------------------ + # check inputs with multiple eval times + + expect_snapshot(check_eval_time_arg(1:3, met_stc)) + expect_snapshot(check_eval_time_arg(1:3, met_dyn)) + expect_snapshot(check_eval_time_arg(1:3, met_int)) + + expect_snapshot(check_eval_time_arg(1:3, met_stc_dyn)) + expect_snapshot(check_eval_time_arg(1:3, met_stc_int)) + expect_snapshot(check_eval_time_arg(1:3, met_dyn_stc)) + + expect_snapshot(check_eval_time_arg(1:3, met_dyn_int)) + expect_snapshot(check_eval_time_arg(1:3, met_int_stc)) + expect_snapshot(check_eval_time_arg(1:3, met_int_dyn)) + + # ------------------------------------------------------------------------------ + # resampling + + # no eval time + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc)) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_dyn), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int), error = TRUE) + + expect_snapshot(fit_resamples(wflow, rs, metrics = met_stc_dyn), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_stc_int), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_dyn_stc), error = TRUE) + + expect_snapshot(fit_resamples(wflow, rs, metrics = met_dyn_int), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int_stc), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int_dyn), error = TRUE) + + # one eval time + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc, eval_time = 2)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_dyn, eval_time = 2)) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc_dyn, eval_time = 2)) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_stc_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_dyn_stc, eval_time = 2)) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_dyn_int, eval_time = 2), error = TRUE) + + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int_stc, eval_time = 2), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_int_dyn, eval_time = 2), error = TRUE) + + # multiple eval times + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc, eval_time = 1:3)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_dyn, eval_time = 1:3)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = 
met_int, eval_time = 1:3)) + + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc_dyn, eval_time = 1:3)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_stc_int, eval_time = 1:3)) + + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_dyn_stc, eval_time = 1:3)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_dyn_int, eval_time = 1:3)) + + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_int_stc, eval_time = 1:3)) + expect_snapshot(res <- fit_resamples(wflow, rs, metrics = met_int_dyn, eval_time = 1:3)) + + # ------------------------------------------------------------------------------ + # grid tuning (tune bayes tests in extratests repo) + + # no eval time + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc)) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_dyn), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int), error = TRUE) + + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_stc_dyn), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_stc_int), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_dyn_stc), error = TRUE) + + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_dyn_int), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int_stc), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int_dyn), error = TRUE) + + # one eval time + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc, eval_time = 2)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_dyn, eval_time = 2)) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc_dyn, eval_time = 2)) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_stc_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_dyn_stc, eval_time = 2)) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_dyn_int, eval_time = 2), error = TRUE) + + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int_stc, eval_time = 2), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_int_dyn, eval_time = 2), error = TRUE) + + # multiple eval times + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc, eval_time = 1:3)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_dyn, eval_time = 1:3)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_int, eval_time = 1:3)) + + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc_dyn, eval_time = 1:3)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_stc_int, eval_time = 1:3)) + + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_dyn_stc, eval_time = 1:3)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_dyn_int, eval_time = 1:3)) + + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_int_stc, eval_time = 1:3)) + expect_snapshot(res <- tune_grid(wflow_tune, rs, metrics = met_int_dyn, eval_time = 1:3)) + + # ------------------------------------------------------------------------------ + # last fit + + # no eval time + expect_silent(res <- last_fit(wflow, split, metrics = met_stc)) + expect_snapshot(last_fit(wflow, split, metrics = met_dyn), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_int), error = TRUE) + + expect_snapshot(last_fit(wflow, split, metrics = 
met_stc_dyn), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_stc_int), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_dyn_stc), error = TRUE) + + expect_snapshot(last_fit(wflow, split, metrics = met_dyn_int), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_int_stc), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_int_dyn), error = TRUE) + + # one eval time + expect_snapshot(res <- last_fit(wflow, split, metrics = met_stc, eval_time = 2)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_dyn, eval_time = 2)) + expect_snapshot(last_fit(wflow, split, metrics = met_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- last_fit(wflow, split, metrics = met_stc_dyn, eval_time = 2)) + expect_snapshot(last_fit(wflow, split, metrics = met_stc_int, eval_time = 2), error = TRUE) + + expect_snapshot(res <- last_fit(wflow, split, metrics = met_dyn_stc, eval_time = 2)) + expect_snapshot(last_fit(wflow, split, metrics = met_dyn_int, eval_time = 2), error = TRUE) + + expect_snapshot(last_fit(wflow, split, metrics = met_int_stc, eval_time = 2), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_int_dyn, eval_time = 2), error = TRUE) + + # multiple eval times + expect_snapshot(res <- last_fit(wflow, split, metrics = met_stc, eval_time = 1:3)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_dyn, eval_time = 1:3)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_int, eval_time = 1:3)) + + expect_snapshot(res <- last_fit(wflow, split, metrics = met_stc_dyn, eval_time = 1:3)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_stc_int, eval_time = 1:3)) + + expect_snapshot(res <- last_fit(wflow, split, metrics = met_dyn_stc, eval_time = 1:3)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_dyn_int, eval_time = 1:3)) + + expect_snapshot(res <- last_fit(wflow, split, metrics = met_int_stc, eval_time = 1:3)) + expect_snapshot(res <- last_fit(wflow, split, metrics = met_int_dyn, eval_time = 1:3)) + + +}) + diff --git a/tests/testthat/test-survival-tune-metric-arguments.R b/tests/testthat/test-survival-tune-metric-arguments.R new file mode 100644 index 0000000..89fdbad --- /dev/null +++ b/tests/testthat/test-survival-tune-metric-arguments.R @@ -0,0 +1,60 @@ + +test_that("metric inputs are checked for censored regression models", { + skip_if_not_installed("censored") + library(parsnip) + library(workflows) + library(yardstick) + library(rsample) + library(censored) + + stanford2$event_time <- Surv(stanford2$time, stanford2$status) + stanford2 <- stanford2[, c("event_time", "age")] + + wflow <- workflow(event_time ~ age, survival_reg()) + sr_spec <- survival_reg(dist = tune()) + wflow_tune <- workflow(event_time ~ age, sr_spec) + + set.seed(1) + split <- initial_split(stanford2) + rs <- vfold_cv(stanford2) + + # ------------------------------------------------------------------------------ + # setup metric sets + + met_srv <- metric_set(concordance_survival) + met_reg <- metric_set(rmse) + met_cls <- metric_set(brier_class) + + # ------------------------------------------------------------------------------ + # check inputs + + expect_snapshot(check_metrics_arg(NULL, wflow)) + + expect_snapshot(check_metrics_arg(met_reg, wflow), error = TRUE) + expect_snapshot(check_metrics_arg(met_cls, wflow), error = TRUE) + expect_snapshot(check_metrics_arg(met_srv, wflow)) + + # ------------------------------------------------------------------------------ + # 
resampling + + expect_snapshot(fit_resamples(wflow, rs, metrics = met_cls), error = TRUE) + expect_snapshot(fit_resamples(wflow, rs, metrics = met_reg), error = TRUE) + + # ------------------------------------------------------------------------------ + # tuning + + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_cls), error = TRUE) + expect_snapshot(tune_grid(wflow_tune, rs, metrics = met_reg), error = TRUE) + + expect_snapshot(tune_bayes(wflow_tune, rs, metrics = met_cls), error = TRUE) + expect_snapshot(tune_bayes(wflow_tune, rs, metrics = met_reg), error = TRUE) + + # ------------------------------------------------------------------------------ + # final fit + + expect_snapshot(last_fit(wflow, split, metrics = met_cls), error = TRUE) + expect_snapshot(last_fit(wflow, split, metrics = met_reg), error = TRUE) + +}) + +
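For context, a minimal sketch of the rule these snapshots pin down (static survival metrics ignore `eval_time`, dynamic metrics need at least one time, integrated metrics need at least two), assuming `censored`, `yardstick`, `rsample`, `workflows`, and `tune` are installed; the object names `folds` and `wflow` below are illustrative and not part of the diff:

    library(tune)
    library(rsample)
    library(workflows)
    library(yardstick)
    suppressPackageStartupMessages(library(censored))

    # Same setup pattern as the tests above
    stanford2$event_time <- Surv(stanford2$time, stanford2$status)
    folds <- vfold_cv(stanford2[, c("event_time", "age")])
    wflow <- workflow(event_time ~ age, survival_reg())

    # Static metrics (e.g., concordance_survival) need no eval_time;
    # supplying one only triggers the "will be ignored" warning.
    fit_resamples(wflow, folds, metrics = metric_set(concordance_survival))

    # Dynamic metrics (e.g., brier_survival) need at least one eval_time.
    fit_resamples(
      wflow, folds,
      metrics = metric_set(brier_survival),
      eval_time = 100
    )

    # Integrated metrics (e.g., brier_survival_integrated) need at least two.
    fit_resamples(
      wflow, folds,
      metrics = metric_set(brier_survival_integrated),
      eval_time = c(100, 500)
    )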