diff --git a/NAMESPACE b/NAMESPACE index 855c65b5c..5c0835022 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -54,6 +54,7 @@ S3method(setup_approach,timeseries) S3method(setup_approach,vaeac) export(additional_regression_setup) export(aicc_full_single_cpp) +export(append_vS_list) export(check_convergence) export(cli_compute_vS) export(cli_iter) diff --git a/R/approach_vaeac.R b/R/approach_vaeac.R index da3f17555..2eff261c3 100644 --- a/R/approach_vaeac.R +++ b/R/approach_vaeac.R @@ -2531,7 +2531,7 @@ Last epoch: %d. \tVLB = %.3f \tIWAE = %.3f \tIWAE_running = %.3f\n", #' x_explain = x_explain, #' x_train = x_train, #' approach = approach, -#' prediction_zero = p0, +#' phi0 = p0, #' n_MC_samples = 1, # As we are only interested in the training of the vaeac #' vaeac.epochs = 10, # Should be higher in applications. #' vaeac.n_vaeacs_initialize = 1, @@ -2545,7 +2545,7 @@ Last epoch: %d. \tVLB = %.3f \tIWAE = %.3f \tIWAE_running = %.3f\n", #' x_explain = x_explain, #' x_train = x_train, #' approach = approach, -#' prediction_zero = p0, +#' phi0 = p0, #' n_MC_samples = 1, # As we are only interested in the training of the vaeac #' vaeac.epochs = 10, # Should be higher in applications. #' vaeac.width = 16, @@ -2766,7 +2766,7 @@ vaeac_plot_eval_crit <- function(explanation_list, #' x_explain = x_explain, #' x_train = x_train, #' approach = "vaeac", -#' prediction_zero = mean(y_train), +#' phi0 = mean(y_train), #' n_MC_samples = 1, #' vaeac.epochs = 10, #' vaeac.n_vaeacs_initialize = 1 diff --git a/R/cli.R b/R/cli.R index 5f2643add..694c7f7cd 100644 --- a/R/cli.R +++ b/R/cli.R @@ -80,7 +80,6 @@ cli_startup <- function(internal, model_class, verbose) { #' @export #' @keywords internal cli_compute_vS <- function(internal) { - verbose <- internal$parameters$verbose approach <- internal$parameters$approach diff --git a/R/compute_estimates.R b/R/compute_estimates.R index 34c14c826..05b7fd1d8 100644 --- a/R/compute_estimates.R +++ b/R/compute_estimates.R @@ -76,11 +76,11 @@ compute_estimates <- function(internal, vS_list) { #' @keywords internal postprocess_vS_list <- function(vS_list, internal) { keep_samp_for_vS <- internal$parameters$output_args$keep_samp_for_vS - prediction_zero <- internal$parameters$prediction_zero + phi0 <- internal$parameters$phi0 n_explain <- internal$parameters$n_explain # Appending the zero-prediction to the list - dt_vS0 <- as.data.table(rbind(c(1, rep(prediction_zero, n_explain)))) + dt_vS0 <- as.data.table(rbind(c(1, rep(phi0, n_explain)))) # Extracting/merging the data tables from the batch running # TODO: Need a memory and speed optimized way to transform the output from dt_vS_list to two different lists, diff --git a/R/compute_vS.R b/R/compute_vS.R index e6345e4a9..321a391d3 100644 --- a/R/compute_vS.R +++ b/R/compute_vS.R @@ -230,7 +230,6 @@ compute_MCint <- function(dt, pred_cols = "p_hat") { #' @export #' @keywords internal append_vS_list <- function(vS_list, internal) { - iter <- length(internal$iter_list) # Adds v_S output above to any vS_list already computed @@ -243,17 +242,17 @@ append_vS_list <- function(vS_list, internal) { # Creates a mapper from the last id_coalition to the new id_coalition numbering id_coalitions_mapper <- merge(prev_coalition_map, - current_coalition_map, - by = "coalitions_str", - suffixes = c("", "_new") + current_coalition_map, + by = "coalitions_str", + suffixes = c("", "_new") ) prev_vS_list_new <- list() # Applies the mapper to update the prev_vS_list to the new id_coalition numbering for (k in seq_along(prev_vS_list)) { 
prev_vS_list_new[[k]] <- merge(prev_vS_list[[k]], - id_coalitions_mapper[, .(id_coalition, id_coalition_new)], - by = "id_coalition" + id_coalitions_mapper[, .(id_coalition, id_coalition_new)], + by = "id_coalition" ) prev_vS_list_new[[k]][, id_coalition := id_coalition_new] prev_vS_list_new[[k]][, id_coalition_new := NULL] @@ -263,5 +262,4 @@ append_vS_list <- function(vS_list, internal) { vS_list <- c(prev_vS_list_new, vS_list) } return(vS_list) - } diff --git a/R/explain.R b/R/explain.R index 2381eb2a8..caaaf0743 100644 --- a/R/explain.R +++ b/R/explain.R @@ -21,7 +21,7 @@ #' `"categorical"`, `"timeseries"`, `"independence"`, `"regression_separate"`, or `"regression_surrogate"`. #' The two regression approaches cannot be combined with any other approach. See details for more information. #' -#' @param prediction_zero Numeric. +#' @param phi0 Numeric. #' The prediction value for unseen data, i.e. an estimate of the expected prediction without conditioning on any #' features. #' Typically we set this value equal to the mean of the response variable in our training data, but other choices #' @@ -211,7 +211,7 @@ #' \describe{ #' \item{shapley_values_est}{data.table with the estimated Shapley values with the explained observations in the rows and #' features along the columns. -#' The column `none` is the prediction not devoted to any of the features (given by the argument `prediction_zero`)} +#' The column `none` is the prediction not devoted to any of the features (given by the argument `phi0`)} #' \item{shapley_values_sd}{data.table with the standard deviation of the Shapley values reflecting the uncertainty. #' Note that this only reflects the coalition sampling part of the kernelSHAP procedure, and is therefore by #' definition 0 when all coalitions are used. 
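The rename documented above is mechanical: every former `prediction_zero` argument becomes `phi0`, with unchanged semantics (an estimate of the expected prediction when no features are conditioned on). A minimal sketch of a post-rename call, assuming a fitted `model` and the `x_train`/`x_explain`/`y_train` objects used throughout the package examples:

    # Sketch only: `model`, `x_train`, `x_explain`, and `y_train` are assumed
    # to exist as in the package examples in this patch.
    library(shapr)
    p0 <- mean(y_train) # expected prediction without conditioning on features
    explanation <- explain(
      model = model,
      x_explain = x_explain,
      x_train = x_train,
      approach = "gaussian",
      phi0 = p0, # previously: prediction_zero = p0
      n_MC_samples = 1e2
    )
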
@@ -269,7 +269,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "empirical", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -279,7 +279,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -289,7 +289,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "copula", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -299,7 +299,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "ctree", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -310,7 +310,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = approach, -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -332,7 +332,7 @@ #' x_train = x_train, #' group = group_list, #' approach = "empirical", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' print(explain_groups$shapley_values_est) @@ -347,7 +347,7 @@ #' model = model, #' x_explain = x_explain, #' x_train = x_train, -#' prediction_zero = p, +#' phi0 = p, #' approach = "regression_separate", #' regression.model = parsnip::linear_reg() #' ) @@ -356,7 +356,7 @@ #' model = model, #' x_explain = x_explain, #' x_train = x_train, -#' prediction_zero = p, +#' phi0 = p, #' approach = "regression_surrogate", #' regression.model = parsnip::linear_reg() #' ) @@ -370,7 +370,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2, #' iterative = TRUE, #' iterative_args = list(initial_n_coalitions = 10) @@ -395,7 +395,7 @@ explain <- function(model, x_explain, x_train, approach, - prediction_zero, + phi0, iterative = NULL, max_n_coalitions = NULL, group = NULL, @@ -433,7 +433,7 @@ explain <- function(model, x_explain = x_explain, approach = approach, paired_shap_sampling = paired_shap_sampling, - prediction_zero = prediction_zero, + phi0 = phi0, max_n_coalitions = max_n_coalitions, group = group, n_MC_samples = n_MC_samples, diff --git a/R/explain_forecast.R b/R/explain_forecast.R index 36aaddbae..eeaff7ca3 100644 --- a/R/explain_forecast.R +++ b/R/explain_forecast.R @@ -79,7 +79,7 @@ #' explain_y_lags = 2, #' horizon = 3, #' approach = "empirical", -#' prediction_zero = p0_ar, +#' phi0 = p0_ar, #' group_lags = FALSE #' ) #' @@ -93,7 +93,7 @@ explain_forecast <- function(model, explain_xreg_lags = explain_y_lags, horizon, approach, - prediction_zero, + phi0, max_n_coalitions = NULL, iterative = NULL, iterative_args = list(), @@ -125,7 +125,7 @@ explain_forecast <- function(model, # Checks data/model compatability internal <- setup( approach = approach, - prediction_zero = prediction_zero, + phi0 = phi0, output_size = horizon, max_n_coalitions = max_n_coalitions, n_MC_samples = n_MC_samples, diff --git a/R/plot.R b/R/plot.R index 9bf19d2b7..a206d1951 100644 --- a/R/plot.R +++ b/R/plot.R @@ -101,7 +101,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "empirical", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -151,7 +151,7 @@ #' x_explain = x_explain, #' x_train = x_train, #' approach = "ctree", -#' prediction_zero = p, +#' phi0 = p, #' n_MC_samples = 1e2 #' ) #' @@ -299,7 +299,7 @@ plot.shapr <- function(x, # compute start and end values for waterfall rectangles data.table::setorder(dt_plot, rank_waterfall) dt_plot[, end := cumsum(phi), by = id] - expected <- x$internal$parameters$prediction_zero + expected <- 
x$internal$parameters$phi0 dt_plot[, start := c(expected, head(end, -1)), by = id] dt_plot[, phi_significant := format(phi, digits = digits), by = id] @@ -895,7 +895,7 @@ make_waterfall_plot <- function(dt_plot, #' ) #' #' # Specifying the phi_0, i.e. the expected prediction without any features -#' prediction_zero <- mean(y_train) +#' phi0 <- mean(y_train) #' #' # Independence approach #' explanation_independence <- explain( @@ -903,7 +903,7 @@ make_waterfall_plot <- function(dt_plot, #' x_explain = x_explain, #' x_train = x_train, #' approach = "independence", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -913,7 +913,7 @@ make_waterfall_plot <- function(dt_plot, #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e1 #' ) #' @@ -923,7 +923,7 @@ make_waterfall_plot <- function(dt_plot, #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -933,7 +933,7 @@ make_waterfall_plot <- function(dt_plot, #' x_explain = x_explain, #' x_train = x_train, #' approach = "ctree", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -943,7 +943,7 @@ make_waterfall_plot <- function(dt_plot, #' x_explain = x_explain, #' x_train = x_train, #' approach = c("gaussian", "independence", "ctree"), -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -1452,7 +1452,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' ) #' #' # Specifying the phi_0, i.e. the expected prediction without any features -#' prediction_zero <- mean(y_train) +#' phi0 <- mean(y_train) #' #' # Independence approach #' explanation_independence <- explain( @@ -1460,7 +1460,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' x_explain = x_explain, #' x_train = x_train, #' approach = "independence", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -1470,7 +1470,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' x_explain = x_explain, #' x_train = x_train, #' approach = "empirical", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -1480,7 +1480,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e1 #' ) #' @@ -1490,7 +1490,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' x_explain = x_explain, #' x_train = x_train, #' approach = "gaussian", -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' @@ -1500,7 +1500,7 @@ make_MSEv_coalition_plots <- function(MSEv_coalition_dt, #' x_explain = x_explain, #' x_train = x_train, #' approach = c("gaussian", "ctree", "empirical"), -#' prediction_zero = prediction_zero, +#' phi0 = phi0, #' n_MC_samples = 1e2 #' ) #' diff --git a/R/setup.R b/R/setup.R index be00abb95..904c7cdec 100644 --- a/R/setup.R +++ b/R/setup.R @@ -27,7 +27,7 @@ setup <- function(x_train, x_explain, approach, paired_shap_sampling = TRUE, - prediction_zero, + phi0, output_size = 1, max_n_coalitions, group, @@ -76,7 +76,7 @@ setup <- function(x_train, internal$parameters <- get_parameters( approach = approach, paired_shap_sampling = paired_shap_sampling, - prediction_zero = prediction_zero, + phi0 = phi0, 
output_size = output_size, max_n_coalitions = max_n_coalitions, group = group, @@ -163,7 +163,7 @@ get_prev_internal <- function(prev_shapr_object, #' @keywords internal get_parameters <- function(approach, paired_shap_sampling, - prediction_zero, + phi0, output_size = 1, max_n_coalitions, group, @@ -290,12 +290,12 @@ get_parameters <- function(approach, if (!is.null(causal_ordering) && !is.list(causal_ordering)) stop("`causal_ordering` must be a list.\n") #### Tests combining more than one parameter #### - # prediction_zero vs output_size - if (!all((is.numeric(prediction_zero)) && - all(length(prediction_zero) == output_size) && - all(!is.na(prediction_zero)))) { + # phi0 vs output_size + if (!all((is.numeric(phi0)) && + all(length(phi0) == output_size) && + all(!is.na(phi0)))) { stop(paste0( - "`prediction_zero` (", paste0(prediction_zero, collapse = ", "), + "`phi0` (", paste0(phi0, collapse = ", "), ") must be numeric and match the output size of the model (", paste0(output_size, collapse = ", "), ")." )) @@ -315,7 +315,7 @@ get_parameters <- function(approach, parameters <- list( approach = approach, paired_shap_sampling = paired_shap_sampling, - prediction_zero = prediction_zero, + phi0 = phi0, max_n_coalitions = max_n_coalitions, group = group, n_MC_samples = n_MC_samples, diff --git a/README.Rmd b/README.Rmd index d7511c54a..db17b1395 100644 --- a/README.Rmd +++ b/README.Rmd @@ -171,7 +171,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) # Printing the Shapley values for the test data. diff --git a/README.md b/README.md index 203526d60..d5b68201b 100644 --- a/README.md +++ b/README.md @@ -227,7 +227,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) #> Note: Feature classes extracted from the model contains NA. #> Assuming feature classes from the data are correct. 
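The `phi0` check in `get_parameters()` above enforces one baseline value per model output: `phi0` must be numeric, of length `output_size`, and free of NAs, otherwise `explain()` stops with the quoted error message. A small sketch of that invariant with assumed values; for a three-step forecast (as in the `explain_forecast()` calls elsewhere in this patch, where `phi0 = p0_ar[1:3]` pairs with `horizon = 3`), `output_size` is 3:

    # Sketch of the validated invariant; the phi0 values here are assumed.
    output_size <- 3 # e.g. horizon = 3 in explain_forecast()
    phi0 <- c(4.8, 4.9, 5.0) # one baseline prediction per forecast horizon
    stopifnot(
      is.numeric(phi0),
      length(phi0) == output_size,
      !anyNA(phi0)
    )
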
diff --git a/inst/scripts/Beeswarm_illustration.R b/inst/scripts/Beeswarm_illustration.R index 83fedf42f..72b61cce7 100644 --- a/inst/scripts/Beeswarm_illustration.R +++ b/inst/scripts/Beeswarm_illustration.R @@ -498,7 +498,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 10, # Do not need precise Shapley values to illustrate the behaviour of beeswarm plot n_MC_samples = 10 # Do not need precise Shapley values to illustrate the behaviour of beeswarm plot ) diff --git a/inst/scripts/Compare_Conditional_and_Causal_Categorical.R b/inst/scripts/Compare_Conditional_and_Causal_Categorical.R index 502b16e47..f30efa475 100644 --- a/inst/scripts/Compare_Conditional_and_Causal_Categorical.R +++ b/inst/scripts/Compare_Conditional_and_Causal_Categorical.R @@ -33,7 +33,7 @@ causal_independence <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "independence", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -47,7 +47,7 @@ causal_categorical <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -63,7 +63,7 @@ causal_ctree <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -79,7 +79,7 @@ causal_vaeac <- explain( x_train = x_train_categorical, approach = "vaeac", vaeac.epochs = 20, - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -102,7 +102,7 @@ conditional_independence <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "independence", - prediction_zero = p0, + phi0 = p0, # asymmetric = FALSE, # causal_ordering = list(3:4, 2, 1), # confounding = c(TRUE, FALSE, FALSE), @@ -117,7 +117,7 @@ conditional_categorical <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, # asymmetric = FALSE, # causal_ordering = list(3:4, 2, 1), # confounding = c(TRUE, FALSE, FALSE), @@ -133,7 +133,7 @@ conditional_ctree <- explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "ctree", - prediction_zero = p0, + phi0 = p0, # asymmetric = FALSE, # causal_ordering = list(3:4, 2, 1), # confounding = c(TRUE, FALSE, FALSE), @@ -149,7 +149,7 @@ conditional_vaeac <- explain( x_train = x_train_categorical, approach = "vaeac", vaeac.epochs = 20, - prediction_zero = p0, + phi0 = p0, # asymmetric = FALSE, # causal_ordering = list(3:4, 2, 1), # confounding = c(TRUE, FALSE, FALSE), diff --git a/inst/scripts/Compare_categorical_prepare_data.R b/inst/scripts/Compare_categorical_prepare_data.R index c0c4ab340..dd913ee4d 100644 --- a/inst/scripts/Compare_categorical_prepare_data.R +++ b/inst/scripts/Compare_categorical_prepare_data.R @@ -452,7 +452,7 @@ explanation = explain( x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, n_batches = 1, timing = FALSE ) diff --git a/inst/scripts/Heskes_bike_rental_illustration.R 
b/inst/scripts/Heskes_bike_rental_illustration.R index 9fb9d2722..e74e48ce9 100644 --- a/inst/scripts/Heskes_bike_rental_illustration.R +++ b/inst/scripts/Heskes_bike_rental_illustration.R @@ -51,7 +51,7 @@ save_plots <- FALSE #' dat_A, #' approach = "gaussian", #' explainer = explainer, -#' prediction_zero = y_mean +#' phi0 = y_mean #' ) #' sina_plot(explanation_classic) #' @@ -59,7 +59,7 @@ save_plots <- FALSE #' dat_A, #' approach = "causal", #' explainer = explainer, -#' prediction_zero = y_mean, +#' phi0 = y_mean, #' ordering = list(1, c(2, 3)) #' ) #' sina_plot(explanation_causal) @@ -195,7 +195,7 @@ model <- xgboost( ) # caret::RMSE(y_explain, predict(model, x_explain)) sqrt(mean((predict(model, x_explain) - y_explain)^2)) -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) message("1. Prepared and plotted data, trained XGBoost model") @@ -209,7 +209,7 @@ explanation_gaussian_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = list(1:7), confounding = FALSE, @@ -236,7 +236,7 @@ explanation_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -284,7 +284,7 @@ explanation_marginal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = list(1:7), confounding = FALSE, @@ -331,7 +331,7 @@ explanation_asymmetric_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = FALSE, @@ -376,7 +376,7 @@ explanation_asymmetric_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -602,7 +602,7 @@ explanation_asymmetric_causal_gaussian_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -622,7 +622,7 @@ explanation_asymmetric_causal_copula_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "copula", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -642,7 +642,7 @@ explanation_asymmetric_causal_ctree_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "ctree", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -663,7 +663,7 @@ explanation_asymmetric_causal_independence_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -683,7 +683,7 @@ explanation_asymmetric_causal_empirical_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "empirical", - prediction_zero = 
prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -703,7 +703,7 @@ explanation_asymmetric_causal_vaeac_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "vaeac", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -744,7 +744,7 @@ explanation_asymmetric_all_gaussian2 <- x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = FALSE, @@ -773,7 +773,7 @@ explanation_asymmetric_gaussian <- x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = FALSE, @@ -800,7 +800,7 @@ explanation_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -821,7 +821,7 @@ explanation_causal_time_sampled = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, c(2, 3), c(4:7)), confounding = c(FALSE, TRUE, FALSE), @@ -869,7 +869,7 @@ explanation_group_asymmetric_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, 2:3, 4:6), confounding = c(FALSE, TRUE, FALSE), @@ -892,7 +892,7 @@ explanation_group_asymmetric_causal_sampled_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(1, 2:3, 4:6), confounding = confounding, @@ -914,7 +914,7 @@ explanation_group_symmetric_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = list(1, 2:3, 4:6), #CONTINUE HERE WITH CHANGING AND SEE WHAT CRASHES confounding = confounding, @@ -933,7 +933,7 @@ explanation_group_symmetric_causal_sampled_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = causal_ordering, confounding = confounding, @@ -955,7 +955,7 @@ explanation_group_symmetric_conditional_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = NULL, confounding = FALSE, @@ -974,7 +974,7 @@ explanation_group_symmetric_conditional_sampled_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = FALSE, causal_ordering = NULL, confounding = FALSE, @@ -994,7 +994,7 @@ explanation_group_asymmetric_conditional_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = list(seq_along(group_list)), confounding = FALSE, @@ -1014,7 +1014,7 @@ 
explanation_group_asymmetric_causal_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = causal_ordering, confounding = c(FALSE, TRUE, FALSE), @@ -1039,7 +1039,7 @@ explanation_group_asymmetric_conditional_sampled_time = system.time({ x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, asymmetric = TRUE, causal_ordering = causal_ordering, confounding = FALSE, diff --git a/inst/scripts/check_model_workflow.R b/inst/scripts/check_model_workflow.R index ef76a36ff..296c090ae 100644 --- a/inst/scripts/check_model_workflow.R +++ b/inst/scripts/check_model_workflow.R @@ -50,7 +50,7 @@ explain_workflow = explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) @@ -59,7 +59,7 @@ explain_xgboost = explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) @@ -103,7 +103,7 @@ explain_decision_tree_ctree = explain( x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) @@ -113,7 +113,7 @@ explain_decision_tree_lm = explain( x_train = x_train_mixed, approach = "regression_separate", regression.model = parsnip::linear_reg(), - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) @@ -149,7 +149,7 @@ explain_decision_model_rf_cv_rf = explain( x_train = x_train_mixed, approach = "regression_separate", regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression"), - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) @@ -159,7 +159,7 @@ explain_decision_model_rf_cv_ctree = explain( x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, n_batches = 4 ) diff --git a/inst/scripts/compare_copula_in_R_and_C++.R b/inst/scripts/compare_copula_in_R_and_C++.R index d0698dbf2..f3811c7fa 100644 --- a/inst/scripts/compare_copula_in_R_and_C++.R +++ b/inst/scripts/compare_copula_in_R_and_C++.R @@ -906,7 +906,7 @@ arma::cube prepare_data_copula_cpp_and_R(arma::mat MC_samples_mat, predictive_model <- lm(y ~ ., data = data_train_with_response) # Get the prediction zero, i.e., the phi0 Shapley value. - prediction_zero <- mean(response_train) + phi0 <- mean(response_train) model <- predictive_model x_explain <- data_test @@ -925,7 +925,7 @@ arma::cube prepare_data_copula_cpp_and_R(arma::mat MC_samples_mat, x_train = x_train, x_explain = x_explain, approach = approach, - prediction_zero = prediction_zero, + phi0 = phi0, n_coalitions = n_coalitions, group = group, n_samples = n_samples, @@ -1400,7 +1400,7 @@ all.equal(shapr_mat_arma_res, sourceCpp_mat_arma_res) predictive_model <- lm(y ~ ., data = data_train_with_response) # Get the prediction zero, i.e., the phi0 Shapley value. 
- prediction_zero <- mean(response_train) + phi0 <- mean(response_train) model <- predictive_model x_explain <- data_test @@ -1419,7 +1419,7 @@ all.equal(shapr_mat_arma_res, sourceCpp_mat_arma_res) x_train = x_train, x_explain = x_explain, approach = approach, - prediction_zero = prediction_zero, + phi0 = phi0, n_coalitions = n_coalitions, group = group, n_samples = n_samples, @@ -1511,7 +1511,7 @@ temp_shapley_value_func = function(dt, internal, model, predict_model) { xreg = internal$data$xreg ) dt_vS2 <- compute_MCint(dt, paste0("p_hat", seq_len(internal$parameters$output_size))) - dt_vS <- rbind(t(as.matrix(c(1, rep(prediction_zero, n_test)))), dt_vS2, t(as.matrix(c(2^M, response_test))), + dt_vS <- rbind(t(as.matrix(c(1, rep(phi0, n_test)))), dt_vS2, t(as.matrix(c(2^M, response_test))), use.names = FALSE) colnames(dt_vS) = colnames(dt_vS2) compute_shapley_new(internal, dt_vS) diff --git a/inst/scripts/compare_gaussian_in_R_and_C++.R b/inst/scripts/compare_gaussian_in_R_and_C++.R index 0106109a6..b358c9127 100644 --- a/inst/scripts/compare_gaussian_in_R_and_C++.R +++ b/inst/scripts/compare_gaussian_in_R_and_C++.R @@ -2289,7 +2289,7 @@ prepare_data_gaussian_new_v6 <- function(internal, index_features, ...) { predictive_model <- lm(y ~ ., data = data_train_with_response) # Get the prediction zero, i.e., the phi0 Shapley value. - prediction_zero <- mean(response_train) + phi0 <- mean(response_train) model <- predictive_model x_explain <- data_test @@ -2308,7 +2308,7 @@ prepare_data_gaussian_new_v6 <- function(internal, index_features, ...) { x_train = x_train, x_explain = x_explain, approach = approach, - prediction_zero = prediction_zero, + phi0 = phi0, n_coalitions = n_coalitions, group = group, n_samples = n_samples, diff --git a/inst/scripts/compare_shap_python.R b/inst/scripts/compare_shap_python.R index 6a4ed7787..ebc39e2c3 100644 --- a/inst/scripts/compare_shap_python.R +++ b/inst/scripts/compare_shap_python.R @@ -47,12 +47,12 @@ time_R_prepare <- proc.time() # Computing the actual Shapley values with kernelSHAP accounting for feature dependence using # the empirical (conditional) distribution approach with bandwidth parameter sigma = 0.1 (default) -explanation_independence <- explain(x_test, explainer, approach = "independence", prediction_zero = p0) +explanation_independence <- explain(x_test, explainer, approach = "independence", phi0 = p0) time_R_indep0 <- proc.time() explanation_largesigma <- explain(x_test, explainer, approach = "empirical", type = "fixed_sigma", - fixed_sigma_vec = 10000, w_threshold = 1, prediction_zero = p0) + fixed_sigma_vec = 10000, w_threshold = 1, phi0 = p0) time_R_largesigma0 <- proc.time() diff --git a/inst/scripts/compare_shap_python_new.R b/inst/scripts/compare_shap_python_new.R index b8a0a2b33..5e51120f4 100644 --- a/inst/scripts/compare_shap_python_new.R +++ b/inst/scripts/compare_shap_python_new.R @@ -40,14 +40,14 @@ time_R_start <- proc.time() # Computing the actual Shapley values with kernelSHAP accounting for feature dependence using # the empirical (conditional) distribution approach with bandwidth parameter sigma = 0.1 (default) explanation_independence <- explain(model = model,x_explain = x_test,x_train=x_train, - approach = "independence", prediction_zero = p0,n_batches = 1) + approach = "independence", phi0 = p0,n_batches = 1) time_R_indep0 <- proc.time() explanation_largesigma <- explain(model = model,x_explain = x_test,x_train=x_train, approach = "empirical",empirical.type="fixed_sigma",empirical.fixed_sigma=10000,empirical.eta=1, - 
prediction_zero = p0,n_batches=1) + phi0 = p0,n_batches=1) time_R_largesigma0 <- proc.time() diff --git a/inst/scripts/devel/Rscript_test_shapr.R b/inst/scripts/devel/Rscript_test_shapr.R index 8f8b5a504..03380a6ed 100644 --- a/inst/scripts/devel/Rscript_test_shapr.R +++ b/inst/scripts/devel/Rscript_test_shapr.R @@ -62,7 +62,7 @@ sys_time_start_shapr <- Sys.time() explainer <- shapr(x_train, model) sys_time_end_shapr <- Sys.time() -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) n_batches_use <- min(nrow(explainer$S),n_batches) @@ -73,7 +73,7 @@ explanation <- explain( x_test, approach = approach, explainer = explainer, - prediction_zero = prediction_zero, + phi0 = phi0, n_batches = n_batches_use ) sys_time_end_explain <- Sys.time() diff --git a/inst/scripts/devel/compare_explain_batch.R b/inst/scripts/devel/compare_explain_batch.R index 45473a747..48544bd80 100644 --- a/inst/scripts/devel/compare_explain_batch.R +++ b/inst/scripts/devel/compare_explain_batch.R @@ -25,13 +25,13 @@ model <- xgboost( library(shapr) explainer <- shapr(x_train, model,n_coalitions = 100) p = mean(y_train) -gauss = explain(x_test, explainer, "gaussian", prediction_zero = p, n_samples = 10000) -emp = explain(x_test, explainer, "empirical", prediction_zero = p, n_samples = 10000) -copula = explain(x_test, explainer, "copula", prediction_zero = p, n_samples = 10000) -indep = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000) -comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), prediction_zero = p, n_samples = 10000) -ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, prediction_zero = p, n_samples = 10000) -ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), prediction_zero = p, n_samples = 10000) +gauss = explain(x_test, explainer, "gaussian", phi0 = p, n_samples = 10000) +emp = explain(x_test, explainer, "empirical", phi0 = p, n_samples = 10000) +copula = explain(x_test, explainer, "copula", phi0 = p, n_samples = 10000) +indep = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000) +comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), phi0 = p, n_samples = 10000) +ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, phi0 = p, n_samples = 10000) +ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), phi0 = p, n_samples = 10000) #saveRDS(list(gauss = gauss, empirical = emp, copula = copula, indep = indep, comb = comb, ctree = ctree, ctree_comb = ctree2), file = "inst/scripts/devel/master_res2.rds") # saveRDS(list(ctree = ctree, ctree_comb = ctree2), file = "inst/scripts/devel/master_res_ctree.rds") @@ -42,13 +42,13 @@ nobs = 6 x_test <- as.matrix(Boston[1:nobs, x_var]) explainer <- shapr(x_train, model,n_coalitions = 100) p = mean(y_train) -gauss = explain(x_test, explainer, "gaussian", prediction_zero = p, n_samples = 10000, n_batches = 1) -emp = explain(x_test, explainer, "empirical", prediction_zero = p, n_samples = 10000, n_batches = 1) -copula = explain(x_test, explainer, "copula", prediction_zero = p, n_samples = 10000, n_batches = 1) -indep = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000, n_batches = 1) -comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), prediction_zero = p, n_samples = 10000, n_batches = 1) -ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, prediction_zero = p, n_samples = 10000, 
n_batches = 1) -ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), prediction_zero = p, n_samples = 10000, n_batches = 1) +gauss = explain(x_test, explainer, "gaussian", phi0 = p, n_samples = 10000, n_batches = 1) +emp = explain(x_test, explainer, "empirical", phi0 = p, n_samples = 10000, n_batches = 1) +copula = explain(x_test, explainer, "copula", phi0 = p, n_samples = 10000, n_batches = 1) +indep = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000, n_batches = 1) +comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), phi0 = p, n_samples = 10000, n_batches = 1) +ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, phi0 = p, n_samples = 10000, n_batches = 1) +ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), phi0 = p, n_samples = 10000, n_batches = 1) res = readRDS("inst/scripts/devel/master_res2.rds") @@ -60,8 +60,8 @@ res$comb$dt comb$dt # With batches -gauss_b = explain(x_test, explainer, "gaussian", prediction_zero = p, n_samples = 10000, n_batches = 3) -emp_b = explain(x_test, explainer, "empirical", prediction_zero = p, n_samples = 10000, n_batches = 3) +gauss_b = explain(x_test, explainer, "gaussian", phi0 = p, n_samples = 10000, n_batches = 3) +emp_b = explain(x_test, explainer, "empirical", phi0 = p, n_samples = 10000, n_batches = 3) gauss_b$dt res$gauss$dt @@ -71,7 +71,7 @@ res$empirical$dt #### MJ stuff here: -explain.independence2 <- function(x, explainer, approach, prediction_zero, +explain.independence2 <- function(x, explainer, approach, phi0, n_samples = 1e3, n_batches = 1, seed = 1, only_return_contrib_dt = FALSE, ...) { @@ -82,7 +82,7 @@ explain.independence2 <- function(x, explainer, approach, prediction_zero, explainer$approach <- approach explainer$n_samples <- n_samples - r <- prepare_and_predict(explainer, n_batches, prediction_zero, only_return_contrib_dt, ...) + r <- prepare_and_predict(explainer, n_batches, phi0, only_return_contrib_dt, ...) } @@ -137,36 +137,36 @@ prepare_data.independence2 <- function(x, index_features = NULL, ...) 
{ # Using independence with n_samples > nrow(x_train) such that no sampling is performed -indep1 = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000, n_batches = 1) -indep2 = explain(x_test, explainer, "independence2", prediction_zero = p, n_samples = 10000, n_batches = 1) +indep1 = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000, n_batches = 1) +indep2 = explain(x_test, explainer, "independence2", phi0 = p, n_samples = 10000, n_batches = 1) all.equal(indep1,indep2) # TRUE -indep1_batch_2 = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000, n_batches = 2) +indep1_batch_2 = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000, n_batches = 2) all.equal(indep1,indep1_batch_2) # TRUE -indep1_batch_5 = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000, n_batches = 5) +indep1_batch_5 = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000, n_batches = 5) all.equal(indep1,indep1_batch_5) # TRUE -comb_indep_1_batch_1 = explain(x_test, explainer, c("independence", "independence", "independence", "independence"), prediction_zero = p, n_samples = 10000, n_batches = 1) +comb_indep_1_batch_1 = explain(x_test, explainer, c("independence", "independence", "independence", "independence"), phi0 = p, n_samples = 10000, n_batches = 1) all.equal(indep1,comb_indep_1_batch_1) # TRUE -comb_indep_1_batch_2 = explain(x_test, explainer, c("independence", "independence", "independence", "independence"), prediction_zero = p, n_samples = 10000, n_batches = 2) +comb_indep_1_batch_2 = explain(x_test, explainer, c("independence", "independence", "independence", "independence"), phi0 = p, n_samples = 10000, n_batches = 2) all.equal(indep1,comb_indep_1_batch_2) # TRUE -comb_indep_1_2_batch_1 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), prediction_zero = p, n_samples = 10000, n_batches = 1) +comb_indep_1_2_batch_1 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), phi0 = p, n_samples = 10000, n_batches = 1) all.equal(indep1,comb_indep_1_2_batch_1) #TRUE -comb_indep_1_2_batch_2 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), prediction_zero = p, n_samples = 10000, n_batches = 2) +comb_indep_1_2_batch_2 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), phi0 = p, n_samples = 10000, n_batches = 2) all.equal(indep1,comb_indep_1_2_batch_2) #TRUE -comb_indep_1_2_batch_5 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), prediction_zero = p, n_samples = 10000, n_batches = 5) +comb_indep_1_2_batch_5 = explain(x_test, explainer, c("independence", "independence", "independence2", "independence2"), phi0 = p, n_samples = 10000, n_batches = 5) all.equal(indep1,comb_indep_1_2_batch_5) #TRUE diff --git a/inst/scripts/devel/compare_indep_implementations.R b/inst/scripts/devel/compare_indep_implementations.R index a508e2d1e..ae035b492 100644 --- a/inst/scripts/devel/compare_indep_implementations.R +++ b/inst/scripts/devel/compare_indep_implementations.R @@ -37,7 +37,7 @@ explanation_old <- explain( approach = "empirical", type = "independence", explainer = explainer, - prediction_zero = p, seed=111,n_samples = 100 + phi0 = p, seed=111,n_samples = 100 ) print(proc.time()-t_old) #user system elapsed @@ -48,7 +48,7 @@ explanation_new <- explain( x_test, 
approach = "independence", explainer = explainer, - prediction_zero = p,seed = 111,n_samples = 100 + phi0 = p,seed = 111,n_samples = 100 ) print(proc.time()-t_new) #user system elapsed @@ -69,7 +69,7 @@ explanation_full_old <- explain( approach = "empirical", type = "independence", explainer = explainer, - prediction_zero = p, seed=111 + phi0 = p, seed=111 ) print(proc.time()-t_old) #user system elapsed @@ -80,7 +80,7 @@ explanation_full_new <- explain( x_test, approach = "independence", explainer = explainer, - prediction_zero = p,seed = 111 + phi0 = p,seed = 111 ) print(proc.time()-t_new) #user system elapsed diff --git a/inst/scripts/devel/demonstrate_combined_approaches_bugs.R b/inst/scripts/devel/demonstrate_combined_approaches_bugs.R index 57e5b9f44..bafa0bab3 100644 --- a/inst/scripts/devel/demonstrate_combined_approaches_bugs.R +++ b/inst/scripts/devel/demonstrate_combined_approaches_bugs.R @@ -10,7 +10,7 @@ explanation_1 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula", "empirical"), - prediction_zero = p0, + phi0 = p0, n_batches = 3, timing = FALSE, seed = 1) @@ -42,7 +42,7 @@ explanation_2 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "ctree", "ctree", "ctree" ,"ctree"), - prediction_zero = p0, + phi0 = p0, n_batches = 2, timing = FALSE, seed = 1) @@ -62,7 +62,7 @@ explanation_3 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "ctree", "ctree", "ctree" ,"ctree"), - prediction_zero = p0, + phi0 = p0, n_batches = 15, timing = FALSE, seed = 1) @@ -93,7 +93,7 @@ explanation_combined_1 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula", "empirical"), - prediction_zero = p0, + phi0 = p0, timing = FALSE, seed = 1) @@ -102,7 +102,7 @@ explanation_combined_2 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula", "empirical"), - prediction_zero = p0, + phi0 = p0, timing = FALSE, seed = 1) @@ -117,7 +117,7 @@ explanation_combined_3 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula", "ctree"), - prediction_zero = p0, + phi0 = p0, timing = FALSE, seed = 1) @@ -126,7 +126,7 @@ explanation_combined_4 = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula", "ctree"), - prediction_zero = p0, + phi0 = p0, timing = FALSE, seed = 1) diff --git a/inst/scripts/devel/devel_batch_testing.R b/inst/scripts/devel/devel_batch_testing.R index 8fcb2c807..20c1063f3 100644 --- a/inst/scripts/devel/devel_batch_testing.R +++ b/inst/scripts/devel/devel_batch_testing.R @@ -53,7 +53,7 @@ expl <- explain(model = model, x_explain= x_explain, x_train = x_train, approach = "ctree", - prediction_zero = p0, + phi0 = p0, n_batches = 100, n_samples = 1000, iterative = TRUE, diff --git a/inst/scripts/devel/devel_convergence_branch.R b/inst/scripts/devel/devel_convergence_branch.R index 9d022ba17..313a28698 100644 --- a/inst/scripts/devel/devel_convergence_branch.R +++ b/inst/scripts/devel/devel_convergence_branch.R @@ -43,7 +43,7 @@ explanation_iterative <- explain( x_train = x_train, approach = "gaussian", max_n_coalitions = 500, - prediction_zero = p0, + phi0 = p0, iterative = TRUE, print_shapleyres = TRUE, # tmp print_iter_info = 
TRUE, # tmp @@ -56,7 +56,7 @@ explanation_iterative <- explain( x_train = x_train, approach = "ctree", n_coalitions = 500, - prediction_zero = p0, + phi0 = p0, iterative = TRUE, print_shapleyres = TRUE, # tmp print_iter_info = TRUE, # tmp @@ -70,7 +70,7 @@ explanation_noniterative <- explain( x_train = x_train, approach = "gaussian", n_coalitions = 400, - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) @@ -81,7 +81,7 @@ explanation_iterative <- explain( x_train = x_train, approach = "gaussian", n_coalitions = 500, - prediction_zero = p0, + phi0 = p0, iterative = TRUE, iterative_args = list(initial_n_coalitions=10,convergence_tol=0.0001), print_shapleyres = TRUE, # tmp @@ -142,7 +142,7 @@ explanation_regular <- explain( x_train = x_train, approach = "gaussian", n_coalitions = NULL, - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) diff --git a/inst/scripts/devel/devel_non_exact_grouping.R b/inst/scripts/devel/devel_non_exact_grouping.R index bb8cf5d3b..d5e29e3b0 100644 --- a/inst/scripts/devel/devel_non_exact_grouping.R +++ b/inst/scripts/devel/devel_non_exact_grouping.R @@ -38,14 +38,14 @@ explanation1 <- explain( x_test, approach = "independence", explainer = explainer1, - prediction_zero = p + phi0 = p ) explanation2 <- explain( x_test, approach = "independence", explainer = explainer2, - prediction_zero = p + phi0 = p ) diff --git a/inst/scripts/devel/devel_parallelization.R b/inst/scripts/devel/devel_parallelization.R index 6dd6d10bd..21aa964cc 100644 --- a/inst/scripts/devel/devel_parallelization.R +++ b/inst/scripts/devel/devel_parallelization.R @@ -35,7 +35,7 @@ explanation0 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time0 <- stop-start @@ -48,7 +48,7 @@ explanation1 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time1 <- stop-start @@ -60,7 +60,7 @@ explanation2 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time2 <- stop-start @@ -72,7 +72,7 @@ explanation3 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time3 <- stop-start @@ -84,7 +84,7 @@ explanation4 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time4 <- stop-start @@ -96,7 +96,7 @@ explanation5 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time5 <- stop-start @@ -108,7 +108,7 @@ explanation6 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() time6 <- stop-start @@ -123,7 +123,7 @@ explanation7 <- explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 32 + phi0 = p,n_batches = 32 ) stop <- proc.time() parallel::stopCluster(cl) diff --git a/inst/scripts/devel/devel_tmp_new_batch.R b/inst/scripts/devel/devel_tmp_new_batch.R index 290d5c009..37950b3a3 100644 --- a/inst/scripts/devel/devel_tmp_new_batch.R +++ b/inst/scripts/devel/devel_tmp_new_batch.R @@ -5,7 +5,7 @@ explainer <- explain_setup( x_test, approach = c("empirical","empirical","gaussian","copula"), 
explainer = explainer, - prediction_zero = p, + phi0 = p, n_batches = 4 ) diff --git a/inst/scripts/devel/devel_verbose.R b/inst/scripts/devel/devel_verbose.R index d9f0a54fc..ad4a2fb7d 100644 --- a/inst/scripts/devel/devel_verbose.R +++ b/inst/scripts/devel/devel_verbose.R @@ -4,7 +4,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative_args = list( initial_n_coalitions = 6, @@ -21,7 +21,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = TRUE,verbose=c("vS_details") ) @@ -30,7 +30,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = TRUE,verbose=c("basic","progress","vS_details"), regression.model = parsnip::decision_tree(tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"), @@ -43,7 +43,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = FALSE,verbose=c("basic","vS_details"), regression.model = parsnip::decision_tree(tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"), @@ -62,7 +62,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = FALSE,verbose=c("basic","progress","vS_details"), n_MC_samples = 100, @@ -75,7 +75,7 @@ ex2 <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = FALSE,verbose=c("basic","progress","vS_details"), n_MC_samples = 100, @@ -97,7 +97,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative = FALSE,verbose=c("basic") ) @@ -109,7 +109,7 @@ ex <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative_args = list( initial_n_coalitions = 6, @@ -127,7 +127,7 @@ explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, iterative_args <- list(n_initial_) verbose = c("basic"), diff --git a/inst/scripts/devel/explain_new.R b/inst/scripts/devel/explain_new.R index b6a1e2af7..1e86d1276 100644 --- a/inst/scripts/devel/explain_new.R +++ b/inst/scripts/devel/explain_new.R @@ -39,7 +39,7 @@ explanation_new <- explain_new( x_test, approach = "gaussian", explainer = explainer1, - prediction_zero = p, + phi0 = p, n_samples = 5*10^5,n_batches = 1 ) @@ -56,7 +56,7 @@ explanation_new <- explain_new( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p, + phi0 = p, n_samples = 10^5,n_batches = 4 ) @@ -73,7 +73,7 @@ explanation_new <- explain_new( x_test, approach = "empirical", explainer = explainer, - prediction_zero = p, + phi0 = p, n_samples = 10^5,n_batches = 1 ) @@ -90,7 +90,7 @@ explanation_new <- explain_new( x_test, approach = "empirical", explainer = explainer, - prediction_zero = p, + phi0 = p, n_samples = 10^5,n_batches = 4 ) @@ -112,7 +112,7 @@ explanation_new$dt_shapley # 
x_test, # approach = "gaussian", # explainer = explainer, -# prediction_zero = p +# phi0 = p # ) # # str(explainer,max.level = 1) @@ -122,7 +122,7 @@ explainer <- explain_setup( x_test, approach = "empirical", explainer = explainer, - prediction_zero = p, + phi0 = p, n_batches = 4 ) @@ -130,7 +130,7 @@ explainer0 <- explain_setup( x_test, approach = c("empirical","copula","ctree","gaussian"), explainer = explainer, - prediction_zero = p, + phi0 = p, n_batches = 7 ) @@ -149,7 +149,7 @@ explainer0$X # x_test, # approach = "gaussian", # explainer = explainer, -# prediction_zero = p, +# phi0 = p, # n_samples = 10^5 # ) diff --git a/inst/scripts/devel/real_data_iterative_kernelshap.R b/inst/scripts/devel/real_data_iterative_kernelshap.R index 9d6404f99..0e33ae141 100644 --- a/inst/scripts/devel/real_data_iterative_kernelshap.R +++ b/inst/scripts/devel/real_data_iterative_kernelshap.R @@ -107,7 +107,7 @@ expl <- shapr::explain(model = model, x_explain= x_explain[inds,], x_train = x_train, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) fwrite(expl$shapley_values_est,paste0(sim_results_saving_folder,"exact_shapley_values_", kernelSHAP_reweighting_strategy, ".csv")) print(Sys.time()) @@ -168,7 +168,7 @@ for (i in seq_along(testObs_computed_vec)){ x_explain= x_explain[testObs_computed_vec[i],], x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_coalitions = runcomps_list[[i]]) expl_approx[i,] = unlist(expl_approx_obj$shapley_values_est) expl_approx_obj_list[[i]] <- expl_approx_obj diff --git a/inst/scripts/devel/same_seed_as_master.R b/inst/scripts/devel/same_seed_as_master.R index 977c770f7..4460b7e62 100644 --- a/inst/scripts/devel/same_seed_as_master.R +++ b/inst/scripts/devel/same_seed_as_master.R @@ -22,13 +22,13 @@ model <- xgboost( # Prepare the data for explanation explainer <- shapr(x_train, model,n_coalitions = 100) p = mean(y_train) -gauss = explain(x_test, explainer, "gaussian", prediction_zero = p, n_samples = 10000) -emp = explain(x_test, explainer, "empirical", prediction_zero = p, n_samples = 10000) -copula = explain(x_test, explainer, "copula", prediction_zero = p, n_samples = 10000) -indep = explain(x_test, explainer, "independence", prediction_zero = p, n_samples = 10000) -comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), prediction_zero = p, n_samples = 10000) -ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, prediction_zero = p, n_samples = 10000) -ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), prediction_zero = p, n_samples = 10000) +gauss = explain(x_test, explainer, "gaussian", phi0 = p, n_samples = 10000) +emp = explain(x_test, explainer, "empirical", phi0 = p, n_samples = 10000) +copula = explain(x_test, explainer, "copula", phi0 = p, n_samples = 10000) +indep = explain(x_test, explainer, "independence", phi0 = p, n_samples = 10000) +comb = explain(x_test, explainer, c("gaussian", "gaussian", "empirical", "empirical"), phi0 = p, n_samples = 10000) +ctree = explain(x_test, explainer, "ctree", mincriterion = 0.95, phi0 = p, n_samples = 10000) +ctree2 = explain(x_test, explainer, "ctree", mincriterion = c(0.95, 0.95, 0.95, 0.95), phi0 = p, n_samples = 10000) # results from master diff --git a/inst/scripts/devel/simtest_iterative_kernelshap_lingauss_v2.R b/inst/scripts/devel/simtest_iterative_kernelshap_lingauss_v2.R index 1d88192c5..afbd2467f 100644 --- a/inst/scripts/devel/simtest_iterative_kernelshap_lingauss_v2.R +++ 
b/inst/scripts/devel/simtest_iterative_kernelshap_lingauss_v2.R @@ -80,7 +80,7 @@ expl <- shapr::explain(model = model, x_explain= x_explain[inds,], x_train = x_train, approach = "gaussian", - prediction_zero = p0,Sigma=Sigma,mu=mu) + phi0 = p0,Sigma=Sigma,mu=mu) fwrite(expl$shapley_values_est,paste0(sim_results_saving_folder,"exact_shapley_values_",shapley_threshold_val,"_",kernelSHAP_reweighting_strategy, ".csv")) @@ -149,7 +149,7 @@ for (i in testObs_computed_vec){ x_explain= x_explain[inds[i],], x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_coalitions = runcomps_list[[i]], Sigma=Sigma,mu=mu) expl_approx[i,] = unlist(expl_approx_obj$shapley_values_est) diff --git a/inst/scripts/devel/simtest_reweighting_strategies.R b/inst/scripts/devel/simtest_reweighting_strategies.R index 3a5a24f0c..3f6a1e3df 100644 --- a/inst/scripts/devel/simtest_reweighting_strategies.R +++ b/inst/scripts/devel/simtest_reweighting_strategies.R @@ -61,7 +61,7 @@ expl <- shapr::explain(model = model, x_train = x_train, approach = "gaussian", n_batches=100,n_samples = 10000, - prediction_zero = p0,Sigma=Sigma,mu=mu) + phi0 = p0,Sigma=Sigma,mu=mu) dt_vS_map <- merge(expl$internal$iter_list[[1]]$coalition_map,expl$internal$output$dt_vS,by="id_coalition")[,-"id_coalition"] @@ -92,7 +92,7 @@ for(i0 in seq_along(paired_shap_sampling_vec)){ approach = "gaussian", n_samples = 10, # Never used n_batches=10, - prediction_zero = p0, + phi0 = p0, Sigma=Sigma, mu=mu, seed = this_seed, @@ -226,7 +226,7 @@ for (i in testObs_computed_vec){ x_explain= x_explain[inds[i],], x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_combinations = runcomps_list[[i]], Sigma=Sigma,mu=mu) expl_approx[i,] = unlist(expl_approx_obj$shapley_values_est) diff --git a/inst/scripts/devel/simtest_reweighting_strategies_nonlinear.R b/inst/scripts/devel/simtest_reweighting_strategies_nonlinear.R index 0c1c8b521..c7ab347b7 100644 --- a/inst/scripts/devel/simtest_reweighting_strategies_nonlinear.R +++ b/inst/scripts/devel/simtest_reweighting_strategies_nonlinear.R @@ -78,7 +78,7 @@ expl <- shapr::explain(model = model, x_train = x_train, approach = "gaussian", n_batches=100,n_samples = 10000, - prediction_zero = p0,Sigma=Sigma,mu=mu) + phi0 = p0,Sigma=Sigma,mu=mu) dt_vS_map <- merge(expl$internal$iter_list[[1]]$coalition_map,expl$internal$output$dt_vS,by="id_coalition")[,-"id_coalition"] @@ -109,7 +109,7 @@ for(i0 in seq_along(paired_shap_sampling_vec)){ approach = "gaussian", n_samples = 10, # Never used n_batches=10, - prediction_zero = p0, + phi0 = p0, Sigma=Sigma, mu=mu, seed = this_seed, diff --git a/inst/scripts/devel/simtest_reweighting_strategies_nonlinear_nonunique_sampling.R b/inst/scripts/devel/simtest_reweighting_strategies_nonlinear_nonunique_sampling.R index cc8341f92..84f9e71c4 100644 --- a/inst/scripts/devel/simtest_reweighting_strategies_nonlinear_nonunique_sampling.R +++ b/inst/scripts/devel/simtest_reweighting_strategies_nonlinear_nonunique_sampling.R @@ -78,7 +78,7 @@ expl <- shapr::explain(model = model, x_train = x_train, approach = "gaussian", n_batches=100,n_samples = 10000, - prediction_zero = p0,Sigma=Sigma,mu=mu) + phi0 = p0,Sigma=Sigma,mu=mu) dt_vS_map <- merge(expl$internal$iter_list[[1]]$coalition_map,expl$internal$output$dt_vS,by="id_coalition")[,-"id_coalition"] @@ -109,7 +109,7 @@ for(ii in seq_along(n_coalitions_vec)){ approach = "gaussian", n_samples = 10, # Never used n_batches=10, - prediction_zero = p0, + phi0 = p0, Sigma=Sigma, mu=mu, seed = this_seed, diff 
--git a/inst/scripts/devel/simtest_timing_to_Frida.R b/inst/scripts/devel/simtest_timing_to_Frida.R index 93fc801bf..acc7e3e2a 100644 --- a/inst/scripts/devel/simtest_timing_to_Frida.R +++ b/inst/scripts/devel/simtest_timing_to_Frida.R @@ -81,7 +81,7 @@ expl <- explain( x_explain= x_explain[inds,], x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_coalitions = 100, Sigma=Sigma, mu=mu, diff --git a/inst/scripts/devel/testing_explain_forevast_n_comb.R b/inst/scripts/devel/testing_explain_forevast_n_comb.R index 43aeb6ced..03bea2181 100644 --- a/inst/scripts/devel/testing_explain_forevast_n_comb.R +++ b/inst/scripts/devel/testing_explain_forevast_n_comb.R @@ -9,7 +9,7 @@ h3test <- explain_forecast(model = model_arima_temp, explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar[1:3], + phi0 = p0_ar[1:3], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -26,7 +26,7 @@ h2test <- explain_forecast(model = model_arima_temp, explain_xreg_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = p0_ar[1:2], + phi0 = p0_ar[1:2], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -43,7 +43,7 @@ h1test <- explain_forecast(model = model_arima_temp, explain_xreg_lags = 2, horizon = 1, approach = "empirical", - prediction_zero = p0_ar[1], + phi0 = p0_ar[1], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -87,7 +87,7 @@ h3full <- explain_forecast(model = model_arima_temp, explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar[1:3], + phi0 = p0_ar[1:3], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -103,7 +103,7 @@ h1full <- explain_forecast(model = model_arima_temp, explain_xreg_lags = 2, horizon = 1, approach = "empirical", - prediction_zero = p0_ar[1], + phi0 = p0_ar[1], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -122,7 +122,7 @@ for (i in 1:reps){ explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar[1:3], + phi0 = p0_ar[1:3], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -139,7 +139,7 @@ for (i in 1:reps){ explain_xreg_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = p0_ar[1:2], + phi0 = p0_ar[1:2], group_lags = FALSE, n_batches = 1, timing = FALSE, @@ -156,7 +156,7 @@ for (i in 1:reps){ explain_xreg_lags = 2, horizon = 1, approach = "empirical", - prediction_zero = p0_ar[1], + phi0 = p0_ar[1], group_lags = FALSE, n_batches = 1, timing = FALSE, diff --git a/inst/scripts/devel/testing_for_valid_defualt_n_batches.R b/inst/scripts/devel/testing_for_valid_defualt_n_batches.R index a9d5739fd..a097fe73c 100644 --- a/inst/scripts/devel/testing_for_valid_defualt_n_batches.R +++ b/inst/scripts/devel/testing_for_valid_defualt_n_batches.R @@ -49,6 +49,6 @@ explanation <- explain( x_train = x_train, n_samples = 2, # Low value for fast computations approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_coalitions = any_number_equal_or_below_10 ) diff --git a/inst/scripts/devel/testing_intermediate_saving.R b/inst/scripts/devel/testing_intermediate_saving.R index 7214a2eac..85981c381 100644 --- a/inst/scripts/devel/testing_intermediate_saving.R +++ b/inst/scripts/devel/testing_intermediate_saving.R @@ -5,7 +5,7 @@ aa = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.01, @@ -22,7 +22,7 @@ bb = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = 
"independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, @@ -47,7 +47,7 @@ full = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, @@ -67,7 +67,7 @@ first = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, @@ -87,7 +87,7 @@ second = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, @@ -110,7 +110,7 @@ second_path = explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, diff --git a/inst/scripts/devel/testing_memory_monitoring.R b/inst/scripts/devel/testing_memory_monitoring.R index 90af5c872..f161c3d2e 100644 --- a/inst/scripts/devel/testing_memory_monitoring.R +++ b/inst/scripts/devel/testing_memory_monitoring.R @@ -56,7 +56,7 @@ peakRAM(explain( x_test, approach = "gaussian", explainer = explainer, - prediction_zero = p,n_batches = 4) + phi0 = p,n_batches = 4) ) # , @@ -64,28 +64,28 @@ peakRAM(explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 2), +# phi0 = p,n_batches = 2), # explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 4)) +# phi0 = p,n_batches = 4)) # explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 8), +# phi0 = p,n_batches = 8), # explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 16), +# phi0 = p,n_batches = 16), # explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 32) +# phi0 = p,n_batches = 32) # ) # s <- proc.time() @@ -93,6 +93,6 @@ peakRAM(explain( # x_test, # approach = "empirical", # explainer = explainer, -# prediction_zero = p,n_batches = 32) +# phi0 = p,n_batches = 32) # print(proc.time()-s) # diff --git a/inst/scripts/devel/testing_n_cobinations_equal_2_power_m.R b/inst/scripts/devel/testing_n_cobinations_equal_2_power_m.R index 980597b6f..ee8a01e3f 100644 --- a/inst/scripts/devel/testing_n_cobinations_equal_2_power_m.R +++ b/inst/scripts/devel/testing_n_cobinations_equal_2_power_m.R @@ -41,7 +41,7 @@ explanation_exact <- explain( n_samples = 2, # Low value for fast computations n_batches = 1, # Not related to the bug approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_coalitions = NULL ) @@ -53,7 +53,7 @@ explanation_should_also_be_exact <- explain( n_samples = 2, # Low value for fast computations n_batches = 1, # Not related to the bug approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_coalitions = 2^ncol(x_explain) ) diff --git a/inst/scripts/devel/testing_parallelization.R b/inst/scripts/devel/testing_parallelization.R index 24cacc1a7..3f82541f2 100644 --- a/inst/scripts/devel/testing_parallelization.R +++ b/inst/scripts/devel/testing_parallelization.R @@ -78,7 +78,7 @@ for(i in seq_len(nrow(res_dt))){ x_test, approach = approach_use, explainer = explainer, - prediction_zero = p,n_batches = 
n_batches_use + phi0 = p,n_batches = n_batches_use )},iterations = reps,time_unit ='s',memory = F, min_time = Inf ) diff --git a/inst/scripts/devel/testing_verification_ar_model.R b/inst/scripts/devel/testing_verification_ar_model.R index 0271bb62f..6cf50f894 100644 --- a/inst/scripts/devel/testing_verification_ar_model.R +++ b/inst/scripts/devel/testing_verification_ar_model.R @@ -28,7 +28,7 @@ exp <- explain_forecast(model = model_arima_temp, explain_xreg_lags = c(0,0), horizon = 2, approach = "empirical", - prediction_zero = c(0,0), + phi0 = c(0,0), group_lags = FALSE, n_batches = 1, timing = FALSE, diff --git a/inst/scripts/devel/time_series_annabelle.R b/inst/scripts/devel/time_series_annabelle.R index 26e1f8b38..62fdffd7b 100644 --- a/inst/scripts/devel/time_series_annabelle.R +++ b/inst/scripts/devel/time_series_annabelle.R @@ -71,7 +71,7 @@ explanation_group <- explain( x_explain = x_explain, x_train = x_train, approach = "timeseries", - prediction_zero = p0, + phi0 = p0, group = group, timeseries.fixed_sigma_vec = 2 # timeseries.bounds = c(-1, 2) diff --git a/inst/scripts/devel/verifying_arima_model_output.R b/inst/scripts/devel/verifying_arima_model_output.R index 4f27f3c40..47ce0641d 100644 --- a/inst/scripts/devel/verifying_arima_model_output.R +++ b/inst/scripts/devel/verifying_arima_model_output.R @@ -45,7 +45,7 @@ exp <- explain_forecast(model = model_arima_temp, explain_xreg_lags = c(0,1), horizon = 1, approach = "empirical", - prediction_zero = rep(mean(y),1), + phi0 = rep(mean(y),1), group_lags = FALSE, n_batches = 1) diff --git a/inst/scripts/devel/visual_bug_in_Shapley_bar_plot.R b/inst/scripts/devel/visual_bug_in_Shapley_bar_plot.R index 0c57fe6c1..f9189e480 100644 --- a/inst/scripts/devel/visual_bug_in_Shapley_bar_plot.R +++ b/inst/scripts/devel/visual_bug_in_Shapley_bar_plot.R @@ -41,7 +41,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_samples = 10, keep_samp_for_vS = TRUE ) diff --git a/inst/scripts/empirical_memory_testing2.R b/inst/scripts/empirical_memory_testing2.R index 678883b4e..84e1f863f 100644 --- a/inst/scripts/empirical_memory_testing2.R +++ b/inst/scripts/empirical_memory_testing2.R @@ -60,7 +60,7 @@ xy_train <- cbind(x_train,y=y_train) model <- lm(formula = y~.,data=xy_train) -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) n_batches_use <- min(2^p-2,n_batches) @@ -71,7 +71,7 @@ explanation_many <- explain( x_train = x_train, approach = approach, n_batches = n_batches_use, - prediction_zero = prediction_zero + phi0 = phi0 ) @@ -81,7 +81,7 @@ explanation_many <- explain( # x_train = x_train, # approach = approach, # n_batches = 1, -# prediction_zero = prediction_zero +# phi0 = phi0 #) @@ -99,7 +99,7 @@ internal <- setup( x_train = x_train, x_explain = x_explain, approach = approach, - prediction_zero = prediction_zero, + phi0 = phi0, n_coalitions = 2^p, group = NULL, n_samples = 1e3, diff --git a/inst/scripts/example_annabelle.R b/inst/scripts/example_annabelle.R index feede50bb..b2cad4031 100644 --- a/inst/scripts/example_annabelle.R +++ b/inst/scripts/example_annabelle.R @@ -46,7 +46,7 @@ temp = explain( x_explain = x_test, model = model, approach = "categorical", - prediction_zero = p, + phi0 = p, joint_probability_dt = joint_prob_dt ) print(temp) diff --git a/inst/scripts/example_ctree_method.R b/inst/scripts/example_ctree_method.R index 6f0d26f12..6765a989c 100644 --- a/inst/scripts/example_ctree_method.R +++ b/inst/scripts/example_ctree_method.R @@ -33,7 
+33,7 @@ p0 <- mean(y_train) # and sample = TRUE explanation <- explain(x_test, explainer, approach = "ctree", - prediction_zero = p0) + phi0 = p0) # Printing the Shapley values for the test data explanation$dt @@ -91,7 +91,7 @@ explanation_cat <- explain( dummylist$testdata_new, approach = "ctree", explainer = explainer_cat, - prediction_zero = p0 + phi0 = p0 ) # Plot the resulting explanations for observations 1 and 6, excluding diff --git a/inst/scripts/example_custom_model.R b/inst/scripts/example_custom_model.R index 34a6377a4..c2a476a31 100644 --- a/inst/scripts/example_custom_model.R +++ b/inst/scripts/example_custom_model.R @@ -65,7 +65,7 @@ get_model_specs.gbm <- function(x){ set.seed(123) explainer <- shapr(xy_train, model) p0 <- mean(xy_train[,y_var]) -explanation <- explain(x_test, explainer, approach = "empirical", prediction_zero = p0) +explanation <- explain(x_test, explainer, approach = "empirical", phi0 = p0) # Plot results plot(explanation) @@ -89,6 +89,6 @@ predict_model.gbm <- function(x, newdata) { set.seed(123) explainer <- shapr(x_train, model) p0 <- mean(xy_train[,y_var]) -explanation <- explain(x_test, explainer, approach = "empirical", prediction_zero = p0) +explanation <- explain(x_test, explainer, approach = "empirical", phi0 = p0) # Plot results plot(explanation) diff --git a/inst/scripts/example_plot_MSEv.R b/inst/scripts/example_plot_MSEv.R index 51fd56ee2..725b1d896 100644 --- a/inst/scripts/example_plot_MSEv.R +++ b/inst/scripts/example_plot_MSEv.R @@ -29,7 +29,7 @@ model <- xgboost::xgboost( ) # Specifying the phi_0, i.e. the expected prediction without any features -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) # Independence approach explanation_independence <- explain( @@ -37,7 +37,7 @@ explanation_independence <- explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -47,7 +47,7 @@ explanation_empirical <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -57,7 +57,7 @@ explanation_gaussian_1e1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e1 ) @@ -67,7 +67,7 @@ explanation_gaussian_1e2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -77,7 +77,7 @@ explanation_ctree <- explain( x_explain = x_explain, x_train = x_train, approach = "ctree", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -87,7 +87,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("gaussian", "independence", "ctree"), - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -290,7 +290,7 @@ explanation_gaussian_seed_1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10, n_coalitions = 10, seed = 1 @@ -301,7 +301,7 @@ explanation_gaussian_seed_1_V2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10, n_coalitions = 10, seed = 1 @@ -312,7 +312,7 @@ explanation_gaussian_seed_2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10, n_coalitions = 
10, seed = 2 @@ -323,7 +323,7 @@ explanation_gaussian_seed_3 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10, n_coalitions = 10, seed = 3 @@ -350,7 +350,7 @@ explanation_gaussian_all <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10 ) @@ -359,7 +359,7 @@ explanation_gaussian_only_5 <- explain( x_explain = x_explain[1:5, ], x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10 ) @@ -376,7 +376,7 @@ explanation_gaussian <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10 ) @@ -397,7 +397,7 @@ explanation_gaussian <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 10 ) diff --git a/inst/scripts/example_plot_SV_several_approaches.R b/inst/scripts/example_plot_SV_several_approaches.R index a25c66b36..564e4c133 100644 --- a/inst/scripts/example_plot_SV_several_approaches.R +++ b/inst/scripts/example_plot_SV_several_approaches.R @@ -27,7 +27,7 @@ model = xgboost::xgboost( ) # Specifying the phi_0, i.e. the expected prediction without any features -prediction_zero = mean(y_train) +phi0 = mean(y_train) # Independence approach explanation_independence = explain( @@ -35,7 +35,7 @@ explanation_independence = explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -45,7 +45,7 @@ explanation_empirical = explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -55,7 +55,7 @@ explanation_gaussian_1e1 = explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e1 ) @@ -65,7 +65,7 @@ explanation_gaussian_1e2 = explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) @@ -75,7 +75,7 @@ explanation_combined = explain( x_explain = x_explain, x_train = x_train, approach = c("gaussian", "ctree", "empirical"), - prediction_zero = prediction_zero, + phi0 = phi0, n_samples = 1e2 ) diff --git a/inst/scripts/example_plot_several_vaeacs_VLB_IWAE.R b/inst/scripts/example_plot_several_vaeacs_VLB_IWAE.R index a364a9ce4..85e9e3914 100644 --- a/inst/scripts/example_plot_several_vaeacs_VLB_IWAE.R +++ b/inst/scripts/example_plot_several_vaeacs_VLB_IWAE.R @@ -28,7 +28,7 @@ explanation_paired_sampling_TRUE <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_batches = 2, n_samples = 1, #' As we are only interested in the training of the vaeac vaeac.epochs = 25, #' Should be higher in applications. @@ -44,7 +44,7 @@ explanation_paired_sampling_FALSE <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_batches = 2, n_samples = 1, #' As we are only interested in the training of the vaeac vaeac.epochs = 25, #' Should be higher in applications. 
@@ -61,7 +61,7 @@ explanation_paired_sampling_FALSE_small <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_batches = 2, n_samples = 1, #' As we are only interested in the training of the vaeac vaeac.epochs = 25, #' Should be higher in applications. @@ -80,7 +80,7 @@ explanation_paired_sampling_TRUE_small <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_batches = 2, n_samples = 1, #' As we are only interested in the training of the vaeac vaeac.epochs = 25, #' Should be higher in applications. diff --git a/inst/scripts/explain_memory_testing.R b/inst/scripts/explain_memory_testing.R index 7c3030ffc..d9e35e7eb 100644 --- a/inst/scripts/explain_memory_testing.R +++ b/inst/scripts/explain_memory_testing.R @@ -60,7 +60,7 @@ xy_train <- cbind(x_train,y=y_train) model <- lm(formula = y~.,data=xy_train) -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) n_batches_use <- min(2^p-2,n_batches) @@ -74,7 +74,7 @@ explanation <- explain( x_train = x_train, approach = approach, n_batches = n_batches_use, - prediction_zero = prediction_zero + phi0 = phi0 ) },threshold=10^4) diff --git a/inst/scripts/problematic_plots_jens.R b/inst/scripts/problematic_plots_jens.R index 2aa26c896..176af6a9f 100644 --- a/inst/scripts/problematic_plots_jens.R +++ b/inst/scripts/problematic_plots_jens.R @@ -41,7 +41,7 @@ explanation_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) @@ -62,7 +62,7 @@ explanation_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) # Works fine @@ -85,7 +85,7 @@ explanation_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) # Only 4 ticks in the x-axis for the factor @@ -107,7 +107,7 @@ explanation_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) # Duplicated labels on the x-axis diff --git a/inst/scripts/readme_example.R b/inst/scripts/readme_example.R index b9e8852a9..9d63bc1a1 100644 --- a/inst/scripts/readme_example.R +++ b/inst/scripts/readme_example.R @@ -34,7 +34,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) # Printing the Shapley values for the test data. 
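For reference, the renamed interface condensed into one minimal sketch (not part of the patch itself); `model`, `x_train`, `x_explain`, and `y_train` are assumptions standing in for the objects built in the surrounding scripts:

```r
# Minimal sketch of the renamed call signature.
# Assumes `model`, `x_train`, `x_explain`, and `y_train` exist,
# as in the scripts above.
library(shapr)

p0 <- mean(y_train) # estimate of the expected prediction without any features

explanation <- explain(
  model = model,
  x_explain = x_explain,
  x_train = x_train,
  approach = "empirical",
  phi0 = p0 # formerly `prediction_zero`
)
```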
diff --git a/inst/scripts/testing_samling_ncombinations.R b/inst/scripts/testing_samling_ncombinations.R index 1d4fdb36f..d11220a4f 100644 --- a/inst/scripts/testing_samling_ncombinations.R +++ b/inst/scripts/testing_samling_ncombinations.R @@ -25,7 +25,7 @@ for (i in seq_along(n)) { x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = n_coalitions[i] ) ) @@ -52,7 +52,7 @@ for (i in seq_along(n)) { x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = n_coalitions[i] ) ) @@ -79,7 +79,7 @@ system.time({res = explain( x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = 1000 )}) @@ -89,7 +89,7 @@ system.time({res2 = explain( x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = 800 )}) @@ -100,7 +100,7 @@ system.time({res3 = explain( x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = NULL )}) @@ -117,7 +117,7 @@ res = profvis({res = explain( x_test, model = model, approach = "empirical", - prediction_zero = p_mean, + phi0 = p_mean, n_coalitions = n_coalitions[i] )}) res diff --git a/inst/scripts/time_series_annabelle.R b/inst/scripts/time_series_annabelle.R index 26e1f8b38..62fdffd7b 100644 --- a/inst/scripts/time_series_annabelle.R +++ b/inst/scripts/time_series_annabelle.R @@ -71,7 +71,7 @@ explanation_group <- explain( x_explain = x_explain, x_train = x_train, approach = "timeseries", - prediction_zero = p0, + phi0 = p0, group = group, timeseries.fixed_sigma_vec = 2 # timeseries.bounds = c(-1, 2) diff --git a/inst/scripts/timing_script_2023.R b/inst/scripts/timing_script_2023.R index 71a25c1c7..31c258d98 100644 --- a/inst/scripts/timing_script_2023.R +++ b/inst/scripts/timing_script_2023.R @@ -59,7 +59,7 @@ xy_train <- cbind(x_train,y=y_train) model <- lm(formula = y~.,data=xy_train) -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) n_batches_use <- min(2^p-2,n_batches) @@ -72,7 +72,7 @@ explanation <- explain( x_train = x_train, approach = approach, n_batches = n_batches_use, - prediction_zero = prediction_zero, + phi0 = phi0, n_coalitions = 10^4 ) diff --git a/inst/scripts/vilde/airquality_example.R b/inst/scripts/vilde/airquality_example.R index 9c162bfe2..59d2e225a 100644 --- a/inst/scripts/vilde/airquality_example.R +++ b/inst/scripts/vilde/airquality_example.R @@ -15,7 +15,7 @@ x <- explain( test, model = model, approach = "empirical", - prediction_zero = p + phi0 = p ) if (requireNamespace("ggplot2", quietly = TRUE)) { diff --git a/inst/scripts/vilde/check_progress.R b/inst/scripts/vilde/check_progress.R index aee0f765c..ec3da4887 100644 --- a/inst/scripts/vilde/check_progress.R +++ b/inst/scripts/vilde/check_progress.R @@ -25,34 +25,34 @@ p <- mean(y_train) plan(multisession, workers=3) # when we simply call explain(), no progress bar is shown -x <- explain(x_train, x_test, model, approach="gaussian", prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach="gaussian", phi0=p, n_batches = 4) # the handler specifies what kind of progress bar is shown # Wrapping explain() in with_progress() gives a progress bar when calling explain() handlers("txtprogressbar") x <- with_progress( - explain(x_train, x_test, model, approach="empirical", prediction_zero=p, n_batches = 5) + explain(x_train, x_test, model, approach="empirical", phi0=p, n_batches = 5) ) # with global=TRUE the progress bar is 
displayed whenever the explain-function is called, and there is no need to use with_progress() handlers(global = TRUE) -x <- explain(x_train, x_test, model, approach="gaussian", prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach="gaussian", phi0=p, n_batches = 4) # there are different options for what kind of progress bar should be displayed handlers("txtprogressbar") #this is the default -x <- explain(x_train, x_test, model, approach="independence", prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach="independence", phi0=p, n_batches = 4) handlers("progress") -x <- explain(x_train, x_test, model, approach="independence", prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach="independence", phi0=p, n_batches = 4) # you can edit the symbol used to draw completed progress in the progress bar (as well as other features) with handler_progress() handlers(handler_progress(complete = "#")) -x <- explain(x_train, x_test, model, approach="copula", prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach="copula", phi0=p, n_batches = 4) plan("sequential") handlers("progress") -x <- explain(x_train, x_test, model, approach=c(rep("ctree",4),"independence","independence"), prediction_zero=p, n_batches = 4) +x <- explain(x_train, x_test, model, approach=c(rep("ctree",4),"independence","independence"), phi0=p, n_batches = 4) diff --git a/inst/scripts/vilde/sketch_for_waterfall_plot.R b/inst/scripts/vilde/sketch_for_waterfall_plot.R index f8572fb55..e31971a1a 100644 --- a/inst/scripts/vilde/sketch_for_waterfall_plot.R +++ b/inst/scripts/vilde/sketch_for_waterfall_plot.R @@ -25,7 +25,7 @@ model <- xgboost( p <- mean(y_train) # Prepare the data for explanation -res <- explain_final(x_train,x_test,model,approach="independence",prediction_zero=p,n_batches = 4) +res <- explain_final(x_train,x_test,model,approach="independence",phi0=p,n_batches = 4) plot(res) i<- 1 # index for observation we want to plot diff --git a/inst/scripts/vilde/waterfall_plot.R b/inst/scripts/vilde/waterfall_plot.R index 531f1e4c1..5035d2528 100644 --- a/inst/scripts/vilde/waterfall_plot.R +++ b/inst/scripts/vilde/waterfall_plot.R @@ -19,7 +19,7 @@ model <- xgboost( verbose = FALSE ) p <- mean(y_train) -x <- explain_final(x_train,x_test,model,approach="independence",prediction_zero=p,n_batches = 4) +x <- explain_final(x_train,x_test,model,approach="independence",phi0=p,n_batches = 4) plot.shapr(x, plot_type = "bar", digits = 3, diff --git a/man/append_vS_list.Rd b/man/append_vS_list.Rd new file mode 100644 index 000000000..ceb1db088 --- /dev/null +++ b/man/append_vS_list.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute_vS.R +\name{append_vS_list} +\alias{append_vS_list} +\title{Appends the new vS_list to the prev vS_list} +\usage{ +append_vS_list(vS_list, internal) +} +\arguments{ +\item{vS_list}{List +Output from \code{\link[=compute_vS]{compute_vS()}}} + +\item{internal}{List. +Not used directly, but passed through from \code{\link[=explain]{explain()}}.} +} +\description{ +Appends the new vS_list to the prev vS_list +} +\keyword{internal} diff --git a/man/cli_iter.Rd b/man/cli_iter.Rd index 7d3481899..6426af8c9 100644 --- a/man/cli_iter.Rd +++ b/man/cli_iter.Rd @@ -26,6 +26,9 @@ E.g. \code{verbose = c("basic", "vS_details")} will display basic information + \item{internal}{List. 
Not used directly, but passed through from \code{\link[=explain]{explain()}}.} + +\item{iter}{Integer. +The iteration number. Only used internally.} } \description{ Printing messages in iterative procedure with cli diff --git a/man/default_doc_explain.Rd b/man/default_doc_explain.Rd index 9fcc39a30..af0bc7014 100644 --- a/man/default_doc_explain.Rd +++ b/man/default_doc_explain.Rd @@ -12,6 +12,9 @@ Not used directly, but passed through from \code{\link[=explain]{explain()}}.} \item{index_features}{Positive integer vector. Specifies the id_coalition to apply to the present method. \code{NULL} means all coalitions. Only used internally.} + +\item{iter}{Integer. +The iteration number. Only used internally.} } \description{ Exported documentation helper function. diff --git a/man/explain.Rd b/man/explain.Rd index b1e3e2454..f7fd8694e 100644 --- a/man/explain.Rd +++ b/man/explain.Rd @@ -9,7 +9,7 @@ explain( x_explain, x_train, approach, - prediction_zero, + phi0, iterative = NULL, max_n_coalitions = NULL, group = NULL, @@ -49,7 +49,7 @@ All elements should, either be \code{"gaussian"}, \code{"copula"}, \code{"empiri \code{"categorical"}, \code{"timeseries"}, \code{"independence"}, \code{"regression_separate"}, or \code{"regression_surrogate"}. The two regression approaches can not be combined with any other approach. See details for more information.} -\item{prediction_zero}{Numeric. +\item{phi0}{Numeric. The prediction value for unseen data, i.e. an estimate of the expected prediction without conditioning on any features. Typically we set this value equal to the mean of the response variable in our training data, but other choices @@ -330,7 +330,7 @@ Object of class \code{c("shapr", "list")}. Contains the following items: \describe{ \item{shapley_values_est}{data.table with the estimated Shapley values with explained observation in the rows and features along the columns. -The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{prediction_zero})} +The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{phi0})} \item{shapley_values_sd}{data.table with the standard deviation of the Shapley values reflecting the uncertainty. Note that this only reflects the coalition sampling part of the kernelSHAP procedure, and is therefore by definition 0 when all coalitions is used. 
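A minimal sketch of how the two return items described above are typically accessed; the `explanation` object is an assumption standing in for the output of any of the `explain()` calls in this patch:

```r
# Illustrative only: `explanation` is assumed to be the output of an
# explain() call such as the one in readme_example.R above.
explanation$shapley_values_est # estimated Shapley values, one row per explained
                               # observation; the `none` column equals phi0
explanation$shapley_values_sd  # standard deviations from coalition sampling;
                               # 0 by definition when all coalitions are used
```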
@@ -415,7 +415,7 @@ explain1 <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -425,7 +425,7 @@ explain2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -435,7 +435,7 @@ explain3 <- explain( x_explain = x_explain, x_train = x_train, approach = "copula", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -445,7 +445,7 @@ explain4 <- explain( x_explain = x_explain, x_train = x_train, approach = "ctree", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -456,7 +456,7 @@ explain5 <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -478,7 +478,7 @@ explain_groups <- explain( x_train = x_train, group = group_list, approach = "empirical", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) print(explain_groups$shapley_values_est) @@ -493,7 +493,7 @@ explain_separate_lm <- explain( model = model, x_explain = x_explain, x_train = x_train, - prediction_zero = p, + phi0 = p, approach = "regression_separate", regression.model = parsnip::linear_reg() ) @@ -502,7 +502,7 @@ explain_surrogate_lm <- explain( model = model, x_explain = x_explain, x_train = x_train, - prediction_zero = p, + phi0 = p, approach = "regression_surrogate", regression.model = parsnip::linear_reg() ) @@ -516,7 +516,7 @@ explain_iterative <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2, iterative = TRUE, iterative_args = list(initial_n_coalitions = 10) diff --git a/man/explain_forecast.Rd b/man/explain_forecast.Rd index db66aba7a..df2d3176c 100644 --- a/man/explain_forecast.Rd +++ b/man/explain_forecast.Rd @@ -14,7 +14,7 @@ explain_forecast( explain_xreg_lags = explain_y_lags, horizon, approach, - prediction_zero, + phi0, max_n_coalitions = NULL, iterative = NULL, iterative_args = list(), @@ -70,7 +70,7 @@ All elements should, either be \code{"gaussian"}, \code{"copula"}, \code{"empiri \code{"categorical"}, \code{"timeseries"}, \code{"independence"}, \code{"regression_separate"}, or \code{"regression_surrogate"}. The two regression approaches can not be combined with any other approach. See details for more information.} -\item{prediction_zero}{Numeric. +\item{phi0}{Numeric. The prediction value for unseen data, i.e. an estimate of the expected prediction without conditioning on any features. Typically we set this value equal to the mean of the response variable in our training data, but other choices @@ -262,7 +262,7 @@ Object of class \code{c("shapr", "list")}. Contains the following items: \describe{ \item{shapley_values_est}{data.table with the estimated Shapley values with explained observation in the rows and features along the columns. -The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{prediction_zero})} +The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{phi0})} \item{shapley_values_sd}{data.table with the standard deviation of the Shapley values reflecting the uncertainty. Note that this only reflects the coalition sampling part of the kernelSHAP procedure, and is therefore by definition 0 when all coalitions is used. 
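Note from the forecast hunks earlier in this patch that `explain_forecast()` takes `phi0` as a numeric vector with one element per horizon step (e.g. `p0_ar[1:3]` for `horizon = 3`). A minimal sketch under that assumption; `y` and `explain_idx` are hypothetical stand-ins for arguments not shown in this patch, included only to make the sketch complete:

```r
# Illustrative sketch: phi0 = p0_ar[1:h] pairs with horizon = h (here h = 3).
# `model_arima_temp`, `y`, `explain_idx`, and `p0_ar` are assumed to exist,
# as in the devel scripts above.
exp_h3 <- explain_forecast(
  model = model_arima_temp,
  y = y,
  explain_idx = explain_idx,
  explain_y_lags = 2,
  horizon = 3,
  approach = "empirical",
  phi0 = p0_ar[1:3], # one baseline prediction per forecast step
  group_lags = FALSE
)
```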
@@ -316,7 +316,7 @@ explain_forecast( explain_y_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE ) diff --git a/man/finalize_explanation_forecast.Rd b/man/finalize_explanation_forecast.Rd index 5ed40b0f4..6911de4a9 100644 --- a/man/finalize_explanation_forecast.Rd +++ b/man/finalize_explanation_forecast.Rd @@ -20,7 +20,7 @@ Object of class \code{c("shapr", "list")}. Contains the following items: \describe{ \item{shapley_values_est}{data.table with the estimated Shapley values with explained observation in the rows and features along the columns. -The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{prediction_zero})} +The column \code{none} is the prediction not devoted to any of the features (given by the argument \code{phi0})} \item{shapley_values_sd}{data.table with the standard deviation of the Shapley values reflecting the uncertainty. Note that this only reflects the coalition sampling part of the kernelSHAP procedure, and is therefore by definition 0 when all coalitions is used. @@ -105,7 +105,7 @@ explain1 <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -115,7 +115,7 @@ explain2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -125,7 +125,7 @@ explain3 <- explain( x_explain = x_explain, x_train = x_train, approach = "copula", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -135,7 +135,7 @@ explain4 <- explain( x_explain = x_explain, x_train = x_train, approach = "ctree", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -146,7 +146,7 @@ explain5 <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -168,7 +168,7 @@ explain_groups <- explain( x_train = x_train, group = group_list, approach = "empirical", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) print(explain_groups$shapley_values_est) @@ -183,7 +183,7 @@ explain_separate_lm <- explain( model = model, x_explain = x_explain, x_train = x_train, - prediction_zero = p, + phi0 = p, approach = "regression_separate", regression.model = parsnip::linear_reg() ) @@ -192,7 +192,7 @@ explain_surrogate_lm <- explain( model = model, x_explain = x_explain, x_train = x_train, - prediction_zero = p, + phi0 = p, approach = "regression_surrogate", regression.model = parsnip::linear_reg() ) @@ -206,7 +206,7 @@ explain_iterative <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2, iterative = TRUE, iterative_args = list(initial_n_coalitions = 10) diff --git a/man/plot.shapr.Rd b/man/plot.shapr.Rd index 77df20f5a..e2e856402 100644 --- a/man/plot.shapr.Rd +++ b/man/plot.shapr.Rd @@ -134,7 +134,7 @@ x <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) @@ -184,7 +184,7 @@ x <- explain( x_explain = x_explain, x_train = x_train, approach = "ctree", - prediction_zero = p, + phi0 = p, n_MC_samples = 1e2 ) diff --git a/man/plot_MSEv_eval_crit.Rd b/man/plot_MSEv_eval_crit.Rd index cd7792070..c7d569fee 100644 --- a/man/plot_MSEv_eval_crit.Rd +++ b/man/plot_MSEv_eval_crit.Rd @@ -90,7 +90,7 @@ model <- xgboost::xgboost( ) # Specifying the phi_0, i.e. 
the expected prediction without any features -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) # Independence approach explanation_independence <- explain( @@ -98,7 +98,7 @@ explanation_independence <- explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -108,7 +108,7 @@ explanation_gaussian_1e1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e1 ) @@ -118,7 +118,7 @@ explanation_gaussian_1e2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -128,7 +128,7 @@ explanation_ctree <- explain( x_explain = x_explain, x_train = x_train, approach = "ctree", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -138,7 +138,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("gaussian", "independence", "ctree"), - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) diff --git a/man/plot_SV_several_approaches.Rd b/man/plot_SV_several_approaches.Rd index cc0c046ce..2fcfd1111 100644 --- a/man/plot_SV_several_approaches.Rd +++ b/man/plot_SV_several_approaches.Rd @@ -114,7 +114,7 @@ model <- xgboost::xgboost( ) # Specifying the phi_0, i.e. the expected prediction without any features -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) # Independence approach explanation_independence <- explain( @@ -122,7 +122,7 @@ explanation_independence <- explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -132,7 +132,7 @@ explanation_empirical <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -142,7 +142,7 @@ explanation_gaussian_1e1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e1 ) @@ -152,7 +152,7 @@ explanation_gaussian_1e2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) @@ -162,7 +162,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("gaussian", "ctree", "empirical"), - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1e2 ) diff --git a/man/setup.Rd b/man/setup.Rd index 28b96bfb4..dec833a20 100644 --- a/man/setup.Rd +++ b/man/setup.Rd @@ -9,7 +9,7 @@ setup( x_explain, approach, paired_shap_sampling = TRUE, - prediction_zero, + phi0, output_size = 1, max_n_coalitions, group, @@ -59,7 +59,7 @@ If \code{TRUE} (default), paired versions of all sampled coalitions are also inc That is, if there are 5 features and e.g. coalitions (1,3,5) are sampled, then also coalition (2,4) is used for computing the Shapley values. This is done to reduce the variance of the Shapley value estimates.} -\item{prediction_zero}{Numeric. +\item{phi0}{Numeric. The prediction value for unseen data, i.e. an estimate of the expected prediction without conditioning on any features. 
Typically we set this value equal to the mean of the response variable in our training data, but other choices diff --git a/man/vaeac_plot_eval_crit.Rd b/man/vaeac_plot_eval_crit.Rd index c7086b853..fc8e2865b 100644 --- a/man/vaeac_plot_eval_crit.Rd +++ b/man/vaeac_plot_eval_crit.Rd @@ -79,7 +79,7 @@ explanation_paired <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1, # As we are only interested in the training of the vaeac vaeac.epochs = 10, # Should be higher in applications. vaeac.n_vaeacs_initialize = 1, @@ -93,7 +93,7 @@ explanation_regular <- explain( x_explain = x_explain, x_train = x_train, approach = approach, - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1, # As we are only interested in the training of the vaeac vaeac.epochs = 10, # Should be higher in applications. vaeac.width = 16, diff --git a/man/vaeac_plot_imputed_ggpairs.Rd b/man/vaeac_plot_imputed_ggpairs.Rd index f95abe5b9..6b4b1a75b 100644 --- a/man/vaeac_plot_imputed_ggpairs.Rd +++ b/man/vaeac_plot_imputed_ggpairs.Rd @@ -108,7 +108,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "vaeac", - prediction_zero = mean(y_train), + phi0 = mean(y_train), n_MC_samples = 1, vaeac.epochs = 10, vaeac.n_vaeacs_initialize = 1 diff --git a/python/README.md b/python/README.md index b010fec77..512ce3c39 100644 --- a/python/README.md +++ b/python/README.md @@ -51,7 +51,7 @@ df_shapley, pred_explain, internal, timing = explain( x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) ``` diff --git a/python/examples/devel_new_explain.py b/python/examples/devel_new_explain.py index 04088aae5..cfa4e5d52 100644 --- a/python/examples/devel_new_explain.py +++ b/python/examples/devel_new_explain.py @@ -28,7 +28,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'gaussian', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), max_n_coalitions=30 ) @@ -55,7 +55,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'gaussian', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), max_n_coalitions=100, iterative = False ) @@ -65,7 +65,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = ['gaussian', 'empirical',"gaussian","empirical","gaussian","gaussian","empirical"], - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), max_n_coalitions=100, iterative = True, verbose = ["basic", "progress"] @@ -76,7 +76,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'vaeac', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), max_n_coalitions=100, iterative = False, verbose = ["basic", "progress","vS_details","shapley"] @@ -88,7 +88,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), regression_model='parsnip::linear_reg()' ) diff --git a/python/examples/keras_classifier.py b/python/examples/keras_classifier.py index d7b31e70f..60138165f 100644 --- a/python/examples/keras_classifier.py +++ b/python/examples/keras_classifier.py @@ -30,7 +30,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) @@ -57,4 +57,4 @@ """ MSEv MSEv_sd 1 0.000312 0.00014 -""" \ No newline at end of file +""" diff --git 
a/python/examples/pytorch_custom.py b/python/examples/pytorch_custom.py index eac345337..d58fa5337 100644 --- a/python/examples/pytorch_custom.py +++ b/python/examples/pytorch_custom.py @@ -42,7 +42,7 @@ def forward(self, x): x_explain = dfx_test, approach = 'empirical', predict_model = lambda m, x: m(torch.from_numpy(x.values).float()).cpu().detach().numpy(), - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) """ @@ -65,4 +65,4 @@ def forward(self, x): """ MSEv MSEv_sd 1 27.046126 7.253933 -""" \ No newline at end of file +""" diff --git a/python/examples/regression_paradigm.py b/python/examples/regression_paradigm.py index c5daab4c4..bf53b77fe 100644 --- a/python/examples/regression_paradigm.py +++ b/python/examples/regression_paradigm.py @@ -27,7 +27,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='empirical', - prediction_zero=dfy_train.mean().item() + phi0=dfy_train.mean().item() ) # Explain the model using several separate regression methods @@ -37,7 +37,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model='parsnip::linear_reg()' @@ -49,7 +49,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model='parsnip::linear_reg()', @@ -64,7 +64,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model='parsnip::linear_reg()', @@ -79,7 +79,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model="parsnip::decision_tree(tree_depth = hardhat::tune(), engine = 'rpart', mode = 'regression')", @@ -93,7 +93,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model="parsnip::boost_tree(engine = 'xgboost', mode = 'regression')" @@ -105,7 +105,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_separate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model="parsnip::boost_tree(trees = hardhat::tune(), engine = 'xgboost', mode = 'regression')", @@ -120,7 +120,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_surrogate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model='parsnip::linear_reg()' @@ -132,7 +132,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_surrogate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model="parsnip::rand_forest(engine = 'ranger', mode = 'regression')" @@ -144,7 +144,7 @@ x_train=dfx_train, x_explain=dfx_test, approach='regression_surrogate', - prediction_zero=dfy_train.mean().item(), + phi0=dfy_train.mean().item(), verbose=2, n_batches=1, regression_model="""parsnip::rand_forest( @@ -191,4 +191,4 @@ 3 0.276002 0.957242 4 0.028560 0.049815 5 -0.242943 0.006815 -""" \ No newline at end of file +""" diff --git a/python/examples/sklearn_classifier.py b/python/examples/sklearn_classifier.py index 418f88016..14e7dc263 100644 --- 
a/python/examples/sklearn_classifier.py +++ b/python/examples/sklearn_classifier.py @@ -14,7 +14,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) diff --git a/python/examples/sklearn_regressor.py b/python/examples/sklearn_regressor.py index 6f7d59067..3c7e87ac0 100644 --- a/python/examples/sklearn_regressor.py +++ b/python/examples/sklearn_regressor.py @@ -14,7 +14,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item() + phi0 = dfy_train.mean().item() ) print(df_shapley) @@ -51,7 +51,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), group = group ) print(df_shapley_g) diff --git a/python/examples/xgboost_booster.py b/python/examples/xgboost_booster.py index b89044344..d000ea06b 100644 --- a/python/examples/xgboost_booster.py +++ b/python/examples/xgboost_booster.py @@ -14,7 +14,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) diff --git a/python/examples/xgboost_regressor.py b/python/examples/xgboost_regressor.py index 7183a2dd8..da9a36389 100644 --- a/python/examples/xgboost_regressor.py +++ b/python/examples/xgboost_regressor.py @@ -14,7 +14,7 @@ x_train = dfx_train, x_explain = dfx_test, approach = 'empirical', - prediction_zero = dfy_train.mean().item(), + phi0 = dfy_train.mean().item(), ) print(df_shapley) diff --git a/python/shaprpy/explain.py b/python/shaprpy/explain.py index b3404e30d..be2c517c1 100644 --- a/python/shaprpy/explain.py +++ b/python/shaprpy/explain.py @@ -24,7 +24,7 @@ def explain( x_explain: pd.DataFrame, x_train: pd.DataFrame, approach: str, - prediction_zero: float, + phi0: float, iterative: bool | None = None, max_n_coalitions: int | None = None, group: dict | None = None, @@ -64,7 +64,7 @@ def explain( The method(s) to estimate the conditional expectation. All elements should, either be `"gaussian"`, `"copula"`, `"empirical"`, `"ctree"`, `"categorical"`, `"timeseries"`, `"independence"`, `"regression_separate"`, or `"regression_surrogate"`. - prediction_zero: float + phi0: float The prediction value for unseen data, i.e. an estimate of the expected prediction without conditioning on any features. Typically we set this value equal to the mean of the response variable in our training data, but other choices such as the mean of the predictions in the training data are also reasonable. 
@@ -181,7 +181,7 @@ def explain( x_explain = py2r(x_explain), approach = StrVector(approach), paired_shap_sampling = paired_shap_sampling, - prediction_zero = prediction_zero, + phi0 = phi0, max_n_coalitions = maybe_null(max_n_coalitions), group = r_group, n_MC_samples = n_MC_samples, diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_gaussian_group_converges_tol.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_gaussian_group_converges_tol.rds index 7394eaf89..ed6d05c26 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_gaussian_group_converges_tol.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_gaussian_group_converges_tol.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_indep_conv_max_n_coalitions.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_indep_conv_max_n_coalitions.rds index 76286933e..0a0f7379e 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_indep_conv_max_n_coalitions.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_indep_conv_max_n_coalitions.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_object.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_object.rds index 96aab6ef9..0507b05cd 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_object.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_object.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_path.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_path.rds index 96aab6ef9..0507b05cd 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_path.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_cont_est_path.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_maxit.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_maxit.rds index de01585c9..752207bf6 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_maxit.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_maxit.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol.rds index b87810bfb..02f2a785c 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol_paired.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol_paired.rds index b87810bfb..02f2a785c 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol_paired.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_converges_tol_paired.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_reach_exact.rds b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_reach_exact.rds index 355c868db..4c5089cbb 100644 Binary files 
a/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_reach_exact.rds and b/tests/testthat/_snaps/adaptive-output/output_lm_numeric_independence_reach_exact.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_verbose_1.rds b/tests/testthat/_snaps/adaptive-output/output_verbose_1.rds index 0697297cc..876bd1a66 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_verbose_1.rds and b/tests/testthat/_snaps/adaptive-output/output_verbose_1.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3.rds b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3.rds index 6011838ef..fd125a547 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3.rds and b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4.rds b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4.rds index 434f78f8f..583867cac 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4.rds and b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4.rds differ diff --git a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4_5.rds b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4_5.rds index b7ebe066f..f0b55a8cf 100644 Binary files a/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4_5.rds and b/tests/testthat/_snaps/adaptive-output/output_verbose_1_3_4_5.rds differ diff --git a/tests/testthat/_snaps/adaptive-setup.md b/tests/testthat/_snaps/adaptive-setup.md index 1b331af40..326a03d44 100644 --- a/tests/testthat/_snaps/adaptive-setup.md +++ b/tests/testthat/_snaps/adaptive-setup.md @@ -3,7 +3,7 @@ Code n_batches_non_numeric_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_numeric_1)) Message Success with message: @@ -19,7 +19,7 @@ Code n_batches_non_numeric_2 <- TRUE explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_numeric_2)) Message Success with message: @@ -35,7 +35,7 @@ Code n_batches_non_integer <- 10.5 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_integer)) Message Success with message: @@ -51,7 +51,7 @@ Code n_batches_too_long <- c(1, 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_too_long)) Message Success with message: @@ -67,7 +67,7 @@ Code n_batches_is_NA <- as.numeric(NA) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = 
n_batches_is_NA)) Message Success with message: @@ -83,7 +83,7 @@ Code n_batches_non_positive <- 0 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_positive)) Message Success with message: diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_FALSE.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_FALSE.rds index 14ba674c5..2c86f3888 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_FALSE.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_FALSE.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_TRUE.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_TRUE.rds index fb1d734a5..7efc59830 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_TRUE.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_TRUE.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix.rds index 21cbcdf4f..2f53b0bb0 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_ctree.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_ctree.rds index 2cc8b74d1..317c795b6 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_ctree.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_ctree.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_empirical.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_empirical.rds index 2c48c83d5..7ff35f3b1 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_empirical.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_empirical.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_n_coal.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_n_coal.rds index a9e4afeb0..0d5093ecb 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_n_coal.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_caus_conf_mix_n_coal.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg.rds index 31c97c9e3..90492aadc 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg_iterative.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg_iterative.rds index 73b0db6af..0feb8fa18 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg_iterative.rds and 
b/tests/testthat/_snaps/asymmetric-causal-output/output_asym_cond_reg_iterative.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_asymmetric_conditional.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_asymmetric_conditional.rds index bd5a1f75e..b14832cea 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_asymmetric_conditional.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_asymmetric_conditional.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_cat_asym_causal_mixed_cat_ad.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_cat_asym_causal_mixed_cat_ad.rds index 10a5efcd2..a1f893d63 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_cat_asym_causal_mixed_cat_ad.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_cat_asym_causal_mixed_cat_ad.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_cat.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_cat.rds index a8bed6265..9382e91c4 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_cat.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_cat.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_ctree.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_ctree.rds index dd492da81..dfa242805 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_ctree.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_categorical_asym_causal_mixed_ctree.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_asym_cond_reg.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_asym_cond_reg.rds index a118b3ba1..43e42e350 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_asym_cond_reg.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_asym_cond_reg.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE.rds index 6bb20c0b4..acc0b0c51 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE_iterative.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE_iterative.rds index 0b966a372..dca60b54b 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE_iterative.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_TRUE_iterative.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed.rds index cbd3a9a98..013be31f6 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed.rds differ diff --git 
a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed_2.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed_2.rds index 9449892fe..728271154 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed_2.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_mixed_sym_caus_conf_mixed_2.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_FALSE.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_FALSE.rds index 0b467aaa4..eb3acdebb 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_FALSE.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_FALSE.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE.rds index 09d37d403..254104338 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE_group.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE_group.rds index 17390ddec..ed45a6d4f 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE_group.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_TRUE_group.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix.rds index 710b81499..6af3b48ce 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group.rds index 7310786e1..bf5adcd67 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group_iterative.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group_iterative.rds index ab8aad2a1..f35ab2407 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group_iterative.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_sym_caus_conf_mix_group_iterative.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_conditional.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_conditional.rds index 80061644b..17d3b04e1 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_conditional.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_conditional.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_gaussian.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_gaussian.rds index 8bff4fb6e..39833ddba 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_gaussian.rds and 
b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_gaussian.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_independence.rds b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_independence.rds index 0fe9f4ca4..4f2cb1c49 100644 Binary files a/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_independence.rds and b/tests/testthat/_snaps/asymmetric-causal-output/output_symmetric_marginal_independence.rds differ diff --git a/tests/testthat/_snaps/asymmetric-causal-setup.md b/tests/testthat/_snaps/asymmetric-causal-setup.md index c5468f86b..984aa7b47 100644 --- a/tests/testthat/_snaps/asymmetric-causal-setup.md +++ b/tests/testthat/_snaps/asymmetric-causal-setup.md @@ -2,9 +2,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(1:6), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + 1:6), confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -13,9 +12,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(1:5, 5), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + 1:5, 5), confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -24,9 +22,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(2:5, 5), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + 2:5, 5), confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -35,9 +32,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(1:2, 4), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + 1:2, 4), confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. 
@@ -46,10 +42,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list("Solar.R", "Wind", "Temp", "Month", "Day", - "Invalid feature name"), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + "Solar.R", "Wind", "Temp", "Month", "Day", "Invalid feature name"), + confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `convert_feature_name_to_idx()`: ! `causal_ordering` contains feature names (`Invalid feature name`) that are not in the data (`Solar.R`, `Wind`, `Temp`, `Month`, `Day`). @@ -58,9 +53,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list("Solar.R", "Wind", "Temp", "Month", "Day", "Day"), - confounding = NULL, approach = "gaussian", iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + "Solar.R", "Wind", "Temp", "Month", "Day", "Day"), confounding = NULL, + approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -69,9 +64,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list("Solar.R", "Wind", "Temp", "Day", "Day"), confounding = NULL, - approach = "gaussian", iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + "Solar.R", "Wind", "Temp", "Day", "Day"), confounding = NULL, approach = "gaussian", + iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -80,9 +75,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list("Solar.R", "Wind"), confounding = NULL, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + "Solar.R", "Wind"), confounding = NULL, approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all feature names or indices exactly once. @@ -91,10 +85,10 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(c("Solar.R", "Wind", "Temp", "Month"), "Day"), - confounding = NULL, approach = "gaussian", group = list(A = c("Solar.R", - "Wind"), B = "Temp", C = c("Month", "Day")), iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + c("Solar.R", "Wind", "Temp", "Month"), "Day"), confounding = NULL, + approach = "gaussian", group = list(A = c("Solar.R", "Wind"), B = "Temp", C = c( + "Month", "Day")), iterative = FALSE) Condition Error in `convert_feature_name_to_idx()`: ! `causal_ordering` contains group names (`Solar.R`, `Wind`, `Temp`, `Month`, `Day`) that are not in the data (`A`, `B`, `C`). 
@@ -103,10 +97,10 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(c("A", "C"), "Wrong name"), confounding = NULL, - approach = "gaussian", group = list(A = c("Solar.R", "Wind"), B = "Temp", C = c( - "Month", "Day")), iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + c("A", "C"), "Wrong name"), confounding = NULL, approach = "gaussian", + group = list(A = c("Solar.R", "Wind"), B = "Temp", C = c("Month", "Day")), + iterative = FALSE) Condition Error in `convert_feature_name_to_idx()`: ! `causal_ordering` contains group names (`Wrong name`) that are not in the data (`A`, `B`, `C`). @@ -115,10 +109,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = TRUE, - causal_ordering = list(c("A"), "B"), confounding = NULL, approach = "gaussian", - group = list(A = c("Solar.R", "Wind"), B = "Temp", C = c("Month", "Day")), - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = TRUE, causal_ordering = list( + c("A"), "B"), confounding = NULL, approach = "gaussian", group = list(A = c( + "Solar.R", "Wind"), B = "Temp", C = c("Month", "Day")), iterative = FALSE) Condition Error in `check_and_set_causal_ordering()`: ! `causal_ordering` is incomplete/incorrect. It must contain all group names or indices exactly once. @@ -127,9 +120,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = FALSE, - causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, approach = c( - "gaussian", "independence", "empirical", "gaussian"), iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = FALSE, causal_ordering = list( + 1:2, 3:4, 5), confounding = TRUE, approach = c("gaussian", "independence", + "empirical", "gaussian"), iterative = FALSE) Condition Error in `check_and_set_causal_sampling()`: ! Causal Shapley values is not applicable for combined approaches. @@ -138,7 +131,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = c(FALSE, FALSE), + x_train = x_train_numeric, phi0 = p0, asymmetric = c(FALSE, FALSE), causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, approach = "gaussian", iterative = FALSE) Condition @@ -149,7 +142,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = "Must be a single logical", + x_train = x_train_numeric, phi0 = p0, asymmetric = "Must be a single logical", causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, approach = "gaussian", iterative = FALSE) Condition @@ -160,9 +153,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = 1L, - causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, approach = "gaussian", - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = 1L, causal_ordering = list( + 1:2, 3:4, 5), confounding = TRUE, approach = "gaussian", iterative = FALSE) Condition Error in `get_parameters()`: ! `asymmetric` must be a single logical. 
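For orientation, the checks exercised in the hunks above require `causal_ordering` to cover every feature (or group) name exactly once and `asymmetric` to be a single logical. A minimal sketch of a call that passes these checks, assuming the shapr package context and the same test fixtures (`model_lm_numeric`, `x_explain_numeric`, `x_train_numeric`, `p0`) as in the snapshots:

    # Hypothetical valid call; fixtures are assumed to match the test setup above.
    library(shapr)
    explain(
      model = model_lm_numeric,
      x_explain = x_explain_numeric,
      x_train = x_train_numeric,
      approach = "gaussian",
      phi0 = p0,  # renamed from `prediction_zero`
      asymmetric = TRUE,  # must be a single logical
      causal_ordering = list("Solar.R", c("Wind", "Temp"), c("Month", "Day")),
      confounding = NULL
    )

Here each of the five features appears exactly once across the ordering, which is what `check_and_set_causal_ordering()` enforces.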
@@ -171,9 +163,9 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = FALSE, - causal_ordering = list(1:2, 3:4, 5), confounding = c("A", "B", "C"), - approach = "gaussian", iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, asymmetric = FALSE, causal_ordering = list( + 1:2, 3:4, 5), confounding = c("A", "B", "C"), approach = "gaussian", + iterative = FALSE) Condition Error in `get_parameters()`: ! `confounding` must be a logical (vector). @@ -182,8 +174,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, asymmetric = FALSE, - causal_ordering = list(1:2, 3:4, 5), confounding = c(TRUE, FALSE), approach = "gaussian", + x_train = x_train_numeric, phi0 = p0, asymmetric = FALSE, causal_ordering = list( + 1:2, 3:4, 5), confounding = c(TRUE, FALSE), approach = "gaussian", iterative = FALSE) Condition Error in `check_and_set_confounding()`: diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_ar_numeric.rds b/tests/testthat/_snaps/forecast-output/forecast_output_ar_numeric.rds index 2fcf7aa9b..f7ed98834 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_ar_numeric.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_ar_numeric.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric.rds b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric.rds index 6b5c6c5da..3b42ce6c3 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative.rds b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative.rds index 26be2ea86..6a691f19b 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative_groups.rds b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative_groups.rds index e7ed70acb..0061a2ab8 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative_groups.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_iterative_groups.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_lags.rds b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_lags.rds index 51b75e049..c51ff8268 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_lags.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_lags.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_xreg.rds b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_xreg.rds index e32fdafc5..08001992a 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_xreg.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_arima_numeric_no_xreg.rds differ diff --git a/tests/testthat/_snaps/forecast-output/forecast_output_forecast_ARIMA_group_numeric.rds 
b/tests/testthat/_snaps/forecast-output/forecast_output_forecast_ARIMA_group_numeric.rds index e5db013be..1ab8c99a1 100644 Binary files a/tests/testthat/_snaps/forecast-output/forecast_output_forecast_ARIMA_group_numeric.rds and b/tests/testthat/_snaps/forecast-output/forecast_output_forecast_ARIMA_group_numeric.rds differ diff --git a/tests/testthat/_snaps/forecast-setup.md b/tests/testthat/_snaps/forecast-setup.md index 6f03d5298..fdf2616a9 100644 --- a/tests/testthat/_snaps/forecast-setup.md +++ b/tests/testthat/_snaps/forecast-setup.md @@ -6,7 +6,7 @@ explain_forecast(testing = TRUE, model = model_custom_arima_temp, y = data_arima[ 1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149: 150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Message Note: You passed a model to explain() which is not natively supported, and did not supply a 'get_model_specs' function to explain(). Consistency checks between model and data is therefore disabled. @@ -27,7 +27,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = y_wrong_format, xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `y` has 2 columns (Temp,Wind). @@ -41,7 +41,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = xreg_wrong_format, train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `xreg` has 2 columns (Temp,Wind). @@ -56,7 +56,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = xreg_no_column_names, train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `xreg` misses column names. @@ -66,22 +66,22 @@ Code explain_forecast(testing = TRUE, y = data_arima[1:150, "Temp"], xreg = data_arima[ , "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, - explain_xreg_lags = 2, horizon = 3, approach = "independence", prediction_zero = p0_ar) + explain_xreg_lags = 2, horizon = 3, approach = "independence", phi0 = p0_ar) Condition Error in `explain_forecast()`: ! argument "model" is missing, with no default -# erroneous input: `prediction_zero` +# erroneous input: `phi0` Code p0_wrong_length <- p0_ar[1:2] explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_wrong_length) + phi0 = p0_wrong_length) Condition Error in `get_parameters()`: - ! `prediction_zero` (77.8823529411765, 77.8823529411765) must be numeric and match the output size of the model (3). + ! `phi0` (77.8823529411765, 77.8823529411765) must be numeric and match the output size of the model (3). 
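The hunk above also documents the contract on the renamed argument in the forecast setting: `phi0` must be numeric with one baseline value per forecast-horizon step. A sketch under the same assumed fixtures (`model_arima_temp`, `data_arima`):

    # Hypothetical; one baseline value per horizon step (horizon = 3 here).
    p0_ar <- rep(mean(data_arima$Temp[1:150]), 3)
    explain_forecast(
      model = model_arima_temp,
      y = data_arima[1:150, "Temp"],
      xreg = data_arima[, "Wind"],
      train_idx = 2:148,
      explain_idx = 149:150,
      explain_y_lags = 2,
      explain_xreg_lags = 2,
      horizon = 3,
      approach = "independence",
      phi0 = p0_ar  # renamed from `prediction_zero`; length must match `horizon`
    )

Passing a `phi0` of length 2 with `horizon = 3`, as in the snapshot, is what triggers the "must be numeric and match the output size of the model (3)" error.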
# erroneous input: `max_n_coalitions` @@ -93,8 +93,8 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = explain_y_lags, explain_xreg_lags = explain_xreg_lags, - horizon = horizon, approach = "independence", prediction_zero = p0_ar, - max_n_coalitions = n_coalitions, group_lags = FALSE) + horizon = horizon, approach = "independence", phi0 = p0_ar, max_n_coalitions = n_coalitions, + group_lags = FALSE) Message Note: Feature names extracted from the model contains NA. Consistency checks between model and data is therefore disabled. @@ -117,8 +117,8 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = explain_y_lags, explain_xreg_lags = explain_xreg_lags, - horizon = horizon, approach = "independence", prediction_zero = p0_ar, - max_n_coalitions = n_coalitions, group_lags = TRUE) + horizon = horizon, approach = "independence", phi0 = p0_ar, max_n_coalitions = n_coalitions, + group_lags = TRUE) Message Note: Feature names extracted from the model contains NA. Consistency checks between model and data is therefore disabled. @@ -153,7 +153,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = train_idx_too_short, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `train_idx` must be a vector of positive finite integers and length > 1. @@ -165,7 +165,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = train_idx_not_integer, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `train_idx` must be a vector of positive finite integers and length > 1. @@ -177,7 +177,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = train_idx_out_of_range, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! The train (`train_idx`) and explain (`explain_idx`) indices must fit in the lagged data. @@ -190,7 +190,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = explain_idx_not_integer, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Condition Error in `get_parameters()`: ! `explain_idx` must be a vector of positive finite integers. @@ -202,7 +202,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = explain_idx_out_of_range, explain_y_lags = 2, explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar) + phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! 
The train (`train_idx`) and explain (`explain_idx`) indices must fit in the lagged data. @@ -215,7 +215,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = explain_y_lags_negative, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `explain_y_lags` must be a vector of positive finite integers. @@ -227,7 +227,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = explain_y_lags_not_integer, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `explain_y_lags` must be a vector of positive finite integers. @@ -239,7 +239,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = explain_y_lags_more_than_one, explain_xreg_lags = 2, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `y` has 1 columns (Temp). @@ -252,7 +252,7 @@ explain_y_lags_zero <- 0 explain_forecast(testing = TRUE, model = model_arima_temp_noxreg, y = data_arima[ 1:150, "Temp"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 0, - horizon = 3, approach = "independence", prediction_zero = p0_ar) + horizon = 3, approach = "independence", phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `explain_y_lags=0` is not allowed for models without exogeneous variables @@ -264,7 +264,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = explain_xreg_lags_negative, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `explain_xreg_lags` must be a vector of positive finite integers. @@ -276,7 +276,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = explain_xreg_lags_not_integer, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `explain_xreg_lags` must be a vector of positive finite integers. @@ -288,7 +288,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = explain_x_lags_wrong_length, horizon = 3, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_data_forecast()`: ! `xreg` has 1 columns (Wind). 
@@ -302,7 +302,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = horizon_negative, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `horizon` must be a vector (or scalar) of positive integers. @@ -314,7 +314,7 @@ explain_forecast(testing = TRUE, model = model_arima_temp, y = data_arima[1:150, "Temp"], xreg = data_arima[, "Wind"], train_idx = 2:148, explain_idx = 149:150, explain_y_lags = 2, explain_xreg_lags = 2, horizon = horizon_not_integer, - approach = "independence", prediction_zero = p0_ar) + approach = "independence", phi0 = p0_ar) Condition Error in `get_parameters()`: ! `horizon` must be a vector (or scalar) of positive integers. diff --git a/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_separate.rds index 51b4a330b..d5bc1b7ef 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_surrogate.rds b/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_surrogate.rds index 9cc5a5799..5287a9c1f 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_surrogate.rds and b/tests/testthat/_snaps/regression-output/output_lm_categorical_lm_surrogate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate.rds index b6dd532ee..374301c6f 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate_parallel.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate_parallel.rds index 67c49f255..f0b5651a3 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate_parallel.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_separate_parallel.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_surrogate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_surrogate.rds index 791246703..f3e2341ec 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_surrogate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_decision_tree_cv_surrogate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_separate.rds index e5910370b..afd7c3d30 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_surrogate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_surrogate.rds index 95c24fb6a..33203d03a 100644 
Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_surrogate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_lm_surrogate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_splines_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_splines_separate.rds index 6a0b062f9..77bedf0ad 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_splines_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_splines_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_separate.rds index 781f0619f..6582ba9bf 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_surrogate.rds b/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_surrogate.rds index 430ec826e..194dea761 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_surrogate.rds and b/tests/testthat/_snaps/regression-output/output_lm_mixed_xgboost_surrogate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate.rds index 18bd04305..dc65779ef 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_iterative.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_iterative.rds index 0770d63b6..c2ff52301 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_iterative.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_iterative.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_n_comb.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_n_comb.rds index e01c8e14d..e15da382d 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_n_comb.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_separate_n_comb.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate.rds index af6063e9b..0e18b9b3e 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_iterative.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_iterative.rds index af6063e9b..0e18b9b3e 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_iterative.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_iterative.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_n_comb.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_n_comb.rds index 4496f2af7..653929021 100644 Binary files 
a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_n_comb.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_n_comb.rds differ diff --git a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_reg_surr_n_comb.rds b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_reg_surr_n_comb.rds index 2ec897d61..114fa6707 100644 Binary files a/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_reg_surr_n_comb.rds and b/tests/testthat/_snaps/regression-output/output_lm_numeric_lm_surrogate_reg_surr_n_comb.rds differ diff --git a/tests/testthat/_snaps/regression-setup.md b/tests/testthat/_snaps/regression-setup.md index e63719891..754236c2e 100644 --- a/tests/testthat/_snaps/regression-setup.md +++ b/tests/testthat/_snaps/regression-setup.md @@ -2,9 +2,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = c( - "regression_surrogate", "gaussian", "independence", "empirical"), - iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, approach = c("regression_surrogate", + "gaussian", "independence", "empirical"), iterative = FALSE) Condition Error in `check_approach()`: ! The `regression_separate` and `regression_surrogate` approaches cannot be combined with other approaches. @@ -13,8 +12,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = c( - "regression_separate", "gaussian", "independence", "empirical"), iterative = FALSE) + x_train = x_train_numeric, phi0 = p0, approach = c("regression_separate", + "gaussian", "independence", "empirical"), iterative = FALSE) Condition Error in `check_approach()`: ! The `regression_separate` and `regression_surrogate` approaches cannot be combined with other approaches. 
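As the two rejected calls above show, the regression approaches must stand alone rather than appear in a per-feature `approach` vector. A minimal sketch of the accepted single-approach form, with the same assumed fixtures; `parsnip::linear_reg()` is shown as an explicit stand-in for the default regression model:

    # Hypothetical; regression approaches cannot be mixed with other approaches.
    explain(
      model = model_lm_numeric,
      x_explain = x_explain_numeric,
      x_train = x_train_numeric,
      approach = "regression_separate",  # used alone, not combined
      phi0 = p0,
      regression.model = parsnip::linear_reg()
    )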
@@ -23,7 +22,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = NULL) Message Success with message: @@ -38,7 +37,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = lm) Message Success with message: @@ -53,7 +52,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression")) Message @@ -69,7 +68,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(num_terms = c(1, 2, 3))) @@ -86,7 +85,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3), num_terms = c(1, 2, 3))) @@ -103,7 +102,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = 2, engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3))) @@ -120,7 +119,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_surrogate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_surrogate", regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), iterative = FALSE) Message Success with message: @@ -144,7 +143,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = 2, engine = "rpart", mode = "regression"), regression.tune_values = as.matrix(data.frame( tree_depth = c(1, 2, 3)))) @@ -161,7 +160,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = function(x) c(1, 2, 3)) Message @@ 
-177,7 +176,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = function(x) data.frame( wrong_name = c(1, 2, 3))) @@ -194,7 +193,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), regression.vfold_cv_para = 10) @@ -211,7 +210,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), regression.vfold_cv_para = list(10)) @@ -228,7 +227,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), regression.vfold_cv_para = list(hey = 10)) @@ -245,7 +244,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_separate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_separate", regression.recipe_func = 3) Message Success with message: @@ -260,7 +259,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_surrogate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_surrogate", regression.recipe_func = function(x) { return(2) }, iterative = FALSE) @@ -286,7 +285,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_surrogate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_surrogate", regression.surrogate_n_comb = 2^ncol(x_explain_numeric) - 1, iterative = FALSE) Message Success with message: @@ -310,7 +309,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "regression_surrogate", + x_train = x_train_numeric, phi0 = p0, approach = "regression_surrogate", regression.surrogate_n_comb = 0, iterative = FALSE) Message Success with message: diff --git a/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_1.rds b/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_1.rds index a82a9d8e5..5485560c0 100644 Binary files a/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_1.rds and 
b/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_1.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_2.rds b/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_2.rds index a82a9d8e5..5485560c0 100644 Binary files a/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_2.rds and b/tests/testthat/_snaps/regular-output/output_custom_lm_numeric_independence_2.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_custom_xgboost_mixed_dummy_ctree.rds b/tests/testthat/_snaps/regular-output/output_custom_xgboost_mixed_dummy_ctree.rds index 9abb24e1c..2f103a3d3 100644 Binary files a/tests/testthat/_snaps/regular-output/output_custom_xgboost_mixed_dummy_ctree.rds and b/tests/testthat/_snaps/regular-output/output_custom_xgboost_mixed_dummy_ctree.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_categorical_ctree.rds b/tests/testthat/_snaps/regular-output/output_lm_categorical_ctree.rds index 59f5d23ae..59124c1b9 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_categorical_ctree.rds and b/tests/testthat/_snaps/regular-output/output_lm_categorical_ctree.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_categorical_independence.rds b/tests/testthat/_snaps/regular-output/output_lm_categorical_independence.rds index 8516466f5..4ea1ead8f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_categorical_independence.rds and b/tests/testthat/_snaps/regular-output/output_lm_categorical_independence.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_categorical_method.rds b/tests/testthat/_snaps/regular-output/output_lm_categorical_method.rds index f3f171306..cde306c3f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_categorical_method.rds and b/tests/testthat/_snaps/regular-output/output_lm_categorical_method.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_categorical_vaeac.rds b/tests/testthat/_snaps/regular-output/output_lm_categorical_vaeac.rds index 0717162de..95aaddf73 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_categorical_vaeac.rds and b/tests/testthat/_snaps/regular-output/output_lm_categorical_vaeac.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_mixed_comb.rds b/tests/testthat/_snaps/regular-output/output_lm_mixed_comb.rds index 931ce50d9..3e7b804e0 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_mixed_comb.rds and b/tests/testthat/_snaps/regular-output/output_lm_mixed_comb.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_mixed_ctree.rds b/tests/testthat/_snaps/regular-output/output_lm_mixed_ctree.rds index e4b60be42..5f3070172 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_mixed_ctree.rds and b/tests/testthat/_snaps/regular-output/output_lm_mixed_ctree.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_mixed_independence.rds b/tests/testthat/_snaps/regular-output/output_lm_mixed_independence.rds index 97a0d238f..b18091acc 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_mixed_independence.rds and b/tests/testthat/_snaps/regular-output/output_lm_mixed_independence.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_mixed_vaeac.rds b/tests/testthat/_snaps/regular-output/output_lm_mixed_vaeac.rds index a1eb54259..846bbb00f 100644 Binary files 
a/tests/testthat/_snaps/regular-output/output_lm_mixed_vaeac.rds and b/tests/testthat/_snaps/regular-output/output_lm_mixed_vaeac.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb1.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb1.rds index 12b4e5dd7..fc58e7f79 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb1.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb1.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb2.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb2.rds index 8d9abe50f..382deb5fe 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb2.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb2.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb3.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb3.rds index dd9b28d3d..c256b378b 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_comb3.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_comb3.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_copula.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_copula.rds index d6928debf..30842c349 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_copula.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_copula.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree.rds index 69f7a61a9..accea429f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree_parallelized.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree_parallelized.rds index 69f7a61a9..accea429f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree_parallelized.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_ctree_parallelized.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical.rds index 2316a3acc..aaf9e052f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_each.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_each.rds index 367922cc7..0b11904ba 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_each.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_each.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_full.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_full.rds index a4691aad1..57fed3dad 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_full.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_AICc_full.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_independence.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_independence.rds index 
a005d77b7..c21420f31 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_independence.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_independence.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_n_coalitions.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_n_coalitions.rds index e9bd604f4..4b240bd44 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_n_coalitions.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_n_coalitions.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_progress.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_progress.rds index 2316a3acc..aaf9e052f 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_progress.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_empirical_progress.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_gaussian.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_gaussian.rds index e2fba82a2..9a197ced8 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_gaussian.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_gaussian.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence.rds index dfca2b741..b23b244eb 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_MSEv_Shapley_weights.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_MSEv_Shapley_weights.rds index b4b1b1231..379359169 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_MSEv_Shapley_weights.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_MSEv_Shapley_weights.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_keep_samp_for_vS.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_keep_samp_for_vS.rds index 8eed1a51f..f9f4575a1 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_keep_samp_for_vS.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_independence_keep_samp_for_vS.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_interaction.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_interaction.rds index 507e17dfa..e7a21d736 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_interaction.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_interaction.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_numeric_vaeac.rds b/tests/testthat/_snaps/regular-output/output_lm_numeric_vaeac.rds index 5b5ac43ce..edea23233 100644 Binary files a/tests/testthat/_snaps/regular-output/output_lm_numeric_vaeac.rds and b/tests/testthat/_snaps/regular-output/output_lm_numeric_vaeac.rds differ diff --git a/tests/testthat/_snaps/regular-output/output_lm_timeseries_method.rds b/tests/testthat/_snaps/regular-output/output_lm_timeseries_method.rds index 6ba6c2942..5aa38d8f8 100644 Binary files 
a/tests/testthat/_snaps/regular-output/output_lm_timeseries_method.rds and b/tests/testthat/_snaps/regular-output/output_lm_timeseries_method.rds differ diff --git a/tests/testthat/_snaps/regular-setup.md b/tests/testthat/_snaps/regular-setup.md index e689c5b8c..12ca26adf 100644 --- a/tests/testthat/_snaps/regular-setup.md +++ b/tests/testthat/_snaps/regular-setup.md @@ -4,7 +4,7 @@ model_custom_lm_mixed <- model_lm_mixed class(model_custom_lm_mixed) <- "whatever" explain(testing = TRUE, model = model_custom_lm_mixed, x_train = x_train_mixed, - x_explain = x_explain_mixed, approach = "independence", prediction_zero = p0) + x_explain = x_explain_mixed, approach = "independence", phi0 = p0) Message Note: You passed a model to explain() which is not natively supported, and did not supply a 'get_model_specs' function to explain(). Consistency checks between model and data is therefore disabled. @@ -22,7 +22,7 @@ Code explain(testing = TRUE, model = model_custom_lm_mixed, x_train = x_train_mixed, - x_explain = x_explain_mixed, approach = "independence", prediction_zero = p0, + x_explain = x_explain_mixed, approach = "independence", phi0 = p0, predict_model = custom_predict_model, get_model_specs = NA) Message Note: You passed a model to explain() which is not natively supported, and did not supply a 'get_model_specs' function to explain(). @@ -55,7 +55,7 @@ feature_specs <- list(labels = NA, classes = NA, factor_levels = NA) }) explain(testing = TRUE, model = model_custom_lm_mixed, x_train = x_train_mixed, - x_explain = x_explain_mixed, approach = "independence", prediction_zero = p0, + x_explain = x_explain_mixed, approach = "independence", phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_get_model_specs_no_lab) Message Note: Feature names extracted from the model contains NA. @@ -88,7 +88,7 @@ feature_specs <- list(labels = labels(x$terms), classes = NA, factor_levels = NA) }) explain(testing = TRUE, model = model_custom_lm_mixed, x_train = x_train_mixed, - x_explain = x_explain_mixed, approach = "independence", prediction_zero = p0, + x_explain = x_explain_mixed, approach = "independence", phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_gms_no_classes) Message Note: Feature classes extracted from the model contains NA. @@ -122,7 +122,7 @@ "dataClasses")[-1], factor_levels = NA) }) explain(testing = TRUE, model = model_custom_lm_mixed, x_train = x_train_mixed, - x_explain = x_explain_mixed, approach = "independence", prediction_zero = p0, + x_explain = x_explain_mixed, approach = "independence", phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_gms_no_factor_levels) Message Note: Feature factor levels extracted from the model contains NA. @@ -153,7 +153,7 @@ Code x_train_wrong_format <- c(a = 1, b = 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_wrong_format, approach = "independence", prediction_zero = p0) + x_train = x_train_wrong_format, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_train should be a matrix or a data.frame/data.table. @@ -163,7 +163,7 @@ Code x_explain_wrong_format <- c(a = 1, b = 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_wrong_format, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0) + x_train = x_train_numeric, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_explain should be a matrix or a data.frame/data.table. 
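The two get_data() conditions above are purely structural checks on the input data. A minimal sketch of what fails and what passes them (objects illustrative, not from the test fixtures):

# A bare named vector is rejected; the data must be rectangular with column names.
x_train_wrong_format <- c(a = 1, b = 2)             # fails: not a matrix/data.frame/data.table
x_train_ok <- data.frame(a = c(1, 2), b = c(3, 4))  # passes: data.frame with column names
x_train_ok_mat <- matrix(1:4, ncol = 2, dimnames = list(NULL, c("a", "b")))  # also passes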
@@ -174,7 +174,7 @@ x_train_wrong_format <- c(a = 1, b = 2) x_explain_wrong_format <- c(a = 3, b = 4) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_wrong_format, - x_train = x_train_wrong_format, approach = "independence", prediction_zero = p0) + x_train = x_train_wrong_format, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_train should be a matrix or a data.frame/data.table. @@ -186,8 +186,7 @@ x_train_no_column_names <- as.data.frame(x_train_numeric) names(x_train_no_column_names) <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_no_column_names, approach = "independence", - prediction_zero = p0) + x_train = x_train_no_column_names, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_train misses column names. @@ -198,7 +197,7 @@ x_explain_no_column_names <- as.data.frame(x_explain_numeric) names(x_explain_no_column_names) <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_no_column_names, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0) + x_train = x_train_numeric, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_explain misses column names. @@ -210,8 +209,7 @@ x_explain_no_column_names <- as.data.frame(x_explain_numeric) names(x_explain_no_column_names) <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_no_column_names, - x_train = x_train_no_column_names, approach = "independence", - prediction_zero = p0) + x_train = x_train_no_column_names, approach = "independence", phi0 = p0) Condition Error in `get_data()`: ! x_explain misses column names. @@ -220,7 +218,7 @@ Code explain(testing = TRUE, x_explain = x_explain_numeric, x_train = x_train_numeric, - approach = "independence", prediction_zero = p0) + approach = "independence", phi0 = p0) Condition Error in `explain()`: ! argument "model" is missing, with no default @@ -230,8 +228,7 @@ Code approach_non_character <- 1 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = approach_non_character, - prediction_zero = p0) + x_train = x_train_numeric, approach = approach_non_character, phi0 = p0) Condition Error in `check_approach()`: ! `approach` must be one of the following: 'categorical', 'copula', 'ctree', 'empirical', 'gaussian', 'independence', 'regression_separate', 'regression_surrogate', 'timeseries', 'vaeac'. @@ -242,8 +239,7 @@ Code approach_incorrect_length <- c("empirical", "gaussian") explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = approach_incorrect_length, - prediction_zero = p0) + x_train = x_train_numeric, approach = approach_incorrect_length, phi0 = p0) Condition Error in `check_approach()`: ! `approach` must be one of the following: 'categorical', 'copula', 'ctree', 'empirical', 'gaussian', 'independence', 'regression_separate', 'regression_surrogate', 'timeseries', 'vaeac'. @@ -254,59 +250,58 @@ Code approach_incorrect_character <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = approach_incorrect_character, - prediction_zero = p0) + x_train = x_train_numeric, approach = approach_incorrect_character, phi0 = p0) Condition Error in `check_approach()`: ! 
`approach` must be one of the following: 'categorical', 'copula', 'ctree', 'empirical', 'gaussian', 'independence', 'regression_separate', 'regression_surrogate', 'timeseries', 'vaeac'. These can also be combined (except 'regression_surrogate' and 'regression_separate') by passing a vector of length one less than the number of features (4). -# erroneous input: `prediction_zero` +# erroneous input: `phi0` Code p0_non_numeric_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0_non_numeric_1) + x_train = x_train_numeric, approach = "independence", phi0 = p0_non_numeric_1) Condition Error in `get_parameters()`: - ! `prediction_zero` (bla) must be numeric and match the output size of the model (1). + ! `phi0` (bla) must be numeric and match the output size of the model (1). --- Code p0_non_numeric_2 <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0_non_numeric_2) + x_train = x_train_numeric, approach = "independence", phi0 = p0_non_numeric_2) Condition Error in `get_parameters()`: - ! `prediction_zero` () must be numeric and match the output size of the model (1). + ! `phi0` () must be numeric and match the output size of the model (1). --- Code p0_too_long <- c(1, 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0_too_long) + x_train = x_train_numeric, approach = "independence", phi0 = p0_too_long) Condition Error in `get_parameters()`: - ! `prediction_zero` (1, 2) must be numeric and match the output size of the model (1). + ! `phi0` (1, 2) must be numeric and match the output size of the model (1). --- Code p0_is_NA <- as.numeric(NA) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0_is_NA) + x_train = x_train_numeric, approach = "independence", phi0 = p0_is_NA) Condition Error in `get_parameters()`: - ! `prediction_zero` (NA) must be numeric and match the output size of the model (1). + ! `phi0` (NA) must be numeric and match the output size of the model (1). 
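For reference, a minimal sketch of a call satisfying the `phi0` checks snapshotted above, i.e. a single numeric matching the model's output size; object names follow the test fixtures and the snippet is illustrative, not part of the diff:

library(shapr)

# `phi0` replaces the former `prediction_zero` argument: the expected prediction
# without conditioning on any features, typically the mean of the training
# response (`y_train` is assumed to hold it here).
explanation <- explain(
  model = model_lm_numeric,
  x_explain = x_explain_numeric,
  x_train = x_train_numeric,
  approach = "independence",
  phi0 = mean(y_train)  # was: prediction_zero = mean(y_train)
)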
# erroneous input: `max_n_coalitions` Code max_n_comb_non_numeric_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_comb_non_numeric_1) Condition Error in `get_parameters()`: @@ -317,7 +312,7 @@ Code max_n_comb_non_numeric_2 <- TRUE explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_comb_non_numeric_2) Condition Error in `get_parameters()`: @@ -328,7 +323,7 @@ Code max_n_coalitions_non_integer <- 10.5 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_coalitions_non_integer) Condition Error in `get_parameters()`: @@ -339,7 +334,7 @@ Code max_n_coalitions_too_long <- c(1, 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_coalitions_too_long) Condition Error in `get_parameters()`: @@ -350,7 +345,7 @@ Code max_n_coalitions_is_NA <- as.numeric(NA) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_coalitions_is_NA) Condition Error in `get_parameters()`: @@ -361,7 +356,7 @@ Code max_n_comb_non_positive <- 0 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, max_n_coalitions = max_n_comb_non_positive) Condition Error in `get_parameters()`: @@ -372,7 +367,7 @@ Code max_n_coalitions <- ncol(x_explain_numeric) - 1 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "gaussian", + x_train = x_train_numeric, phi0 = p0, approach = "gaussian", max_n_coalitions = max_n_coalitions) Message Success with message: @@ -401,8 +396,8 @@ groups <- list(A = c("Solar.R", "Wind"), B = c("Temp", "Month"), C = "Day") max_n_coalitions <- length(groups) - 1 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, prediction_zero = p0, approach = "gaussian", - group = groups, max_n_coalitions = max_n_coalitions) + x_train = x_train_numeric, phi0 = p0, approach = "gaussian", group = groups, + max_n_coalitions = max_n_coalitions) Message Success with message: n_groups is smaller than or equal to 3, meaning there are so few unique coalitions (8) that we should use all to get reliable results. 
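The success message above reflects the coalition count: three groups yield 2^3 = 8 unique coalitions, so a smaller max_n_coalitions is overridden and the full set is used. A sketch with the grouping from the snapshot (illustrative):

# Three groups give 2^3 = 8 unique coalitions; requesting fewer triggers the
# message and shapr falls back to using all of them.
groups <- list(A = c("Solar.R", "Wind"), B = c("Temp", "Month"), C = "Day")
explanation_group <- explain(
  model = model_lm_numeric,
  x_explain = x_explain_numeric,
  x_train = x_train_numeric,
  approach = "gaussian",
  phi0 = p0,
  group = groups,
  max_n_coalitions = length(groups) - 1  # 2 < 8, hence the fallback message
)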
@@ -429,8 +424,7 @@ Code group_non_list <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = group_non_list) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = group_non_list) Condition Error in `get_parameters()`: ! `group` must be NULL or a list @@ -440,8 +434,7 @@ Code group_with_non_characters <- list(A = 1, B = 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = group_with_non_characters) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = group_with_non_characters) Condition Error in `check_groups()`: ! All components of group should be a character. @@ -452,8 +445,7 @@ group_with_non_data_features <- list(A = c("Solar.R", "Wind", "not_a_data_feature"), B = c("Temp", "Month", "Day")) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = group_with_non_data_features) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = group_with_non_data_features) Condition Error in `check_groups()`: ! The group feature(s) not_a_data_feature are not @@ -465,8 +457,7 @@ group_missing_data_features <- list(A = c("Solar.R"), B = c("Temp", "Month", "Day")) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = group_missing_data_features) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = group_missing_data_features) Condition Error in `check_groups()`: ! The data feature(s) Wind do not @@ -478,8 +469,7 @@ group_dup_data_features <- list(A = c("Solar.R", "Solar.R", "Wind"), B = c( "Temp", "Month", "Day")) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = group_dup_data_features) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = group_dup_data_features) Condition Error in `check_groups()`: ! Feature(s) Solar.R are found in more than one group or multiple times per group. @@ -490,8 +480,7 @@ Code single_group <- list(A = c("Solar.R", "Wind", "Temp", "Month", "Day")) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - group = single_group) + x_train = x_train_numeric, approach = "independence", phi0 = p0, group = single_group) Condition Error in `check_groups()`: ! You have specified only a single group named A, containing the features: Solar.R, Wind, Temp, Month, Day. 
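Taken together, the check_groups() cases above require `group` to be a list of character vectors that partition the feature names: every feature present exactly once, no duplicates, and more than one group. A sketch of a specification that passes all of them (group names are illustrative):

# A valid partition of the five features used in these tests: character
# vectors, all features covered exactly once, and at least two groups.
valid_groups <- list(
  weather = c("Solar.R", "Wind", "Temp"),
  date    = c("Month", "Day")
)
explanation <- explain(
  model = model_lm_numeric,
  x_explain = x_explain_numeric,
  x_train = x_train_numeric,
  approach = "independence",
  phi0 = p0,
  group = valid_groups
)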
@@ -502,7 +491,7 @@ Code n_samples_non_numeric_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_non_numeric_1) Condition Error in `get_parameters()`: @@ -513,7 +502,7 @@ Code n_samples_non_numeric_2 <- TRUE explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_non_numeric_2) Condition Error in `get_parameters()`: @@ -524,7 +513,7 @@ Code n_samples_non_integer <- 10.5 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_non_integer) Condition Error in `get_parameters()`: @@ -535,7 +524,7 @@ Code n_samples_too_long <- c(1, 2) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_too_long) Condition Error in `get_parameters()`: @@ -546,7 +535,7 @@ Code n_samples_is_NA <- as.numeric(NA) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_is_NA) Condition Error in `get_parameters()`: @@ -557,7 +546,7 @@ Code n_samples_non_positive <- 0 explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, n_MC_samples = n_samples_non_positive) Condition Error in `get_parameters()`: @@ -568,8 +557,7 @@ Code seed_not_integer_interpretable <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - seed = seed_not_integer_interpretable) + x_train = x_train_numeric, approach = "independence", phi0 = p0, seed = seed_not_integer_interpretable) Condition Warning in `set.seed()`: NAs introduced by coercion @@ -581,8 +569,8 @@ Code keep_samp_for_vS_non_logical_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(keep_samp_for_vS = keep_samp_for_vS_non_logical_1)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + keep_samp_for_vS = keep_samp_for_vS_non_logical_1)) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -597,8 +585,8 @@ Code keep_samp_for_vS_non_logical_2 <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(keep_samp_for_vS = keep_samp_for_vS_non_logical_2)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + keep_samp_for_vS = keep_samp_for_vS_non_logical_2)) 
Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -613,8 +601,8 @@ Code keep_samp_for_vS_too_long <- c(TRUE, FALSE) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(keep_samp_for_vS = keep_samp_for_vS_too_long)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + keep_samp_for_vS = keep_samp_for_vS_too_long)) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -629,8 +617,8 @@ Code MSEv_uniform_comb_weights_nl_1 <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_1)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_1)) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -645,8 +633,8 @@ Code MSEv_uniform_comb_weights_nl_2 <- NULL explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_2)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_2)) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -661,8 +649,8 @@ Code MSEv_uniform_comb_weights_long <- c(TRUE, FALSE) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, - output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_long)) + x_train = x_train_numeric, approach = "independence", phi0 = p0, output_args = list( + MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_long)) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -677,7 +665,7 @@ Code predict_model_nonfunction <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, predict_model = predict_model_nonfunction) Message Success with message: @@ -695,7 +683,7 @@ "bla" }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, predict_model = predict_model_non_num_output) Message Success with message: @@ -719,7 +707,7 @@ rep(1, nrow(x) + 1) }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, predict_model = predict_model_wrong_output_len) Message Success with message: @@ -743,7 +731,7 @@ rep(1, nrow(x)) }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = 
"independence", phi0 = p0, predict_model = predict_model_invalid_argument) Message Success with message: @@ -766,7 +754,7 @@ 1 + "bla" }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, predict_model = predict_model_error) Message Success with message: @@ -787,7 +775,7 @@ Code get_model_specs_nonfunction <- "bla" explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, get_model_specs = get_model_specs_nonfunction) Condition Error in `get_feature_specs()`: @@ -800,7 +788,7 @@ "bla" }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, get_model_specs = get_ms_output_not_list) Condition Error in `get_feature_specs()`: @@ -816,7 +804,7 @@ list(1, 2, 3, 4) }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, get_model_specs = get_ms_output_too_long) Condition Error in `get_feature_specs()`: @@ -832,7 +820,7 @@ list(labels = 1, classes = 2, not_a_name = 3) }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, get_model_specs = get_ms_output_wrong_names) Condition Error in `get_feature_specs()`: @@ -848,7 +836,7 @@ 1 + "bla" }) explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_train_numeric, approach = "independence", prediction_zero = p0, + x_train = x_train_numeric, approach = "independence", phi0 = p0, get_model_specs = get_model_specs_error) Condition Error in `get_feature_specs()`: @@ -866,7 +854,7 @@ Code non_factor_approach_1 <- "gaussian" explain(testing = TRUE, model = model_lm_mixed, x_explain = x_explain_mixed, - x_train = x_explain_mixed, approach = non_factor_approach_1, prediction_zero = p0) + x_train = x_explain_mixed, approach = non_factor_approach_1, phi0 = p0) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -883,7 +871,7 @@ Code non_factor_approach_2 <- "empirical" explain(testing = TRUE, model = model_lm_mixed, x_explain = x_explain_mixed, - x_train = x_explain_mixed, approach = non_factor_approach_2, prediction_zero = p0) + x_train = x_explain_mixed, approach = non_factor_approach_2, phi0 = p0) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -900,7 +888,7 @@ Code non_factor_approach_3 <- "copula" explain(testing = TRUE, model = model_lm_mixed, x_explain = x_explain_mixed, - x_train = x_explain_mixed, approach = non_factor_approach_3, prediction_zero = p0) + x_train = x_explain_mixed, approach = non_factor_approach_3, phi0 = p0) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -916,7 +904,7 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_explain_numeric, prediction_zero = p0, approach 
= "gaussian", + x_train = x_explain_numeric, phi0 = p0, approach = "gaussian", max_n_coalitions = max_n_coalitions) Message Success with message: @@ -943,8 +931,8 @@ Code explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, - x_train = x_explain_numeric, prediction_zero = p0, approach = "gaussian", - group = groups, max_n_coalitions = max_n_coalitions) + x_train = x_explain_numeric, phi0 = p0, approach = "gaussian", group = groups, + max_n_coalitions = max_n_coalitions) Message Success with message: n_groups is smaller than or equal to 3, meaning there are so few unique coalitions (8) that we should use all to get reliable results. @@ -971,8 +959,7 @@ Code explanation_exact <- explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, n_MC_samples = 2, seed = 123, max_n_coalitions = NULL, - iterative = FALSE) + phi0 = p0, n_MC_samples = 2, seed = 123, max_n_coalitions = NULL, iterative = FALSE) Message Success with message: max_n_coalitions is NULL or larger than or 2^n_features = 32, @@ -993,7 +980,7 @@ Code explanation_equal <- explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, n_MC_samples = 2, seed = 123, extra_computation_args = list( + phi0 = p0, n_MC_samples = 2, seed = 123, extra_computation_args = list( compute_sd = FALSE), max_n_coalitions = 2^ncol(x_explain_numeric), iterative = FALSE) Message @@ -1012,7 +999,7 @@ Code explanation_larger <- explain(testing = TRUE, model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, n_MC_samples = 2, seed = 123, extra_computation_args = list( + phi0 = p0, n_MC_samples = 2, seed = 123, extra_computation_args = list( compute_sd = FALSE), max_n_coalitions = 2^ncol(x_explain_numeric) + 1, iterative = FALSE) Message diff --git a/tests/testthat/test-adaptive-output.R b/tests/testthat/test-adaptive-output.R index 20b132075..0be8006c0 100644 --- a/tests/testthat/test-adaptive-output.R +++ b/tests/testthat/test-adaptive-output.R @@ -8,7 +8,7 @@ test_that("output_lm_numeric_independence_reach_exact", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, verbose = c("basic", "convergence", "shapley"), paired_shap_sampling = TRUE @@ -25,7 +25,7 @@ test_that("output_lm_numeric_independence_converges_tol", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.1 @@ -45,7 +45,7 @@ test_that("output_lm_numeric_independence_converges_maxit", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.001, @@ -67,7 +67,7 @@ test_that("output_lm_numeric_indep_conv_max_n_coalitions", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 20, iterative = TRUE, verbose = c("convergence", "shapley") @@ -92,7 +92,7 @@ test_that("output_lm_numeric_gaussian_group_converges_tol", { x_train = x_train_numeric, approach = "gaussian", group = groups, - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 5, 
convergence_tol = 0.1 @@ -112,7 +112,7 @@ test_that("output_lm_numeric_independence_converges_tol_paired", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 10, convergence_tol = 0.1 @@ -135,7 +135,7 @@ test_that("output_lm_numeric_independence_saving_and_cont_est", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, paired_shap_sampling = FALSE, iterative_args = list( initial_n_coalitions = 10, @@ -157,7 +157,7 @@ test_that("output_lm_numeric_independence_saving_and_cont_est", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, paired_shap_sampling = FALSE, iterative_args = list( initial_n_coalitions = 10, @@ -178,7 +178,7 @@ test_that("output_lm_numeric_independence_saving_and_cont_est", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, paired_shap_sampling = FALSE, iterative_args = list( initial_n_coalitions = 10, @@ -206,7 +206,7 @@ test_that("output_lm_numeric_independence_saving_and_cont_est", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, paired_shap_sampling = FALSE, iterative_args = list( initial_n_coalitions = 10, @@ -227,7 +227,7 @@ test_that("output_lm_numeric_independence_saving_and_cont_est", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, paired_shap_sampling = FALSE, iterative_args = list( initial_n_coalitions = 10, @@ -255,7 +255,7 @@ test_that("output_verbose_1", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, verbose = c("basic") ), @@ -271,7 +271,7 @@ test_that("output_verbose_1_3", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, verbose = c("basic", "convergence") ), @@ -287,7 +287,7 @@ test_that("output_verbose_1_3_4", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, verbose = c("basic", "convergence", "shapley") ), @@ -303,7 +303,7 @@ test_that("output_verbose_1_3_4_5", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, verbose = c("basic", "convergence", "shapley", "vS_details") ), diff --git a/tests/testthat/test-adaptive-setup.R b/tests/testthat/test-adaptive-setup.R index fbc85a9f2..45f132e6b 100644 --- a/tests/testthat/test-adaptive-setup.R +++ b/tests/testthat/test-adaptive-setup.R @@ -5,7 +5,7 @@ test_that("iterative_args are respected", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 30, iterative_args = list( initial_n_coalitions = 6, @@ -40,7 +40,7 @@ test_that("iterative feature wise and groupwise computations identical", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 5, convergence_tol = 0.1 @@ -56,7 +56,7 @@ test_that("iterative feature wise and groupwise computations identical", { x_train = x_train_numeric, 
approach = "gaussian", group = groups, - prediction_zero = p0, + phi0 = p0, iterative_args = list( initial_n_coalitions = 5, convergence_tol = 0.1 @@ -82,7 +82,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_numeric_1) ) }, @@ -99,7 +99,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_numeric_2) ) }, @@ -116,7 +116,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_integer) ) }, @@ -133,7 +133,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_too_long) ) }, @@ -150,7 +150,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_is_NA) ) }, @@ -167,7 +167,7 @@ test_that("erroneous input: `min_n_batches`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = n_batches_non_positive) ) }, @@ -183,7 +183,7 @@ test_that("different n_batches gives same/different shapley values for different x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = 5, max_batch_size = 10) ) @@ -193,7 +193,7 @@ test_that("different n_batches gives same/different shapley values for different x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = 10, max_batch_size = 10) ) @@ -215,7 +215,7 @@ test_that("different n_batches gives same/different shapley values for different x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = 5, max_batch_size = 10) ) @@ -225,7 +225,7 @@ test_that("different n_batches gives same/different shapley values for different x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0, + phi0 = p0, extra_computation_args = list(min_n_batches = 10, max_batch_size = 10) ) diff --git a/tests/testthat/test-asymmetric-causal-output.R b/tests/testthat/test-asymmetric-causal-output.R index 9fb4668c8..bc8f0f017 100644 --- a/tests/testthat/test-asymmetric-causal-output.R +++ b/tests/testthat/test-asymmetric-causal-output.R @@ -7,7 +7,7 @@ test_that("output_asymmetric_conditional", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = NULL, @@ -27,7 +27,7 @@ test_that("output_asym_cond_reg", { x_train = x_train_numeric, approach = "regression_separate", regression.model = parsnip::linear_reg(), - 
prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = NULL, @@ -46,7 +46,7 @@ test_that("output_asym_cond_reg_iterative", { x_train = x_train_numeric, approach = "regression_separate", regression.model = parsnip::linear_reg(), - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = NULL, @@ -65,7 +65,7 @@ test_that("output_symmetric_conditional", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), # Does not matter when asymmetric = TRUE and confounding = NULL confounding = NULL, @@ -83,7 +83,7 @@ test_that("output_symmetric_marginal_independence", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:5), confounding = TRUE, @@ -101,7 +101,7 @@ test_that("output_symmetric_marginal_gaussian", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:5), confounding = TRUE, @@ -119,7 +119,7 @@ test_that("output_asym_caus_conf_TRUE", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = TRUE, @@ -140,7 +140,7 @@ test_that("output_asym_caus_conf_FALSE", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = FALSE, @@ -159,7 +159,7 @@ test_that("output_asym_caus_conf_mix", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -178,7 +178,7 @@ test_that("output_asym_caus_conf_mix_n_coal", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -198,7 +198,7 @@ test_that("output_asym_caus_conf_mix_empirical", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -217,7 +217,7 @@ test_that("output_asym_caus_conf_mix_ctree", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -236,7 +236,7 @@ test_that("output_sym_caus_conf_TRUE", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), confounding = TRUE, @@ -254,7 +254,7 @@ test_that("output_sym_caus_conf_FALSE", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), confounding = FALSE, @@ -272,7 +272,7 @@ test_that("output_sym_caus_conf_mix", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, 
asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -292,7 +292,7 @@ test_that("output_sym_caus_conf_TRUE_group", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3), confounding = TRUE, @@ -312,7 +312,7 @@ test_that("output_sym_caus_conf_mix_group", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1, 2, 3), confounding = c(TRUE, TRUE, FALSE), @@ -331,7 +331,7 @@ test_that("output_sym_caus_conf_mix_group_iterative", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1, 2, 3), confounding = c(TRUE, TRUE, FALSE), @@ -357,7 +357,7 @@ test_that("output_mixed_sym_caus_conf_TRUE", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), confounding = TRUE, @@ -375,7 +375,7 @@ test_that("output_mixed_sym_caus_conf_TRUE_iterative", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3, 4:5), confounding = TRUE, @@ -394,7 +394,7 @@ test_that("output_mixed_asym_caus_conf_mixed", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(TRUE, FALSE, FALSE), @@ -413,7 +413,7 @@ test_that("output_mixed_asym_caus_conf_mixed_2", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), confounding = c(FALSE, TRUE, TRUE), @@ -434,7 +434,7 @@ test_that("output_mixed_asym_cond_reg", { x_train = x_train_mixed, approach = "regression_separate", regression.model = parsnip::linear_reg(), - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 3, 4:5), paired_shap_sampling = FALSE, @@ -456,7 +456,7 @@ test_that("output_categorical_asym_causal_mixed_cat", { x_explain = x_explain_categorical[1:2], # Temp [1:2] as [1:3] give different sample on GHA-macOS (unknown reason) x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -477,7 +477,7 @@ test_that("output_cat_asym_causal_mixed_cat_ad", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), @@ -496,7 +496,7 @@ test_that("output_categorical_asym_causal_mixed_ctree", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "ctree", - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(3:4, 2, 1), confounding = c(TRUE, FALSE, FALSE), diff --git a/tests/testthat/test-asymmetric-causal-setup.R b/tests/testthat/test-asymmetric-causal-setup.R index 3a17d9d78..75f03cb98 100644 --- a/tests/testthat/test-asymmetric-causal-setup.R +++ b/tests/testthat/test-asymmetric-causal-setup.R @@ -9,7 +9,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = 
model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:6), confounding = NULL, @@ -28,7 +28,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:5, 5), confounding = NULL, @@ -47,7 +47,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(2:5, 5), confounding = NULL, @@ -66,7 +66,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(1:2, 4), confounding = NULL, @@ -85,7 +85,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list("Solar.R", "Wind", "Temp", "Month", "Day", "Invalid feature name"), confounding = NULL, @@ -104,7 +104,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list("Solar.R", "Wind", "Temp", "Month", "Day", "Day"), confounding = NULL, @@ -123,7 +123,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list("Solar.R", "Wind", "Temp", "Day", "Day"), confounding = NULL, @@ -142,7 +142,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list("Solar.R", "Wind"), confounding = NULL, @@ -161,7 +161,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(c("Solar.R", "Wind", "Temp", "Month"), "Day"), confounding = NULL, @@ -181,7 +181,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(c("A", "C"), "Wrong name"), confounding = NULL, @@ -201,7 +201,7 @@ test_that("asymmetric erroneous input: `causal_ordering`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = TRUE, causal_ordering = list(c("A"), "B"), confounding = NULL, @@ -226,7 +226,7 @@ test_that("asymmetric erroneous input: `approach`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, @@ -249,7 +249,7 @@ test_that("asymmetric erroneous input: `asymmetric`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - 
prediction_zero = p0, + phi0 = p0, asymmetric = c(FALSE, FALSE), causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, @@ -268,7 +268,7 @@ test_that("asymmetric erroneous input: `asymmetric`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = "Must be a single logical", causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, @@ -287,7 +287,7 @@ test_that("asymmetric erroneous input: `asymmetric`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = 1L, causal_ordering = list(1:2, 3:4, 5), confounding = TRUE, @@ -311,7 +311,7 @@ test_that("asymmetric erroneous input: `confounding`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3:4, 5), confounding = c("A", "B", "C"), @@ -330,7 +330,7 @@ test_that("asymmetric erroneous input: `confounding`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, asymmetric = FALSE, causal_ordering = list(1:2, 3:4, 5), confounding = c(TRUE, FALSE), diff --git a/tests/testthat/test-forecast-output.R b/tests/testthat/test-forecast-output.R index 1f90c9b3c..803f028e4 100644 --- a/tests/testthat/test-forecast-output.R +++ b/tests/testthat/test-forecast-output.R @@ -9,7 +9,7 @@ test_that("forecast_output_ar_numeric", { explain_y_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE, n_batches = 1 ), @@ -30,7 +30,7 @@ test_that("forecast_output_arima_numeric", { explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE, max_n_coalitions = 150, iterative = FALSE @@ -52,7 +52,7 @@ test_that("forecast_output_arima_numeric_iterative", { explain_xreg_lags = 3, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE, max_n_coalitions = 150, iterative = TRUE, @@ -75,7 +75,7 @@ test_that("forecast_output_arima_numeric_iterative_groups", { explain_xreg_lags = c(3, 3, 3), horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = TRUE, max_n_coalitions = 150, iterative = TRUE, @@ -96,7 +96,7 @@ test_that("forecast_output_arima_numeric_no_xreg", { explain_y_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE, n_batches = 1 ), @@ -118,7 +118,7 @@ test_that("forecast_output_forecast_ARIMA_group_numeric", { explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = TRUE, n_batches = 1 ), @@ -139,7 +139,7 @@ test_that("forecast_output_arima_numeric_no_lags", { explain_xreg_lags = 0, horizon = 3, approach = "independence", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE, n_batches = 1 ), @@ -159,7 +159,7 @@ test_that("ARIMA gives the same output with different horizons", { explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar[1:3], + phi0 = p0_ar[1:3], group_lags = FALSE, n_batches = 1, max_n_coalitions = 200, @@ -178,7 +178,7 @@ test_that("ARIMA gives the same output with different horizons", { explain_xreg_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = p0_ar[1:2], + phi0 = p0_ar[1:2], group_lags = FALSE, n_batches = 1, max_n_coalitions = 100, @@ 
-196,7 +196,7 @@ test_that("ARIMA gives the same output with different horizons", { explain_xreg_lags = 2, horizon = 1, approach = "empirical", - prediction_zero = p0_ar[1], + phi0 = p0_ar[1], group_lags = FALSE, n_batches = 1, max_n_coalitions = 50, @@ -233,7 +233,7 @@ test_that("ARIMA gives the same output with different horizons with grouping", { explain_xreg_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar[1:3], + phi0 = p0_ar[1:3], group_lags = TRUE, n_batches = 1, max_n_coalitions = 50, @@ -252,7 +252,7 @@ test_that("ARIMA gives the same output with different horizons with grouping", { explain_xreg_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = p0_ar[1:2], + phi0 = p0_ar[1:2], group_lags = TRUE, n_batches = 1, max_n_coalitions = 50, @@ -270,7 +270,7 @@ test_that("ARIMA gives the same output with different horizons with grouping", { explain_xreg_lags = 2, horizon = 1, approach = "empirical", - prediction_zero = p0_ar[1], + phi0 = p0_ar[1], group_lags = TRUE, n_batches = 1, max_n_coalitions = 50, diff --git a/tests/testthat/test-forecast-setup.R b/tests/testthat/test-forecast-setup.R index 00da0fe0a..cd211d392 100644 --- a/tests/testthat/test-forecast-setup.R +++ b/tests/testthat/test-forecast-setup.R @@ -19,7 +19,7 @@ test_that("error with custom model without providing predict_model", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -46,7 +46,7 @@ test_that("erroneous input: `x_train/x_explain`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -68,7 +68,7 @@ test_that("erroneous input: `x_train/x_explain`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -91,7 +91,7 @@ test_that("erroneous input: `x_train/x_explain`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -114,7 +114,7 @@ test_that("erroneous input: `model`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -122,7 +122,7 @@ test_that("erroneous input: `model`", { }) -test_that("erroneous input: `prediction_zero`", { +test_that("erroneous input: `phi0`", { set.seed(123) expect_snapshot( @@ -141,7 +141,7 @@ test_that("erroneous input: `prediction_zero`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_wrong_length + phi0 = p0_wrong_length ) }, error = TRUE @@ -171,7 +171,7 @@ test_that("erroneous input: `max_n_coalitions`", { explain_xreg_lags = explain_xreg_lags, horizon = horizon, approach = "independence", - prediction_zero = p0_ar, + phi0 = p0_ar, max_n_coalitions = n_coalitions, group_lags = FALSE ) @@ -199,7 +199,7 @@ test_that("erroneous input: `max_n_coalitions`", { explain_xreg_lags = explain_xreg_lags, horizon = horizon, approach = "independence", - prediction_zero = p0_ar, + phi0 = p0_ar, max_n_coalitions = n_coalitions, group_lags = TRUE ) @@ -226,7 +226,7 @@ test_that("erroneous input: `train_idx`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -249,7 +249,7 @@ test_that("erroneous input: `train_idx`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -271,7 +271,7 @@ test_that("erroneous 
input: `train_idx`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -297,7 +297,7 @@ test_that("erroneous input: `explain_idx`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -319,7 +319,7 @@ test_that("erroneous input: `explain_idx`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -345,7 +345,7 @@ test_that("erroneous input: `explain_y_lags`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -367,7 +367,7 @@ test_that("erroneous input: `explain_y_lags`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -389,7 +389,7 @@ test_that("erroneous input: `explain_y_lags`", { explain_xreg_lags = 2, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -410,7 +410,7 @@ test_that("erroneous input: `explain_y_lags`", { explain_y_lags = 0, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -437,7 +437,7 @@ test_that("erroneous input: `explain_x_lags`", { explain_xreg_lags = explain_xreg_lags_negative, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -459,7 +459,7 @@ test_that("erroneous input: `explain_x_lags`", { explain_xreg_lags = explain_xreg_lags_not_integer, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -481,7 +481,7 @@ test_that("erroneous input: `explain_x_lags`", { explain_xreg_lags = explain_x_lags_wrong_length, horizon = 3, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -507,7 +507,7 @@ test_that("erroneous input: `horizon`", { explain_xreg_lags = 2, horizon = horizon_negative, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE @@ -529,7 +529,7 @@ test_that("erroneous input: `horizon`", { explain_xreg_lags = 2, horizon = horizon_not_integer, approach = "independence", - prediction_zero = p0_ar + phi0 = p0_ar ) }, error = TRUE diff --git a/tests/testthat/test-plot.R b/tests/testthat/test-plot.R index 18598ea2b..6fe9d69de 100644 --- a/tests/testthat/test-plot.R +++ b/tests/testthat/test-plot.R @@ -6,7 +6,7 @@ explain_mixed <- explain( x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) explain_numeric_empirical <- explain( @@ -15,7 +15,7 @@ explain_numeric_empirical <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) explain_numeric_gaussian <- explain( @@ -24,7 +24,7 @@ explain_numeric_gaussian <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0 + phi0 = p0 ) explain_numeric_ctree <- explain( @@ -33,7 +33,7 @@ explain_numeric_ctree <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) explain_numeric_combined <- explain( @@ -42,7 +42,7 @@ explain_numeric_combined <- explain( x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("empirical", "ctree", "gaussian", "ctree"), - prediction_zero = p0 + phi0 = p0 ) # Create a list of 
explanations with names diff --git a/tests/testthat/test-regression-output.R b/tests/testthat/test-regression-output.R index d730fdb60..d43acc701 100644 --- a/tests/testthat/test-regression-output.R +++ b/tests/testthat/test-regression-output.R @@ -7,7 +7,7 @@ test_that("output_lm_numeric_lm_separate_iterative", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = TRUE ), @@ -24,7 +24,7 @@ test_that("output_lm_numeric_lm_separate", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -40,7 +40,7 @@ test_that("output_lm_numeric_lm_separate_n_comb", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 10, regression.model = parsnip::linear_reg(), iterative = FALSE @@ -57,7 +57,7 @@ test_that("output_lm_categorical_lm_separate", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -73,7 +73,7 @@ test_that("output_lm_mixed_lm_separate", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -89,7 +89,7 @@ test_that("output_lm_mixed_splines_separate", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "regression_separate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), regression.recipe_func = function(regression.recipe) { recipes::step_ns(regression.recipe, recipes::all_numeric_predictors(), deg_free = 2) @@ -107,7 +107,7 @@ test_that("output_lm_mixed_decision_tree_cv_separate", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_train_mixed, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2)), @@ -126,7 +126,7 @@ test_that("output_lm_mixed_decision_tree_cv_separate_parallel", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_train_mixed, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2)), @@ -145,7 +145,7 @@ test_that("output_lm_mixed_xgboost_separate", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_train_mixed, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"), regression.recipe_func = function(regression.recipe) { @@ -166,7 +166,7 @@ test_that("output_lm_numeric_lm_surrogate_iterative", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = TRUE ), @@ -183,7 +183,7 @@ test_that("output_lm_numeric_lm_surrogate", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, 
regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -199,7 +199,7 @@ test_that("output_lm_numeric_lm_surrogate_n_comb", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 10, regression.model = parsnip::linear_reg(), iterative = FALSE @@ -216,7 +216,7 @@ test_that("output_lm_numeric_lm_surrogate_reg_surr_n_comb", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 10, regression.model = parsnip::linear_reg(), regression.surrogate_n_comb = 8, @@ -234,7 +234,7 @@ test_that("output_lm_categorical_lm_surrogate", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -250,7 +250,7 @@ test_that("output_lm_mixed_lm_surrogate", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "regression_surrogate", - prediction_zero = p0, + phi0 = p0, regression.model = parsnip::linear_reg(), iterative = FALSE ), @@ -265,7 +265,7 @@ test_that("output_lm_mixed_decision_tree_cv_surrogate", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_train_mixed, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.model = parsnip::decision_tree(tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2)), @@ -283,7 +283,7 @@ test_that("output_lm_mixed_xgboost_surrogate", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_train_mixed, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"), regression.recipe_func = function(regression.recipe) { diff --git a/tests/testthat/test-regression-setup.R b/tests/testthat/test-regression-setup.R index 9a8998ae0..f88c3692f 100644 --- a/tests/testthat/test-regression-setup.R +++ b/tests/testthat/test-regression-setup.R @@ -9,7 +9,7 @@ test_that("regression erroneous input: `approach`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = c("regression_surrogate", "gaussian", "independence", "empirical"), iterative = FALSE ) @@ -25,7 +25,7 @@ test_that("regression erroneous input: `approach`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = c("regression_separate", "gaussian", "independence", "empirical"), iterative = FALSE ) @@ -45,7 +45,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = NULL ) @@ -61,7 +61,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = lm ) @@ -77,7 +77,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), 
engine = "rpart", mode = "regression") ) @@ -93,7 +93,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(num_terms = c(1, 2, 3)) @@ -110,7 +110,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3), num_terms = c(1, 2, 3)) @@ -127,7 +127,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = 2, engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)) @@ -144,7 +144,7 @@ test_that("regression erroneous input: `regression.model`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), iterative = FALSE @@ -166,7 +166,7 @@ test_that("regression erroneous input: `regression.tune_values`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = 2, engine = "rpart", mode = "regression"), regression.tune_values = as.matrix(data.frame(tree_depth = c(1, 2, 3))) @@ -183,7 +183,7 @@ test_that("regression erroneous input: `regression.tune_values`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = function(x) c(1, 2, 3) @@ -200,7 +200,7 @@ test_that("regression erroneous input: `regression.tune_values`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = function(x) data.frame(wrong_name = c(1, 2, 3)) @@ -221,7 +221,7 @@ test_that("regression erroneous input: `regression.vfold_cv_para`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), @@ -239,7 +239,7 @@ test_that("regression erroneous input: `regression.vfold_cv_para`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = 
"regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), @@ -257,7 +257,7 @@ test_that("regression erroneous input: `regression.vfold_cv_para`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.model = parsnip::decision_tree(tree_depth = tune(), engine = "rpart", mode = "regression"), regression.tune_values = data.frame(tree_depth = c(1, 2, 3)), @@ -280,7 +280,7 @@ test_that("regression erroneous input: `regression.recipe_func`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_separate", regression.recipe_func = 3 ) @@ -296,7 +296,7 @@ test_that("regression erroneous input: `regression.recipe_func`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.recipe_func = function(x) { return(2) @@ -319,7 +319,7 @@ test_that("regression erroneous input: `regression.surrogate_n_comb`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.surrogate_n_comb = 2^ncol(x_explain_numeric) - 1, iterative = FALSE @@ -336,7 +336,7 @@ test_that("regression erroneous input: `regression.surrogate_n_comb`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "regression_surrogate", regression.surrogate_n_comb = 0, iterative = FALSE diff --git a/tests/testthat/test-regular-output.R b/tests/testthat/test-regular-output.R index d9c5cba9f..747080607 100644 --- a/tests/testthat/test-regular-output.R +++ b/tests/testthat/test-regular-output.R @@ -8,7 +8,7 @@ test_that("output_lm_numeric_independence", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_independence" @@ -23,7 +23,7 @@ test_that("output_lm_numeric_independence_MSEv_Shapley_weights", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(MSEv_uniform_comb_weights = FALSE), iterative = FALSE ), @@ -39,7 +39,7 @@ test_that("output_lm_numeric_empirical", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_empirical" @@ -54,7 +54,7 @@ test_that("output_lm_numeric_empirical_n_coalitions", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 20, iterative = FALSE ), @@ -71,7 +71,7 @@ test_that("output_lm_numeric_empirical_independence", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, empirical.type = "independence", iterative = FALSE ), @@ -88,7 +88,7 @@ test_that("output_lm_numeric_empirical_AICc_each", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 8, empirical.type = "AICc_each_k", iterative = FALSE @@ -106,7 +106,7 @@ test_that("output_lm_numeric_empirical_AICc_full", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - 
prediction_zero = p0, + phi0 = p0, max_n_coalitions = 8, empirical.type = "AICc_full", iterative = FALSE @@ -123,7 +123,7 @@ test_that("output_lm_numeric_gaussian", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_gaussian" @@ -138,7 +138,7 @@ test_that("output_lm_numeric_copula", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "copula", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_copula" @@ -153,7 +153,7 @@ test_that("output_lm_numeric_ctree", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_ctree" @@ -169,7 +169,7 @@ test_that("output_lm_numeric_vaeac", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, # Low value here to speed up the time vaeac.epochs = 4, # Low value here to speed up the time vaeac.n_vaeacs_initialize = 2, # Low value here to speed up the time @@ -191,7 +191,7 @@ test_that("output_lm_categorical_ctree", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_categorical_ctree" @@ -207,7 +207,7 @@ test_that("output_lm_categorical_vaeac", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, # Low value here to speed up the time vaeac.epochs = 4, # Low value here to speed up the time vaeac.n_vaeacs_initialize = 2, # Low value here to speed up the time @@ -229,7 +229,7 @@ test_that("output_lm_categorical_categorical", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "categorical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_categorical_method" @@ -244,7 +244,7 @@ test_that("output_lm_categorical_independence", { x_explain = x_explain_categorical, x_train = x_train_categorical, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_categorical_independence" @@ -259,7 +259,7 @@ test_that("output_lm_ts_timeseries", { x_explain = x_explain_ts, x_train = x_train_ts, approach = "timeseries", - prediction_zero = p0_ts, + phi0 = p0_ts, group = group_ts, iterative = FALSE ), @@ -275,7 +275,7 @@ test_that("output_lm_numeric_comb1", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("gaussian", "empirical", "ctree", "independence"), - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_comb1" @@ -290,7 +290,7 @@ test_that("output_lm_numeric_comb2", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("ctree", "copula", "independence", "copula"), - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_comb2" @@ -305,7 +305,7 @@ test_that("output_lm_numeric_comb3", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "empirical"), - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_comb3" @@ -323,7 +323,7 @@ test_that("output_lm_mixed_independence", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_mixed_independence" @@ -338,7 +338,7 @@ test_that("output_lm_mixed_ctree", { 
x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_mixed_ctree" @@ -354,7 +354,7 @@ test_that("output_lm_mixed_vaeac", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, # Low value here to speed up the time vaeac.epochs = 4, # Low value here to speed up the time vaeac.n_vaeacs_initialize = 2, # Low value here to speed up the time @@ -377,7 +377,7 @@ test_that("output_lm_mixed_comb", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = c("ctree", "independence", "ctree", "independence"), - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_mixed_comb" @@ -404,7 +404,7 @@ test_that("output_custom_lm_numeric_independence_1", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = custom_pred_func, iterative = FALSE ), @@ -431,7 +431,7 @@ test_that("output_custom_lm_numeric_independence_2", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = custom_pred_func, iterative = FALSE )), @@ -444,7 +444,7 @@ test_that("output_custom_lm_numeric_independence_2", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) @@ -494,7 +494,7 @@ test_that("output_custom_xgboost_mixed_dummy_ctree", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "ctree", - prediction_zero = p0, + phi0 = p0, predict_model = predict_model.xgboost_dummy, get_model_specs = NA, iterative = FALSE @@ -517,7 +517,7 @@ test_that("output_lm_numeric_interaction", { x_explain = x_explain_interaction, x_train = x_train_interaction, approach = "independence", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ), "output_lm_numeric_interaction" @@ -534,7 +534,7 @@ test_that("output_lm_numeric_ctree_parallelized", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) }, @@ -556,7 +556,7 @@ test_that("output_lm_numeric_empirical_progress", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) }) @@ -575,7 +575,7 @@ test_that("output_lm_numeric_independence_keep_samp_for_vS", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(keep_samp_for_vS = TRUE), iterative = FALSE )), diff --git a/tests/testthat/test-regular-setup.R b/tests/testthat/test-regular-setup.R index a42314511..ba610ad33 100644 --- a/tests/testthat/test-regular-setup.R +++ b/tests/testthat/test-regular-setup.R @@ -15,7 +15,7 @@ test_that("error with custom model without providing predict_model", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -42,7 +42,7 @@ test_that("messages with missing detail in get_model_specs", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = custom_predict_model, get_model_specs = NA ) @@ -61,7 +61,7 @@ test_that("messages with missing detail in get_model_specs", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "independence", - 
prediction_zero = p0, + phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_get_model_specs_no_lab ) @@ -80,7 +80,7 @@ test_that("messages with missing detail in get_model_specs", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_gms_no_classes ) @@ -103,7 +103,7 @@ test_that("messages with missing detail in get_model_specs", { x_train = x_train_mixed, x_explain = x_explain_mixed, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = custom_predict_model, get_model_specs = custom_gms_no_factor_levels ) @@ -124,7 +124,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_numeric, x_train = x_train_wrong_format, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -141,7 +141,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_wrong_format, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -159,7 +159,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_wrong_format, x_train = x_train_wrong_format, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -178,7 +178,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_numeric, x_train = x_train_no_column_names, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -196,7 +196,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_no_column_names, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -215,7 +215,7 @@ test_that("erroneous input: `x_train/x_explain`", { x_explain = x_explain_no_column_names, x_train = x_train_no_column_names, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -233,7 +233,7 @@ test_that("erroneous input: `model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -254,7 +254,7 @@ test_that("erroneous input: `approach`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = approach_non_character, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -271,7 +271,7 @@ test_that("erroneous input: `approach`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = approach_incorrect_length, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -288,14 +288,14 @@ test_that("erroneous input: `approach`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = approach_incorrect_character, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE ) }) -test_that("erroneous input: `prediction_zero`", { +test_that("erroneous input: `phi0`", { set.seed(123) expect_snapshot( @@ -309,7 +309,7 @@ test_that("erroneous input: `prediction_zero`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0_non_numeric_1 + phi0 = p0_non_numeric_1 ) }, error = TRUE @@ -326,7 +326,7 @@ test_that("erroneous input: `prediction_zero`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0_non_numeric_2 + phi0 = p0_non_numeric_2 ) }, error = TRUE @@ -344,7 +344,7 @@ test_that("erroneous input: `prediction_zero`", { x_explain = x_explain_numeric, x_train = 
x_train_numeric, approach = "independence", - prediction_zero = p0_too_long + phi0 = p0_too_long ) }, error = TRUE @@ -361,7 +361,7 @@ test_that("erroneous input: `prediction_zero`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0_is_NA + phi0 = p0_is_NA ) }, error = TRUE @@ -382,7 +382,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_comb_non_numeric_1 ) }, @@ -400,7 +400,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_comb_non_numeric_2 ) }, @@ -419,7 +419,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_coalitions_non_integer ) }, @@ -439,7 +439,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_coalitions_too_long ) }, @@ -457,7 +457,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_coalitions_is_NA ) }, @@ -475,7 +475,7 @@ test_that("erroneous input: `max_n_coalitions`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = max_n_comb_non_positive ) }, @@ -491,7 +491,7 @@ test_that("erroneous input: `max_n_coalitions`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "gaussian", max_n_coalitions = max_n_coalitions ) @@ -513,7 +513,7 @@ test_that("erroneous input: `max_n_coalitions`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_train_numeric, - prediction_zero = p0, + phi0 = p0, approach = "gaussian", group = groups, max_n_coalitions = max_n_coalitions @@ -535,7 +535,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, group = group_non_list ) }, @@ -553,7 +553,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, group = group_with_non_characters ) }, @@ -573,7 +573,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, group = group_with_non_data_features ) }, @@ -593,7 +593,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, group = group_missing_data_features ) }, @@ -613,7 +613,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, group = group_dup_data_features ) }, @@ -630,7 +630,7 @@ test_that("erroneous input: `group`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - 
prediction_zero = p0, + phi0 = p0, group = single_group ) }, @@ -652,7 +652,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_non_numeric_1 ) }, @@ -670,7 +670,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_non_numeric_2 ) }, @@ -687,7 +687,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_non_integer ) }, @@ -704,7 +704,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_too_long ) }, @@ -721,7 +721,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_is_NA ) }, @@ -738,7 +738,7 @@ test_that("erroneous input: `n_MC_samples`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = n_samples_non_positive ) }, @@ -760,7 +760,7 @@ test_that("erroneous input: `seed`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, seed = seed_not_integer_interpretable ) }, @@ -781,7 +781,7 @@ test_that("erroneous input: `keep_samp_for_vS`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(keep_samp_for_vS = keep_samp_for_vS_non_logical_1) ) }, @@ -798,7 +798,7 @@ test_that("erroneous input: `keep_samp_for_vS`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(keep_samp_for_vS = keep_samp_for_vS_non_logical_2) ) }, @@ -815,7 +815,7 @@ test_that("erroneous input: `keep_samp_for_vS`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(keep_samp_for_vS = keep_samp_for_vS_too_long) ) }, @@ -836,7 +836,7 @@ test_that("erroneous input: `MSEv_uniform_comb_weights`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_1) ) }, @@ -853,7 +853,7 @@ test_that("erroneous input: `MSEv_uniform_comb_weights`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_nl_2) ) }, @@ -870,7 +870,7 @@ test_that("erroneous input: `MSEv_uniform_comb_weights`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, output_args = list(MSEv_uniform_comb_weights = MSEv_uniform_comb_weights_long) ) }, @@ -892,7 +892,7 @@ test_that("erroneous input: `predict_model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = 
predict_model_nonfunction ) }, @@ -912,7 +912,7 @@ test_that("erroneous input: `predict_model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = predict_model_non_num_output ) }, @@ -932,7 +932,7 @@ test_that("erroneous input: `predict_model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = predict_model_wrong_output_len ) }, @@ -952,7 +952,7 @@ test_that("erroneous input: `predict_model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = predict_model_invalid_argument ) }, @@ -972,7 +972,7 @@ test_that("erroneous input: `predict_model`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, predict_model = predict_model_error ) }, @@ -994,7 +994,7 @@ test_that("erroneous input: `get_model_specs`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, get_model_specs = get_model_specs_nonfunction ) }, @@ -1015,7 +1015,7 @@ test_that("erroneous input: `get_model_specs`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, get_model_specs = get_ms_output_not_list ) }, @@ -1035,7 +1035,7 @@ test_that("erroneous input: `get_model_specs`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, get_model_specs = get_ms_output_too_long ) }, @@ -1059,7 +1059,7 @@ test_that("erroneous input: `get_model_specs`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, get_model_specs = get_ms_output_wrong_names ) }, @@ -1079,7 +1079,7 @@ test_that("erroneous input: `get_model_specs`", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "independence", - prediction_zero = p0, + phi0 = p0, get_model_specs = get_model_specs_error ) }, @@ -1100,7 +1100,7 @@ test_that("incompatible input: `data/approach`", { x_explain = x_explain_mixed, x_train = x_explain_mixed, approach = non_factor_approach_1, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -1116,7 +1116,7 @@ test_that("incompatible input: `data/approach`", { x_explain = x_explain_mixed, x_train = x_explain_mixed, approach = non_factor_approach_2, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -1132,7 +1132,7 @@ test_that("incompatible input: `data/approach`", { x_explain = x_explain_mixed, x_train = x_explain_mixed, approach = non_factor_approach_3, - prediction_zero = p0 + phi0 = p0 ) }, error = TRUE @@ -1147,7 +1147,7 @@ test_that("Correct dimension of S when sampling combinations", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_explain_mixed, - prediction_zero = p0, + phi0 = p0, approach = "ctree", max_n_coalitions = max_n_coalitions ) @@ -1164,7 +1164,7 @@ test_that("Message with too low `max_n_coalitions`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_explain_numeric, - prediction_zero = p0, + phi0 = p0, approach = "gaussian", max_n_coalitions = max_n_coalitions ) @@ -1185,7 +1185,7 @@ test_that("Message with too low `max_n_coalitions`", { model = model_lm_numeric, x_explain = x_explain_numeric, x_train = x_explain_numeric, - prediction_zero = p0, + 
phi0 = p0, approach = "gaussian", group = groups, max_n_coalitions = max_n_coalitions @@ -1207,7 +1207,7 @@ test_that("Shapr with `max_n_coalitions` >= 2^m uses exact Shapley kernel weight x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 2, # Low value for fast computations seed = 123, max_n_coalitions = NULL, @@ -1222,7 +1222,7 @@ test_that("Shapr with `max_n_coalitions` >= 2^m uses exact Shapley kernel weight x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 2, # Low value for fast computations seed = 123, extra_computation_args = list(compute_sd = FALSE), @@ -1240,7 +1240,7 @@ test_that("Shapr with `max_n_coalitions` >= 2^m uses exact Shapley kernel weight x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 2, # Low value for fast computations seed = 123, extra_computation_args = list(compute_sd = FALSE), @@ -1284,7 +1284,7 @@ test_that("Correct dimension of S when sampling combinations with groups", { model = model_lm_mixed, x_explain = x_explain_mixed, x_train = x_explain_mixed, - prediction_zero = p0, + phi0 = p0, approach = "ctree", group = groups, max_n_coalitions = max_n_coalitions @@ -1300,7 +1300,7 @@ test_that("data feature ordering is output_lm_numeric_column_order", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) ex.new_data_feature_order <- explain( @@ -1309,7 +1309,7 @@ test_that("data feature ordering is output_lm_numeric_column_order", { x_explain = rev(x_explain_numeric), x_train = rev(x_train_numeric), approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) explain.new_model_feat_order <- explain( @@ -1318,7 +1318,7 @@ test_that("data feature ordering is output_lm_numeric_column_order", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) # Same Shapley values, but different order @@ -1343,7 +1343,7 @@ test_that("parallelization gives same output for any approach", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) future::plan("multisession", workers = 2) # Parallelized with 2 cores @@ -1353,7 +1353,7 @@ test_that("parallelization gives same output for any approach", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) future::plan("sequential") # Resetting to sequential computation @@ -1372,7 +1372,7 @@ test_that("parallelization gives same output for any approach", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) future::plan("multisession", workers = 2) # Parallelized with 2 cores @@ -1382,7 +1382,7 @@ test_that("parallelization gives same output for any approach", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "ctree", - prediction_zero = p0 + phi0 = p0 ) future::plan("sequential") # Resetting to sequential computation @@ -1403,7 +1403,7 @@ test_that("gaussian approach use the user provided parameters", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, ) # Expect that gaussian.mu is the sample mean when no values are provided @@ -1428,7 +1428,7 @@ test_that("gaussian approach use the 
user provided parameters", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, gaussian.mu = gaussian.provided_mu, gaussian.cov_mat = gaussian.provided_cov_mat ) @@ -1455,7 +1455,7 @@ test_that("setting the seed for combined approaches works", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) @@ -1465,7 +1465,7 @@ test_that("setting the seed for combined approaches works", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) @@ -1484,7 +1484,7 @@ test_that("counting the number of unique approaches", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "gaussian", "copula"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) expect_equal(explanation_combined_1$internal$parameters$n_approaches, 4) @@ -1496,7 +1496,7 @@ test_that("counting the number of unique approaches", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("empirical"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) expect_equal(explanation_combined_2$internal$parameters$n_approaches, 1) @@ -1508,7 +1508,7 @@ test_that("counting the number of unique approaches", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("gaussian", "gaussian", "gaussian", "gaussian"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) expect_equal(explanation_combined_3$internal$parameters$n_approaches, 4) @@ -1520,7 +1520,7 @@ test_that("counting the number of unique approaches", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "independence", "empirical"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) expect_equal(explanation_combined_4$internal$parameters$n_approaches, 4) @@ -1533,7 +1533,7 @@ test_that("counting the number of unique approaches", { x_explain = x_explain_numeric, x_train = x_train_numeric, approach = c("independence", "empirical", "independence", "empirical"), - prediction_zero = p0, + phi0 = p0, seed = 1 ) expect_equal(explanation_combined_5$internal$parameters$n_approaches, 4) @@ -1550,7 +1550,7 @@ test_that("vaeac_set_seed_works", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, seed = 1, vaeac.epochs = 4, @@ -1567,7 +1567,7 @@ test_that("vaeac_set_seed_works", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, seed = 1, vaeac.epochs = 4, @@ -1592,7 +1592,7 @@ test_that("vaeac_pretreained_vaeac_model", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, seed = 1, vaeac.epochs = 4, @@ -1614,7 +1614,7 @@ test_that("vaeac_pretreained_vaeac_model", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, seed = 1, vaeac.extra_parameters = list( @@ -1637,7 +1637,7 @@ test_that("vaeac_pretreained_vaeac_model", { x_explain = x_explain_mixed, x_train = x_train_mixed, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 10, seed = 1, vaeac.extra_parameters = list( @@ -1666,7 +1666,7 @@ test_that("feature wise and groupwise computations are identical", { 
x_explain = x_explain_numeric, x_train = x_train_numeric, approach = "gaussian", - prediction_zero = p0 + phi0 = p0 ) @@ -1677,7 +1677,7 @@ test_that("feature wise and groupwise computations are identical", { x_train = x_train_numeric, approach = "gaussian", group = groups, - prediction_zero = p0 + phi0 = p0 ) diff --git a/vignettes/understanding_shapr.Rmd b/vignettes/understanding_shapr.Rmd index f556d6298..6eedd3824 100644 --- a/vignettes/understanding_shapr.Rmd +++ b/vignettes/understanding_shapr.Rmd @@ -471,7 +471,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) #> Note: Feature classes extracted from the model contains NA. @@ -541,7 +541,7 @@ explanation_plot <- explain( x_explain = x_explain_many, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) #> Note: Feature classes extracted from the model contains NA. @@ -605,7 +605,7 @@ explanation_lm_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) #> Success with message: @@ -645,7 +645,7 @@ explanation_ctree <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0, + phi0 = p0, ctree.mincriterion = 0.80, ctree.minsplit = 20, ctree.minbucket = 20, @@ -702,7 +702,7 @@ explanation_cat_method <- explain( x_explain = x_explain_all_cat, x_train = x_train_all_cat, approach = "categorical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) #> Success with message: @@ -779,7 +779,7 @@ explanation_timeseries <- explain( x_explain = x_explain_ts, x_train = x_train_ts, approach = "timeseries", - prediction_zero = p0_ts, + phi0 = p0_ts, group = group_ts, iterative = FALSE ) @@ -905,7 +905,7 @@ explanation_independence <- explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -933,7 +933,7 @@ explanation_empirical <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -961,7 +961,7 @@ explanation_gaussian_1e1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e1, MSEv_uniform_comb_weights = TRUE ) @@ -989,7 +989,7 @@ explanation_gaussian_1e2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -1017,7 +1017,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("gaussian", "empirical", "independence"), - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -1213,7 +1213,7 @@ ex <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, iterative_args = list(convergence_tol = 0.1) ) @@ -1299,7 +1299,7 @@ explanation_par <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) #> Note: Feature classes extracted from the model contains NA. #> Assuming feature classes from the data are correct. 
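
As an aside, the hunks above only rename the argument; a minimal runnable sketch of the parallel workflow they belong to, assuming the vignette's `model`, `x_explain`, `x_train`, and `p0` objects are already set up, could look like this:

```r
# Minimal sketch (not part of the patch): parallel computation with the
# renamed `phi0` argument (formerly `prediction_zero`).
library(shapr)
library(future)
future::plan(future::multisession, workers = 2) # parallelize the computation
explanation_par <- explain(
  model = model,
  x_explain = x_explain,
  x_train = x_train,
  approach = "empirical",
  phi0 = p0
)
future::plan(future::sequential) # reset to sequential computation
```
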
@@ -1378,7 +1378,7 @@ ex_progress <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) handlers("progress") @@ -1463,7 +1463,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("empirical", "copula", "gaussian"), - prediction_zero = p0 + phi0 = p0 ) #> Note: Feature classes extracted from the model contains NA. #> Assuming feature classes from the data are correct. @@ -1500,7 +1500,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("ctree", "ctree", "empirical"), - prediction_zero = p0 + phi0 = p0 ) #> Note: Feature classes extracted from the model contains NA. #> Assuming feature classes from the data are correct. @@ -1543,7 +1543,7 @@ explanation_group <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, group = group_list, iterative = FALSE ) @@ -1680,7 +1680,7 @@ explanation_custom <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, predict_model = MY_predict_model, get_model_specs = MY_get_model_specs ) @@ -1724,7 +1724,7 @@ explanation_custom_minimal <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, predict_model = MY_MINIMAL_predict_model ) #> Note: You passed a model to explain() which is not natively supported, and did not supply a 'get_model_specs' function to explain(). @@ -1794,7 +1794,7 @@ explanation_tidymodels <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) #> Success with message: @@ -1842,7 +1842,7 @@ explanation_vaeac <- explain( x_explain = x_explain, x_train = x_train, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 100, vaeac.width = 16, vaeac.depth = 2, @@ -1887,7 +1887,7 @@ explanation_vaeac_early_stop <- explain( x_explain = x_explain, x_train = x_train, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 100, vaeac.width = 16, vaeac.depth = 2, @@ -1971,7 +1971,7 @@ ex_init <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 20, iterative = TRUE ) @@ -2009,7 +2009,7 @@ ex_further <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 25, iterative_args = list(convergence_tol = 0.005), # Decrease the convergence threshold prev_shapr_object = ex_init @@ -2039,7 +2039,7 @@ ex_even_further <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = NULL, prev_shapr_object = ex_further$saving_path ) @@ -2098,7 +2098,7 @@ must also be 2, because at time $t = 1$ there was only a single observation available. Since the data is stationary, the mean of the data is used as value of -`prediction_zero` (i.e. $\phi_0$). This can however be chosen +`phi0` (i.e. $\phi_0$). This can however be chosen differently depending on the data and application. For a multivariate model such as a VAR (Vector AutoRegressive model), it @@ -2130,7 +2130,7 @@ explanation_forecast <- explain_forecast( explain_y_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE ) #> Note: Feature names extracted from the model contains NA. 
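
Since `phi0` must supply one baseline per forecast step (its length must equal `horizon`), a hedged sketch of the kind of call this hunk updates, assuming an AR(2) fit to the vignette's `data$Temp` series; the `train_idx` and `explain_idx` values here are illustrative assumptions, not taken from the patch:

```r
# Minimal sketch: length(phi0) must equal `horizon`.
library(shapr)
model_ar <- ar(data$Temp, order.max = 2, aic = FALSE) # AR(2) fit via stats::ar
explanation_forecast <- explain_forecast(
  model = model_ar,
  y = data[, "Temp"],
  train_idx = 2:152,   # assumed training window
  explain_idx = 153,   # assumed forecast origin
  explain_y_lags = 2,
  horizon = 2,
  approach = "empirical",
  phi0 = rep(mean(data$Temp), 2), # one baseline per forecast horizon
  group_lags = FALSE
)
```
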
@@ -2200,8 +2200,8 @@ that we want to explain a forecast made from time point 153 in the data. The argument `horizon` is set to 2 in order to explain a forecast of length 2. -The argument `prediction_zero` is set to the mean of the time series, -and is repeated two times. Each value of `prediction_zero` is the +The argument `phi0` is set to the mean of the time series, +and is repeated two times. Each value of `phi0` is the baseline for each forecast horizon. In our example, we assume that given no effect from the two lags, the temperature would just be the average during the observed period. Finally, we opt to not group the lags by @@ -2220,7 +2220,7 @@ explanation_forecast <- explain_forecast( explain_y_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = rep(mean(data$Temp), 2), + phi0 = rep(mean(data$Temp), 2), group_lags = FALSE ) #> Note: Feature names extracted from the model contains NA. @@ -2289,7 +2289,7 @@ explanation_forecast <- explain_forecast( explain_xreg_lags = 1, horizon = 2, approach = "empirical", - prediction_zero = rep(mean(data_fit$Temp), 2), + phi0 = rep(mean(data_fit$Temp), 2), group_lags = FALSE ) #> Note: Feature names extracted from the model contains NA. diff --git a/vignettes/understanding_shapr.Rmd.orig b/vignettes/understanding_shapr.Rmd.orig index 30ce83a7c..9eb39feb7 100644 --- a/vignettes/understanding_shapr.Rmd.orig +++ b/vignettes/understanding_shapr.Rmd.orig @@ -485,7 +485,7 @@ explanation <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) @@ -524,7 +524,7 @@ explanation_plot <- explain( x_explain = x_explain_many, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) plot(explanation_plot, plot_type = "beeswarm") @@ -565,7 +565,7 @@ explanation_lm_cat <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) @@ -587,7 +587,7 @@ explanation_ctree <- explain( x_explain = x_explain_cat, x_train = x_train_cat, approach = "ctree", - prediction_zero = p0, + phi0 = p0, ctree.mincriterion = 0.80, ctree.minsplit = 20, ctree.minbucket = 20, @@ -628,7 +628,7 @@ explanation_cat_method <- explain( x_explain = x_explain_all_cat, x_train = x_train_all_cat, approach = "categorical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) ``` @@ -689,7 +689,7 @@ explanation_timeseries <- explain( x_explain = x_explain_ts, x_train = x_train_ts, approach = "timeseries", - prediction_zero = p0_ts, + phi0 = p0_ts, group = group_ts, iterative = FALSE ) @@ -799,7 +799,7 @@ explanation_independence <- explain( x_explain = x_explain, x_train = x_train, approach = "independence", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -810,7 +810,7 @@ explanation_empirical <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -821,7 +821,7 @@ explanation_gaussian_1e1 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e1, MSEv_uniform_comb_weights = TRUE ) @@ -832,7 +832,7 @@ explanation_gaussian_1e2 <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -843,7 +843,7 @@ explanation_combined <- explain( 
x_explain = x_explain, x_train = x_train, approach = c("gaussian", "empirical", "independence"), - prediction_zero = p0, + phi0 = p0, n_MC_samples = 1e2, MSEv_uniform_comb_weights = TRUE ) @@ -995,7 +995,7 @@ ex <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, iterative = TRUE, iterative_args = list(convergence_tol = 0.1) ) @@ -1057,7 +1057,7 @@ explanation_par <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) future::plan(sequential) # To return to non-parallel computation @@ -1118,7 +1118,7 @@ ex_progress <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0 + phi0 = p0 ) handlers("progress") @@ -1202,7 +1202,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("empirical", "copula", "gaussian"), - prediction_zero = p0 + phi0 = p0 ) # Plot the resulting explanations for observations 1 and 6, excluding # the no-covariate effect @@ -1219,7 +1219,7 @@ explanation_combined <- explain( x_explain = x_explain, x_train = x_train, approach = c("ctree", "ctree", "empirical"), - prediction_zero = p0 + phi0 = p0 ) ``` @@ -1244,7 +1244,7 @@ explanation_group <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, group = group_list, iterative = FALSE ) @@ -1351,7 +1351,7 @@ explanation_custom <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, predict_model = MY_predict_model, get_model_specs = MY_get_model_specs ) @@ -1375,7 +1375,7 @@ explanation_custom_minimal <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, predict_model = MY_MINIMAL_predict_model ) @@ -1424,7 +1424,7 @@ explanation_tidymodels <- explain( x_explain = x_explain, x_train = x_train, approach = "empirical", - prediction_zero = p0, + phi0 = p0, iterative = FALSE ) @@ -1455,7 +1455,7 @@ explanation_vaeac <- explain( x_explain = x_explain, x_train = x_train, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 100, vaeac.width = 16, vaeac.depth = 2, @@ -1491,7 +1491,7 @@ explanation_vaeac_early_stop <- explain( x_explain = x_explain, x_train = x_train, approach = "vaeac", - prediction_zero = p0, + phi0 = p0, n_MC_samples = 100, vaeac.width = 16, vaeac.depth = 2, @@ -1563,7 +1563,7 @@ ex_init <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 20, iterative = TRUE ) @@ -1574,7 +1574,7 @@ ex_further <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = 25, iterative_args = list(convergence_tol = 0.005), # Decrease the convergence threshold prev_shapr_object = ex_init @@ -1589,7 +1589,7 @@ ex_even_further <- explain( x_explain = x_explain, x_train = x_train, approach = "gaussian", - prediction_zero = p0, + phi0 = p0, max_n_coalitions = NULL, prev_shapr_object = ex_further$saving_path ) @@ -1633,7 +1633,7 @@ must also be 2, because at time $t = 1$ there was only a single observation available. Since the data is stationary, the mean of the data is used as value of -`prediction_zero` (i.e. $\phi_0$). This can however be chosen +`phi0` (i.e. $\phi_0$). This can however be chosen differently depending on the data and application. 
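
To make the baseline choice described above concrete, a small sketch: the mean is used because the series is stationary (`data$Temp` is the series from the surrounding examples), but other summaries such as the median would be equally valid.

```r
# One baseline value per forecast horizon
horizon <- 2
p0 <- rep(mean(data$Temp), horizon) # stationary series: mean as baseline
```
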
For a multivariate model such as a VAR (Vector AutoRegressive model), it @@ -1664,7 +1664,7 @@ explanation_forecast <- explain_forecast( explain_y_lags = 2, horizon = 3, approach = "empirical", - prediction_zero = p0_ar, + phi0 = p0_ar, group_lags = FALSE ) explanation_forecast @@ -1712,8 +1712,8 @@ that we want to explain a forecast made from time point 153 in the data. The argument `horizon` is set to 2 in order to explain a forecast of length 2. -The argument `prediction_zero` is set to the mean of the time series, -and is repeated two times. Each value of `prediction_zero` is the +The argument `phi0` is set to the mean of the time series, +and is repeated two times. Each value of `phi0` is the baseline for each forecast horizon. In our example, we assume that given no effect from the two lags, the temperature would just be the average during the observed period. Finally, we opt to not group the lags by @@ -1731,7 +1731,7 @@ explanation_forecast <- explain_forecast( explain_y_lags = 2, horizon = 2, approach = "empirical", - prediction_zero = rep(mean(data$Temp), 2), + phi0 = rep(mean(data$Temp), 2), group_lags = FALSE ) @@ -1784,7 +1784,7 @@ explanation_forecast <- explain_forecast( explain_xreg_lags = 1, horizon = 2, approach = "empirical", - prediction_zero = rep(mean(data_fit$Temp), 2), + phi0 = rep(mean(data_fit$Temp), 2), group_lags = FALSE ) diff --git a/vignettes/understanding_shapr_asymmetric_causal.Rmd b/vignettes/understanding_shapr_asymmetric_causal.Rmd index 711654140..cd1e3e55c 100644 --- a/vignettes/understanding_shapr_asymmetric_causal.Rmd +++ b/vignettes/understanding_shapr_asymmetric_causal.Rmd @@ -309,7 +309,7 @@ model <- xgboost::xgboost( ) # Save the phi0 -prediction_zero <- mean(y_train) +phi0 <- mean(y_train) # Look at the root mean squared error sqrt(mean((predict(model, x_explain) - y_explain)^2)) @@ -413,7 +413,7 @@ explanation_sym_con[["gaussian"]] <- explain( x_train = x_train, x_explain = x_explain, approach = "gaussian", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1000, asymmetric = FALSE, # Default value (TRUE will give the same since `causal_ordering = NULL`) causal_ordering = NULL, # Default value @@ -452,7 +452,7 @@ explanation_sym_con[["ctree"]] <- explain( x_train = x_train, x_explain = x_explain, approach = "ctree", - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1000, asymmetric = FALSE, # Default value (TRUE will give the same since `causal_ordering = NULL`) causal_ordering = NULL, # Default value @@ -493,7 +493,7 @@ explanation_sym_con[["xgboost"]] <- explain( model = model, x_train = x_train, x_explain = x_explain, - prediction_zero = prediction_zero, + phi0 = phi0, approach = "regression_separate", regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"), asymmetric = FALSE, # Default value (TRUE will give the same as `causal_ordering = NULL`) @@ -584,7 +584,7 @@ explanation_asym_con[["gaussian"]] <- explain( model = model, x_train = x_train, x_explain = x_explain, - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1000, approach = "gaussian", paired_shap_sampling = FALSE, @@ -617,7 +617,7 @@ explanation_asym_con[["gaussian_non_iterative"]] <- explain( model = model, x_train = x_train, x_explain = x_explain, - prediction_zero = prediction_zero, + phi0 = phi0, n_MC_samples = 1000, approach = "gaussian", paired_shap_sampling = FALSE, @@ -650,7 +650,7 @@ explanation_asym_con[["ctree"]] <- explain( model = model, x_train = x_train, x_explain = x_explain, - 
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "ctree",
   paired_shap_sampling = FALSE,
@@ -683,7 +683,7 @@ explanation_asym_con[["xgboost"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   paired_shap_sampling = FALSE,
@@ -855,7 +855,7 @@ explanation_sym_marg[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   asymmetric = FALSE,
@@ -894,7 +894,7 @@ explanation_sym_marg[["independence_marg"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "independence",
   asymmetric = FALSE,
@@ -933,7 +933,7 @@ explanation_sym_marg[["independence_con"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "independence"
 )
@@ -1009,7 +1009,7 @@ explanation_sym_cau[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   asymmetric = FALSE,
@@ -1047,7 +1047,7 @@ explanation_sym_cau[["copula"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "copula",
   asymmetric = FALSE,
@@ -1110,7 +1110,7 @@ explanation_asym_cau[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   paired_shap_sampling = FALSE,
@@ -1145,7 +1145,7 @@ explanation_asym_cau[["copula"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "copula",
   paired_shap_sampling = FALSE,
@@ -1183,7 +1183,7 @@ explanation_asym_cau[["ctree"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "ctree",
   paired_shap_sampling = FALSE,
@@ -1218,7 +1218,7 @@ explanation_asym_cau[["vaeac"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "vaeac",
   vaeac.epochs = 20,
@@ -1590,7 +1590,7 @@ explanation_n_coal[["sym_cau_gaussian_64"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(1, 2:3, 4:7),
   confounding = c(FALSE, TRUE, FALSE),
@@ -1625,7 +1625,7 @@ explanation_n_coal[["asym_cau_gaussian_10"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = list(1, 2:3, 4:7),
   confounding = c(FALSE, TRUE, FALSE),
@@ -1761,7 +1761,7 @@ explanation_group_gaussian[["symmetric_marginal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(seq(length(group_list))), # or `NULL`
   confounding = TRUE,
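The asymmetric and causal calls above all share the same causal specification; the following sketch collects it in one place (the argument values are taken verbatim from the hunks in this file, while `model`, `x_train`, `x_explain`, and `phi0` are assumed from the vignette setup):

```r
# Hedged sketch of the causal specification shared by the calls above:
# a three-component partial order over the seven features, with
# confounding assumed only within the second component.
causal_ordering <- list(1, 2:3, 4:7) # feature indices per causal group
confounding <- c(FALSE, TRUE, FALSE) # one flag per ordering component
explanation_asym_cau_sketch <- explain(
  model = model,
  x_train = x_train,
  x_explain = x_explain,
  approach = "gaussian",
  phi0 = phi0,
  asymmetric = TRUE,
  causal_ordering = causal_ordering,
  confounding = confounding
)
```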
@@ -1801,7 +1801,7 @@ explanation_group_gaussian[["symmetric_conditional"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(seq(length(group_list))), # or `NULL`
   confounding = NULL,
@@ -1839,7 +1839,7 @@ explanation_group_gaussian[["asymmetric_conditional"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = causal_ordering_group,
   confounding = NULL,
@@ -1874,7 +1874,7 @@ explanation_group_gaussian[["symmetric_causal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = causal_ordering_group,
   confounding = confounding,
@@ -1914,7 +1914,7 @@ explanation_group_gaussian[["asymmetric_causal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = causal_ordering_group,
   confounding = confounding,
diff --git a/vignettes/understanding_shapr_asymmetric_causal.Rmd.orig b/vignettes/understanding_shapr_asymmetric_causal.Rmd.orig
index 66f19c49d..4f97fb010 100644
--- a/vignettes/understanding_shapr_asymmetric_causal.Rmd.orig
+++ b/vignettes/understanding_shapr_asymmetric_causal.Rmd.orig
@@ -311,7 +311,7 @@ model <- xgboost::xgboost(
 )

 # Save the phi0
-prediction_zero <- mean(y_train)
+phi0 <- mean(y_train)

 # Look at the root mean squared error
 sqrt(mean((predict(model, x_explain) - y_explain)^2))
@@ -409,7 +409,7 @@ explanation_sym_con[["gaussian"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   asymmetric = FALSE, # Default value (TRUE will give the same since `causal_ordering = NULL`)
   causal_ordering = NULL, # Default value
@@ -421,7 +421,7 @@ explanation_sym_con[["ctree"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "ctree",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   asymmetric = FALSE, # Default value (TRUE will give the same since `causal_ordering = NULL`)
   causal_ordering = NULL, # Default value
@@ -432,7 +432,7 @@ explanation_sym_con[["xgboost"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   asymmetric = FALSE, # Default value (TRUE will give the same as `causal_ordering = NULL`)
@@ -481,7 +481,7 @@ explanation_asym_con[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   paired_shap_sampling = FALSE,
@@ -494,7 +494,7 @@ explanation_asym_con[["gaussian_non_iterative"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   paired_shap_sampling = FALSE,
@@ -508,7 +508,7 @@ explanation_asym_con[["ctree"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "ctree",
   paired_shap_sampling = FALSE,
@@ -521,7 +521,7 @@ explanation_asym_con[["xgboost"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   paired_shap_sampling = FALSE,
@@ -599,7 +599,7 @@ explanation_sym_marg[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   asymmetric = FALSE,
@@ -612,7 +612,7 @@ explanation_sym_marg[["independence_marg"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "independence",
   asymmetric = FALSE,
@@ -625,7 +625,7 @@ explanation_sym_marg[["independence_con"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "independence"
 )
@@ -669,7 +669,7 @@ explanation_sym_cau[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   asymmetric = FALSE,
@@ -687,7 +687,7 @@ explanation_sym_cau[["copula"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "copula",
   asymmetric = FALSE,
@@ -717,7 +717,7 @@ explanation_asym_cau[["gaussian"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "gaussian",
   paired_shap_sampling = FALSE,
@@ -731,7 +731,7 @@ explanation_asym_cau[["copula"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "copula",
   paired_shap_sampling = FALSE,
@@ -745,7 +745,7 @@ explanation_asym_cau[["ctree"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "ctree",
   paired_shap_sampling = FALSE,
@@ -759,7 +759,7 @@ explanation_asym_cau[["vaeac"]] <- explain(
   model = model,
   x_train = x_train,
   x_explain = x_explain,
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 1000,
   approach = "vaeac",
   vaeac.epochs = 20,
@@ -1086,7 +1086,7 @@ explanation_n_coal[["sym_cau_gaussian_64"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(1, 2:3, 4:7),
   confounding = c(FALSE, TRUE, FALSE),
@@ -1098,7 +1098,7 @@ explanation_n_coal[["asym_cau_gaussian_10"]] <- explain(
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = list(1, 2:3, 4:7),
   confounding = c(FALSE, TRUE, FALSE),
@@ -1171,7 +1171,7 @@ explanation_group_gaussian[["symmetric_marginal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(seq(length(group_list))), # or `NULL`
   confounding = TRUE,
@@ -1185,7 +1185,7 @@ explanation_group_gaussian[["symmetric_conditional"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = list(seq(length(group_list))), # or `NULL`
   confounding = NULL,
@@ -1199,7 +1199,7 @@ explanation_group_gaussian[["asymmetric_conditional"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = causal_ordering_group,
   confounding = NULL,
@@ -1214,7 +1214,7 @@ explanation_group_gaussian[["symmetric_causal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = FALSE,
   causal_ordering = causal_ordering_group,
   confounding = confounding,
@@ -1228,7 +1228,7 @@ explanation_group_gaussian[["asymmetric_causal"]] <-
   x_train = x_train,
   x_explain = x_explain,
   approach = "gaussian",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   asymmetric = TRUE,
   causal_ordering = causal_ordering_group,
   confounding = confounding,
diff --git a/vignettes/understanding_shapr_regression.Rmd b/vignettes/understanding_shapr_regression.Rmd
index 788f33526..84cd223d7 100644
--- a/vignettes/understanding_shapr_regression.Rmd
+++ b/vignettes/understanding_shapr_regression.Rmd
@@ -263,7 +263,7 @@ explanation_list$MC_empirical <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "empirical",
-  prediction_zero = p0
+  phi0 = p0
 )
 #> Note: Feature classes extracted from the model contains NA.
 #> Assuming feature classes from the data are correct.
@@ -295,7 +295,7 @@ explanation_list$sep_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg()
 )
@@ -368,7 +368,7 @@ explanation_list$sep_pcr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -404,7 +404,7 @@ explanation_list$sep_splines <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -475,7 +475,7 @@ explanation_list$sep_reicpe_example <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = regression.recipe_func
@@ -548,7 +548,7 @@ explanation_list$sep_tree_stump <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = 1,
@@ -581,7 +581,7 @@ explanation_list$sep_tree_default <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -698,7 +698,7 @@ explanation_list$sep_tree_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"
@@ -729,7 +729,7 @@ explanation_list$sep_tree_cv_2 <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -777,7 +777,7 @@ explanation_list$sep_rf <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -804,7 +804,7 @@ explanation_list$sep_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   verbose = c("basic","vS_details"), # To get printouts
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(
@@ -1033,7 +1033,7 @@ explanation_list$sep_xgboost <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression")
 )
@@ -1060,7 +1060,7 @@ explanation_list$sep_xgboost_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(trees = hardhat::tune(), engine = "xgboost", mode = "regression"),
@@ -1091,7 +1091,7 @@ explanation_list$sep_xgboost_cv_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(trees = hardhat::tune(), engine = "xgboost", mode = "regression"),
@@ -1122,7 +1122,7 @@ explanation_list$sep_xgboost_cv_2_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -1229,7 +1229,7 @@ explanation_list$sur_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg()
 )
@@ -1256,7 +1256,7 @@ explanation_list$sur_xgboost <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression")
 )
@@ -1283,7 +1283,7 @@ explanation_list$sur_xgboost_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -1317,7 +1317,7 @@ explanation_list$sur_rf <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -1344,7 +1344,7 @@ explanation_list$sur_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -1401,7 +1401,7 @@ explanation_list$sur_rf_cv_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
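As a compact reference for the two regression paradigms this rename touches, a minimal side-by-side sketch (arguments copied from the hunks above; `model`, `x_explain`, `x_train`, and `p0` are assumed from the vignette setup):

```r
# "regression_separate" fits one regression model per coalition, while
# "regression_surrogate" fits a single model across all coalitions.
exp_sep <- explain(
  model = model, x_explain = x_explain, x_train = x_train, phi0 = p0,
  approach = "regression_separate",
  regression.model = parsnip::linear_reg()
)
exp_sur <- explain(
  model = model, x_explain = x_explain, x_train = x_train, phi0 = p0,
  approach = "regression_surrogate",
  regression.model = parsnip::linear_reg()
)
```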
@@ -1625,7 +1625,7 @@ explanation_list$sep_ppr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = ppr_reg(num_terms = 2)
 )
@@ -1652,7 +1652,7 @@ explanation_list$sep_ppr_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = ppr_reg(num_terms = hardhat::tune()),
   regression.tune_values = dials::grid_regular(dials::num_terms(c(1, 4)), levels = 3),
@@ -1681,7 +1681,7 @@ explanation_list$sur_ppr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = ppr_reg(num_terms = 3)
 )
@@ -1708,7 +1708,7 @@ explanation_list$sur_ppr_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = ppr_reg(num_terms = hardhat::tune()),
   regression.tune_values = dials::grid_regular(dials::num_terms(c(1, 8)), levels = 4),
@@ -1796,7 +1796,7 @@ explanation_list_MC$MC_independence <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "independence",
-  prediction_zero = p0
+  phi0 = p0
 )
 #> Note: Feature classes extracted from the model contains NA.
 #> Assuming feature classes from the data are correct.
@@ -1825,7 +1825,7 @@ explanation_list_MC$MC_gaussian <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "gaussian",
-  prediction_zero = p0
+  phi0 = p0
 )
 #> Note: Feature classes extracted from the model contains NA.
 #> Assuming feature classes from the data are correct.
@@ -1851,7 +1851,7 @@ explanation_list_MC$MC_copula <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "copula",
-  prediction_zero = p0
+  phi0 = p0
 )
 #> Note: Feature classes extracted from the model contains NA.
 #> Assuming feature classes from the data are correct.
@@ -1877,7 +1877,7 @@ explanation_list_MC$MC_ctree <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "ctree",
-  prediction_zero = p0
+  phi0 = p0
 )
 #> Note: Feature classes extracted from the model contains NA.
 #> Assuming feature classes from the data are correct.
@@ -1903,7 +1903,7 @@ explanation_list_MC$MC_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = p0,
+  phi0 = p0,
   vaeac.epochs = 10
 )
 #> Note: Feature classes extracted from the model contains NA.
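Because only the argument name changes, the Monte Carlo-based calls above differ solely in `approach`; a hedged sketch that generates them in a loop (the `model`, `x_explain`, `x_train`, and `p0` objects are assumed from the vignette setup):

```r
# Loop over the Monte Carlo-based approaches; each call is identical
# apart from the `approach` string, as in the hunks above.
explanation_list_MC <- list()
for (app in c("independence", "gaussian", "copula", "ctree")) {
  explanation_list_MC[[paste0("MC_", app)]] <- explain(
    model = model,
    x_explain = x_explain,
    x_train = x_train,
    approach = app,
    phi0 = p0
  )
}
```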
@@ -2089,7 +2089,7 @@ explanation_list_mixed$MC_independence <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "independence"
 )
 #> Success with message:
@@ -2113,7 +2113,7 @@ explanation_list_mixed$MC_ctree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "ctree"
 )
 #> Success with message:
@@ -2137,7 +2137,7 @@ explanation_list_mixed$MC_vaeac <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "vaeac"
 )
 #> Success with message:
@@ -2172,7 +2172,7 @@ explanation_list_mixed$sep_lm <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg()
 )
@@ -2197,7 +2197,7 @@ explanation_list_mixed$sep_splines <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -2225,7 +2225,7 @@ explanation_list_mixed$sep_tree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -2250,7 +2250,7 @@ explanation_list_mixed$sep_tree_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -2283,7 +2283,7 @@ explanation_list_mixed$sep_rf <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -2308,7 +2308,7 @@ explanation_list_mixed$sep_rf_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -2340,7 +2340,7 @@ explanation_list_mixed$sep_xgboost <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   regression.recipe_func = function(regression_recipe) {
@@ -2368,7 +2368,7 @@ explanation_list_mixed$sep_xgboost_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -2413,7 +2413,7 @@ explanation_list_mixed$sur_lm <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg()
 )
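The mixed-data examples above use only the Monte Carlo approaches that handle categorical features directly; a hedged sketch in the same loop style (the `*_cat` objects are assumed from the vignette setup):

```r
# independence, ctree, and vaeac are the Monte Carlo approaches applied
# to the mixed continuous/categorical data in the hunks above.
explanation_list_mixed <- list()
for (app in c("independence", "ctree", "vaeac")) {
  explanation_list_mixed[[paste0("MC_", app)]] <- explain(
    model = model_cat,
    x_explain = x_explain_cat,
    x_train = x_train_cat,
    phi0 = p0_cat,
    approach = app
  )
}
```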
@@ -2439,7 +2439,7 @@ explanation_list_mixed$sur_splines <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(recipe) {
@@ -2467,7 +2467,7 @@ explanation_list_mixed$sur_tree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -2492,7 +2492,7 @@ explanation_list_mixed$sur_tree_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -2525,7 +2525,7 @@ explanation_list_mixed$sur_rf <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -2550,7 +2550,7 @@ explanation_list_mixed$sur_rf_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -2579,7 +2579,7 @@ explanation_list_mixed$sur_xgboost <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   regression.recipe_func = function(regression_recipe) {
@@ -2607,7 +2607,7 @@ explanation_list_mixed$sur_xgboost_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -2739,7 +2739,7 @@ explanation_list_str$sep_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::linear_reg()"
 )
@@ -2765,7 +2765,7 @@ explanation_list_str$sep_pcr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::linear_reg()",
   regression.recipe_func = "function(regression_recipe) {
@@ -2794,7 +2794,7 @@ explanation_list_str$sep_splines <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = "function(regression_recipe) {
@@ -2823,7 +2823,7 @@ explanation_list_str$sep_tree_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::decision_tree(
     tree_depth = hardhat::tune(), engine = 'rpart', mode = 'regression'
@@ -2854,7 +2854,7 @@ explanation_list_str$sep_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = 'ranger', mode = 'regression'
@@ -2888,7 +2888,7 @@ explanation_list_str$sur_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = "parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = 'ranger', mode = 'regression'
diff --git a/vignettes/understanding_shapr_regression.Rmd.orig b/vignettes/understanding_shapr_regression.Rmd.orig
index c9f5f53b7..5c170fcd5 100644
--- a/vignettes/understanding_shapr_regression.Rmd.orig
+++ b/vignettes/understanding_shapr_regression.Rmd.orig
@@ -272,7 +272,7 @@ explanation_list$MC_empirical <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "empirical",
-  prediction_zero = p0
+  phi0 = p0
 )
 ```
@@ -286,7 +286,7 @@ explanation_list$sep_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg()
 )
@@ -338,7 +338,7 @@ explanation_list$sep_pcr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -356,7 +356,7 @@ explanation_list$sep_splines <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -409,7 +409,7 @@ explanation_list$sep_reicpe_example <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = regression.recipe_func
@@ -452,7 +452,7 @@ explanation_list$sep_tree_stump <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = 1,
@@ -468,7 +468,7 @@ explanation_list$sep_tree_default <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -552,7 +552,7 @@ explanation_list$sep_tree_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(), engine = "rpart", mode = "regression"
@@ -566,7 +566,7 @@ explanation_list$sep_tree_cv_2 <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -596,7 +596,7 @@ explanation_list$sep_rf <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -606,7 +606,7 @@ explanation_list$sep_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   verbose = c("basic","vS_details"), # To get printouts
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(
@@ -680,7 +680,7 @@ explanation_list$sep_xgboost <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression")
 )
@@ -690,7 +690,7 @@ explanation_list$sep_xgboost_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(trees = hardhat::tune(), engine = "xgboost", mode = "regression"),
@@ -704,7 +704,7 @@ explanation_list$sep_xgboost_cv_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(trees = hardhat::tune(), engine = "xgboost", mode = "regression"),
@@ -718,7 +718,7 @@ explanation_list$sep_xgboost_cv_2_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -790,7 +790,7 @@ explanation_list$sur_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg()
 )
@@ -800,7 +800,7 @@ explanation_list$sur_xgboost <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression")
 )
@@ -810,7 +810,7 @@ explanation_list$sur_xgboost_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -827,7 +827,7 @@ explanation_list$sur_rf <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -837,7 +837,7 @@ explanation_list$sur_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -876,7 +876,7 @@ explanation_list$sur_rf_cv_par <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -1055,7 +1055,7 @@ explanation_list$sep_ppr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = ppr_reg(num_terms = 2)
 )
@@ -1065,7 +1065,7 @@ explanation_list$sep_ppr_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = ppr_reg(num_terms = hardhat::tune()),
   regression.tune_values = dials::grid_regular(dials::num_terms(c(1, 4)), levels = 3),
@@ -1077,7 +1077,7 @@ explanation_list$sur_ppr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = ppr_reg(num_terms = 3)
 )
@@ -1087,7 +1087,7 @@ explanation_list$sur_ppr_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = ppr_reg(num_terms = hardhat::tune()),
   regression.tune_values = dials::grid_regular(dials::num_terms(c(1, 8)), levels = 4),
@@ -1128,7 +1128,7 @@ explanation_list_MC$MC_independence <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "independence",
-  prediction_zero = p0
+  phi0 = p0
 )

 # Copy the Shapley value explanations for the empirical method
@@ -1140,7 +1140,7 @@ explanation_list_MC$MC_gaussian <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "gaussian",
-  prediction_zero = p0
+  phi0 = p0
 )

 # Compute the Shapley value explanations using the copula method
@@ -1149,7 +1149,7 @@ explanation_list_MC$MC_copula <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "copula",
-  prediction_zero = p0
+  phi0 = p0
 )

 # Compute the Shapley value explanations using the ctree method
@@ -1158,7 +1158,7 @@ explanation_list_MC$MC_ctree <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "ctree",
-  prediction_zero = p0
+  phi0 = p0
 )

 # Compute the Shapley value explanations using the vaeac method
@@ -1167,7 +1167,7 @@ explanation_list_MC$MC_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = p0,
+  phi0 = p0,
   vaeac.epochs = 10
 )
@@ -1281,7 +1281,7 @@ explanation_list_mixed$MC_independence <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "independence"
 )
@@ -1289,7 +1289,7 @@ explanation_list_mixed$MC_ctree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "ctree"
 )
@@ -1297,7 +1297,7 @@ explanation_list_mixed$MC_vaeac <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "vaeac"
 )
 ```
@@ -1315,7 +1315,7 @@ explanation_list_mixed$sep_lm <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg()
 )
@@ -1325,7 +1325,7 @@ explanation_list_mixed$sep_splines <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(regression_recipe) {
@@ -1338,7 +1338,7 @@ explanation_list_mixed$sep_tree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -1348,7 +1348,7 @@ explanation_list_mixed$sep_tree_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -1366,7 +1366,7 @@ explanation_list_mixed$sep_rf <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -1376,7 +1376,7 @@ explanation_list_mixed$sep_rf_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -1393,7 +1393,7 @@ explanation_list_mixed$sep_xgboost <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   regression.recipe_func = function(regression_recipe) {
@@ -1406,7 +1406,7 @@ explanation_list_mixed$sep_xgboost_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_separate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -1435,7 +1435,7 @@ explanation_list_mixed$sur_lm <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg()
 )
@@ -1446,7 +1446,7 @@ explanation_list_mixed$sur_splines <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = function(recipe) {
@@ -1459,7 +1459,7 @@ explanation_list_mixed$sur_tree <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::decision_tree(engine = "rpart", mode = "regression")
 )
@@ -1469,7 +1469,7 @@ explanation_list_mixed$sur_tree_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::decision_tree(
     tree_depth = hardhat::tune(),
@@ -1487,7 +1487,7 @@ explanation_list_mixed$sur_rf <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(engine = "ranger", mode = "regression")
 )
@@ -1497,7 +1497,7 @@ explanation_list_mixed$sur_rf_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = "ranger", mode = "regression"
@@ -1511,7 +1511,7 @@ explanation_list_mixed$sur_xgboost <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(engine = "xgboost", mode = "regression"),
   regression.recipe_func = function(regression_recipe) {
@@ -1524,7 +1524,7 @@ explanation_list_mixed$sur_xgboost_cv <- explain(
   model = model_cat,
   x_explain = x_explain_cat,
   x_train = x_train_cat,
-  prediction_zero = p0_cat,
+  phi0 = p0_cat,
   approach = "regression_surrogate",
   regression.model = parsnip::boost_tree(
     trees = hardhat::tune(),
@@ -1608,7 +1608,7 @@ explanation_list_str$sep_lm <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::linear_reg()"
 )
@@ -1617,7 +1617,7 @@ explanation_list_str$sep_pcr <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::linear_reg()",
   regression.recipe_func = "function(regression_recipe) {
@@ -1629,7 +1629,7 @@ explanation_list_str$sep_splines <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = parsnip::linear_reg(),
   regression.recipe_func = "function(regression_recipe) {
@@ -1641,7 +1641,7 @@ explanation_list_str$sep_tree_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::decision_tree(
     tree_depth = hardhat::tune(), engine = 'rpart', mode = 'regression'
@@ -1655,7 +1655,7 @@ explanation_list_str$sep_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_separate",
   regression.model = "parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = 'ranger', mode = 'regression'
@@ -1672,7 +1672,7 @@ explanation_list_str$sur_rf_cv <- explain(
   model = model,
   x_explain = x_explain,
   x_train = x_train,
-  prediction_zero = p0,
+  phi0 = p0,
   approach = "regression_surrogate",
   regression.model = "parsnip::rand_forest(
     mtry = hardhat::tune(), trees = hardhat::tune(), engine = 'ranger', mode = 'regression'
diff --git a/vignettes/understanding_shapr_vaeac.Rmd b/vignettes/understanding_shapr_vaeac.Rmd
index dcf30355e..91ca7f772 100644
--- a/vignettes/understanding_shapr_vaeac.Rmd
+++ b/vignettes/understanding_shapr_vaeac.Rmd
@@ -135,7 +135,7 @@ model <- xgboost(
 )

 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(y_train)
+phi0 <- mean(y_train)
 ```
@@ -155,7 +155,7 @@ explanation <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = vaeac.epochs,
   vaeac.n_vaeacs_initialize = vaeac.n_vaeacs_initialize
@@ -206,7 +206,7 @@ expl_pretrained_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = explanation$internal$parameters$vaeac
@@ -238,7 +238,7 @@ expl_pretrained_vaeac_path <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = explanation$internal$parameters$vaeac$models$best
@@ -276,7 +276,7 @@ expl_batches_combinations <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_coalitions = 10,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
@@ -331,7 +331,7 @@ expl_batches_combinations_2 <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_coalitions = 10,
   n_MC_samples = n_MC_samples,
   vaeac.n_vaeacs_initialize = 1,
@@ -385,7 +385,7 @@ expl_paired_sampling_TRUE <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = 10,
   vaeac.n_vaeacs_initialize = 1,
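The two `expl_paired_sampling_*` calls around this point compare paired and independent sampling of the vaeac training masks; the toggling flag itself is elided by the hunk context, so the parameter name `vaeac.paired_sampling` in this sketch is an assumption based on the `vaeac.*` naming convention used elsewhere in this vignette:

```r
# Hedged sketch; `vaeac.paired_sampling` is assumed (the actual flag is
# cut off by the hunk context above), and `model` is assumed from the
# vignette setup. The remaining arguments mirror the call above.
expl_paired_sampling_TRUE <- explain(
  model = model,
  x_explain = x_explain,
  x_train = x_train,
  approach = "vaeac",
  phi0 = phi0,
  n_MC_samples = n_MC_samples,
  vaeac.epochs = 10,
  vaeac.n_vaeacs_initialize = 1,
  vaeac.extra_parameters = list(vaeac.paired_sampling = TRUE)
)
```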
@@ -402,7 +402,7 @@ expl_paired_sampling_FALSE <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = 10,
   vaeac.n_vaeacs_initialize = 1,
@@ -467,7 +467,7 @@ expl_with_messages <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   verbose = c("basic","vS_details"),
   vaeac.epochs = 5,
@@ -524,7 +524,7 @@ progressr::with_progress({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   verbose = "vS_details",
   vaeac.epochs = 5,
@@ -583,7 +583,7 @@ expl_little_training <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 3,
   vaeac.n_vaeacs_initialize = 2
@@ -622,7 +622,7 @@ expl_train_more_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_train_more$internal$parameters$vaeac
@@ -654,7 +654,7 @@ expl_train_even_more_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_train_even_more$internal$parameters$vaeac
@@ -725,7 +725,7 @@ expl_early_stopping <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   verbose = c("basic","vS_details"),
   vaeac.epochs = 1000, # Set it to a big number
@@ -827,7 +827,7 @@ expl_early_stopping_train_more <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_early_stopping_train_more$internal$parameters$vaeac
@@ -876,7 +876,7 @@ expl_group <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   group = list(A = c("Temp", "Month"), B = c("Wind", "Solar.R")),
   n_MC_samples = n_MC_samples,
   verbose = "vS_details",
@@ -943,7 +943,7 @@ model <- ranger(as.formula(paste0(y_var, " ~ ", paste0(x_var_cat, collapse = " +
 )

 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(data_train_cat[, get(y_var)])
+phi0 <- mean(data_train_cat[, get(y_var)])
 ```

 Then we compute explanations using the `ctree` and `vaeac` approaches. For the
 `vaeac` approach, we consider two setups: the default architecture, and a
 simpler one without skip connections. We do this
@@ -957,7 +957,7 @@ expl_ctree <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "ctree",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250
 )
 #> Note: Feature classes extracted from the model contains NA.
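Retraining can be skipped entirely by passing an already-fitted vaeac model back in, as the `expl_pretrained_vaeac` and `expl_train_more_vaeac` hunks above do; a minimal sketch (the `model`, data objects, and the earlier `explanation` result are assumed from the vignette setup):

```r
# Reuse the vaeac model stored inside a previous explanation object, so
# only the Monte Carlo sampling and Shapley computations are redone.
expl_pretrained_vaeac <- explain(
  model = model,
  x_explain = x_explain,
  x_train = x_train,
  approach = "vaeac",
  phi0 = phi0,
  n_MC_samples = n_MC_samples,
  vaeac.extra_parameters = list(
    vaeac.pretrained_vaeac_model = explanation$internal$parameters$vaeac
  )
)
```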
@@ -972,7 +972,7 @@ expl_vaeac_with <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 4
@@ -990,7 +990,7 @@ expl_vaeac_without <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 4,
@@ -1091,7 +1091,7 @@ x_explain <- dt_explain[, -1]
 model <- lm(y ~ ., dt_train)

 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(y_train)
+phi0 <- mean(y_train)

 # Fit vaeac model using the CPU
 time_cpu <- system.time({
@@ -1100,7 +1100,7 @@ time_cpu <- system.time({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 100,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 2,
@@ -1115,7 +1115,7 @@ time_cuda <- system.time({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 100,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 2,
diff --git a/vignettes/understanding_shapr_vaeac.Rmd.orig b/vignettes/understanding_shapr_vaeac.Rmd.orig
index 20499c9b7..3d621ff48 100644
--- a/vignettes/understanding_shapr_vaeac.Rmd.orig
+++ b/vignettes/understanding_shapr_vaeac.Rmd.orig
@@ -147,7 +147,7 @@ model <- xgboost(
 )

 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(y_train)
+phi0 <- mean(y_train)
 ```
@@ -166,7 +166,7 @@ explanation <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = vaeac.epochs,
   vaeac.n_vaeacs_initialize = vaeac.n_vaeacs_initialize
@@ -200,7 +200,7 @@ expl_pretrained_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = explanation$internal$parameters$vaeac
@@ -225,7 +225,7 @@ expl_pretrained_vaeac_path <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = explanation$internal$parameters$vaeac$models$best
@@ -256,7 +256,7 @@ expl_batches_combinations <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_coalitions = 10,
   n_MC_samples = n_MC_samples,
   vaeac.extra_parameters = list(
@@ -282,7 +282,7 @@ expl_batches_combinations_2 <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_coalitions = 10,
   n_MC_samples = n_MC_samples,
   vaeac.n_vaeacs_initialize = 1,
@@ -312,7 +312,7 @@ expl_paired_sampling_TRUE <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = 10,
   vaeac.n_vaeacs_initialize = 1,
@@ -324,7 +324,7 @@ expl_paired_sampling_FALSE <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   vaeac.epochs = 10,
   vaeac.n_vaeacs_initialize = 1,
@@ -370,7 +370,7 @@ expl_with_messages <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   verbose = c("basic","vS_details"),
   vaeac.epochs = 5,
@@ -390,7 +390,7 @@ progressr::with_progress({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = n_MC_samples,
   verbose = "vS_details",
   vaeac.epochs = 5,
@@ -423,7 +423,7 @@ expl_little_training <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 3,
   vaeac.n_vaeacs_initialize = 2
@@ -452,7 +452,7 @@ expl_train_more_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_train_more$internal$parameters$vaeac
@@ -479,7 +479,7 @@ expl_train_even_more_vaeac <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_train_even_more$internal$parameters$vaeac
@@ -534,7 +534,7 @@ expl_early_stopping <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   verbose = c("basic","vS_details"),
   vaeac.epochs = 1000, # Set it to a big number
@@ -593,7 +593,7 @@ expl_early_stopping_train_more <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.extra_parameters = list(
     vaeac.pretrained_vaeac_model = expl_early_stopping_train_more$internal$parameters$vaeac
@@ -629,7 +629,7 @@ expl_group <- explain(
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   group = list(A = c("Temp", "Month"), B = c("Wind", "Solar.R")),
   n_MC_samples = n_MC_samples,
   verbose = "vS_details",
@@ -669,7 +669,7 @@ model <- ranger(as.formula(paste0(y_var, " ~ ", paste0(x_var_cat, collapse = " +
 )

 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(data_train_cat[, get(y_var)])
+phi0 <- mean(data_train_cat[, get(y_var)])
 ```

 Then we compute explanations using the `ctree` and `vaeac` approaches. For the
 `vaeac` approach, we consider two setups: the default architecture, and a
 simpler one without skip connections. We do this
@@ -682,7 +682,7 @@ expl_ctree <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "ctree",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250
 )
@@ -692,7 +692,7 @@ expl_vaeac_with <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 4
@@ -704,7 +704,7 @@ expl_vaeac_without <- explain(
   x_explain = x_explain_cat,
   x_train = x_train_cat,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 250,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 4,
@@ -786,7 +786,7 @@ x_explain <- dt_explain[, -1]
 model <- lm(y ~ ., dt_train)
 # Specifying the phi_0, i.e. the expected prediction without any features
-prediction_zero <- mean(y_train)
+phi0 <- mean(y_train)

 # Fit vaeac model using the CPU
 time_cpu <- system.time({
@@ -795,7 +795,7 @@ time_cpu <- system.time({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 100,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 2,
@@ -810,7 +810,7 @@ time_cuda <- system.time({
   x_explain = x_explain,
   x_train = x_train,
   approach = "vaeac",
-  prediction_zero = prediction_zero,
+  phi0 = phi0,
   n_MC_samples = 100,
   vaeac.epochs = 50,
   vaeac.n_vaeacs_initialize = 2,