diff --git a/R-package/R/lgb.cv.R b/R-package/R/lgb.cv.R
index db30bbe9fe02..0690936f5624 100644
--- a/R-package/R/lgb.cv.R
+++ b/R-package/R/lgb.cv.R
@@ -43,18 +43,6 @@ CVBooster <- R6::R6Class(
 #' @param callbacks List of callback functions that are applied at each iteration.
 #' @param reset_data Boolean, setting it to TRUE (not the default value) will transform the booster model
 #'                   into a predictor model which frees up memory and the original datasets
-#' @param ... other parameters, see Parameters.rst for more information. A few key parameters:
-#'            \itemize{
-#'                \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
-#'                \item{\code{num_leaves}: Maximum number of leaves in one tree.}
-#'                \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
-#'                                        overfit when #data is small. Tree still grow by leaf-wise.}
-#'                \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
-#'                                          the number of real CPU cores(\code{parallel::detectCores(logical = FALSE)}),
-#'                                          not the number of threads (most CPU using hyper-threading to generate 2 threads
-#'                                          per CPU core).}
-#'            }
-#'            NOTE: As of v3.3.0, use of \code{...} is deprecated. Add parameters to \code{params} directly.
 #' @inheritSection lgb_shared_params Early Stopping
 #' @return a trained model \code{lgb.CVBooster}.
 #'
@@ -99,7 +87,6 @@ lgb.cv <- function(params = list()
                    , callbacks = list()
                    , reset_data = FALSE
                    , serializable = TRUE
-                   , ...
                    ) {
 
   if (nrounds <= 0L) {
@@ -115,23 +102,12 @@
   }
 
   # Setup temporary variables
-  additional_params <- list(...)
-  params <- append(params, additional_params)
   params$verbose <- verbose
   params <- lgb.check.obj(params = params, obj = obj)
   params <- lgb.check.eval(params = params, eval = eval)
   fobj <- NULL
   eval_functions <- list(NULL)
 
-  if (length(additional_params) > 0L) {
-    warning(paste0(
-      "lgb.cv: Found the following passed through '...': "
-      , paste(names(additional_params), collapse = ", ")
-      , ". These will be used, but in future releases of lightgbm, this warning will become an error. "
-      , "Add these to 'params' instead. See ?lgb.cv for documentation on how to call this function."
-    ))
-  }
-
   # set some parameters, resolving the way they were passed in with other parameters
   # in `params`.
   # this ensures that the model stored with Booster$save() correctly represents
diff --git a/R-package/man/lgb.cv.Rd b/R-package/man/lgb.cv.Rd
index 6a5f18a512be..f240a241b7ac 100644
--- a/R-package/man/lgb.cv.Rd
+++ b/R-package/man/lgb.cv.Rd
@@ -25,8 +25,7 @@ lgb.cv(
   early_stopping_rounds = NULL,
   callbacks = list(),
   reset_data = FALSE,
-  serializable = TRUE,
-  ...
+  serializable = TRUE
 )
 }
 \arguments{
@@ -121,19 +120,6 @@ into a predictor model which frees up memory and the original datasets}
 
 \item{serializable}{whether to make the resulting objects serializable through functions such as
 \code{save} or \code{saveRDS} (see section "Model serialization").}
-
-\item{...}{other parameters, see Parameters.rst for more information. A few key parameters:
-\itemize{
-    \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
-    \item{\code{num_leaves}: Maximum number of leaves in one tree.}
-    \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
-                overfit when #data is small. Tree still grow by leaf-wise.}
-    \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
-                the number of real CPU cores(\code{parallel::detectCores(logical = FALSE)}),
-                not the number of threads (most CPU using hyper-threading to generate 2 threads
-                per CPU core).}
-}
-NOTE: As of v3.3.0, use of \code{...} is deprecated. Add parameters to \code{params} directly.}
 }
 \value{
 a trained model \code{lgb.CVBooster}.
diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R
index 8bd67aac48f9..c85b34a15394 100644
--- a/R-package/tests/testthat/test_basic.R
+++ b/R-package/tests/testthat/test_basic.R
@@ -347,14 +347,17 @@ context("lgb.cv()")
 
 test_that("cv works", {
   dtrain <- lgb.Dataset(train$data, label = train$label)
-  params <- list(objective = "regression", metric = "l2,l1")
+  params <- list(
+    objective = "regression"
+    , metric = "l2,l1"
+    , min_data = 1L
+    , learning_rate = 1.0
+  )
   bst <- lgb.cv(
     params
     , dtrain
     , 10L
     , nfold = 5L
-    , min_data = 1L
-    , learning_rate = 1.0
     , early_stopping_rounds = 10L
   )
   expect_false(is.null(bst$record_evals))
@@ -362,7 +365,11 @@
 
 test_that("lgb.cv() rejects negative or 0 value passed to nrounds", {
   dtrain <- lgb.Dataset(train$data, label = train$label)
-  params <- list(objective = "regression", metric = "l2,l1")
+  params <- list(
+    objective = "regression"
+    , metric = "l2,l1"
+    , min_data = 1L
+  )
   for (nround_value in c(-10L, 0L)) {
     expect_error({
       bst <- lgb.cv(
@@ -370,7 +377,6 @@ test_that("lgb.cv() rejects negative or 0 value passed to nrounds", {
         , dtrain
         , nround_value
         , nfold = 5L
-        , min_data = 1L
       )
     }, "nrounds should be greater than zero")
   }
@@ -388,11 +394,14 @@ test_that("lgb.cv() throws an informative error is 'data' is not an lgb.Dataset
   for (val in bad_values) {
     expect_error({
       bst <- lgb.cv(
-        params = list(objective = "regression", metric = "l2,l1")
+        params = list(
+          objective = "regression"
+          , metric = "l2,l1"
+          , min_data = 1L
+        )
         , data = val
         , 10L
         , nfold = 5L
-        , min_data = 1L
       )
     }, regexp = "'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'", fixed = TRUE)
   }
@@ -409,11 +418,11 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric
     data = dtrain
     , nfold = 5L
     , nrounds = nrounds
-    , num_leaves = 5L
     , params = list(
       objective = "binary"
       , metric = "auc,binary_error"
       , learning_rate = 1.5
+      , num_leaves = 5L
     )
   )
   expect_is(cv_bst, "lgb.CVBooster")
@@ -470,7 +479,11 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea
 
 test_that("lgb.cv() respects showsd argument", {
   dtrain <- lgb.Dataset(train$data, label = train$label)
-  params <- list(objective = "regression", metric = "l2")
+  params <- list(
+    objective = "regression"
+    , metric = "l2"
+    , min_data = 1L
+  )
   nrounds <- 5L
   set.seed(708L)
   bst_showsd <- lgb.cv(
@@ -478,7 +491,6 @@ test_that("lgb.cv() respects showsd argument", {
     , data = dtrain
     , nrounds = nrounds
     , nfold = 3L
-    , min_data = 1L
     , showsd = TRUE
   )
   evals_showsd <- bst_showsd$record_evals[["valid"]][["l2"]]
@@ -488,7 +500,6 @@ test_that("lgb.cv() respects showsd argument", {
     , data = dtrain
     , nrounds = nrounds
     , nfold = 3L
-    , min_data = 1L
     , showsd = FALSE
   )
   evals_no_showsd <- bst_no_showsd$record_evals[["valid"]][["l2"]]
diff --git a/R-package/tests/testthat/test_learning_to_rank.R b/R-package/tests/testthat/test_learning_to_rank.R
index d4f573710cfd..d0966692f5ba 100644
--- a/R-package/tests/testthat/test_learning_to_rank.R
+++ b/R-package/tests/testthat/test_learning_to_rank.R
@@ -78,6 +78,8 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
     , ndcg_at = ndcg_at
     , lambdarank_truncation_level = 3L
     , label_gain = "0,1,3"
+    , min_data = 1L
+    , learning_rate = 0.01
   )
   nfold <- 4L
   nrounds <- 10L
@@ -86,8 +88,6 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
     , data = dtrain
     , nrounds = nrounds
     , nfold = nfold
-    , min_data = 1L
-    , learning_rate = 0.01
   )
   expect_is(cv_bst, "lgb.CVBooster")
   expect_equal(length(cv_bst$boosters), nfold)