[R-package] De-couple evaluation monitoring from booster verbosity #6172

Open · wants to merge 2 commits into base: master
Changes from all commits
9 changes: 7 additions & 2 deletions R-package/R/lgb.cv.R
@@ -46,6 +46,9 @@ CVBooster <- R6::R6Class(
#' @param eval_train_metric \code{boolean}, whether to add the cross validation results on the
#' training data. This parameter defaults to \code{FALSE}. Setting it to \code{TRUE}
#' will increase run time.
+ #' @param print_metrics \code{boolean}, when calculating evaluation metrics after boosting rounds, whether
+ #' to print informational messages with these metrics (see parameters \code{eval_freq},
+ #' \code{eval_train_metric}, and \code{eval}).
#' @inheritSection lgb_shared_params Early Stopping
#' @return a trained model \code{lgb.CVBooster}.
#'
@@ -92,6 +95,7 @@ lgb.cv <- function(params = list()
, reset_data = FALSE
, serializable = TRUE
, eval_train_metric = FALSE
+ , print_metrics = TRUE
) {

if (nrounds <= 0L) {
@@ -244,7 +248,8 @@ lgb.cv <- function(params = list()
}

# Add printing log callback
- if (params[["verbosity"]] > 0L && eval_freq > 0L) {
+ print_metrics <- as.logical(print_metrics)
+ if (print_metrics && eval_freq > 0L) {
callbacks <- add.cb(cb_list = callbacks, cb = cb_print_evaluation(period = eval_freq))
}

@@ -287,7 +292,7 @@ lgb.cv <- function(params = list()
, cb = cb_early_stop(
stopping_rounds = early_stopping_rounds
, first_metric_only = isTRUE(params[["first_metric_only"]])
- , verbose = params[["verbosity"]] > 0L
+ , verbose = print_metrics
)
)
}
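
With this change, per-round evaluation messages are controlled by print_metrics (together with eval_freq, which still gates how often the printing callback fires), while verbosity only governs the booster's own logging. A minimal sketch of the resulting call patterns, assuming this branch of the package is installed; the dataset and parameter values are illustrative, not taken from the PR:

library(lightgbm)
data(mtcars)
dtrain <- lgb.Dataset(as.matrix(mtcars[, -1L]), label = mtcars$mpg)

# booster logs silenced, but per-round metric messages still printed
cv_loud_metrics <- lgb.cv(
    params = list(objective = "regression", metric = "l2", min_data_in_leaf = 5L, verbosity = -1L)
    , data = dtrain
    , nrounds = 5L
    , nfold = 3L
    , print_metrics = TRUE
)

# booster logs enabled, but per-round metric messages suppressed
cv_quiet_metrics <- lgb.cv(
    params = list(objective = "regression", metric = "l2", min_data_in_leaf = 5L, verbosity = 1L)
    , data = dtrain
    , nrounds = 5L
    , nfold = 3L
    , print_metrics = FALSE
)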
7 changes: 6 additions & 1 deletion R-package/man/lgb.cv.Rd

Some generated files are not rendered by default.

32 changes: 31 additions & 1 deletion R-package/tests/testthat/test_basic.R
@@ -3256,7 +3256,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
expect_equal(record_evals[["valid"]][["auc"]][["eval_err"]], list())
}

- .train_for_verbosity_test <- function(train_function, verbose_kwarg, verbose_param) {
+ .train_for_verbosity_test <- function(train_function, verbose_kwarg, verbose_param, print_metrics = FALSE) {
set.seed(708L)
nrounds <- 5L
params <- list(
@@ -3301,6 +3301,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
)
train_kwargs[["nfold"]] <- 3L
train_kwargs[["showsd"]] <- FALSE
train_kwargs[["print_metrics"]] <- print_metrics
}
log_txt <- capture.output({
bst <- do.call(
@@ -3549,6 +3550,7 @@ test_that("lgb.cv() only prints eval metrics when expected to", {
out <- .train_for_verbosity_test(
verbose_kwarg = verbose_keyword_arg
, verbose_param = -1L
+ , print_metrics = FALSE
, train_function = lgb.cv
)
.assert_has_expected_logs(
@@ -3583,6 +3585,7 @@ test_that("lgb.cv() only prints eval metrics when expected to", {
out <- .train_for_verbosity_test(
verbose_kwarg = verbose_keyword_arg
, verbose_param = 1L
+ , print_metrics = TRUE
, train_function = lgb.cv
)
.assert_has_expected_logs(
@@ -3638,6 +3641,7 @@ test_that("lgb.cv() only prints eval metrics when expected to", {
out <- .train_for_verbosity_test(
verbose_kwarg = verbose_keyword_arg
, verbose_param = 1L
+ , print_metrics = TRUE
, train_function = lgb.cv
)
.assert_has_expected_logs(
@@ -3828,3 +3832,29 @@ test_that("Evaluation metrics aren't printed as a single-element vector", {
})
expect_false(grepl("[1] \"[1]", log_txt, fixed = TRUE))
})
+
+ test_that("lgb.cv() doesn't mix booster messages with evaluation metrics messages", {
+ log_txt <- capture_output({
+ data(mtcars)
+ y <- mtcars$mpg
+ x <- as.matrix(mtcars[, -1L])
+ cv_result <- lgb.cv(
+ data = lgb.Dataset(x, label = y)
+ , params = list(
+ objective = "regression"
+ , metric = "l2"
+ , min_data_in_leaf = 5L
+ , max_depth = 3L
+ , num_threads = .LGB_MAX_THREADS
+ )
+ , nrounds = 2L
+ , nfold = 3L
+ , verbose = -1L
+ , eval_train_metric = TRUE
+ , print_metrics = TRUE
+ )
+ })
+ expect_false(grepl("Warning", log_txt, fixed = TRUE))
+ expect_false(grepl("gain", log_txt, fixed = TRUE))
+ expect_false(grepl("inf", log_txt, fixed = TRUE))
+ })
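
Since printing is now opt-in, the metrics themselves stay available programmatically whichever way print_metrics is set: as the assertions earlier in this file already rely on, lgb.cv() records them on the returned CVBooster independently of the print callback. A short sketch under that assumption, reusing a cv_result fitted as in the new test above:

# per-round mean l2 across the held-out folds, read straight off the returned CVBooster
valid_l2 <- cv_result$record_evals[["valid"]][["l2"]][["eval"]]
unlist(valid_l2)  # one numeric value per boosting round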