[R-package] silence more logs in tests (#5208)
jameslamb committed May 11, 2022
1 parent eababef commit 0018206
Showing 3 changed files with 54 additions and 13 deletions.
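
These changes silence LightGBM's log output by passing verbose = VERBOSITY through each params list instead of hard-coding -1L at every call site. The VERBOSITY constant itself is not part of this diff; as a minimal sketch, assuming it lives in the shared testthat setup (the file location and environment-variable name below are illustrative assumptions, not taken from this commit):

# Hypothetical sketch of a shared verbosity constant for the test suite,
# e.g. near the top of R-package/tests/testthat.R. A value of -1 silences
# LightGBM log output; an env var override helps when debugging locally.
VERBOSITY <- as.integer(
    Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)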
64 changes: 51 additions & 13 deletions R-package/tests/testthat/test_basic.R
@@ -156,6 +156,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
num_leaves = 5L
, objective = "binary"
, metric = "binary_error"
, verbose = VERBOSITY
)
, nrounds = nrounds
)
@@ -173,6 +174,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
num_leaves = 5L
, objective = "regression"
, metric = "l2"
, verbose = VERBOSITY
)
, nrounds = nrounds
)
@@ -206,6 +208,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
objective = "regression"
, metric = "l2"
, num_leaves = 5L
, verbose = VERBOSITY
)
)

@@ -218,6 +221,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, metric = "l2"
, num_leaves = 5L
, nrounds = nrounds
, verbose = VERBOSITY
)
)

@@ -231,6 +235,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, metric = "l2"
, num_leaves = 5L
, nrounds = nrounds
, verbose = VERBOSITY
)
)

@@ -335,6 +340,7 @@ test_that("cv works", {
, metric = "l2,l1"
, min_data = 1L
, learning_rate = 1.0
, verbose = VERBOSITY
)
bst <- lgb.cv(
params
@@ -431,6 +437,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric
, metric = "auc,binary_error"
, learning_rate = 1.5
, num_leaves = 5L
, verbose = VERBOSITY
)
)
expect_true(methods::is(cv_bst, "lgb.CVBooster"))
@@ -491,6 +498,7 @@ test_that("lgb.cv() respects showsd argument", {
objective = "regression"
, metric = "l2"
, min_data = 1L
, verbose = VERBOSITY
)
nrounds <- 5L
set.seed(708L)
@@ -549,6 +557,7 @@ test_that("lgb.cv() respects parameter aliases for objective", {
num_leaves = 5L
, application = "binary"
, num_iterations = nrounds
, verbose = VERBOSITY
)
, nfold = nfold
)
@@ -600,6 +609,7 @@ test_that("lgb.cv() respects parameter aliases for metric", {
, objective = "binary"
, num_iterations = nrounds
, metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY
)
, nfold = nfold
)
@@ -616,6 +626,7 @@ test_that("lgb.cv() respects eval_train_metric argument", {
objective = "regression"
, metric = "l2"
, min_data = 1L
, verbose = VERBOSITY
)
nrounds <- 5L
set.seed(708L)
@@ -707,6 +718,7 @@ test_that("lgb.train() respects parameter aliases for objective", {
num_leaves = 5L
, application = "binary"
, num_iterations = nrounds
, verbose = VERBOSITY
)
, valids = list(
"the_training_data" = dtrain
@@ -755,6 +767,7 @@ test_that("lgb.train() respects parameter aliases for metric", {
, objective = "binary"
, num_iterations = nrounds
, metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY
)
, valids = list(
"train" = dtrain
@@ -1722,6 +1735,7 @@ test_that("lgb.train() works with integer, double, and numeric data", {
, min_data_in_leaf = 1L
, learning_rate = 0.01
, seed = 708L
, verbose = VERBOSITY
)
, nrounds = nrounds
)
@@ -2061,6 +2075,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings
params = list(
objective = "binary"
, metric = "binary_error"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds
@@ -2094,6 +2109,7 @@ test_that("lgb.cv() works when you give a function for eval", {
params = list(
objective = "binary"
, metric = "None"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_CLASSIFICATION
, nfold = nfolds
@@ -2119,6 +2135,7 @@ test_that("If first_metric_only is TRUE, lgb.cv() decides to stop early based on
, metric = "None"
, early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_REGRESSION
, nfold = nfolds
@@ -2175,6 +2192,7 @@ test_that("early stopping works with lgb.cv()", {
, metric = "None"
, early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_REGRESSION
, nfold = nfolds
@@ -2620,7 +2638,11 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
set.seed(1L)
dtrain <- lgb.Dataset(train$data, label = train$label)

params <- list(objective = "regression", interaction_constraints = list(c(1L, 2L), 3L))
params <- list(
objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY
)
bst <- lightgbm(
data = dtrain
, params = params
@@ -2629,15 +2651,23 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
pred1 <- bst$predict(test$data)

cnames <- colnames(train$data)
params <- list(objective = "regression", interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]]))
params <- list(
objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]])
, verbose = VERBOSITY
)
bst <- lightgbm(
data = dtrain
, params = params
, nrounds = 2L
)
pred2 <- bst$predict(test$data)

params <- list(objective = "regression", interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L))
params <- list(
objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L)
, verbose = VERBOSITY
)
bst <- lightgbm(
data = dtrain
, params = params
@@ -2654,7 +2684,11 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
set.seed(1L)
dtrain <- lgb.Dataset(train$data, label = train$label)

params <- list(objective = "regression", interaction_constraints = list(c(1L, 2L), 3L))
params <- list(
objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY
)
bst <- lightgbm(
data = dtrain
, params = params
@@ -2663,8 +2697,11 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
pred1 <- bst$predict(test$data)

new_colnames <- paste0(colnames(train$data), "_x")
params <- list(objective = "regression"
, interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L]))
params <- list(
objective = "regression"
, interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L])
, verbose = VERBOSITY
)
bst <- lightgbm(
data = dtrain
, params = params
@@ -2807,6 +2844,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) {
, monotone_constraints = c(1L, -1L, 0L)
, monotone_constraints_method = monotone_constraints_method
, use_missing = FALSE
, verbose = VERBOSITY
)
constrained_model <- lgb.train(
params = params
@@ -2830,7 +2868,7 @@ test_that("lightgbm() accepts objective as function argument and under params",
, label = train$label
, params = list(objective = "regression_l1")
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
expect_equal(bst1$params$objective, "regression_l1")
model_txt_lines <- strsplit(
@@ -2845,7 +2883,7 @@ test_that("lightgbm() accepts objective as function argument and under params",
, label = train$label
, objective = "regression_l1"
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
expect_equal(bst2$params$objective, "regression_l1")
model_txt_lines <- strsplit(
@@ -2863,7 +2901,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
, objective = "regression"
, params = list(objective = "regression_l1")
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
expect_equal(bst1$params$objective, "regression_l1")
model_txt_lines <- strsplit(
@@ -2879,7 +2917,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
, objective = "regression"
, params = list(loss = "regression_l1")
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
expect_equal(bst2$params$objective, "regression_l1")
model_txt_lines <- strsplit(
@@ -2896,7 +2934,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, label = train$label
, objective = "binary"
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
pred1 <- predict(bst1, train$data, rawscore = TRUE)

@@ -2906,7 +2944,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, init_score = pred1
, objective = "binary"
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
pred2 <- predict(bst2, train$data, rawscore = TRUE)

@@ -2918,7 +2956,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw
data = train$data
, label = train$label
, nrounds = 5L
, verbose = -1L
, verbose = VERBOSITY
)
expect_equal(bst$params$objective, "regression")
model_txt_lines <- strsplit(
1 change: 1 addition & 0 deletions R-package/tests/testthat/test_learning_to_rank.R
@@ -83,6 +83,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
, label_gain = "0,1,3"
, min_data = 1L
, learning_rate = 0.01
, verbose = VERBOSITY
)
nfold <- 4L
nrounds <- 10L
2 changes: 2 additions & 0 deletions R-package/tests/testthat/test_lgb.Booster.R
@@ -480,6 +480,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
eval_from_file <- bst$eval(
data = lgb.Dataset(
data = test_file
, params = list(verbose = VERBOSITY)
)$construct()
, name = "test"
)
@@ -551,6 +552,7 @@ test_that("Booster$update() passing a train_set works as expected", {
train_set = Dataset$new(
data = agaricus.train$data
, label = agaricus.train$label
, params = list(verbose = VERBOSITY)
)
)
expect_true(lgb.is.Booster(bst))
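Taken together, the commit routes verbosity through the two surfaces where the R package emits logs: the params list passed to the training functions (lightgbm(), lgb.train(), lgb.cv()) and the params list passed to lgb.Dataset(). A minimal usage sketch combining both patterns, assuming VERBOSITY is defined as in the setup sketch above:

# Sketch: silence both log surfaces via the same shared constant.
library(lightgbm)
data(agaricus.train, package = "lightgbm")
dtrain <- lgb.Dataset(
    data = agaricus.train$data
    , label = agaricus.train$label
    , params = list(verbose = VERBOSITY)  # Dataset construction logs
)
bst <- lgb.train(
    params = list(
        objective = "binary"
        , verbose = VERBOSITY             # training-time logs
    )
    , data = dtrain
    , nrounds = 2L
)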
