I’ve coded a graph that performs stacking using the mlr3 package. In the first step, I tuned the parameters of the level 0 learners, and in the final step, I used the predictions from the tuned level 0 learners to obtain the predictions of an ensemble learner. However, when I ran my code, I received this error message:
Error in family@check_y(y) :
response is not a factor at two levels but ‘family = Binomial()’
This happened PipeOp classif.glmboost's $train()
Here is a reproducible example:
## Reproducible stacking example ----
library(mlr3pipelines)  # needed for gunion(), po() and the %>>% operator

## Fix the RNG seed so the simulated data are actually reproducible.
set.seed(42)

## Simulate 1742 observations: spatial coordinates (x, y), a year column,
## a balanced binary response ("presence") and two numeric covariates.
n <- 1742
data <- data.frame(
  ID = seq_len(n),
  x = runif(n, -130.88, -61.12),
  y = runif(n, 12.12, 61.38),
  year = runif(n, 2005, 2020),
  presence = rep(0:1, each = n / 2),
  V1 = runif(n, -3.66247, 2.95120),
  V2 = runif(n, -1.6501, 7.5510)
)
## The target must be a two-level factor for binary classification
## (mboost's Binomial() family checks exactly this).
data$presence <- as.factor(data$presence)
## summary(data)

## Spatio-temporal classification task; "1" (presence) is the positive class.
task <- mlr3spatial::as_task_classif_st(
  x = data,
  target = "presence",
  positive = "1",
  coordinate_names = c("x", "y"),
  crs = "+proj=longlat +datum=WGS84 +no_defs +type=crs"
)
## Declare the columns the spatio-temporal resampling partitions on.
task$set_col_roles("ID", roles = "space")
task$set_col_roles("year", roles = "time")

## Level 1 (ensemble) learner: average the level 0 probability predictions,
## optimizing the averaging weights against AUC.
learner_avg <- mlr3pipelines::LearnerClassifAvg$new(id = "classif.avg")
learner_avg$predict_type <- "prob"
learner_avg$param_set$values$measure <- "classif.auc"

## Modified PipeOpLearnerCV with support for spatial resampling methods.
source("H:/PipeOpLearnerCV_mod.R")

## Level 0 learners with a tunable mstop (number of boosting iterations).
## `mstop = paradox::to_tune(1, 500)` is equivalent to the original
## `list(mstop = to_tune(1, 500))$mstop` wrapper, just without the detour.
learner_glmboost <- mlr3::lrn(
  "classif.glmboost",
  predict_type = "prob",
  family = "Binomial",
  mstop = paradox::to_tune(1, 500)
)
learner_gamboost <- mlr3::lrn(
  "classif.gamboost",
  predict_type = "prob",
  family = "Binomial",
  mstop = paradox::to_tune(1, 500)
)

## Wrap each level 0 learner so its out-of-fold (spatial CV) predictions
## become the features consumed by the level 1 learner.
po_learner_glmboost <- PipeOpLearnerCV_mod$new(
  learner = learner_glmboost,
  param_vals = list(resampling.method = "sptcv_cstf", resampling.folds = 2)
)
po_learner_gamboost <- PipeOpLearnerCV_mod$new(
  learner = learner_gamboost,
  param_vals = list(resampling.method = "sptcv_cstf", resampling.folds = 2)
)

## Stacking graph: union of level 0 predictions feeds the averaging learner.
level_0_graph <- gunion(list(po_learner_glmboost, po_learner_gamboost)) %>>%
  po("featureunion")
level_0_and_1_graph <- level_0_graph %>>% learner_avg
level_0_and_1_graph_learner <- mlr3::as_learner(level_0_and_1_graph)

## Inner tuning loop: model-based optimization, 2-fold spatial CV, AUC.
tuning_instance <- mlr3tuning::auto_tuner(
  tuner = mlr3tuning::tnr("mbo"),
  learner = level_0_and_1_graph_learner,
  resampling = mlr3::rsmp("sptcv_cstf", folds = 2),
  measure = mlr3::msr("classif.auc"),
  terminator = mlr3tuning::trm("evals", n_evals = 5, k = 0)
)

## Outer spatial CV (nested resampling); keep models for later inspection.
run_resampling <- mlr3::resample(
  task = task,
  learner = tuning_instance,
  resampling = mlr3::rsmp("sptcv_cstf", folds = 2),
  store_models = TRUE
)
Here is my function `PipeOpLearnerCV_mod`. It is the `mlr3pipelines::PipeOpLearnerCV()` function, which I have modified to integrate spatial resampling.
## Modified copy of mlr3pipelines::PipeOpLearnerCV.
## Difference from upstream: the "resampling.method" parameter additionally
## accepts the spatio-temporal CV methods "sptcv_cstf" and
## "repeated_sptcv_cstf", and a "repeats" parameter was added for the
## repeated variant. During $train() the wrapped learner's out-of-fold
## predictions replace the task's features; during $predict() the learner
## (trained on the full training task) predicts on the incoming task.
PipeOpLearnerCV_mod = R6Class("PipeOpLearnerCV_mod",
inherit = PipeOpTaskPreproc,
public = list(
## Constructor.
## @param learner Learner (or anything as_learner() accepts); cloned.
## @param id PipeOp id. NOTE(review): the "My edits" block below overwrites
##   `id` with the learner's id unconditionally, so this argument is
##   effectively ignored — confirm that is intended.
## @param param_vals Named list presetting resampling.* and learner params.
initialize = function(learner, id = NULL, param_vals = list()) {
private$.learner = as_learner(learner, clone = TRUE)
## Compatibility shim for old paradox versions (blank param-set id).
if (mlr3pipelines:::paradox_info$is_old) {
private$.learner$param_set$set_id = ""
}
########################################################################
## My edits
## Upstream used the fallback on the next (commented) line; here the
## PipeOp id is always taken from the wrapped learner.
id = private$.learner$id
## id = id %??% private$.learner$id
# FIXME: can be changed when mlr-org/mlr3#470 has an answer
########################################
## Map the learner's task type (e.g. "classif") to the Task class name.
type = private$.learner$task_type
task_type = mlr_reflections$task_types[type, mult = "first"]$task
########################################################################
## My edits
## Resampling parameter set; upstream only offered "cv"/"insample".
## NOTE(review): "repeats" carries no "train" tag, unlike the other
## parameters — verify it is still forwarded where needed.
private$.crossval_param_set = ps(
method = p_fct(levels = c("cv", "insample", "sptcv_cstf", "repeated_sptcv_cstf"), tags = c("train", "required")),
folds = p_int(lower = 2L, upper = Inf, tags = c("train", "required")),
repeats = p_int(lower = 1L, upper = Inf),
keep_response = p_lgl(tags = c("train", "required"))
)
########################################
## Defaults match upstream PipeOpLearnerCV: plain 3-fold CV, drop response.
private$.crossval_param_set$values = list(method = "cv", folds = 3, keep_response = FALSE)
if (mlr3pipelines:::paradox_info$is_old) {
private$.crossval_param_set$set_id = "resampling"
}
# Dependencies in paradox have been broken from the start and this is known since at least a year:
# https://github.com/mlr-org/paradox/issues/216
# The following would make it _impossible_ to set "method" to "insample", because then "folds"
# is both _required_ (required tag above) and at the same time must be unset (because of this
# dependency). We will opt for the least annoying behaviour here and just not use dependencies
# in PipeOp ParamSets.
# private$.crossval_param_set$add_dep("folds", "method", CondEqual$new("cv")) # don't do this.
## Expose both the resampling params and the learner's own params.
super$initialize(id, alist(resampling = private$.crossval_param_set, private$.learner$param_set), param_vals = param_vals, can_subset_cols = TRUE, task_type = task_type, tags = c("learner", "ensemble"))
}
),
active = list(
## Read-only handle on the wrapped (untrained) learner.
learner = function(val) {
if (!missing(val)) {
if (!identical(val, private$.learner)) {
stop("$learner is read-only.")
}
}
private$.learner
},
## Read-only handle on the trained learner: the wrapped learner combined
## with this PipeOp's stored $state (recursing through multiplicities).
learner_model = function(val) {
if (!missing(val)) {
if (!identical(val, private$.learner)) {
stop("$learner_model is read-only.")
}
}
if (is.null(self$state) || is_noop(self$state)) {
private$.learner
} else {
multiplicity_recurse(self$state, clone_with_state, learner = private$.learner)
}
},
## Get/set the wrapped learner's predict type ("response"/"prob"/...).
predict_type = function(val) {
if (!missing(val)) {
assert_subset(val, names(mlr_reflections$learner_predict_types[[private$.learner$task_type]]))
private$.learner$predict_type = val
}
private$.learner$predict_type
}
),
private = list(
## Train: fit the learner on the full task (stored as $state), then
## replace the task's features with resampled (out-of-fold) predictions.
.train_task = function(task) {
## Reset the learner's state on exit so the PipeOp owns the only copy.
on.exit({private$.learner$state = NULL})
# Train a learner for predicting
self$state = private$.learner$train(task)$state
pv = private$.crossval_param_set$values
# Compute CV Predictions
if (pv$method != "insample") {
rdesc = mlr_resamplings$get(pv$method)
if (pv$method == "cv") rdesc$param_set$values = list(folds = pv$folds)
########################################################################
## My edits
## Configure the spatio-temporal resamplings; stratification is
## disabled for both variants.
if (pv$method == "sptcv_cstf") rdesc$param_set$values = list(folds = pv$folds, stratify = FALSE)
if (pv$method == "repeated_sptcv_cstf") rdesc$param_set$values = list(folds = pv$folds, repeats = pv$repeats, stratify = FALSE)
########################################################################
rr = resample(task, private$.learner, rdesc)
prds = as.data.table(rr$prediction(predict_sets = "test"))
} else {
## "insample": predict on the same data the learner was trained on.
prds = as.data.table(private$.learner$predict(task))
}
private$pred_to_task(prds, task)
},
## Predict: restore the stored state and predict on the incoming task.
.predict_task = function(task) {
on.exit({private$.learner$state = NULL})
private$.learner$state = self$state
prediction = as.data.table(private$.learner$predict(task))
private$pred_to_task(prediction, task)
},
## Convert a prediction table into the output task: drop truth (and the
## response when only probabilities are kept), prefix columns with the
## PipeOp id, and cbind onto the task with all original features removed.
pred_to_task = function(prds, task) {
if (!is.null(prds$truth)) prds[, truth := NULL]
if (!self$param_set$values$resampling.keep_response && self$learner$predict_type == "prob") {
prds[, response := NULL]
}
renaming = setdiff(colnames(prds), c("row_id", "row_ids"))
data.table::setnames(prds, renaming, sprintf("%s.%s", self$id, renaming))
# This can be simplified for mlr3 >= 0.11.0;
# will be always "row_ids"
row_id_col = intersect(colnames(prds), c("row_id", "row_ids"))
data.table::setnames(prds, old = row_id_col, new = task$backend$primary_key)
task$select(character(0))$cbind(prds)
},
.crossval_param_set = NULL,
.learner = NULL,
## Include the wrapped learner's hash in this PipeOp's hash.
.additional_phash_input = function() private$.learner$phash
)
)
## Register the PipeOp in the mlr_pipeops dictionary under "learner_cv".
## The constructor argument is a minimal dummy Learner (classif, empty
## param set) so the dictionary can instantiate an example object.
mlr_pipeops$add(
  "learner_cv",
  PipeOpLearnerCV_mod,
  list(
    R6Class(
      "Learner",
      public = list(
        id = "learner_cv",
        task_type = "classif",
        param_set = ps()
      )
    )$new()
  )
)