When I run my code, which performs feature selection and hyperparameter tuning on stacked learners (glmnet and rpart), I receive the following error message:
Error in assert_binary(truth, prob = prob, positive = positive, na_value = na_value) :
Assertion on 'prob' failed: Contains missing values (element 1).
This happened PipeOp classif.avg's $train()
I ran my code (see below) first by replacing level_0_and_1_graph_learner with learner_glmnet, and then with learner_rpart. In both cases, there were no NAs in the predictions using these commands:
test <- as.data.table(stacking$prediction())
which(is.na(test))
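As far as I understand, the 'prob' values that classif.avg complains about during $train() are the cross-validated probability columns produced by the level-0 PipeOps, so those columns can also be inspected directly. A minimal sketch of that check (it reuses level_0_graph and task from the code below, on a clone so the pipeline itself is untouched):
g <- level_0_graph$clone(deep = TRUE)   # clone so the graph used in the pipeline stays untrained
cv_task <- g$train(task)[[1]]           # featureunion output: a task holding the CV prediction columns
colSums(is.na(cv_task$data()))          # NA count per column (target + stacked probability columns)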
I also ran my code by replacing learner_glmnet_cv and learner_rpart_cv with learner_debug_1_cv and learner_debug_2_cv, respectively, and I get the same error message. I used these commands:
learner_debug <- lrn("classif.debug", x = to_tune(0, 1))
learner_debug$predict_type <- "prob"
learner_debug_1_cv <- PipeOpLearnerCV_mod$new(learner = learner_debug, id = "learner_debug_1_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
learner_debug_2_cv <- PipeOpLearnerCV_mod$new(learner = learner_debug, id = "learner_debug_2_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
Here are my data: https://www.dropbox.com/scl/fi/hkjs79i89gjz0j5mjlbj8/Data.csv?rlkey=08yuzet3mjr9gcezkryo93vqm&st=hfv2cbeo&dl=0
Here is my code:
## Packages attached up front (the other packages are called below with ::)
library(mlr3)              # lrn(), tasks, resampling infrastructure
library(mlr3pipelines)     # %>>%, gunion(), po(), PipeOpTaskPreproc
library(paradox)           # to_tune(), ps()
library(data.table)        # as.data.table()
library(mlr3spatiotempcv)  # registers the "spcv_coords" resampling
library(mlr3mbo)           # registers the "mbo" tuner
set.seed(1)
source("C:/Users/Marine/PipeOpLearnerCV_mod.R")
data <- read.csv("C:/Users/Marine/Downloads/Data.csv")
data$presence <- as.factor(data$presence)
data$L3_ID <- as.factor(data$L3_ID)
data$L2_ID <- as.factor(data$L2_ID)
data$L1_ID <- as.factor(data$L1_ID)
data$year <- as.factor(data$year)
## summary(data)
data <- data[, !((names(data) %in% c("L1_ID", "L2_ID", "L3_ID", "year")))]
task <- mlr3spatial::as_task_classif_st(x = data, target = "presence", positive = "1", coordinate_names = c("x", "y"), crs = "+proj=longlat +datum=WGS84 +no_defs +type=crs")
summary(task)
learner_glmnet <- mlr3tuningspaces::lts(mlr3::lrn("classif.glmnet", predict_type = "prob"))
learner_rpart <- mlr3tuningspaces::lts(mlr3::lrn("classif.rpart", predict_type = "prob"))
learner_glmnet_cv <- PipeOpLearnerCV_mod$new(learner = learner_glmnet, id = "learner_glmnet_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
learner_rpart_cv <- PipeOpLearnerCV_mod$new(learner = learner_rpart, id = "learner_rpart_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
learner_debug <- lrn("classif.debug", x = to_tune(0, 1))
learner_debug$predict_type <- "prob"
learner_debug_1_cv <- PipeOpLearnerCV_mod$new(learner = learner_debug, id = "learner_debug_1_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
learner_debug_2_cv <- PipeOpLearnerCV_mod$new(learner = learner_debug, id = "learner_debug_2_cv", param_vals = list(resampling.method = "spcv_coords", resampling.folds = 2))
learner_avg <- mlr3pipelines::LearnerClassifAvg$new(id = "classif.avg")
learner_avg$predict_type <- "prob"
learner_avg$param_set$values$measure <- "classif.auc"
## level_0_graph <- mlr3pipelines::gunion(list(learner_glmnet_cv, learner_rpart_cv)) %>>% mlr3pipelines::po("featureunion")
level_0_graph <- mlr3pipelines::gunion(list(learner_debug_1_cv, learner_debug_2_cv)) %>>% mlr3pipelines::po("featureunion")
level_0_and_1_graph <- level_0_graph %>>% learner_avg
level_0_and_1_graph_learner <- mlr3::as_learner(level_0_and_1_graph)
tuning <- mlr3tuning::auto_tuner(tuner = mlr3tuning::tnr("mbo"),
learner = level_0_and_1_graph_learner,
resampling = mlr3::rsmp("spcv_coords", folds = 2),
measure = mlr3::msr("classif.auc"),
terminator = mlr3tuning::trm("evals", n_evals = 1, k = 0))
feature_selection <- mlr3fselect::auto_fselector(fselector = mlr3fselect::fs("sequential", strategy = "sfs", min_features = 2),
learner = tuning,
resampling = mlr3::rsmp("spcv_coords", folds = 2),
measure = mlr3::msr("classif.auc"),
terminator = mlr3tuning::trm("evals", n_evals = 1, k = 0))
stacking <- mlr3::resample(task = task,
learner = feature_selection,
resampling = mlr3::rsmp("spcv_coords", folds = 2),
store_models = TRUE)
test <- as.data.table(stacking$prediction())
which(is.na(test))
Here is the PipeOpLearnerCV_mod.R file (a modified copy of PipeOpLearnerCV from mlr3pipelines) needed to run my code:
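## Assumed to be attached before sourcing this file: R6, paradox, mlr3, mlr3pipelines,
## data.table and checkmate (mlr3spatiotempcv is needed at run time for "spcv_coords").
## multiplicity_recurse(), clone_with_state() and is_noop() are helpers from mlr3pipelines
## and may need the mlr3pipelines::: prefix if your version does not export them.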
PipeOpLearnerCV_mod = R6Class("PipeOpLearnerCV_mod",
inherit = PipeOpTaskPreproc,
public = list(
initialize = function(learner, id, param_vals = list()) {
private$.learner = as_learner(learner, clone = TRUE)
if (mlr3pipelines:::paradox_info$is_old) {
private$.learner$param_set$set_id = ""
}
########################################################################
## My edits
id = ifelse(is.null(id), private$.learner$id, id)
## id = id %??% private$.learner$id
# FIXME: can be changed when mlr-org/mlr3#470 has an answer
########################################
type = private$.learner$task_type
task_type = mlr_reflections$task_types[type, mult = "first"]$task
########################################################################
## My edits
private$.crossval_param_set = ps(
method = p_fct(levels = c("cv", "insample", "spcv_coords"), tags = c("train", "required")),
folds = p_int(lower = 2L, upper = Inf, tags = c("train", "required")),
keep_response = p_lgl(tags = c("train", "required"))
)
########################################
private$.crossval_param_set$values = list(method = "cv", folds = 3, keep_response = FALSE)
if (mlr3pipelines:::paradox_info$is_old) {
private$.crossval_param_set$set_id = "resampling"
}
# Dependencies in paradox have been broken from the start and this has been known for at least a year:
# https://github.com/mlr-org/paradox/issues/216
# The following would make it _impossible_ to set "method" to "insample", because then "folds"
# is both _required_ (required tag above) and at the same time must be unset (because of this
# dependency). We will opt for the least annoying behaviour here and just not use dependencies
# in PipeOp ParamSets.
# private$.crossval_param_set$add_dep("folds", "method", CondEqual$new("cv")) # don't do this.
super$initialize(id, alist(resampling = private$.crossval_param_set, private$.learner$param_set), param_vals = param_vals, can_subset_cols = TRUE, task_type = task_type, tags = c("learner", "ensemble"))
}
),
active = list(
learner = function(val) {
if (!missing(val)) {
if (!identical(val, private$.learner)) {
stop("$learner is read-only.")
}
}
private$.learner
},
learner_model = function(val) {
if (!missing(val)) {
if (!identical(val, private$.learner)) {
stop("$learner_model is read-only.")
}
}
if (is.null(self$state) || is_noop(self$state)) {
private$.learner
} else {
multiplicity_recurse(self$state, clone_with_state, learner = private$.learner)
}
},
predict_type = function(val) {
if (!missing(val)) {
assert_subset(val, names(mlr_reflections$learner_predict_types[[private$.learner$task_type]]))
private$.learner$predict_type = val
}
private$.learner$predict_type
}
),
private = list(
.train_task = function(task) {
on.exit({private$.learner$state = NULL})
# Train a learner for predicting
self$state = private$.learner$train(task)$state
pv = private$.crossval_param_set$values
# Compute CV Predictions
if (pv$method != "insample") {
rdesc = mlr_resamplings$get(pv$method)
if (pv$method == "cv") rdesc$param_set$values = list(folds = pv$folds)
########################################################################
## My edits
if (pv$method == "spcv_coords") rdesc$param_set$values = list(folds = pv$folds)
########################################################################
rr = resample(task, private$.learner, rdesc)
prds = as.data.table(rr$prediction(predict_sets = "test"))
} else {
prds = as.data.table(private$.learner$predict(task))
}
private$pred_to_task(prds, task)
},
.predict_task = function(task) {
on.exit({private$.learner$state = NULL})
private$.learner$state = self$state
prediction = as.data.table(private$.learner$predict(task))
private$pred_to_task(prediction, task)
},
pred_to_task = function(prds, task) {
if (!is.null(prds$truth)) prds[, truth := NULL]
if (!self$param_set$values$resampling.keep_response && self$learner$predict_type == "prob") {
prds[, response := NULL]
}
renaming = setdiff(colnames(prds), c("row_id", "row_ids"))
data.table::setnames(prds, renaming, sprintf("%s.%s", self$id, renaming))
# This can be simplified for mlr3 >= 0.11.0;
# will be always "row_ids"
row_id_col = intersect(colnames(prds), c("row_id", "row_ids"))
data.table::setnames(prds, old = row_id_col, new = task$backend$primary_key)
task$select(character(0))$cbind(prds)
},
.crossval_param_set = NULL,
.learner = NULL,
.additional_phash_input = function() private$.learner$phash
)
)
mlr_pipeops$add("learner_cv", PipeOpLearnerCV_mod, list(R6Class("Learner", public = list(id = "learner_cv", task_type = "classif", param_set = ps()))$new()))
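For reference, the blocks marked "My edits" above mainly add "spcv_coords" as an allowed resampling method (plus a small change to the id default). That resampling is registered by mlr3spatiotempcv, so the package must be loaded for mlr_resamplings$get("spcv_coords") to resolve. A quick, data-independent sanity check would be:
library(mlr3spatiotempcv)                      # registers "spcv_coords" with mlr_resamplings
rdesc <- mlr3::mlr_resamplings$get("spcv_coords")
rdesc$param_set$values <- list(folds = 2)      # same setting used by the PipeOps above
rdesc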