As the Jupyter notebook shows, I have run r.run("split"), and the data processing appeared to start and return the new DataFrame that I want to use for training. However, when I run r.run("train"), I get the following error:

mlflow.exceptions.MlflowException: Error has occurred during training of AutoML model using FLAML: AssertionError('Input data must not be empty.')

Below you can see how I run the recipe, my YAML files, and the step code.
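This is roughly how I invoke the recipe from the notebook (a minimal sketch; the profile name "local" is my assumption here and refers to the local.yaml below):

from mlflow.recipes import Recipe

# Build the recipe from recipe.yaml, using the "local" profile (local.yaml)
r = Recipe(profile="local")

# Run the split step first, then the train step
r.run("split")
r.run("train")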
local.yaml
experiment:
  name: "test_food"
  tracking_uri: "sqlite:///metadata/mlflow/mlruns.db"
  artifact_location: "./metadata/mlflow/mlartifacts"

model_registry:
  model_name: "random-forest"

INGEST_CONFIG:
  using: "csv"
  location: "./data/data.csv"
  loader_method: "load_file_as_dataframe"
recipe.yaml
recipe: "classification/v1"
target_col: "target"
positive_class: "1"
primary_metric: "f1_score"
steps:
  ingest: {{INGEST_CONFIG}}
  split:
    using: split_ratios
    split_ratios: [0.75, 0.125, 0.125]
    post_split_filter_method: create_dataset_filter
  transform:
    using: "custom"
    transformer_method: transformer_fn
  train:
    using: "automl/flaml"
    time_budget_secs: 3000
    predict_scores_for_all_classes: True
    predict_prefix: "predicted_"
  evaluate:
    validation_criteria:
      - metric: f1_score
        threshold: 0.9
  register:
    allow_non_validated_model: false
split.py
"""
This module defines the following routines used by the 'split' step:

- ``create_dataset_filter``: Defines customizable logic for filtering the training
  datasets produced by the data splitting procedure. Note that arbitrary transformations
  should go into the transform step.
"""
import ast

import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm


def create_dataset_filter(dataset: DataFrame) -> Series:
    """
    Mark rows of the split datasets to be additionally filtered. This function will be called on
    the training datasets.

    :param dataset: The {train, validation, test} dataset produced by the data splitting procedure.
    :return: A Series indicating whether each row should be filtered.
    """
    # Step 1: Process the dataset
    processed_data = start_preprocessing(dataset)

    # Step 2: Check for NA values and log a warning if found
    print(processed_data.isna().any())
    if processed_data.empty:
        print("Warning: Processed data is empty.")
        # Return False for all rows if the processed data is empty
        return Series(False, index=dataset.index)

    # Step 3: Create a filtering Series based on your conditions
    # Example: keep rows that are not null in a specific column (e.g., 'target')
    filter_condition = processed_data['target'].notna()  # Adjust this based on your target column or filtering criteria

    # Optional: log the number of rows being filtered
    print(f"Filtered rows: {filter_condition.sum()} out of {len(dataset)}")
    return filter_condition


def fill_null_values_with_average_values(df: pd.DataFrame) -> pd.DataFrame:
    """
    Identify null values in specific nutritional columns and fill them with the
    average of their respective categories.
    """
    # Columns to check for null values
    columns_to_fix_nulls = [
        'nutritional_saturated_fat_100g',
        'nutritional_carbohydrates_100g',
        'nutritional_fat_100g',
        'nutritional_sugars_100g',
        'nutritional_proteins_100g',
        'nutritional_fiber_100g',
        'nutritional_energy_100g',
        'nutritional_salt_100g',
    ]
    for col in tqdm(columns_to_fix_nulls):
        category_means = df.groupby('category')[col].mean().fillna(0)
        df[col] = df.apply(
            lambda row: category_means[row['category']] if pd.isnull(row[col]) else row[col],
            axis=1,
        )
    return df


def extract_number_of_ingredients_from_string(datum) -> int:
    return len(ast.literal_eval(datum))


def convert_string_to_list_size(df: DataFrame) -> DataFrame:
    df['ingredients_ordered'] = df['ingredients_ordered'].apply(extract_number_of_ingredients_from_string)
    return df


def encode_category(df: DataFrame) -> DataFrame:
    le = LabelEncoder()
    df['category'] = le.fit_transform(df['category'])
    return df


def start_preprocessing(df: DataFrame) -> DataFrame:
    df_no_null = fill_null_values_with_average_values(df)
    df_ingredients_list = convert_string_to_list_size(df_no_null)
    df_encoded = encode_category(df_ingredients_list)
    return df_encoded.drop(columns=[
        'id', 'category', 'is_liquid', 'nutritional_saturated_fat_100g',
        'nutritional_fat_100g', 'nutritional_fiber_100g', 'nutritional_salt_100g',
    ])
train.py
"""
This module defines the following routines used by the 'train' step:

- ``estimator_fn``: Defines the customizable estimator type and parameters that are used
  during training to produce a model recipe.
"""
from typing import Any, Dict

from sklearn.ensemble import RandomForestClassifier


def estimator_fn(estimator_params: Dict[str, Any] = None) -> Any:
    """
    Returns an *unfitted* estimator that defines ``fit()`` and ``predict()`` methods.
    The estimator's input and output signatures should be compatible with scikit-learn
    estimators.
    """
    # FIXME::OPTIONAL: return a scikit-learn-compatible classification estimator with
    # fine-tuned hyperparameters.
    if estimator_params is None:
        estimator_params = {
            'n_estimators': 100,
            'max_depth': None,
            'class_weight': 'balanced',
            'random_state': 42,
        }
    return RandomForestClassifier(**estimator_params)
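In case it is useful, this is how the filter function could be exercised by itself, outside the recipe (just a sketch on my side: the steps.split import path is an assumption about where split.py lives, and I read data.csv directly, the same file as in INGEST_CONFIG):

import pandas as pd

from steps.split import create_dataset_filter  # assumption: split.py sits under a steps/ package

raw_df = pd.read_csv("./data/data.csv")  # same file as in INGEST_CONFIG
row_mask = create_dataset_filter(raw_df)

# The post-split filter is expected to return a boolean Series aligned with the input rows
print(row_mask.dtype, row_mask.sum(), len(row_mask))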
Here is the full output of the failing train step:
Run MLflow Recipe step: train
2024/09/25 13:40:44 INFO mlflow.recipes.step: Running step train...
2024/09/25 13:40:44 INFO mlflow.recipes.steps.train: Detected class imbalance: minority class percentage is 0.00
2024/09/25 13:40:44 INFO mlflow.recipes.steps.train: After downsampling: minority class percentage is 0.30
2024/09/25 13:40:45 WARNING mlflow.recipes.steps.automl.flaml: Input data must not be empty.
Traceback (most recent call last):
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 166, in _create_model_automl
automl.fit(X, y, **automl_settings)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/flaml/automl/automl.py", line 1712, in fit
task.validate_data(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/flaml/automl/task/generic_task.py", line 125, in validate_data
assert X_train_all.size != 0 and y_train_all.size != 0, "Input data must not be empty."
AssertionError: Input data must not be empty.
Stack (most recent call last):
File "<string>", line 1, in <module>
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/step.py", line 132, in run
self.step_card = self._run(output_directory=output_directory)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 369, in _run
estimator = self._resolve_estimator(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 702, in _resolve_estimator
return self._resolve_estimator_plugin(using_plugin, X_train, y_train, output_directory)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 677, in _resolve_estimator_plugin
estimator, best_parameters = estimator_fn(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 52, in get_estimator_and_best_params
return _create_model_automl(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 176, in _create_model_automl
_logger.warning(e, exc_info=e, stack_info=True)
Traceback (most recent call last):
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 166, in _create_model_automl
automl.fit(X, y, **automl_settings)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/flaml/automl/automl.py", line 1712, in fit
task.validate_data(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/flaml/automl/task/generic_task.py", line 125, in validate_data
assert X_train_all.size != 0 and y_train_all.size != 0, "Input data must not be empty."
AssertionError: Input data must not be empty.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/step.py", line 132, in run
self.step_card = self._run(output_directory=output_directory)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 369, in _run
estimator = self._resolve_estimator(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 702, in _resolve_estimator
return self._resolve_estimator_plugin(using_plugin, X_train, y_train, output_directory)
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/train.py", line 677, in _resolve_estimator_plugin
estimator, best_parameters = estimator_fn(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 52, in get_estimator_and_best_params
return _create_model_automl(
File "/home/serdarakyol/.pyenv/versions/recipe/lib/python3.9/site-packages/mlflow/recipes/steps/automl/flaml.py", line 177, in _create_model_automl
raise MlflowException(
mlflow.exceptions.MlflowException: Error has occurred during training of AutoML model using FLAML: AssertionError('Input data must not be empty.')
make: *** [Makefile:40: steps/train/outputs/model] Error 1
Additionally, when I checked the dataset with r.get_artifact("training_data").isnull().any(), I see that there are no null values.
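This is roughly the check I ran on the split output (r is the same Recipe object as in the sketch above, and "training_data" is the artifact name I read back with get_artifact):

# Read back the training data produced by the split step
train_df = r.get_artifact("training_data")

print(train_df.shape)           # to see how many rows/columns survive the post-split filter
print(train_df.isnull().any())  # every column comes back False for me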
Can anyone help me in this case?