I am trying to build an XGBoost model for AQI prediction from PM2.5 data, and I am splitting my dataset with TimeSeriesSplit. I created two models:
- an XGBoost model with lag features and rolling statistics (better MAE and RMSE)
- a simple XGBoost model (better R^2 score)
My first question: which model should I prefer?
My second question is about the feature-importance plot of the model with lag features and rolling statistics: it shows some dates among the features. Can dates have an impact on AQI prediction? (A sketch of how I inspect the plot follows the code below.)
Here is my code:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Load the CSV file
file = '/content/drive/MyDrive/merged_file.csv'
data = pd.read_csv(file)
# Function to create lag features and rolling statistics
def create_features(df, lags, rolling_window):
    df_copy = df.copy()
    for lag in lags:
        df_copy[f'Raw_Conc_Lag_{lag}'] = df_copy['Raw Conc.'].shift(lag)
        df_copy[f'NowCast_Conc_Lag_{lag}'] = df_copy['NowCast Conc.'].shift(lag)
    # shift(1) keeps the rolling window strictly in the past, so the current row is not leaked
    df_copy['Raw_Conc_Rolling_Mean'] = df_copy['Raw Conc.'].shift(1).rolling(rolling_window).mean()
    df_copy['NowCast_Conc_Rolling_Mean'] = df_copy['NowCast Conc.'].shift(1).rolling(rolling_window).mean()
    df_copy.dropna(inplace=True)  # drop the initial rows made NaN by shifting/rolling
    return df_copy
# Apply feature creation
data_features = create_features(data, lags=[1, 2, 3], rolling_window=7)
# One-hot encode categorical variables
# Note: get_dummies expands every object/string column, so a raw Date column
# (if present) becomes one dummy column per distinct date value
data_features_encoded = pd.get_dummies(data_features)
# Select input features (X) and target variable (y)
X = data_features_encoded.drop(columns=['AQI'])
y = data_features_encoded['AQI']
# Initialize TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=5)
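# TimeSeriesSplit yields expanding-window folds: each fold trains on all
# earlier rows and tests on the next contiguous block, so the training data
# never comes from after the test period.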
# Function to evaluate model
def evaluate_model(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    predictions_train = model.predict(X_train)
    predictions_test = model.predict(X_test)
    rmse_train = np.sqrt(mean_squared_error(y_train, predictions_train))
    mae_train = mean_absolute_error(y_train, predictions_train)
    r2_train = r2_score(y_train, predictions_train)
    rmse_test = np.sqrt(mean_squared_error(y_test, predictions_test))
    mae_test = mean_absolute_error(y_test, predictions_test)
    r2_test = r2_score(y_test, predictions_test)
    return rmse_train, mae_train, r2_train, rmse_test, mae_test, r2_test
# Initialize lists to store evaluation metrics for default model
metrics_default = []
# Iterate over each time series split for default model
for train_index, test_index in tscv.split(X):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    xgb_regressor_default = xgb.XGBRegressor()
    metrics_default.append(evaluate_model(xgb_regressor_default, X_train, y_train, X_test, y_test))
# Calculate average evaluation metrics across all time series splits for default model
metrics_default = np.array(metrics_default)
avg_metrics_default = metrics_default.mean(axis=0)
print("nAverage Model Evaluation on Training Set (Default Parameters) across all splits:n")
print("Average RMSE:", avg_metrics_default[0])
print("Average MAE:", avg_metrics_default[1])
print("Average R^2 Score:", avg_metrics_default[2])
print("nAverage Model Evaluation on Test Set (Default Parameters) across all splits:n")
print("Average RMSE:", avg_metrics_default[3])
print("Average MAE:", avg_metrics_default[4])
print("Average R^2 Score:", avg_metrics_default[5])
# Define parameter grid for grid search
param_grid = {
    'n_estimators': [100, 200, 300],
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1, 0.2]
}
# Initialize lists to store evaluation metrics for tuned model
metrics_tuned = []
# Outer loop for cross-validation
for train_index, test_index in tscv.split(X):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    xgb_regressor = xgb.XGBRegressor()
    # Inner CV: note that cv=3 is a plain (unshuffled) KFold, not a TimeSeriesSplit
    grid_search = GridSearchCV(estimator=xgb_regressor, param_grid=param_grid, cv=3,
                               scoring='neg_mean_squared_error')
    grid_search.fit(X_train, y_train)
    best_xgb = grid_search.best_estimator_
    metrics_tuned.append(evaluate_model(best_xgb, X_train, y_train, X_test, y_test))
# Calculate average evaluation metrics across all splits for tuned model
metrics_tuned = np.array(metrics_tuned)
avg_metrics_tuned = metrics_tuned.mean(axis=0)
print("nAverage Model Evaluation on Training Set (Tuned Parameters) across all splits:n")
print("Average RMSE:", avg_metrics_tuned[0])
print("Average MAE:", avg_metrics_tuned[1])
print("Average R^2 Score:", avg_metrics_tuned[2])
print("nAverage Model Evaluation on Test Set (Tuned Parameters) across all splits:n")
print("Average RMSE:", avg_metrics_tuned[3])
print("Average MAE:", avg_metrics_tuned[4])
print("Average R^2 Score:", avg_metrics_tuned[5])[enter image description here](https://i.sstatic.net/7i38fgeK.png)
I would also like an opinion on using lag features, since I don't know much about them.
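To make the lag question concrete, here is a tiny self-contained example (made-up numbers, not from my dataset) of what the shift-based lags and the shifted rolling mean in create_features produce:

import pandas as pd

# Toy stand-in for 'Raw Conc.' (hypothetical values)
df = pd.DataFrame({'Raw Conc.': [10.0, 12.0, 11.0, 15.0, 14.0]})

# Lag 1: the previous row's value aligned with the current row
df['Lag_1'] = df['Raw Conc.'].shift(1)
# 3-step rolling mean of past values only (shift(1) excludes the current row)
df['Rolling_Mean_3'] = df['Raw Conc.'].shift(1).rolling(3).mean()
print(df)
#    Raw Conc.  Lag_1  Rolling_Mean_3
# 0       10.0    NaN             NaN
# 1       12.0   10.0             NaN
# 2       11.0   12.0             NaN
# 3       15.0   11.0       11.000000
# 4       14.0   15.0       12.666667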