My question is very simple.
I have this code:
from pycaret.time_series import TSForecastingExperiment
import pandas as pd
# Convert X_pca into a pandas DataFrame
X_pca_df = pd.DataFrame(X_pca, columns=[f'Component_{i+1}' for i in range(X_pca.shape[1])])
# Convert the target variable y to a pandas DataFrame and reset the index
y_df = y.compute().to_frame(name='close').reset_index(drop=True)
# Concatenate X_pca_df with the target variable y
data = pd.concat([X_pca_df, y_df], axis=1)
# Set up the time-series forecasting experiment with PyCaret
exp = TSForecastingExperiment()
exp.setup(data=data, target='close', session_id=42)
# Run the experiment
best_model = exp.compare_models()
# Display the results
print(best_model)
I would like to know how to speed up the setup and compare_models time, because I am dealing with a very big dataset of shape (3496398, 7).
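For context, these are the speed-related knobs I am aware of inside PyCaret itself (assuming PyCaret 3.x; the exact keyword names, and the model IDs passed to include, may differ between versions), but I am not sure they are enough on their own:
from pycaret.time_series import TSForecastingExperiment

exp = TSForecastingExperiment()
exp.setup(
    data=data,        # same DataFrame as in the block above
    target='close',
    fh=24,            # hypothetical forecast horizon, pick whatever fits the use case
    fold=2,           # fewer cross-validation folds means fewer refits per candidate
    session_id=42,
    n_jobs=-1,        # use all available cores
)
# Compare only a handful of cheap models instead of the whole library
best_model = exp.compare_models(include=['naive', 'ets', 'theta', 'arima'])
print(best_model)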
I am trying to do FastICA on the rows and FastICA on the columns.
If there is a more appropriate way to reduce the dimensionality of the rows, I would like to know; a sketch of the resampling alternative I have in mind is shown right after the block below. For now I have reduced the dataset to 20% of its rows while eliminating only about 26.41% of the variance. Here is my code:
import dask.dataframe as dd
from dask_ml.preprocessing import StandardScaler
from sklearn.decomposition import FastICA
import numpy as np
def calcular_numero_registros(desired_percentage, total_samples):
num_registros = int(desired_percentage * total_samples)
return num_registros
# Load your CSV file using Dask
dtypes = {
'open_time': 'int64',
'open': 'float32',
'high': 'float32',
'low': 'float32',
'close': 'float32',
'volume': 'float32',
'close_time': 'int64',  # int64 so millisecond epoch timestamps do not overflow
'quote_volume': 'float32',
'count': 'int32',
'taker_buy_volume': 'float32',
'taker_buy_quote_volume': 'float32',
'ignore': 'float64'
}
usecols = ['close_time', 'close', 'high', 'volume', 'quote_volume', 'count', 'taker_buy_volume', 'taker_buy_quote_volume']  # load only the columns needed for the analysis
# Load the dataset with Dask using the specified dtypes and usecols
print("Starting to read the dataset")
df = dd.read_csv('/content/drive/MyDrive/btcpricetest/binancedata/data/spot/monthly/klines/BTCUSDT/1m/merged.csv',
dtype=dtypes, usecols=usecols)
# Extract the features from your Dask DataFrame
X = df.compute()
# The target variable is stored in the column named 'close'
y = X.pop('close')
# Display shape of the dataset
print("Original dataset shape:", X.shape)
# Scale the data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Perform ICA
ica_transformer = FastICA(random_state=42)
X_ica = ica_transformer.fit_transform(X_scaled)
# Calculate variance of each row
row_variances = np.var(X_ica, axis=1)
# Sort row variances in descending order
sorted_variances_indices = np.argsort(row_variances)[::-1]
# Calculate cumulative explained variance ratio
cumulative_variance_ratio = np.cumsum(row_variances[sorted_variances_indices]) / np.sum(row_variances)
# Find the number of samples to keep based on desired percentage of records
total_samples = X.shape[0]
desired_percentage_records = 0.20 # Change this to desired percentage of records
n_samples_95 = calcular_numero_registros(desired_percentage_records, total_samples)
print("Number of samples to keep based on 95% of records:", n_samples_95)
percentage_variance_eliminated = (1 - cumulative_variance_ratio[n_samples_95 - 1]) * 100
print("Percentage of variance eliminated:", percentage_variance_eliminated)
# Select only the required number of rows (samples)
X_ica_selected = X_ica[sorted_variances_indices[:n_samples_95]]
# Display shape of the transformed dataset
print("Transformed dataset shape after ICA:", X_ica_selected.shape)
and the second block:
import dask.dataframe as dd
from dask_ml.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import numpy as np
# Load the ICA-transformed data from the NumPy array into a Dask DataFrame
X_ica_selected_dask = dd.from_array(X_ica_selected, columns=[f'Component_{i+1}' for i in range(X_ica_selected.shape[1])])
# Take the last column of the Dask DataFrame as the target variable
y = X_ica_selected_dask[X_ica_selected_dask.columns[-1]]  # selects the last ICA component
# Scale the data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X_ica_selected)
# Determine the maximum number of components based on the minimum of samples and features
n_components = min(X_scaled.shape[0], X_scaled.shape[1])
# Perform PCA with the determined number of components
pca_transformer = PCA(n_components=n_components) # Number of components determined automatically
X_pca = pca_transformer.fit_transform(X_scaled)
# Calculate cumulative explained variance ratio
cumulative_variance_ratio = np.cumsum(pca_transformer.explained_variance_ratio_)
# Find the number of components whose cumulative explained variance reaches the chosen threshold (about 66.7% here)
n_components_95 = np.argmax(cumulative_variance_ratio >= 0.6666666666666666) + 1
print("Number of components needed to reach the variance threshold:", n_components_95)
# Perform PCA with the specified number of components
pca_transformer = PCA(n_components=n_components_95)
X_pca = pca_transformer.fit_transform(X_scaled)
# Display shape of the transformed dataset
print("Transformed dataset shape after PCA:", X_pca.shape)