While experimenting with PyTorch for neural network training, I ran into a choice: should I load the data in batches using PyTorch's DataLoader, or feed the entire dataset into the model at once (there are no GPU memory issues in my case)? I expected that using a DataLoader with a batch size equal to the entire dataset would mirror the performance of feeding the full dataset directly. However, my observations indicate otherwise.
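To make sure this assumption is not already wrong at the data level, here is a minimal standalone sanity check (not part of the script below) confirming that a full-batch DataLoader really yields the whole dataset as a single batch:
<code>import torch
from torch.utils.data import DataLoader, TensorDataset

# Sanity check: with batch_size == len(dataset) and shuffle=False, the loader
# yields exactly one batch per epoch, and that batch matches the raw tensors.
x = torch.linspace(-10, 10, 1000).unsqueeze(1)
y = x**2 + torch.randn_like(x) * 10
batches = list(DataLoader(TensorDataset(x, y), batch_size=len(x), shuffle=False))
assert len(batches) == 1
xb, yb = batches[0]
assert torch.equal(xb, x) and torch.equal(yb, y)
</code>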
When I use the DataLoader with this maximum batch size, the training performance (e.g. the final loss) tends to be worse than when I load the entire dataset directly. Training with the DataLoader is also noticeably slower.
As someone new to PyTorch, I find these differences puzzling. Why does training with a DataLoader, even when the batch size covers the whole dataset, yield worse performance than feeding all the data directly into the model?
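One possible confound I already noticed: the two models are constructed back to back from the same random number stream, so they do not start from identical weights. If that matters, re-seeding before each construction would make the starting points identical. This is a hypothetical tweak, not part of my script below:
<code># Hypothetical tweak, not in the script below: seed before each construction so
# both models start from the same initial weights.
torch.manual_seed(0)
model_direct = SimpleLinearModel()
torch.manual_seed(0)
model_loader = SimpleLinearModel()
</code>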
I would appreciate any help unpacking the intricacies of data loading in PyTorch and explaining this behavior. Here is the complete script I am using:
<code>import torch
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import time

# Set the random seed for reproducibility
torch.manual_seed(0)

# Generate synthetic data
x = torch.linspace(-10, 10, 1000).unsqueeze(1)  # x data tensor
y = x**2 + torch.randn_like(x) * 10             # y data with noise

class SimpleLinearModel(nn.Module):
    def __init__(self):
        super(SimpleLinearModel, self).__init__()
        self.fc1 = nn.Linear(1, 10)  # First linear layer
        self.relu = nn.ReLU()        # ReLU activation
        self.fc2 = nn.Linear(10, 1)  # Second linear layer to map back to output

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x

def train_model(model, loader, optimizer, epochs=2000):
    criterion = nn.MSELoss()
    for epoch in range(epochs):
        for x_batch, y_batch in loader:
            optimizer.zero_grad()
            output = model(x_batch)
            loss = criterion(output, y_batch)
            loss.backward()
            optimizer.step()
    print("Loader: Loss is {}".format(loss.item()))
    return model

# Model instances
model_direct = SimpleLinearModel()
model_loader = SimpleLinearModel()

# Optimizers
optimizer_direct = optim.Adam(model_direct.parameters(), lr=0.01)
optimizer_loader = optim.Adam(model_loader.parameters(), lr=0.01)

# DataLoader
dataset = TensorDataset(x, y)
full_batch_loader = DataLoader(dataset, batch_size=len(dataset), shuffle=False)

# Train directly using the full dataset
model_direct.train()
time_start = time.time()
for epoch in range(2000):
    optimizer_direct.zero_grad()
    outputs = model_direct(x)
    loss = nn.MSELoss()(outputs, y)
    loss.backward()
    optimizer_direct.step()
print("Direct: Time is {}".format(time.time() - time_start))
print("Direct: loss is {}".format(loss.item()))

# Train using the DataLoader
model_loader.train()
time_start = time.time()
model_loader = train_model(model_loader, full_batch_loader, optimizer_loader)
print("Loader: Time is {}".format(time.time() - time_start))

# Evaluate and compare
model_direct.eval()
model_loader.eval()
with torch.no_grad():
    direct_preds = model_direct(x)
    loader_preds = model_loader(x)

plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.scatter(x.numpy(), y.numpy(), s=1)
plt.plot(x.numpy(), direct_preds.numpy(), color='r')
plt.title('Direct Training')
plt.subplot(1, 2, 2)
plt.scatter(x.numpy(), y.numpy(), s=1)
plt.plot(x.numpy(), loader_preds.numpy(), color='r')
plt.title('Training with DataLoader')
plt.show()
</code>
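To narrow down where the extra time goes, I assume the per-epoch cost of the loader itself can be measured in isolation, with no model in the loop. A rough standalone sketch of that measurement:
<code>import time
import torch
from torch.utils.data import DataLoader, TensorDataset

# Rebuild the same synthetic data and loader as in the script above.
torch.manual_seed(0)
x = torch.linspace(-10, 10, 1000).unsqueeze(1)
y = x**2 + torch.randn_like(x) * 10
dataset = TensorDataset(x, y)
full_batch_loader = DataLoader(dataset, batch_size=len(dataset), shuffle=False)

# Time 2000 "epochs" of pure iteration: each pass indexes every sample through
# TensorDataset and re-collates them into one big batch.
start = time.time()
for _ in range(2000):
    for x_batch, y_batch in full_batch_loader:
        pass
print("Loader-only iteration time: {:.2f}s".format(time.time() - start))

# For comparison, reusing the pre-built tensors directly has essentially no cost.
start = time.time()
for _ in range(2000):
    x_batch, y_batch = x, y
print("Direct tensor access time: {:.4f}s".format(time.time() - start))
</code>
If most of the time difference already shows up in this bare loop, I suppose the overhead sits in the data pipeline rather than in the optimization itself, but I would like to confirm that reasoning.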