I’m working on an image classification model for the FashionMNIST dataset, and when I train the model by iterating over the trainloader, I get the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-bdd8d0119d0d> in <cell line: 6>()
7
8 running_loss = 0.0
----> 9 for i, data in enumerate(trainloader, 0):
10 # get the inputs; data is a list of [inputs, labels]
11 inputs, labels = data
3 frames
/usr/local/lib/python3.10/dist-packages/torch/_utils.py in reraise(self)
703 # instantiate since we don't know how to
704 raise RuntimeError(msg) from None
--> 705 raise exception
706
707
TypeError: Caught TypeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/worker.py", line 308, in _worker_loop
data = fetcher.fetch(index) # type: ignore[possibly-undefined]
File "/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/fetch.py", line 51, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/fetch.py", line 51, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.10/dist-packages/torchvision/datasets/mnist.py", line 146, in __getitem__
img = self.transform(img)
TypeError: 'module' object is not callable
I’m working in Google Colab notebooks, if that makes any difference. Here’s the rest of the code:
import matplotlib.pyplot as plt # for plotting
import numpy as np # for transformation
import torch # PyTorch package
import torchvision # load datasets
import torchvision.transforms as transforms # transform data
from torchvision.transforms import v2
import torch.nn as nn # basic building block for neural networks
import torch.nn.functional as F # import convolution functions like ReLU
import torch.optim as optim # optimizer
# PIL images are in the range [0, 1];
# transform them to tensors of normalized range [-1, 1]
transform = transforms.Compose(  # composing several transforms together
    [transforms.ToTensor(),  # to tensor object
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])  # mean = 0.5, std = 0.5
# set batch_size
batch_size = 4
# set number of workers
num_workers = 2
# load train data
trainset = torchvision.datasets.FashionMNIST(root="./data", train=True,
                                              download=True, transform=transforms)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                           shuffle=True, num_workers=num_workers)
# load test data
testset = torchvision.datasets.FashionMNIST(root='./data', train=False,
                                             download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                          shuffle=False, num_workers=num_workers)
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
class Net(nn.Module):
    ''' Models a simple Convolutional Neural Network '''
    def __init__(self):
        ''' initialize the network '''
        super(Net, self).__init__()
        # 3 input image channels, 6 output channels,
        # 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(3, 6, 5)
        # Max pooling over a (2, 2) window
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5x5 from image dimension
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        ''' the forward propagation algorithm '''
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
print(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
# whatever you are timing goes here
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
print('Finished Training')
print(start.elapsed_time(end))  # milliseconds
I researched the error, and I understand it has something to do with calling something from an imported module as though it were a function, but none of the solutions I found applied cleanly to my case, since few of them dealt with this specific scenario. I expected the code to work as written, since its base comes from a tutorial; it ran fine for a while, then suddenly stopped working, and re-copying the tutorial code changed nothing.
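If I’m reading the message right, it means something is being called as if it were a function when it is actually a bare module. Here is a minimal standalone sketch, separate from my actual code, that reproduces the same message (the placeholder image below is purely hypothetical, not from my dataset):

import torchvision.transforms as transforms
from PIL import Image

img = Image.new("L", (28, 28))  # hypothetical placeholder grayscale image
out = transforms(img)  # calling the imported module itself raises:
# TypeError: 'module' object is not callable

Is that the kind of thing happening inside the DataLoader worker, and if so, where in my code is a module being called like this?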