Below is the code for a fully connected NN (I know a convolutional network would be better; I'll get to that later) that classifies digits from the MNIST dataset. When I run training, the accuracy does not change at all. What could be the mistake?
<code>max_epochs = 10
accuracy = {"train": [], "valid": []}
for epoch in range(max_epochs):
    for k, dataloader in loaders.items():
        epoch_correct = 0
        epoch_all = 0
        for x_batch, y_batch in dataloader:
            if k == "train":
                model.train()
                optimizer.zero_grad()
                outp = model(x_batch)
                outp = outp[:y_batch.shape[0]]  # otherwise, the last batch is smaller than expected
            else:
                model.eval()
                with torch.no_grad():
                    outp = model(x_batch)
                    outp = outp[:y_batch.shape[0]]
            preds = outp.argmax(-1)
            correct = (preds == y_batch).sum()
            all = tuple(preds.shape)[0]
            epoch_correct += correct.item()
            epoch_all += all
            if k == "train":
                loss = criterion(outp, y_batch)
                loss.backward()
                optimizer.step()
        if k == "train":
            print(f"Epoch: {epoch+1}")
        print(f"Loader: {k}. Accuracy: {epoch_correct/epoch_all}")
        accuracy[k].append(epoch_correct/epoch_all)
</code>
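For reference, the loop assumes model, criterion, optimizer, and loaders roughly like this (a simplified sketch, not my exact definitions; layer sizes and hyperparameters may differ):

<code>import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# MNIST train/validation loaders
transform = transforms.ToTensor()
train_data = datasets.MNIST(root="data", train=True, download=True, transform=transform)
valid_data = datasets.MNIST(root="data", train=False, download=True, transform=transform)
loaders = {
    "train": DataLoader(train_data, batch_size=64, shuffle=True),
    "valid": DataLoader(valid_data, batch_size=64, shuffle=False),
}

# Fully connected model: flatten 28x28 images, two linear layers
model = nn.Sequential(
    nn.Flatten(),            # [B, 1, 28, 28] -> [B, 784]
    nn.Linear(28 * 28, 128),
    nn.ReLU(),
    nn.Linear(128, 10),
)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
</code>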