import copy

import torch
import torch.nn.functional as F
from torch_geometric.loader import NeighborLoader

kwargs = {'batch_size': 512, 'num_workers': 6, 'persistent_workers': True}
# Full-neighborhood loader over the whole graph (num_neighbors=[-1]),
# intended for layer-wise inference.
subgraph_loader = NeighborLoader(copy.copy(data), input_nodes=None,
                                 num_neighbors=[-1], shuffle=False, **kwargs)

for _ in range(n_epochs):
    model.train()
    total_loss = total_correct = total_examples = 0
    for batch in train_loader:
        optimizer.zero_grad()
        # Only the first batch_size nodes are seed nodes; the rest are
        # sampled neighbors, so predictions and labels are sliced.
        y = batch.y[:batch.batch_size]
        y_hat = model(batch.x, batch.edge_index)[:batch.batch_size]
        loss = F.cross_entropy(y_hat, y)
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * batch.batch_size
        total_correct += int((y_hat.argmax(dim=-1) == y).sum())
        total_examples += batch.batch_size

    model.eval()
    val_correct = val_examples = 0
    with torch.no_grad():
        for batch in val_loader:
            y = batch.y[:batch.batch_size]
            y_hat = model(batch.x, batch.edge_index)[:batch.batch_size]
            val_correct += int((y_hat.argmax(dim=-1) == y).sum())
            val_examples += batch.batch_size
    val_acc = val_correct / val_examples

    # The loop in question: drain subgraph_loader twice without using
    # the batches at all.
    for i in range(2):
        for batch in subgraph_loader:
            pass
If I keep the code below, the model reaches 83.55% accuracy on the final test:
for i in range(2):
    for batch in subgraph_loader:
        pass
If I remove it, the model only reaches 77.27% accuracy on the final test.
I would like to know what causes this difference.
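For reference, here is a minimal diagnostic sketch I could add (this check is my own assumption, not part of the training script): PyTorch DataLoader workers are seeded from the global RNG, so merely draining a loader can advance torch's CPU RNG state, which would change every subsequent random draw (shuffling, sampling, dropout). Comparing the RNG state before and after one extra pass would show whether that is happening:

import torch

# Hypothesis (unverified): draining the loader advances torch's global
# CPU RNG state, e.g. through DataLoader worker seeding, so later
# shuffling/sampling differs between the two runs.
state_before = torch.random.get_rng_state()
for batch in subgraph_loader:  # drain once, exactly as in the loop above
    pass
state_after = torch.random.get_rng_state()

# False here would mean the extra iteration changed the RNG state, so the
# 83.55% vs. 77.27% gap could just be run-to-run randomness rather than a
# real algorithmic effect.
print(torch.equal(state_before, state_after))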