I have the following model, and it is training well. The shapes of my splits are:

- X_train: (98, 1, 40, 844)
- X_val: (21, 1, 40, 844)
- X_test: (21, 1, 40, 844)

However, I get an error at <code>x = F.relu(self.fc1(x))</code> in <code>forward</code> when I attempt to interpret the model on the validation set:
<code># Create a DataLoader for the validation set
valid_dl = learn.dls.test_dl(X_val, y_val)
# Get predictions and interpret them on the validation set
interp = ClassificationInterpretation.from_learner(learn, dl=valid_dl)
</code>
<code>RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x2110 and 67520x128)
</code>
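As far as I can tell, <code>fc1</code> expects 67520 = 32 × 10 × 211 input features (the 32 channels from <code>conv2</code> times the 10 × 211 feature map left after two 2×2 pools), but the tensor reaching it flattens to 32 rows of 2110 = 10 × 211 features. That would match a tensor of shape (32, 10, 211), i.e. the conv output without a batch dimension.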
I have checked dozens of similar questions but I am unable to find a solution.
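To narrow it down, this is a minimal check of what the DataLoader actually yields (assuming fastai v2, where a <code>DataLoader</code> has a <code>one_batch</code> method):

<code># Inspect the batch shapes produced by the test DataLoader
xb, yb = valid_dl.one_batch()
print(xb.shape, yb.shape)  # I would expect xb to be (batch_size, 1, 40, 844)
</code>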
Here is the model:

<code>import torch
import torch.nn as nn
import torch.nn.functional as F

class DraftCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        # Calculate the flattened size from the input dimensions
        with torch.no_grad():
            dummy_input = torch.zeros(1, 1, 40, 844)  # shape of one input sample
            dummy_output = self.pool(self.conv2(self.pool(F.relu(self.conv1(dummy_input)))))
            self.flattened_size = dummy_output.view(dummy_output.size(0), -1).size(1)
        self.fc1 = nn.Linear(self.flattened_size, 128)
        self.fc2 = nn.Linear(128, 4)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)  # flatten the conv output
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
</code>
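For reference, a quick shape trace (plain PyTorch, using the class above) shows what I expect at each stage when the input is batched:

<code># Expected shape trace for a batched input
m = DraftCNN()
x = torch.zeros(2, 1, 40, 844)      # a dummy batch of 2 samples
a = m.pool(F.relu(m.conv1(x)))      # -> (2, 16, 20, 422)
b = m.pool(F.relu(m.conv2(a)))      # -> (2, 32, 10, 211)
print(b.view(b.size(0), -1).shape)  # -> (2, 67520), matching fc1's in_features
</code>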
I have tried changing the <code>forward</code> function and the layer shapes, but I keep getting the same error.
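In case it is useful, here is a sketch of the debug version of <code>forward</code> I would use to compare what training feeds the model against what <code>test_dl</code> feeds it (same layers as above, plus shape prints):

<code>def forward(self, x):
    print("entering forward:", x.shape)  # expect (batch, 1, 40, 844)
    x = self.pool(F.relu(self.conv1(x)))
    x = self.pool(F.relu(self.conv2(x)))
    x = x.view(x.size(0), -1)
    print("after flatten:", x.shape)     # expect (batch, 67520) to match fc1
    x = F.relu(self.fc1(x))
    return self.fc2(x)
</code>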