```python
import torch
import torch.nn as nn

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # Four conv blocks: channels go 4 -> 64 -> 128 -> 256 -> 512;
        # padding=1 keeps the sequence length unchanged for kernel_size=3,
        # and each MaxPool1d(kernel_size=2) halves it.
        self.conv1 = nn.Conv1d(in_channels=4, out_channels=64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(num_features=64)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(num_features=128)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool1d(kernel_size=2)
        self.conv3 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm1d(num_features=256)
        self.relu3 = nn.ReLU()
        self.pool3 = nn.MaxPool1d(kernel_size=2)
        self.conv4 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm1d(num_features=512)
        self.relu4 = nn.ReLU()
        self.pool4 = nn.MaxPool1d(kernel_size=2)
        # Classifier head: 6144 = 512 channels * 12 remaining timesteps.
        self.fc1 = nn.Linear(6144, 512)
        self.bp1 = nn.BatchNorm1d(num_features=512)
        self.relu5 = nn.ReLU()
        self.dropout1 = nn.Dropout(p=0.2)
        self.fc2 = nn.Linear(512, 256)
        self.bp2 = nn.BatchNorm1d(num_features=256)
        self.relu6 = nn.ReLU()
        self.dropout2 = nn.Dropout(p=0.2)
        self.fc3 = nn.Linear(256, 2)

    def forward(self, x):
        # x: (batch, 4, length)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu4(x)
        x = self.pool4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 6144)
        x = self.fc1(x)
        x = self.bp1(x)
        x = self.relu5(x)
        x = self.dropout1(x)
        x = self.fc2(x)
        x = self.bp2(x)
        x = self.relu6(x)
        x = self.dropout2(x)
        out = self.fc3(x)  # (batch, 2) class logits
        return out
```
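For reference, the hard-coded `6144` in `fc1` pins down the expected input length: the four `MaxPool1d(kernel_size=2)` layers each halve the sequence, so `6144 = 512 * 12` implies a length of `12 * 2**4 = 192`. A quick shape check (the batch size of 8 here is arbitrary):

```python
model = CNN()
dummy = torch.randn(8, 4, 192)  # (batch, in_channels, length)
print(model(dummy).shape)       # torch.Size([8, 2])
```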
The training loss log:

```
the loss on each epoch: 0, loss: 22.778826143549775
the loss on each epoch: 10, loss: 18.96111195662926
the loss on each epoch: 20, loss: 17.036733890401905
the loss on each epoch: 30, loss: 17.06982804440904
the loss on each epoch: 40, loss: 17.262248587334295
the loss on each epoch: 50, loss: 17.27236198556834
```
The loss stops improving after about 20 epochs and plateaus around 17. I want to know how I should change the `out_channels` and `kernel_size` to improve this.
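For context, one way to make those two knobs easy to experiment with (a minimal sketch of a parameterized rewrite, not tested on the real data; `ConfigurableCNN` and its `channels` argument are made-up names): pass the per-block `out_channels` and the `kernel_size` as constructor arguments, set `padding = kernel_size // 2` so any odd kernel size preserves the sequence length, and use `nn.LazyLinear` so the flattened size no longer has to be hard-coded as 6144:

```python
import torch
import torch.nn as nn

class ConfigurableCNN(nn.Module):
    """Sketch: per-block out_channels and the kernel_size are arguments."""
    def __init__(self, in_channels=4, channels=(64, 128, 256, 512),
                 kernel_size=3, num_classes=2):
        super().__init__()
        blocks = []
        prev = in_channels
        for ch in channels:
            blocks += [
                # padding = kernel_size // 2 keeps the length unchanged
                # for odd kernel sizes, so only the pooling shrinks it.
                nn.Conv1d(prev, ch, kernel_size, padding=kernel_size // 2),
                nn.BatchNorm1d(ch),
                nn.ReLU(),
                nn.MaxPool1d(kernel_size=2),
            ]
            prev = ch
        self.features = nn.Sequential(*blocks)
        self.classifier = nn.Sequential(
            nn.LazyLinear(512),  # infers in_features on the first forward
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)

# e.g. narrower convs with a larger receptive field:
model = ConfigurableCNN(channels=(32, 64, 128, 256), kernel_size=5)
out = model(torch.randn(8, 4, 192))  # -> torch.Size([8, 2])
```

Because only the poolings change the sequence length under this padding scheme, any odd `kernel_size` and any channel widths keep the architecture valid, so the two hyperparameters can be swept independently.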