class MyDataClassification(nn.Module):
    """Two-branch 1-D convolutional model fusing two multichannel sequences.

    Branch "a" processes ``x1`` of shape ``(batch, ch1, seq_len)``; branch
    "b" processes ``x2`` of shape ``(batch, ch2, seq_len)``. Each branch is
    three ``Conv1d`` layers (kernel sizes 4, 3, 2; stride 1), which shrink
    the sequence length by (4-1)+(3-1)+(2-1) = 6. The branch outputs are
    summed, dropout is applied, and a ``Linear`` on the last dimension maps
    ``seq_len - 6`` back to ``seq_len``, producing
    ``(batch, num_classes, seq_len)`` — the layout ``nn.CrossEntropyLoss``
    expects for a per-position target of shape ``(batch, seq_len)``.
    """

    def __init__(self, ch1=1, ch2=1, seq_len=100, num_classes=2):
        # NOTE(review): the original defined `init` (not `__init__`), so the
        # constructor never ran; it also referenced undefined globals
        # (ch1, ch2, whatever_value_makes_this_work) — they are explicit
        # parameters now, and the Linear in-features is seq_len - 6 (see
        # class docstring for the arithmetic).
        super().__init__()
        self.layer_1a = nn.Conv1d(in_channels=ch1, out_channels=32,
                                  kernel_size=4, stride=1)
        self.layer_2a = nn.Conv1d(in_channels=32, out_channels=16,
                                  kernel_size=3, stride=1)
        # Last conv of each branch emits num_classes channels so the final
        # output is (batch, num_classes, seq_len) as CrossEntropyLoss needs
        # (the original hard-coded out_channels=1).
        self.layer_3a = nn.Conv1d(in_channels=16, out_channels=num_classes,
                                  kernel_size=2, stride=1)
        self.layer_1b = nn.Conv1d(in_channels=ch2, out_channels=32,
                                  kernel_size=4, stride=1)
        self.layer_2b = nn.Conv1d(in_channels=32, out_channels=16,
                                  kernel_size=3, stride=1)
        self.layer_3b = nn.Conv1d(in_channels=16, out_channels=num_classes,
                                  kernel_size=2, stride=1)
        # One ReLU module is enough — it is stateless; the original assigned
        # self.relu six times, each overwriting the previous one.
        self.relu = nn.ReLU()
        self.layer_3 = nn.Linear(seq_len - 6, seq_len)
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x1, x2):
        """Run both branches, fuse by addition, and project back to seq_len.

        Args:
            x1: tensor of shape ``(batch, ch1, seq_len)``.
            x2: tensor of shape ``(batch, ch2, seq_len)``.

        Returns:
            Logits of shape ``(batch, num_classes, seq_len)``.
        """
        # ReLU was defined but never applied in the original forward pass.
        x1 = self.relu(self.layer_1a(x1))
        x1 = self.relu(self.layer_2a(x1))
        x1 = self.relu(self.layer_3a(x1))
        x2 = self.relu(self.layer_1b(x2))
        x2 = self.relu(self.layer_2b(x2))
        x2 = self.relu(self.layer_3b(x2))
        x = torch.add(x1, x2)          # fuse the two branches
        x = self.dropout(x)            # dropout was defined but never used
        # The original flatten(start_dim=2, end_dim=-1) was a no-op on a 3-D
        # tensor and is removed. Linear acts on the last dim:
        # (B, num_classes, seq_len-6) -> (B, num_classes, seq_len).
        # The original then called self.layer_4, which was never defined and
        # would raise AttributeError — dropped.
        return self.layer_3(x)
My input x1 is a tensor of shape [batch_size, ch1, seq_len] and x2 is of shape [batch_size, ch2, seq_len].
My target is of shape [batch_size, seq_len]
The output of the above model is [batch_size, no_of_classes, seq_len].
I am using CrossEntropyLoss( ) and the model seems to be training.
But when I print the loss and accuracy using
print(f'Epoch {e+0:03}: | Train Loss: {train_epoch_loss/len(train_loader.dataset):.5f} | Val Loss: {val_epoch_loss/len(val_loader.dataset):.5f} | Train Acc: {train_epoch_acc/len(train_loader.dataset):.3f}| Val Acc: {val_epoch_acc/len(val_loader.dataset):.3f}')
The accuracy values are extremely high — in the thousands or tens of thousands! What is going wrong? Am I making an error here that I am not aware of?