Hello all,

I am trying to build a CNN model that suggests encoder parameters for audio data.

Input: audio file (length: 480,000 samples)

Output: per-frame parameter sequence (length: 469, e.g. [0001111222001233312000])

I am getting very low Validation and Test Accuracy

```
class AudioClassifier(nn.Module):
def __init__(self):
super(AudioClassifier, self).__init__()
self.conv11 = nn.Conv1d(in_channels=1, out_channels=16, kernel_size=Kernel_size_conv, stride=1,
bias=True, padding=int((Kernel_size_conv - 1) / 2))
self.conv12 = nn.Conv1d(in_channels=16, out_channels=16, kernel_size=Kernel_size_conv, stride=1,
bias=True, padding=int((Kernel_size_conv - 1) / 2))
self.conv13 = nn.Conv1d(in_channels=16, out_channels=4, kernel_size=Kernel_size_conv,
stride=1024, bias=True)
def forward(self, X):
out1 = torch.tanh(self.conv11(X))
out1 = torch.tanh(self.conv12(out1))
out1 = (self.conv13(out1))
return out1
```

optimizer = torch.optim.Adam(model.parameters(),lr=1e-2, weight_decay=1e-5)

loss_fn = nn.CrossEntropyLoss()

```
for epoch in range(E):
for batch in range(B)
pred_1= model(training_data)
loss = loss_fn(pred_1, Target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
```

Can anybody suggest an approach to rectify this?