Precision, accuracy not increasing on binary time-series classifier

Hi,

I am unable to improve the precision and recall of my time-series classifier with the code below.
Can someone please suggest what else can be done to improve the precision and recall?

import torch
import torch.nn as nn

class lstmfcn(nn.Module):
	def __init__(self, input_shape, size):
		super(lstmfcn, self).__init__()
		# LSTM branch: consumes x as (batch, seq_len, features)
		self.lstm = nn.LSTM(input_size=input_shape[1], hidden_size=size, batch_first=True)
		# FCN branch: treats the seq_len axis (input_shape[0]) as the channel dimension
		self.conv1d_1 = nn.Conv1d(input_shape[0], size, kernel_size=8, padding='same')
		self.batch_norm_1 = nn.BatchNorm1d(size)
		self.activation_1 = nn.ReLU()
		self.conv1d_2 = nn.Conv1d(size, size*2, kernel_size=5, padding='same')
		self.batch_norm_2 = nn.BatchNorm1d(size*2)
		self.activation_2 = nn.ReLU()
		self.conv1d_3 = nn.Conv1d(size*2, size, kernel_size=3, padding='same')
		self.batch_norm_3 = nn.BatchNorm1d(size)
		self.activation_3 = nn.ReLU()
		self.global_avg_pooling_1d = nn.AdaptiveAvgPool1d(1)
		# classifier over the concatenated LSTM state (size) + pooled FCN features (size)
		self.fc = nn.Linear(size*2, 1)
		self.sigmoid = nn.Sigmoid()

	def forward(self, x):
		h_lstm, _ = self.lstm(x)
		y = self.conv1d_1(x)
		y = self.batch_norm_1(y)
		y = self.activation_1(y)
		y = self.conv1d_2(y)
		y = self.batch_norm_2(y)
		y = self.activation_2(y)
		y = self.conv1d_3(y)
		y = self.batch_norm_3(y)
		y = self.activation_3(y)
		y = self.global_avg_pooling_1d(y)
		# last LSTM hidden state + globally pooled FCN features -> (batch, size*2)
		x = torch.cat((h_lstm[:, -1], y.squeeze(dim=2)), dim=1)
		x = self.fc(x)
		x = self.sigmoid(x)
		return x
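
As a quick sanity check of the tensor shapes, this is roughly how I instantiate and call it (a minimal sketch; the input_shape, size, and batch values below are placeholders, not my real data dimensions):

model = lstmfcn(input_shape=(128, 8), size=64)   # placeholder: 128 time steps, 8 features
x = torch.randn(32, 128, 8)                      # placeholder batch of 32 series
out = model(x)
print(out.shape)                                 # torch.Size([32, 1]), sigmoid probabilities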

I have already done scaling and resampling.
Increasing the size argument is not helping.

Based on your model definition it seems you are using a sigmoid output together with nn.BCELoss.
Could you remove the sigmoid and use nn.BCEWithLogitsLoss instead for better numerical stability, and check whether this helps the model training?
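
Roughly like this (a minimal sketch; model, batch_x, and batch_y are illustrative names for your own objects):

# in forward(), return the raw logits instead of applying sigmoid:
#     x = self.fc(x)
#     return x

criterion = nn.BCEWithLogitsLoss()            # applies sigmoid internally, numerically more stable

logits = model(batch_x)                       # (batch, 1) raw logits
loss = criterion(logits, batch_y.float().unsqueeze(1))

# only for evaluation / metrics, convert logits to probabilities and hard predictions
probs = torch.sigmoid(logits)
preds = (probs > 0.5).long()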

Hi,
I already tried that; the model loss decreases, but the precision and recall remain the same.

After adding dropout, the minimum val_loss has decreased, but not by much.
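
The dropout was added roughly along these lines (a sketch; the placement before the final layer and the 0.3 rate shown here are illustrative):

class lstmfcn_dropout(lstmfcn):
	def __init__(self, input_shape, size, p=0.3):
		super().__init__(input_shape, size)
		self.dropout = nn.Dropout(p)

	def forward(self, x):
		h_lstm, _ = self.lstm(x)
		y = self.activation_1(self.batch_norm_1(self.conv1d_1(x)))
		y = self.activation_2(self.batch_norm_2(self.conv1d_2(y)))
		y = self.activation_3(self.batch_norm_3(self.conv1d_3(y)))
		y = self.global_avg_pooling_1d(y)
		x = torch.cat((h_lstm[:, -1], y.squeeze(dim=2)), dim=1)
		x = self.dropout(x)          # regularize the combined LSTM + FCN features
		return self.fc(x)            # raw logits, paired with nn.BCEWithLogitsLoss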