As a beginner, I am trying to train a CNN on the Iris dataset with only 2 of the labels considered:
- Iris-setosa: 0
- Iris-versicolor: 1
I am using 90% of the data for training and 10% for testing, with a 1D CNN, BCE loss, and a learning rate of 0.001. The accuracy is only around 40-50%, which is very low, and when I compare and print the (label, predicted_label) pairs, every predicted label is 0; the model never predicts 1, on either the training or the test data. Please suggest where the error could be in training this model on the Iris dataset, and what should be done.
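For context, the trainX/trainY used below come from a 90/10 split of the Iris table, roughly like this (a simplified sketch using sklearn's train_test_split; the exact column handling in my script may differ):

from sklearn.model_selection import train_test_split

# Sketch of the 90/10 split: X holds the 4 feature columns, y the integer labels
X = Iris.iloc[:, :4].values
y = Iris["Species"].map({"Iris-setosa": 0,
                         "Iris-versicolor": 1,
                         "Iris-virginica": 2}).values
trainX, testX, trainY, testY = train_test_split(
    X, y, test_size=0.10, random_state=1, stratify=y)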
import numpy as np
import torch
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.preprocessing import StandardScaler

#Training data
class IrisDataset(T.utils.data.Dataset):
    def __init__(self, Iris):
        # Standardize the training features; trainX/trainY come from the split above
        sc = StandardScaler()
        X_tr = sc.fit_transform(trainX)
        Y_tr = trainY
        self.X_tr = torch.tensor(X_tr, dtype=torch.float32)
        self.Y_tr = torch.tensor(Y_tr, dtype=torch.float32)
    def __len__(self):
        return len(self.Y_tr)
    def __getitem__(self, idx):
        return self.X_tr[idx], self.Y_tr[idx]
train_ds = IrisDataset(Iris)
bat_size = 1
# Leaving only labels 0 and 1
idx = np.append(np.where(train_ds.Y_tr == 0)[0],
                np.where(train_ds.Y_tr == 1)[0])
train_ds.X_tr = train_ds.X_tr[idx]
train_ds.Y_tr = train_ds.Y_tr[idx]
#len(train_ds)
train_ldr = T.utils.data.DataLoader(train_ds,
                                    batch_size=bat_size, shuffle=True)
batch = next(iter(train_ldr))
# and in the same way test data
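The test data is prepared in the same way; roughly like this (a sketch only — testX, testY, and the test loader names are for illustration):

class IrisTestDataset(T.utils.data.Dataset):
    def __init__(self, Iris):
        # Same scaling as the training set, applied to the held-out 10%
        sc = StandardScaler()
        self.X_tr = torch.tensor(sc.fit_transform(testX), dtype=torch.float32)
        self.Y_tr = torch.tensor(testY, dtype=torch.float32)
    def __len__(self):
        return len(self.Y_tr)
    def __getitem__(self, idx):
        return self.X_tr[idx], self.Y_tr[idx]

test_ds = IrisTestDataset(Iris)
# Keep only labels 0 and 1, as for the training set
idx = np.append(np.where(test_ds.Y_tr == 0)[0],
                np.where(test_ds.Y_tr == 1)[0])
test_ds.X_tr = test_ds.X_tr[idx]
test_ds.Y_tr = test_ds.Y_tr[idx]
test_ldr = T.utils.data.DataLoader(test_ds, batch_size=bat_size, shuffle=False)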
#NETWORK CLASS
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv1d(1, 6, kernel_size=1)
        self.conv2 = nn.Conv1d(6, 16, kernel_size=1)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(64, 16)   # 16 channels * 4 features = 64 after flattening
        self.fc2 = nn.Linear(16, 1)    # single output logit
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.dropout(x)
        x = x.view(x.size(0), -1)      # flatten to (batch, 64)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
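As a quick sanity check on the shapes (a sketch, not part of the training script): one sample enters as (batch, channels, features) and leaves as a single logit.

# Shape check: a dummy standardized sample through the network
dummy = torch.randn(1, 1, 4)   # (batch=1, channels=1, features=4)
print(Net()(dummy).shape)      # torch.Size([1, 1]) -- one logit per sample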
# MODEL TRAINING
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.BCEWithLogitsLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
    total_loss = []
    for X_tr, Y_tr in train_ldr:
        X_tr = X_tr.unsqueeze(0)
        optimizer.zero_grad()
        output = model(X_tr)
        pred = output.argmax(dim=1, keepdim=True)
        Y_tr = torch.tensor(Y_tr, dtype=torch.long)
        loss = loss_func(output, Y_tr.squeeze(1))
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        total_loss.append(loss.item())
    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
        100. * (epoch + 1) / epochs, loss_list[-1]))
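And this is roughly how I compare and print the (label, predicted_label) pairs mentioned above (a sketch; it uses the same argmax as in the training loop):

model.eval()
with torch.no_grad():
    for X_te, Y_te in test_ldr:
        X_te = X_te.unsqueeze(0)                    # (1, 1, 4) for Conv1d
        output = model(X_te)
        pred = output.argmax(dim=1, keepdim=True)
        print((Y_te.item(), pred.item()))           # predicted label always comes out as 0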
I have tried many times but am unable to find where the error could be.