Hello, maybe it’s easy but it is very confusing to me.
So doing binary classification with BCEWithlogitsloss.
this is my Model:
class BreastCancerModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1=nn.Conv2d(3,6,3,1)
self.conv2=nn.Conv2d(6,10,3,1)
self.conv3=nn.Conv2d(10,13,3,1)
self.fc1=nn.Linear(13*4*4,84)
self.fc2=nn.Linear(84,10)
self.fc3=nn.Linear(10,1)
def forward(self,X):
X=X.view(-1,3,50,50)
X=F.relu(self.conv1(X))
X=F.max_pool2d(X,2,2)
X=F.relu(self.conv2(X))
X=F.max_pool2d(X,2,2)
X=F.relu(self.conv3(X))
X=F.max_pool2d(X,2,2)
X=X.view(-1,13*4*4)
X=F.relu(self.fc1(X))
X=F.relu(self.fc2(X))
X=self.fc3(X)
return X
This is Training loop :
def train(optim,criterion,num_epochs):
train_correct=[]
train_losses=[]
test_correct=[]
test_losses=[]
for i in range(num_epochs):
trn_corr=0
tst_corr=0
for b,(X_train,y_train) in enumerate(train_loader):
b+=1
y_pred=model(X_train)
print('ypred')
print(y_pred)
loss=criterion(y_pred,y_train.unsqueeze(1).float())
print('ytrain')
print(y_train.unsqueeze(1))
#get the number of correct predictions
predicted = torch.round((y_pred.data)[1])
print('predicted')
print(predicted)
batch_corr = (predicted == y_train).sum()
trn_corr += batch_corr
#update Parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
# if b%1000 == 0:
print(f'epoch: {i:2}/{num_epochs} loss: {loss.item():10.8f} \
accuracy: {trn_corr.item()*100/(64*b):7.3f}%')
train_losses.append(loss)
train_correct.append(trn_corr)
with torch.no_grad():
for b,(X_test,y_test) in enumerate(val_loader):
y_val=torch.sigmoid(model(X_test))
loss=criterion(y_val,y_test.unsqueeze(1).float())
predicted=round(y_val.data)[1]
tst_corr+=(predicted==y_test).sum()
test_losses.append(loss)
test_correct.append(tst_corr)
This is THe output of first batch and its very confusing to me. Should i be doing training loop differently ?
ypred
tensor([[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236],
[0.0236]], grad_fn=<AddmmBackward>)
ytrain
tensor([[1.],
[0.],
[1.],
[1.],
[0.],
[1.],
[1.],
[0.],
[0.],
[1.],
[0.],
[0.],
[1.],
[0.],
[1.],
[0.],
[1.],
[0.],
[1.],
[1.],
[0.],
[0.],
[1.],
[1.],
[0.],
[0.],
[1.],
[1.],
[0.],
[0.],
[1.],
[0.],
[1.],
[0.],
[0.],
[0.],
[1.],
[1.],
[0.],
[0.],
[0.],
[0.],
[1.],
[0.],
[0.],
[0.],
[0.],
[0.],
[1.],
[1.],
[0.],
[1.],
[0.],
[0.],
[1.],
[1.],
[1.],
[1.],
[1.],
[0.],
[1.],
[0.],
[1.],
[1.]])
predicted
tensor([0.]
What should i change in training loop for accuracy to be correct?
Now my accuracy is 50 %.