'int' object is not callable during traning

I have converted my images to tensors and use a batch size of 1
print((data.shape))
print(target.shape)
print(target)

outputOf Above code{
torch.Size([1, 3, 128, 128])
()
0
}
#WHY TARGET SIZE IS EMPTY
#USED torch.tensor(label) to convery ndArray to tensor

(TRIED OTHER answers but nothing works)

n_epochs = 10

valid_loss_min = np.Inf # track change in validation loss
model.train()
for epoch in range(1, n_epochs+1):

     train_loss = 0.0                

for i in range((images.shape)[0]):

    data=images[i]
    target=labels[i]
        # clear the gradients of all optimized variables
    optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
    output = model(data)

ERROR------
TypeError Traceback (most recent call last)

in ()
25 output = model(data)
26 # calculate the batch loss
—> 27 loss = criterion(output, target)
28 # backward pass: compute gradient of the loss with respect to model parameters
29 loss.backward()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
–> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
902 def forward(self, input, target):
903 return F.cross_entropy(input, target, weight=self.weight,
–> 904 ignore_index=self.ignore_index, reduction=self.reduction)
905
906

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
1968 if size_average is not None or reduce is not None:
1969 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 1970 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
1971
1972

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1784 raise ValueError(‘Expected 2 or more dimensions (got {})’.format(dim))
1785
-> 1786 if input.size(0) != target.size(0):
1787 raise ValueError(‘Expected input batch_size ({}) to match target batch_size ({}).’
1788 .format(input.size(0), target.size(0)))

TypeError: ‘int’ object is not callable
# calculate the batch loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update training loss
train_loss += loss.item()*data.size(0)
print(“ALL ABOUT LOSS--------”,train_loss,"----------/n")

MY MODEL
from torch import nn
from torch.nn import functional as F
class Net(nn.Module):
def init(self):
super(Net, self).init()
self.conv1 = nn.Conv2d(3, 12, 5, padding=4)
#self.conv2 = nn.Conv2d(32, 96, 5)
# max pooling layer
self.pool = nn.MaxPool2d(4, 4)

    #self.fc1 = nn.Linear(31*31*96, 10)
    self.fc1 = nn.Linear(13068, 10)
    
    # linear layer (500 -> 10)
    #self.fc2 = nn.Linear(500, 10)
    # dropout layer (p=0.25)
    self.dropout = nn.Dropout(0.25)

def forward(self, x):
    # add sequence of convolutional and max pooling layers
    x = self.pool(F.relu(self.conv1(x)))
    #x = self.pool(F.relu(self.conv2(x)))
    # flatten image input
    #ash=x.shape
    x = x.view(-1,13068)
    # add dropout layer
    x = self.dropout(x)
    # add 1st hidden layer, with relu activation function
    x = F.relu(self.fc1(x))
    # add dropout layer
    #x = self.dropout(x)
    # add 2nd hidden layer, with relu activation function
    #x = (self.fc2(x))
    return x

model = Net()
model = model.double()

Could you try to add a dimension to your target using target = target.unsqueeze(0) before passing it to the criterion? Let me know, if that works.

label=torch.Tensor(labels)
n_epochs = 10

valid_loss_min = np.Inf # track change in validation loss
model.train()
for epoch in range(1, n_epochs+1):
train_loss = 0.0
for i in range((images.shape)[0]):

      data=images[i]
      target=label[i]
      target = target.unsqueeze(0)
        # clear the gradients of all optimized variables
      optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
      output = model(data)
        # calculate the batch loss
      loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
      loss.backward()
        # perform a single optimization step (parameter update)
      optimizer.step()
        # update training loss
      train_loss += loss.item()*data.size(0)
      print("ALL ABOUT LOSS--------",train_loss,"----------/n")

RuntimeError Traceback (most recent call last)

in ()
15 output = model(data)
16 # calculate the batch loss
—> 17 loss = criterion(output, target)
18 # backward pass: compute gradient of the loss with respect to model parameters
19 loss.backward()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
–> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
902 def forward(self, input, target):
903 return F.cross_entropy(input, target, weight=self.weight,
–> 904 ignore_index=self.ignore_index, reduction=self.reduction)
905
906

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
1968 if size_average is not None or reduce is not None:
1969 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 1970 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
1971
1972

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1788 .format(input.size(0), target.size(0)))
1789 if dim == 2:
-> 1790 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
1791 elif dim == 4:
1792 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: Expected object of scalar type Long but got scalar type Double for argument #2 ‘target’


AND WHEN i convert it to type long using label.long() so it giving error–

TypeError Traceback (most recent call last)

in ()
9
10 data=images[i]
—> 11 target=label[i]
12 target = target.unsqueeze(0)
13 # clear the gradients of all optimized variables

TypeError: ‘builtin_function_or_method’ object is not subscriptable

Did you assigned label to a buildin method accidentally?
E.g. label = label.long (Note the missing parentheses)?
If you can’t find a similar line of code, could you post the output of print(label)?

This is what i have done…during filtering
previously i have done label = label.long…but when i removed it i got the error
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.DoubleTensor) should be the same

flower_images=np.zeros((210,128,128,3))

for i in flower[‘file’].index:
pl=cv2.imread(“flower_images/”+files[i])
rgb_flower_image = cv2.cvtColor(pl, cv2.COLOR_BGR2RGB)
flower_images[i]=rgb_flower_image

plt.figure(figsize=(3,3))
plt.imshow(flower_images[2])
plt.show()
#convert images to tensors
images=torch.Tensor(flower_images)
images=images.view(210,1,3,128,128)
img=images.double()
label=torch.Tensor(labels)

I am posting my code-----plz help

I assume your model’s parameters are torch.FloatTensors while your input is a torch.DoubleTensor.
Both dtypes should be the same, so you would have to use img = images.float() or model = model.double().
Could you try that and let me know, if this works for you?

PS: You can post code using three backticks `
This will make sure that your code can be searched in this forum and is usually easier to read and copy. :wink:

Got Error
After Doing— model.double()
Expected object of scalar type Long but got scalar type Double for argument #2 ‘target’

my label values
print(label)–
tensor([0., 0., 2., 0., 0., 1., 6., 0., 0., 0., 0., 0., 0., 7., 7., 1., 0., 0.,
6., 0., 2., 4., 7., 4., 5., 6., 2., 5., 6., 6., 3., 6., 5., 0., 3., 8.,
5., 9., 2., 8., 9., 1., 7., 3., 1., 4., 7., 3., 8., 1., 3., 4., 7., 9.,
3., 6., 5., 8., 6., 8., 2., 1., 7., 8., 0., 5., 6., 3., 6., 4., 9., 7.,
9., 1., 5., 3., 6., 6., 8., 3., 1., 4., 3., 9., 8., 5., 2., 4., 6., 4.,
7., 1., 5., 2., 1., 5., 8., 5., 8., 3., 1., 2., 4., 5., 1., 2., 8., 3.,
8., 3., 5., 4., 2., 9., 5., 0., 8., 6., 0., 8., 5., 2., 4., 5., 8., 3.,
2., 0., 8., 6., 9., 2., 8., 4., 5., 8., 0., 6., 2., 4., 9., 4., 5., 5.,
2., 7., 8., 4., 9., 3., 2., 4., 7., 5., 9., 3., 1., 8., 1., 3., 6., 9.,
1., 2., 8., 2., 7., 9., 9., 5., 9., 8., 3., 9., 8., 5., 1., 4., 2., 7.,
0., 5., 8., 6., 3., 9., 6., 1., 3., 7., 4., 7., 1., 9., 8., 3., 6., 5.,
6., 4., 1., 3., 8., 5., 4., 6., 0., 4., 6., 1.])

nn.CrossEntropyLoss expects your labels to be a torch.LongTensor containing the class indices.
Try to convert it using target = target.long().

Finally Worked…thnq very much for the help

1 Like