AttributeError: 'Normalize' object has no attribute 'float'

I need to normalize my train set in the following code:

network = Network()
network.cuda()    

criterion = nn.MSELoss()
optimizer = optim.Adam(network.parameters(), lr=0.0001)

loss_min = np.inf
num_epochs = 1

start_time = time.time()
for epoch in range(1,num_epochs+1):
    
    loss_train = 0
    loss_test = 0
    running_loss = 0
    
    
    network.train()
    print('size of train loader is: ', len(train_loader))

    for step in range(1,len(train_loader)+1):

        
        batch = next(iter(train_loader))
        images, landmarks = batch['image'], batch['landmarks']
        #RuntimeError: Given groups=1, weight of size [64, 3, 7, 7], expected input[64, 600, 800, 3] to have 3 channels, but got 600 channels instead
        #using permute below to fix the above error
        images = images.permute(0,3,1,2)
        
        images = images.cuda()
    
        landmarks = landmarks.view(landmarks.size(0),-1).cuda() 
    
        images = transforms.Normalize([97.13, 97.15, 97.15], [28.74, 28.79, 28.81]) #find the args later
        ##landmarks = torchvision.transforms.Normalize(landmarks) #Do I need to normalize the target?
        
        predictions = network(images)
        
        # clear all the gradients before calculating them
        optimizer.zero_grad()
        
        print('predictions are: ', predictions.float())
        print('landmarks are: ', landmarks.float())
        # find the loss for the current step
        loss_train_step = criterion(predictions.float(), landmarks.float())
        
        
        loss_train_step = loss_train_step.to(torch.float32)
        print("loss_train_step before backward: ", loss_train_step)
        
        # calculate the gradients
        loss_train_step.backward()
        
        # update the parameters
        optimizer.step()
        
        print("loss_train_step after backward: ", loss_train_step)

        
        loss_train += loss_train_step.item()
        
        print("loss_train: ", loss_train)
        running_loss = loss_train/step
        print('step: ', step)
        print('running loss: ', running_loss)
        
        print_overwrite(step, len(train_loader), running_loss, 'train')
        
    network.eval() 
    with torch.no_grad():
        
        for step in range(1,len(test_loader)+1):
            
            batch = next(iter(test_loader))
            images, landmarks = batch['image'], batch['landmarks']
            images = images.permute(0,3,1,2)
            images = images.cuda()
            landmarks = landmarks.view(landmarks.size(0),-1).cuda()
        
            predictions = network(images)

            # find the loss for the current step
            loss_test_step = criterion(predictions, landmarks)

            loss_test += loss_test_step.item()
            running_loss = loss_test/step

            print_overwrite(step, len(test_loader), running_loss, 'Validation')
    
    loss_train /= len(train_loader)
    loss_test /= len(test_loader)
    
    print('\n--------------------------------------------------')
    print('Epoch: {}  Train Loss: {:.4f} Valid Loss: {:.4f}'.format(epoch, loss_train, loss_test))
    print('--------------------------------------------------')
    
    if loss_test < loss_min:
        loss_min = loss_test
        torch.save(network.state_dict(), '../moth_landmarks.pth') 
        print("\nMinimum Valid Loss of {:.4f} at epoch {}/{}".format(loss_min, epoch, num_epochs))
        print('Model Saved\n')
     
print('Training Complete')
print("Total Elapsed Time : {} s".format(time.time()-start_time))

and I get this error:

size of train loader is:  90

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-101-ea938d47ca19> in <module>
     35         ##landmarks = torchvision.transforms.Normalize(landmarks) #Do I need to normalize the target?
     36 
---> 37         predictions = network(images)
     38 
     39         # clear all the gradients before calculating them

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

<ipython-input-19-46116d2a7101> in forward(self, x)
      9 
     10     def forward(self, x):
---> 11         x = x.float()
     12         out = self.model(x)
     13         return out

AttributeError: 'Normalize' object has no attribute 'float'

How else should I normalize the data to avoid this error?

You are passing the transforms.Normalize object itself to the network instead of applying it to the input data.
Pass the input tensor to transforms.Normalize and pass the returned, normalized tensor to the model.
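
For example, a minimal sketch using the placeholder mean/std from your code (compute the real statistics from your dataset):

norm = transforms.Normalize(mean=[97.13, 97.15, 97.15], std=[28.74, 28.79, 28.81])

images = images.permute(0, 3, 1, 2).float().cuda()  # NHWC -> NCHW; Normalize expects a float tensor
images = norm(images)                                # apply the transform to the input tensor
predictions = network(images)                        # pass the normalized tensor, not the Normalize object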


Thanks a lot. As you mentioned, I was able to normalize the images:


        landmarks = landmarks.view(landmarks.size(0),-1).cuda() 
    
        norm = transforms.Normalize([97.13, 97.15, 97.15], [28.74, 28.79, 28.81]) #find the args later
        images = images.float()

        images = norm(images)
        ##landmarks = torchvision.transforms.Normalize(landmarks) #Do I need to normalize the target?
        
        predictions = network(images)
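
For completeness, a short sketch of how this sits in the training loop, with the Normalize transform built once outside the loop and the DataLoader iterated directly (same placeholder mean/std as above):

norm = transforms.Normalize([97.13, 97.15, 97.15], [28.74, 28.79, 28.81])  # dataset mean/std, created once

for step, batch in enumerate(train_loader, start=1):
    images, landmarks = batch['image'], batch['landmarks']
    images = images.permute(0, 3, 1, 2).float().cuda()        # NHWC -> NCHW, cast to float for Normalize
    landmarks = landmarks.view(landmarks.size(0), -1).cuda()
    images = norm(images)                                      # normalize the batch, then feed it to the model
    predictions = network(images)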