Function CudnnConvolutionBackward returned an invalid gradient at index 1 - expected type torch.FloatTensor but got torch.cuda.FloatTensor

Hi, I encountered this problem when I tried to remove some filters from the network and then retrain it, even though I have moved both the model and the data to CUDA. The error occurs at the backward stage, when I call loss.backward().
Here is my code:

criterion = nn.NLLLoss()

optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0005)
def train(model, train_loader, criterion, optimizer, device, epoch):
    model.train()
    for idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output,target)
        loss.backward()
        optimizer.step()
        if idx % 5 == 0:
            print("Epoch {} [{}/{} ({:.2f}%)]\tLoss: {:.6f}"
                  .format(epoch,idx*len(data),len(train_loader.dataset),100.0*idx/len(train_loader),loss.item()))

def test(model,test_loader,criterion,device):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data,target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output,target)
            test_loss += loss.item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),100. * correct / len(test_loader.dataset)))

def index_pruned(model, layer):
    l = []
    for i in range(model.features[layer].weight.size(0)):
        l.append(abs(model.features[layer].weight[i,:,:,:]).sum())
    return l.index(min(l))
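# Note: index_pruned returns the index of the filter with the smallest
# L1 norm. A vectorized equivalent (a sketch, not part of the original
# code) would be:
#   w = model.features[layer].weight
#   return int(w.abs().view(w.size(0), -1).sum(dim=1).argmin())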

def prune_filter(model, layer, next_layer, indexes):     
    new_conv = torch.nn.Conv2d(in_channels=model.features[layer].in_channels, 
                               out_channels=model.features[layer].out_channels-1,
                               kernel_size=model.features[layer].kernel_size,
                               stride=model.features[layer].stride,
                               padding=model.features[layer].padding)
    new_conv.weight[0:indexes,:,:,:] = model.features[layer].weight[0:indexes,:,:,:]
    new_conv.weight[indexes:,:,:,:] = model.features[layer].weight[indexes+1:,:,:,:]
    new_conv.bias[0:indexes] = model.features[layer].bias[0:indexes]
    new_conv.bias[indexes:] = model.features[layer].bias[indexes+1:]
    model.features[layer] = new_conv
    
    if layer != 10:
        next_new_conv = torch.nn.Conv2d(in_channels=model.features[next_layer].in_channels-1, 
                                        out_channels=model.features[next_layer].out_channels,
                                        kernel_size=model.features[next_layer].kernel_size,
                                        stride=model.features[next_layer].stride,
                                        padding=model.features[next_layer].padding)
        next_new_conv.weight[:,0:indexes,:,:] = model.features[next_layer].weight[:,0:indexes,:,:]
        next_new_conv.weight[:,indexes:,:,:] = model.features[next_layer].weight[:,indexes+1:,:,:]
        model.features[next_layer] = next_new_conv
    elif layer == 10:
        params = int(model.classifier[0].in_features / (model.features[10].out_channels+1))
        new_fc1 = torch.nn.Linear(in_features=int(model.classifier[0].in_features-params),
                                  out_features=int(model.classifier[0].out_features))
        new_fc1.weight[:,0:indexes*params] = model.classifier[0].weight[:,0:indexes*params]
    new_fc1.weight[:,indexes*params:] = model.classifier[0].weight[:,(indexes+1)*params:]
        new_fc1.bias = model.classifier[0].bias
        model.classifier[0]=new_fc1
    return model

def main(model, train_loader,test_loader,criterion,optimizer,
         pretrained=False,prune=False,save=False,pruneFilter=False):
    device = 'cuda'
    if pretrained == True:
        model.load_state_dict(torch.load('AlexNet_pruned.pt'))
        for params in model.parameters():
            params.requires_grad = True
    model.to(device)
    '''
    if prune == True:
        threshold = 0.02
        for epoch in range(1, 10):
            for name, p in model.named_parameters():
                if 'weight' in name:
                    m = mask(p.data, threshold)
                    p.data = p.data.mul_(m)
        threshold = threshold + 0.01
    #for epoch in range(2):
    #    train(model, train_loader, criterion, optimizer, device, epoch)
    #    test(model,test_loader,criterion,device)
    for name, p in model.named_parameters():
        if 'weight' in name:
            m = mask(p.data, threshold)
            p.data = p.data.mul_(m)
    '''
    if pruneFilter == True:
        #conv0:
        for num_filters_pruned in range(16):
            model=prune_filter(model=model, layer=0, next_layer=3, indexes=index_pruned(model,0))
            if num_filters_pruned %4 == 0:
                model=model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)
        #conv2:
        for num_filters_pruned in range(81):
            model=prune_filter(model=model, layer=3, next_layer=6, indexes=index_pruned(model, 3))
            if num_filters_pruned %10 == 0:
                model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)
        #conv3:
        for num_filters_pruned in range(192):
            model=prune_filter(model=model, layer=6, next_layer=8, indexes=index_pruned(model, 6))
            if num_filters_pruned %10 == 0:
                model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)
        #conv4:
        for num_filters_pruned in range(128):
            model=prune_filter(model=model, layer=8, next_layer=10, indexes=index_pruned(model, 8))
            if num_filters_pruned %10 == 0:
                model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)
        #conv5:
        for num_filters_pruned in range(128):
            model=prune_filter(model=model, layer=10, next_layer=None, indexes=index_pruned(model, 10))
            if num_filters_pruned %10 == 0:
                model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)
        torch.save(model.state_dict(), 'AlexNet_filters_pruned.pt')


The full error is:

RuntimeError: Function CudnnConvolutionBackward returned an invalid gradient at index 1 - expected type torch.FloatTensor but got torch.cuda.FloatTensor

Could you print the device attribute of all parameters before calling train?

for name, param in model.named_parameters():
    device = param.device
    if device == torch.device('cpu'):
        print('ERROR in', name, device)

I added your piece of code, but there is no “ERROR in” output, so I guess every parameter is on CUDA:

#conv0:
for num_filters_pruned in range(16):
    model = prune_filter(model=model, layer=0, next_layer=3, indexes=index_pruned(model, 0))
    if num_filters_pruned % 4 == 0:
        model = model.cuda()
        for name, param in model.named_parameters():
            device = param.device
            if device == torch.device('cpu'):
                print('ERROR in', name, device)
        train(model, train_loader, criterion, optimizer, device, 1)
        test(model, test_loader, criterion, device)

I changed your code a bit, and this is the result:

#conv0:
for num_filters_pruned in range(16):
    model = prune_filter(model=model, layer=0, next_layer=3, indexes=index_pruned(model, 0))
    if num_filters_pruned % 4 == 0:
        model = model.cuda()
        for name, param in model.named_parameters():
            print(name, ':', param.device)
        train(model, train_loader, criterion, optimizer, device, 1)
        test(model, test_loader, criterion, device)
features.0.weight : cuda:0
features.0.bias : cuda:0
features.3.weight : cuda:0
features.3.bias : cuda:0
features.6.weight : cuda:0
features.6.bias : cuda:0
features.8.weight : cuda:0
features.8.bias : cuda:0
features.10.weight : cuda:0
features.10.bias : cuda:0
classifier.fc1.weight : cuda:0
classifier.fc1.bias : cuda:0
classifier.fc2.weight : cuda:0
classifier.fc2.bias : cuda:0
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-12-46a65917686e> in <module>
----> 1 main(model,train_loader,test_loader,criterion,optimizer,pretrained=True,prune=False,save=False,pruneFilter=True)

<ipython-input-11-8c11e06a650b> in main(model, train_loader, test_loader, criterion, optimizer, pretrained, prune, save, pruneFilter)
     33                     print(name,':',param.device)
     34 
---> 35                 train(model, train_loader, criterion, optimizer, device, 1)
     36                 test(model,test_loader,criterion,device)
     37         #conv2:

<ipython-input-8-f4fd4c83eff2> in train(model, train_loader, criterion, optimizer, device, epoch)
      9         loss = criterion(output,target)
     10         loss = loss.cuda()
---> 11         loss.backward()
     12         optimizer.step()
     13         if idx % 5 == 0:

/usr/local/lib/python3.5/dist-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    100                 products. Defaults to ``False``.
    101         """
--> 102         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    103 
    104     def register_hook(self, hook):

/usr/local/lib/python3.5/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
     88     Variable._execution_engine.run_backward(
     89         tensors, grad_tensors, retain_graph, create_graph,
---> 90         allow_unreachable=True)  # allow_unreachable flag
     91 
     92 

RuntimeError: Function CudnnConvolutionBackward returned an invalid gradient at index 1 - expected type torch.FloatTensor but got torch.cuda.FloatTensor

Thanks for the information!
Could you post a (small) executable code snippet to reproduce this issue so that we could debug it?

import torch
import torch.nn.functional as F
from torch import optim
from torch import nn
from torchvision import datasets, transforms, models
#The dataset is cats vs dogs from Kaggle
train_data_direction = '/home/hoangminhq/TestFolder/dogs-vs-cats/Cat_Dog_data/Cat_Dog_data/train'
test_data_direction  = '/home/hoangminhq/TestFolder/dogs-vs-cats/Cat_Dog_data/Cat_Dog_data/test'
train_transform = transforms.Compose([transforms.RandomRotation(30),
                                      transforms.RandomResizedCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])

test_transform = transforms.Compose([transforms.Resize(255),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])

train_dataset = datasets.ImageFolder(train_data_direction, transform = train_transform)
test_dataset  = datasets.ImageFolder(test_data_direction, transform = test_transform)

train_loader   = torch.utils.data.DataLoader(train_dataset, batch_size = 128, shuffle = True)
test_loader    = torch.utils.data.DataLoader(test_dataset, batch_size = 128)

class AlexNet(nn.Module):

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        x = F.log_softmax(x,dim=1)
        return x
model = AlexNet()
# Modify AlexNet a bit
for params in model.parameters():
    params.requires_grad = False
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(9216, 4096)),
                          ('relu', nn.ReLU()),
                          ('fc2', nn.Linear(4096, 2)),
                          ('output', nn.LogSoftmax(dim=1))
                          ]))
    
model.classifier = classifier

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0005)
def train(model, train_loader, criterion, optimizer, device, epoch):
    model.train()
    for idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output,target)
        loss.backward()
        optimizer.step()

def prune_filter(model, layer, next_layer, indexes):
    new_conv = torch.nn.Conv2d(in_channels=model.features[layer].in_channels,
                               out_channels=model.features[layer].out_channels-1,
                               kernel_size=model.features[layer].kernel_size,
                               stride=model.features[layer].stride,
                               padding=model.features[layer].padding)
    new_conv.weight[0:indexes,:,:,:] = model.features[layer].weight[0:indexes,:,:,:]
    new_conv.weight[indexes:,:,:,:] = model.features[layer].weight[indexes+1:,:,:,:]
    new_conv.bias[0:indexes] = model.features[layer].bias[0:indexes]
    new_conv.bias[indexes:] = model.features[layer].bias[indexes+1:]
    model.features[layer] = new_conv
    next_new_conv = torch.nn.Conv2d(in_channels=model.features[next_layer].in_channels-1,
                                    out_channels=model.features[next_layer].out_channels,
                                    kernel_size=model.features[next_layer].kernel_size,
                                    stride=model.features[next_layer].stride,
                                    padding=model.features[next_layer].padding)
    next_new_conv.weight[:,0:indexes,:,:] = model.features[next_layer].weight[:,0:indexes,:,:]
    next_new_conv.weight[:,indexes:,:,:] = model.features[next_layer].weight[:,indexes+1:,:,:]
    model.features[next_layer] = next_new_conv
    return model
def main(model, train_loader,test_loader,criterion,optimizer,
         pretrained=False,prune=False,save=False,pruneFilter=False):
    device = 'cuda'
    if pruneFilter == True:
        #conv0:
        for num_filters_pruned in range(16):
            model=prune_filter(model=model, layer=0, next_layer=3, indexes=index_pruned(model,0))
            if num_filters_pruned %4 == 0:
                model=model.cuda()
                train(model, train_loader, criterion, optimizer, device, 1)
                test(model,test_loader,criterion,device)

main(model,train_loader,test_loader,criterion,optimizer,pretrained=True,prune=False,save=False,pruneFilter=True)

# This should be enough.

index_pruned is undefined and I’m not sure what to pass.

I’m so sorry, there is one function missing:

def index_pruned(model, layer):
    l = []
    for i in range(model.features[layer].weight.size(0)):
        l.append(abs(model.features[layer].weight[i,:,:,:]).sum())
    return l.index(min(l))

Hi @Hoang_Minh_Q,

Did you solve the problem?

Yeah!
I don’t know why, but after converting the weights from torch.Tensor to a numpy array and then back to torch.Tensor, it works.
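For reference: converting to numpy and back detaches the copied values from the old layers’ autograd history, which is presumably why it fixes the backward error. The same effect can be achieved in plain PyTorch by performing the copies in prune_filter under torch.no_grad() (a sketch using the names from the code above, not the poster’s exact fix):

with torch.no_grad():
    # Copies made under no_grad are not recorded by autograd, so the new
    # parameters stay plain leaf tensors with no link to the old layer.
    new_conv.weight[0:indexes] = model.features[layer].weight[0:indexes]
    new_conv.weight[indexes:] = model.features[layer].weight[indexes+1:]
    new_conv.bias[0:indexes] = model.features[layer].bias[0:indexes]
    new_conv.bias[indexes:] = model.features[layer].bias[indexes+1:]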
