CNN training fails because of a tensor type mismatch

Hello. I am writing code to classify images with a CNN, but it throws this error:

    RuntimeError: expected Byte tensor (got Float tensor)

The error is raised on these lines:

    outputs = CNN(images)

and

    out = self.layer1(x)

Could anyone help me out? I'll put the model and training code below.

    """
    Model Definition
    """
    
    
    # CNN Model (2 conv layer)
    class CNN(nn.Module):
        def __init__(self):
            super(CNN, self).__init__()
            self.layer1 = nn.Sequential(
                nn.Conv2d(1, 16, kernel_size=5, padding=2),
                nn.BatchNorm2d(16),
                nn.ReLU(),
                nn.MaxPool2d(2))
            self.layer2 = nn.Sequential(
                nn.Conv2d(16, 32, kernel_size=5, padding=2),
                nn.BatchNorm2d(32),
                nn.ReLU(),
                nn.MaxPool2d(2))
            self.fc = nn.Linear(7 * 7 * 32, 10)
    
        def forward(self, x):
            out = self.layer1(x)
            out = self.layer2(out)
            out = out.view(out.size(0), -1)
            out = self.fc(out)
            return out.double()
    
    
    CNN = CNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)
    
    """
    Training
    """
    batch_size = 50
    learning_rate = 0.001
    # Data Loader (Input Pipeline)
    train = torch.utils.data.TensorDataset(torch.from_numpy(X_train), torch.from_numpy(Y_train))
    train_loader = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
    
    # test = torch.utils.data.TensorDataset(torch.from_numpy(X_test), torch.from_numpy(Y_test))
    # test_loader = torch.utils.data.DataLoader(train, batch_size=100, shuffle=True)
    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(CNN.parameters(), lr=learning_rate)
    
    for epoch in range(2):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, (images, labels) in enumerate(train_loader):
    
            images = Variable(images)
            labels = Variable(labels)
    
            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = CNN(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.data[0]
            if i % 100 == 99:  # print the average loss every 100 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
    print('Finished Training')

It seems that your input (images) is a ByteTensor, so when it is multiplied by layer1's Conv2d weight matrix (which is a FloatTensor), torch complains about the type mismatch.
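
You can confirm this by printing the tensor types right after the batch comes out of train_loader (a quick check that reuses the variable names from your code; Tensor.type() with no argument just returns the type string):

    # Quick type check, before wrapping the batch in Variable / calling the model
    print(images.type())                     # likely 'torch.ByteTensor'
    print(CNN.layer1[0].weight.data.type())  # 'torch.FloatTensor'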

Try:

    outputs = CNN(images.float())
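
Alternatively, do the conversion once when you build the dataset instead of on every batch. The sketch below assumes X_train is a uint8 NumPy image array of shape (N, 1, 28, 28) (which matches the 7 * 7 * 32 fully connected layer) and that Y_train holds integer class labels; CrossEntropyLoss also expects LongTensor targets:

    # Cast once at dataset-creation time (same X_train / Y_train names as in the question)
    images_t = torch.from_numpy(X_train).float()  # uint8 -> float32 for the conv layers
    labels_t = torch.from_numpy(Y_train).long()   # CrossEntropyLoss wants LongTensor targets

    train = torch.utils.data.TensorDataset(images_t, labels_t)
    train_loader = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)

With that in place you can also change return out.double() in forward() to just return out, so the model's output stays a FloatTensor end to end.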