RuntimeError: expected scalar type Long but found Float

Hello,

I am trying to classify images into 62 classes: 0-9, A-Z, a-z. The images have shape 1200×900×3. It seems to me that I am doing something wrong; I keep going in circles, fixing one error after another.
This is my code. It's a first draft; I am just trying to get it working before I start improving the Net:

import os

import pandas as pd
import torch
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

# Hyperparameters
input_size = 1024 # 32*32 -> flatten
hidden_size = 124
num_classes = 62
num_epochs = 2
batch_size = 32
learning_rate = 0.001

class HandwrittenCharacter(Dataset):
    def __init__(self, csv_file, image_dir, transform=None):
        self.data = pd.read_csv(csv_file)
        self.root = image_dir
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # get the image name at the idx
        image_name = self.data.loc[idx, "image"]
        # Get the image
        image = Image.open(os.path.join(self.root, image_name))
        # Transform the image if applicable
        if self.transform:
            image = self.transform(image)
            # print(image.size)
        # labels
        label = [self.data.loc[idx, "label"]]
        return image, torch.Tensor(label)

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.linear1(x)
        out = self.relu(out)
        out = self.linear2(out)
        return out

# Transformation
composed = transforms.Compose([transforms.Grayscale(), transforms.Resize(size=(32, 32)), transforms.ToTensor()])

# TRAINING
train_dataset = HandwrittenCharacter('data/train.csv', 'data', composed)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# print(len(train_dataset))

# TESTING
test_dataset = HandwrittenCharacter('data/test.csv', 'data', composed)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# device config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# print(device)

model = NeuralNet(input_size, hidden_size, num_classes).to(device)  # move the model to the chosen device

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training Loop
n_total_steps = len(train_loader)
# print(n_total_steps)

for epoch in range(num_epochs):
    # print(epoch+1)
    for i, (images, labels) in enumerate(train_loader):
        # print(images.shape)
        images = images.reshape(-1, 32*32).to(device)
        labels = labels.to(device)

        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print(f'epoch {epoch+1} / {num_epochs}, step {i+1}/{n_total_steps}, loss = {loss.item():.4f}')


# Testing
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 32*32).to(device)
        labels = labels.to(device)
        outputs = model(images)

        # value, index
        _, predictions = torch.max(outputs, 1)
        n_samples += labels.shape[0]
        n_correct += (predictions == labels).sum().item()

    acc = 100*n_correct / n_samples
    print(f'accuracy = {acc}')

I divided a single CSV file into train and test CSV files using this:

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# CSV file path
csv_file = 'data/english.csv'
# Read CSV
df = pd.read_csv(csv_file, dtype={"image": pd.StringDtype(), "label": pd.CategoricalDtype()})
classes = df['label'].unique()
quantity = len(classes)
print(f'\nThe {quantity} classes are:\n{classes}')

# Create instance of labelencoder
labelencoder = LabelEncoder()
df['label'] = labelencoder.fit_transform(df['label'])
print(df.loc[2000])

# ONLY ONCE -> do a train/test split
train, test = train_test_split(df, test_size=0.10, random_state=42)
# index=False keeps the shuffled DataFrame index out of the CSVs
train.to_csv("data/train.csv", index=False)
test.to_csv("data/test.csv", index=False)
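
A quick sanity check on the encoding: assuming all 62 classes appear in the CSV, the labels should come out as integers 0 to 61:

# should print something like: 0 61 int64
print(df['label'].min(), df['label'].max(), df['label'].dtype)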

I am currently getting this error:

RuntimeError: expected scalar type Long but found Float

Thank you

Hi,

Maybe you can post the full stack trace so we can see where the error occurs.

I currently get this error:

Traceback (most recent call last):
  File "/Users/iamharsh/Desktop/Python codes/pytorch/handwritten_character.py", line 112, in <module>
    loss = criterion(outputs, labels)
  File "/opt/anaconda3/envs/pytorch_env/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/anaconda3/envs/pytorch_env/lib/python3.9/site-packages/torch/nn/modules/loss.py", line 1163, in forward
    return F.cross_entropy(input, target, weight=self.weight,
  File "/opt/anaconda3/envs/pytorch_env/lib/python3.9/site-packages/torch/nn/functional.py", line 2996, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
RuntimeError: 0D or 1D target tensor expected, multi-target not supported

Maybe this old question can help you. It seems you have a similar problem with the target format that CrossEntropyLoss expects.
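
For reference, nn.CrossEntropyLoss with class-index targets expects raw logits of shape (batch_size, num_classes) and a 1D LongTensor of class indices of shape (batch_size,). A minimal sketch with your sizes (62 classes, batch of 32):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

logits = torch.randn(32, 62)           # (batch_size, num_classes), raw scores
targets = torch.randint(0, 62, (32,))  # 1D LongTensor of class indices

loss = criterion(logits, targets)      # works

# A (32, 1) FloatTensor target, like the one built in __getitem__ above,
# fails with errors like the ones in your tracebacks:
# criterion(logits, targets.float().unsqueeze(1))  # RuntimeError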

I think I am doing the label encoding the wrong way
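
The LabelEncoder step itself looks fine (it maps the 62 classes to integers 0-61). The more likely culprit is return image, torch.Tensor(label) in __getitem__: torch.Tensor builds a FloatTensor, and wrapping the label in a list gives it shape (1,), so each batch becomes a (batch_size, 1) FloatTensor instead of the 1D LongTensor that CrossEntropyLoss needs. A minimal sketch of the fix, keeping the rest of the Dataset unchanged:

def __getitem__(self, idx):
    image_name = self.data.loc[idx, "image"]
    image = Image.open(os.path.join(self.root, image_name))
    if self.transform:
        image = self.transform(image)
    # 0-dim LongTensor; the default collate stacks these into a
    # 1D LongTensor of shape (batch_size,) for CrossEntropyLoss
    label = torch.tensor(int(self.data.loc[idx, "label"]), dtype=torch.long)
    return image, label

With that change, labels in the training loop has shape (batch_size,) and dtype torch.int64, so criterion(outputs, labels) should run.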