I really need help right now... I'm pretty new here, sorry.

Can anybody help me solve this? I think it is a multiclass-multioutput case.

class CustomDataset(Dataset):
    def __init__(self, csv_data, img_root_path, transform=None):
        self.df = csv_data
        self.root_dir = img_root_path
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img = PIL.Image.open(self.root_dir + self.df.iloc[index, 0]).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        label = self.df.drop(columns=['fname', 'suara_paslon_1', 'suara_paslon_2', 'suara_paslon_3'])
        label = torch.from_numpy(label.values).type(torch.LongTensor)

        return img, label

bs = 32

train_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(15),
    transforms.ColorJitter(brightness=0.3),
    transforms.ColorJitter(contrast=0.3),
    transforms.RandomAffine(0, shear=10),
    transforms.ToTensor()
])

test_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor()
])

train_set = CustomDataset(csv_data=df_train, img_root_path='/content/drive/MyDrive/Gammafest/data/Train/', transform=train_transform)
trainloader = DataLoader(train_set, batch_size=bs, num_workers=4)

test_set = CustomDataset(csv_data=df_test, img_root_path='/content/drive/MyDrive/Gammafest/data/Train/', transform=test_transform)
testloader = DataLoader(test_set, batch_size=bs)

label2cat = list(np.arange(0, 10))

class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            conv_block(3, 8),
            conv_block(8, 16),
            conv_block(16, 32),
            conv_block(32, 64),
            conv_block(64, 128),
            nn.Flatten()
        )

        self.fc = nn.Sequential(
            linear_block(8192, 4096, dropout=0.1),
            linear_block(4096, 2048, dropout=0.1),
            linear_block(2048, 1024, dropout=0.1)
        )

        self.digit1_paslon_1 = linear_block(1024, 10, activation='lsoftmax')
        self.digit2_paslon_1 = linear_block(1024, 10, activation='lsoftmax')
        self.digit3_paslon_1 = linear_block(1024, 10, activation='lsoftmax')

        self.digit1_paslon_2 = linear_block(1024, 10, activation='lsoftmax')
        self.digit2_paslon_2 = linear_block(1024, 10, activation='lsoftmax')
        self.digit3_paslon_2 = linear_block(1024, 10, activation='lsoftmax')

        self.digit1_paslon_3 = linear_block(1024, 10, activation='lsoftmax')
        self.digit2_paslon_3 = linear_block(1024, 10, activation='lsoftmax')
        self.digit3_paslon_3 = linear_block(1024, 10, activation='lsoftmax')

    def forward(self, x):
        x = self.conv(x)
        x = self.fc(x)

        pred_digit1_paslon_1 = self.digit1_paslon_1(x)
        pred_digit2_paslon_1 = self.digit2_paslon_1(x)
        pred_digit3_paslon_1 = self.digit3_paslon_1(x)

        pred_digit1_paslon_2 = self.digit1_paslon_2(x)
        pred_digit2_paslon_2 = self.digit2_paslon_2(x)
        pred_digit3_paslon_2 = self.digit3_paslon_2(x)

        pred_digit1_paslon_3 = self.digit1_paslon_3(x)
        pred_digit2_paslon_3 = self.digit2_paslon_3(x)
        pred_digit3_paslon_3 = self.digit3_paslon_3(x)

        return (pred_digit1_paslon_1, pred_digit2_paslon_1, pred_digit3_paslon_1,
                pred_digit1_paslon_2, pred_digit2_paslon_2, pred_digit3_paslon_2,
                pred_digit1_paslon_3, pred_digit2_paslon_3, pred_digit3_paslon_3)

config = set_config({
    'batch_size': bs
})

model = CNN().to(device)

criterion = nn.NLLLoss()

optimizer = optim.AdamW(model.parameters(), lr=0.001)

callback = Callback(model, config, outdir="model")

from tqdm.auto import tqdm

def loop_fn(mode, dataset, dataloader, model, criterion, optimizer, device):
    if mode == "train":
        model.train()
    elif mode == "test":
        model.eval()
    cost = correct = 0
    for feature, target in tqdm(dataloader, desc=mode.title()):
        feature, target = feature.to(device), target.to(device)
        outputs = model(feature)

        # Separate the output for each digit
        (pred_digit1_paslon_1, pred_digit2_paslon_1, pred_digit3_paslon_1,
         pred_digit1_paslon_2, pred_digit2_paslon_2, pred_digit3_paslon_2,
         pred_digit1_paslon_3, pred_digit2_paslon_3, pred_digit3_paslon_3) = outputs

        # Compute the loss for each digit prediction
        loss1 = criterion(pred_digit1_paslon_1, target[:, 0])
        loss2 = criterion(pred_digit2_paslon_1, target[:, 1])
        loss3 = criterion(pred_digit3_paslon_1, target[:, 2])

        loss4 = criterion(pred_digit1_paslon_2, target[:, 3])
        loss5 = criterion(pred_digit2_paslon_2, target[:, 4])
        loss6 = criterion(pred_digit3_paslon_2, target[:, 5])

        loss7 = criterion(pred_digit1_paslon_3, target[:, 6])
        loss8 = criterion(pred_digit2_paslon_3, target[:, 7])
        loss9 = criterion(pred_digit3_paslon_3, target[:, 8])

        loss = loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9

        if mode == "train":
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        cost += loss.item() * feature.shape[0]
        # A prediction only counts as correct if all nine digits are correct
        correct += ((pred_digit1_paslon_1.argmax(1) == target[:, 0]) &
                    (pred_digit2_paslon_1.argmax(1) == target[:, 1]) &
                    (pred_digit3_paslon_1.argmax(1) == target[:, 2]) &
                    (pred_digit1_paslon_2.argmax(1) == target[:, 3]) &
                    (pred_digit2_paslon_2.argmax(1) == target[:, 4]) &
                    (pred_digit3_paslon_2.argmax(1) == target[:, 5]) &
                    (pred_digit1_paslon_3.argmax(1) == target[:, 6]) &
                    (pred_digit2_paslon_3.argmax(1) == target[:, 7]) &
                    (pred_digit3_paslon_3.argmax(1) == target[:, 8])).sum().item()

    cost = cost / len(dataset)
    acc = correct / len(dataset)
    return cost, acc

while True:
    train_cost, train_score = loop_fn("train", train_set, trainloader, model, criterion, optimizer, device)
    with torch.no_grad():
        test_cost, test_score = loop_fn("test", test_set, testloader, model, criterion, optimizer, device)

    # Logging
    callback.log(train_cost, test_cost, train_score, test_score)

    # Checkpoint
    callback.save_checkpoint()

    # Runtime Plotting
    callback.cost_runtime_plotting()
    callback.score_runtime_plotting()

    # Early Stopping
    if callback.early_stopping(model, monitor="test_score"):
        callback.plot_cost()
        callback.plot_score()
        break

This is the error:


RuntimeError                              Traceback (most recent call last)
in <cell line: 1>()
      1 while True:
----> 2     train_cost, train_score = loop_fn("train", train_set, trainloader, model, criterion, optimizer, device)
      3     with torch.no_grad():
      4         test_cost, test_score = loop_fn("test", test_set, testloader, model, criterion, optimizer, device)
      5

4 frames
/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   2731     if size_average is not None or reduce is not None:
   2732         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2733     return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
   2734
   2735

RuntimeError: 0D or 1D target tensor expected, multi-target not supported

Excuse me, sir @ptrblck,
would you be willing to help me solve this problem?

This error is usually raised if the target shape is invalid as seen here:

criterion = nn.NLLLoss()

batch_size = 10
nb_classes = 15

# works
output = torch.randn(batch_size, nb_classes, requires_grad=True)
target = torch.randint(0, nb_classes, (batch_size,))
loss = criterion(output, target)

# fails
output = torch.randn(batch_size, nb_classes, requires_grad=True)
target = torch.randint(0, nb_classes, (batch_size, 1))
loss = criterion(output, target)
# RuntimeError: 0D or 1D target tensor expected, multi-target not supported

Make sure the target contains the class indices and does not have a class dimension for nn.NLLLoss as seen in my example.
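If your target accidentally carries that extra class dimension, removing it restores the expected 1D shape. A minimal sketch of the fix, reusing the toy tensors from the failing case above:

# remove the trailing dimension so the target has shape [batch_size]
target = torch.randint(0, nb_classes, (batch_size, 1))
target = target.squeeze(1)
loss = criterion(output, target)  # works again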

Hmmm…
Excuse me, sir,
but I still don't understand what to do in my code.

Check the shapes of your model output and the target.
I'm not sure what "multiclass-multioutput case" means, but assume you are working on a multi-class classification, i.e. each sample is classified into one class only.

In this case, your model output should contain log probabilities in the shape [batch_size, nb_classes] and the target should contain class indices in the range [0, nb_classes-1] in the shape [batch_size] if you are using nn.NLLLoss (as seen in my code snippet).
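Applied to your code, each batched target should have the shape [batch_size, 9] (one class index per digit), so that every target[:, i] slice passed to the criterion is 1D. One likely source of the extra dimensions is __getitem__ in your CustomDataset: it drops columns from the entire DataFrame instead of selecting the current row, so each sample returns a 2D label. A minimal sketch of a possible fix, assuming the nine remaining columns hold the per-digit class indices:

def __getitem__(self, index):
    img = PIL.Image.open(self.root_dir + self.df.iloc[index, 0]).convert("RGB")
    if self.transform is not None:
        img = self.transform(img)

    # select only the current row before dropping the non-label columns,
    # so the label is a 1D tensor with 9 class indices
    row = self.df.drop(columns=['fname', 'suara_paslon_1', 'suara_paslon_2', 'suara_paslon_3']).iloc[index]
    label = torch.from_numpy(row.values.astype('int64'))  # shape: [9]

    return img, label

With this change the DataLoader batches the labels into shape [batch_size, 9], and each target[:, i] is 1D as nn.NLLLoss expects (assuming the 'lsoftmax' activation in your linear_block applies nn.LogSoftmax, so the model outputs are log probabilities).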