PyTorch Password Generator: Input and Target Batch Mismatch

Hello,
ich have this neural network i want to train to create human like passwords.
As data set i am using the rock you.txt wordlist.

This is my code:

from os import listdir, path
import unicodedata
import string
import torch
import torch.nn as nn
import random
import matplotlib.pyplot as plt


device_ = torch.device("cuda")
epochs = int(input("epchs: "))
learingRate = input("Learning rate: ")

if learingRate == "":
    learingRate = 0.001
else:
    learingRate = float(learingRate)

letters = string.ascii_letters + string.digits + string.punctuation
letters_num = len(letters) + 1

def toAscii(s):
    return "".join(
        char for char in unicodedata.normalize("NFD", s)
        if unicodedata.category(char) != "Mn"
        and char in letters
    )

def lines(datei):
    f = open(datei, encoding="utf-8").read().strip().split("\n")
    return [toAscii(l) for l in f]

def charToIndex(char):
    return letters.find(char)

def charToTensor(char):
    ret = torch.zeros(1, letters_num)
    ret[0][charToIndex(char)] = 1
    return ret


def passwordToTensor(name):
    ret = torch.zeros(len(name), 1, letters_num, device="cuda")
    for i, char in enumerate(name):
        ret[i][0][charToIndex(char)] = 1
    return ret

def targetToTensor(password):
    indizes = [letters.find(password[i]) for i in range(len(password))]
    return torch.tensor(indizes, device=torch.device("cuda"))


lines_file = lines("testWordlist.txt")

def get_random_example():
    example = random.choice(lines_file)
    return example
 
def get_random_train():
    pw = get_random_example()
    input_tensor = passwordToTensor(pw)
    target_tensor = targetToTensor(pw)
    print(target_tensor)
    return input_tensor, target_tensor


class Netz(nn.Module):
    def __init__(self, inputs, hiddens, outputs):
        super(Netz, self).__init__()
        self.hidden_size = hiddens

        self.input_to_hidden = nn.Linear(inputs + hiddens, hiddens)
        self.input_to_output = nn.Linear(inputs + hiddens, outputs)
        self.output_to_output = nn.Linear(hiddens + outputs, outputs)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(1)
        
    def forward(self, input, hidden):

        combined = torch.cat((input, hidden), dim=1)
        hidden = self.input_to_hidden(combined)
        output = self.input_to_output(combined)
        out_combined = torch.cat((output, hidden), dim=1)
        output = self.output_to_output(out_combined)
        output = self.dropout(output)
        output = self.softmax(output)
        return output, hidden
    
    def initHidden(self):
        return torch.zeros(1, self.hidden_size, device="cuda")



if path.isfile("meinNetz.pt"):
    model = Netz(letters_num, 128, letters_num)
    model.load_state_dict(torch.load("meinNetz.pt"))    
    model.to("cuda")    
else:
    model = Netz(letters_num, 128, letters_num).to("cuda")

model.train()
criterion = nn.NLLLoss()

def train(input_tensor, target_tensor):
    hidden = model.initHidden()
    model.zero_grad()
    loss = 0

    for e in range(input_tensor.size()[0]):
        output, hidden = model(input_tensor[e], hidden)
        
        # Beachte, dass wir den Verlust für jedes Zeichen in der Sequenz einzeln berechnen
        loss += criterion(output, target_tensor[e])

    loss /= input_tensor.size()[0]  # Berechne den Durchschnittsverlust über die Sequenz
    loss.backward()

    for p in model.parameters():
        p.data.add_(-learingRate, p.grad.data)

    return output, loss


loss_sum = 0
lossPlot = []

for i in range(10000):
    input_tensor, target_tensor = get_random_train()

    output, loss = train(input_tensor, target_tensor)

    lossPlot.append(loss)    

    if i % 100 == 0:
        print(i / 100, "% made. Loss: ", loss.item() / input_tensor.size())

plt.plot(lossPlot)
plt.title()
plt.show()


And this is the error i am getting:

ValueError: Expected input batch_size (1) to match target batch_size (0).

Thanks for help.

Your target seems to be missing the batch dimension as seen here:

criterion = nn.CrossEntropyLoss()

output = torch.randn(1, 10, requires_grad=True)
target = torch.randint(0, 10, (1,))

# works
loss = criterion(output, target)

# fails since target is missing the batch dimension
print(target.shape)
# torch.Size([1])
target = target[0]
print(target.shape)
# torch.Size([])
loss = criterion(output, target)
# ValueError: Expected input batch_size (1) to match target batch_size (0).

.unsqueeze it and it should work.