How can I perform cross-validation with this training process?

# Accumulators for per-batch labels and predictions across all epochs
# (useful afterwards, e.g. for a confusion matrix or classification report).
lbls = []
pred = []

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
num_epochs = 10
total_step = len(train_data)  # number of batches per epoch
loss_list = []  # per-batch training loss
acc_list = []   # per-batch training accuracy

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_data):
        # Forward pass: add a channel dimension -> (N, 1, H, W) and cast to
        # float, since the model presumably expects single-channel float input
        # (TODO confirm against the model definition).
        images = images.unsqueeze(1).float()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss_list.append(loss.item())

        # Backprop and Adam optimisation step: the optimizer updates the
        # weight parameters to minimise the loss function.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track the accuracy on this batch (argmax over class logits).
        total = labels.size(0)
        _, predicted = torch.max(outputs.data, 1)
        correct = (predicted == labels).sum().item()
        acc_list.append(correct / total)
        lbls += labels        # extends the list with this batch's label tensors
        pred += predicted     # extends the list with this batch's predictions

        # Periodic progress report every 36 batches.
        if (i + 1) % 36 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
                          (correct / total) * 100))

You could split the dataset before training, e.g. via `torch.utils.data.random_split`, or — better — use `sklearn.model_selection.KFold` and train on each fold separately.
Alternatively, skorch provides a higher-level API for PyTorch, which is sklearn-compatible.