Conv2d(): argument 'input' (position 1) must be Tensor, not tuple

Hey there,
I have pretty much the same problem and I cannot find a fix. I have read all your solutions, but none of them seem to fit my case, so I have no idea where to look for a different solution. I have trained a model and it works fine, but when I want to plot the confusion matrix with this code:
import torch
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from sklearn.metrics import confusion_matrix
import seaborn as sn
import pandas as pd
import numpy as np

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([transforms.ToTensor(), normalize])
dataset = datasets.ImageFolder(r'C:\Users\msoss\PycharmProjects\CDAN\data\visda-2017\test', transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=50, shuffle=True)
images, labels = next(iter(dataloader))

# constant for classes
classes = ('R00', 'R10', 'R15', 'R20', 'R25', 'R30', 'R35', 'R40', 'R45', 'R50', 'R55', 'R60', 'R65', 'R70', 'R75',
           'R80')

net = torch.load("…/snapshot/san/best_model.pth.tar")
net = net.to('cuda')
print(net)
nb_classes = 16
y_pred = []
y_true = []
with torch.no_grad():
    for i, (images, labels) in enumerate(dataloader):
        images = images.to(device)
        labels = labels.to(device)
        output = net(images)
        output = (torch.max(torch.exp(output), 1)[1]).data.cpu().numpy()
        y_pred.extend(output)  # save predictions
        labels = labels.data.cpu().numpy()
        y_true.extend(labels)  # save ground truth

cf_matrix = confusion_matrix(y_true, y_pred)
df_cm = pd.DataFrame(cf_matrix / np.sum(cf_matrix) * 16, index=[i for i in classes], columns=[i for i in classes])
plt.figure(figsize=(12, 7))
sn.heatmap(df_cm, annot=True)
plt.savefig('output.png')

it gives me an error very similar to the one in the title: TypeError: exp(): argument 'input' (position 1) must be Tensor, not tuple
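I can reproduce the same TypeError in isolation, which makes me think net(images) returns a tuple instead of a tensor. The two-element tuple below is just a stand-in for whatever my model actually returns:

import torch

out = (torch.randn(2, 16), torch.randn(2, 16))  # stand-in for a forward pass that returns a tuple
torch.exp(out)  # raises: TypeError: exp(): argument 'input' (position 1) must be Tensor, not tuple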

My network looks like this:
class ResNetFc(nn.Module):
    def __init__(self, resnet_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000):
        super(ResNetFc, self).__init__()
        model_resnet = resnet_dict[resnet_name](pretrained=True)
        self.conv1 = model_resnet.conv1
        self.bn1 = model_resnet.bn1
        self.relu = model_resnet.relu
        self.maxpool = model_resnet.maxpool
        self.layer1 = model_resnet.layer1
        self.layer2 = model_resnet.layer2
        self.layer3 = model_resnet.layer3
        self.layer4 = model_resnet.layer4
        self.avgpool = model_resnet.avgpool
        self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool,
                                            self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool)

        self.use_bottleneck = use_bottleneck
        self.new_cls = new_cls
        if new_cls:
            if self.use_bottleneck:
                self.bottleneck = nn.Linear(model_resnet.fc.in_features, bottleneck_dim)
                self.fc = nn.Linear(bottleneck_dim, class_num)
                self.bottleneck.apply(init_weights)
                self.fc.apply(init_weights)
                self.__in_features = bottleneck_dim
            else:
                self.fc = nn.Linear(model_resnet.fc.in_features, class_num)
                self.fc.apply(init_weights)
                self.__in_features = model_resnet.fc.in_features
        else:
            self.fc = model_resnet.fc
            self.__in_features = model_resnet.fc.in_features

    def forward(self, x):
        x = self.feature_layers(x)
        x = x.view(x.size(0), -1)
        if self.use_bottleneck and self.new_cls:
            x = self.bottleneck(x)
        y = self.fc(x)
        return x, y  # returns a tuple: (features, logits)
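Looking at the forward again, it returns both x and y, so I assume the tuple comes from there and I would have to unpack it before calling torch.exp. This sketch is my best guess at a fix, assuming y (the second element) really is the classifier output for my model:

with torch.no_grad():
    for i, (images, labels) in enumerate(dataloader):
        images = images.to(device)
        labels = labels.to(device)
        features, logits = net(images)              # forward returns (x, y), so unpack the tuple
        preds = torch.max(torch.exp(logits), 1)[1]  # exp of the logits is a tensor, so this works
        y_pred.extend(preds.cpu().numpy())
        y_true.extend(labels.cpu().numpy())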

Thanks in advance for the help!