please, could anyone help me check where I did wrong?

I am trying to build up a FCN model for binary segmentation. I have managed to design the following network which can train. However, when it gets to the validation loop, it gives the following error “cnv2d(): argument ‘input’ (position 1) must be Tensor, not int”.

please any help or suggestions would be highly appreciated

```
class fcn(nn.Module):
def __init__(self, num_classes):
super(fcn, self).__init__()
self.stage1 = nn.Sequential(*list(pretrained_net.children())[:-4])
# change input channels to 9
self.stage1[0] = nn.Conv2d(9, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.stage2 = list(pretrained_net.children())[-4]
self.stage3 = list(pretrained_net.children())[-3]
self.scores1 = nn.Conv2d(512, num_classes, 1)
self.scores2 = nn.Conv2d(256, num_classes, 1)
self.scores3 = nn.Conv2d(128, num_classes, 1)
self.upsample_8x = nn.ConvTranspose2d(num_classes, num_classes, 16, 8, 4, bias=False)
self.upsample_4x = nn.ConvTranspose2d(num_classes, num_classes, 4, 2, 1, bias=False)
self.upsample_2x = nn.ConvTranspose2d(num_classes, num_classes, 4, 2, 1, bias=False)
def forward(self, x):
x = self.stage1(x)
s1 = x # 1/8
x = self.stage2(x)
s2 = x # 1/16
x = self.stage3(x)
s3 = x # 1/32
s3 = self.scores1(s3)
s3 = self.upsample_2x(s3)
s2 = self.scores2(s2)
s2 = s2 + s3
s1 = self.scores3(s1)
s2 = self.upsample_4x(s2)
s = s1 + s2
s = self.upsample_8x(s2)
return s
num_classes = 2
pretrained_net = models.resnet18(pretrained=True)
model = fcn(num_classes)
```

The training part is as follows:

```
weights = torch.tensor([0.75, 1], dtype=torch.float)
criterion = nn.CrossEntropyLoss(weight=weights)
basic_optim = torch.optim.SGD(model.parameters(), lr=0.05, weight_decay=0.00001)
optimizer = basic_optim
Epoch_num=5
for e in range(Epoch_num):
print('Epoch', e+1)
train_loss = 0
model= model.train()
for idx, data in enumerate(train_loader):
x, y_true = data
if torch.cuda.is_available():
x, y_true = x.cuda(), y_true.cuda()
# forward
out = model(x)
out = F.softmax(out, dim=1)
#out = out > 0.5
loss = criterion(out, y_true)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
if idx % 100 == 0 and idx != 0:
print('[{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
idx , len(train_loader.dataset),
100. * idx / len(train_loader), train_loss/ idx))
model= model.train(False)
for data in enumerate(test_loader):
x, y_true = data
if torch.cuda.is_available():
x, y_true = x.cuda(), y_true.cuda()
out = model(x)
out = F.softmax(out, dim=1)
loss = criterion(out, y_true)
```