I try to make my model to work with any size of images so I remove flatting layer and use ConvTranspose2d and softmax.
I use dilation to keep the resolution of image so I don’t use adaptivepooling layer becuase it fixed the output to one.
when I run the code and exact in line:
loss = criterion(output, target) in train function
I get error:
“AttributeError: ‘tuple’ object has no attribute ‘log_softmax’”
This is parts of my code
class O_FCasConv(nn.Module):
def __init__(self, classes=4):
super(O_FCasConv, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=2,dilation=8),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=2,dilation=8),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1))
self.classifier = nn.ConvTranspose2d(32, 8, 3, stride=2, padding=2, output_padding=1, groups=classes, bias=False)
self.softmax = nn.Softmax2d()
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
y = self.classifier(out)
return self.softmax(y), x
also I use
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.Adadelta(model.parameters(), lr=0.001, rho=0.9, eps=1e-06, weight_decay=0)
and I use for training
def train(train_loader):
total_loss = 0
total_size = 0
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
data=Variable(data)
target=Variable(target)
print(target.shape[:])
print(data.shape[:])
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
total_loss += loss.item()
total_size += data.size(0)
loss.backward()
optimizer.step()