Hi
class gdl3d_loss(nn.Module):
def __init__(self, pNorm=2):
super(gdl3d_loss, self).__init__()
self.convX = nn.Conv3d(1, 1, kernel_size=(3,3), stride=1, padding=1, bias=False)
self.convY = nn.Conv3d(1, 1, kernel_size=(3,3), stride=1 ,padding=1, bias=False)
self.convZ = nn.Conv3d(1, 1, kernel_size=(3,3), stride=1, padding=1 ,bias=False)
filterX = torch.FloatTensor([[[[-1, 1]]]]) # 1x2
filter_x=np.array([[1, 0, -1],[2,0,-2],[1,0,-1]])
print(filter_x.shape)
print(filter_x.size)
self.convX.weight=nn.Parameter(torch.from_numpy(filter_x).float().unsqueeze(0).unsqueeze(0))
filter_y=np.array([[1, 2, 1],[0,0,0],[-1,-2,-1]])
self.convY.weight=nn.Parameter(torch.from_numpy(filter_y).float().unsqueeze(0).unsqueeze(0))
filter_z=np.array([[-1, -2, 1],[0,0,0],[1,2,1]])
self.convZ.weight=nn.Parameter(torch.from_numpy(filter_z).float().unsqueeze(0).unsqueeze(0))
self.pNorm = pNorm
def forward(self, pred, gt):
assert not gt.requires_grad
assert pred.size() == 5
assert gt.size() == 5
assert pred.size() == gt.size(), "{0} vs {1} ".format(pred.size(), gt.size())
print(pred.shape)
print(gt.shape)
pred_dx = torch.abs(self.convX(pred))
pred_dy = torch.abs(self.convY(pred))
pred_dz = torch.abs(self.convZ(pred))
gt_dx = torch.abs(self.convX(gt))
gt_dy = torch.abs(self.convY(gt))
gt_dz = torch.abs(self.convZ(gt))
grad_diff_x = torch.abs(gt_dx - pred_dx)
grad_diff_y = torch.abs(gt_dy - pred_dy)
grad_diff_z = torch.abs(gt_dz - pred_dz)
mat_loss_x = grad_diff_x ** self.pNorm
mat_loss_y = grad_diff_y ** self.pNorm # Batch x Channel x width x height
mat_loss_z = grad_diff_z ** self.pNorm
shape = gt.shape
mean_loss = (torch.sum(mat_loss_x) + torch.sum(mat_loss_y)+torch.sum(mat_loss_z)) / (shape[0] * shape[1] * shape[2] * shape[3])
return mean_loss
I have this class but I am getting error when I run gdl3d(outputG,ct_train_batch)
The torch dimensions are (10,1,64,64,64) I am getting following error
RuntimeError: expected stride to be a single integer value or a list of 2 values to match the convolution dimensions, but got stride=[1, 1, 1]