import torch

class ConvNet(torch.nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.cv1 = torch.nn.Conv2d(1, 2, (3, 3), padding=(0, 0))
        self.init_edge_detect_weight()

    def forward(self, x):
        return self.cv1(x)

    def init_edge_detect_weight(self):
        # replace the default random weights with two fixed 3x3 kernels
        sob_kernel1 = torch.tensor([[-1, 0, 1],
                                    [-2, 0, 2],
                                    [-1, 0, 1]], dtype=torch.float64).unsqueeze(0).unsqueeze(0)
        sob_kernel2 = torch.tensor([[1, 2, 1],
                                    [0, 0, 0],
                                    [-1, -2, 1]], dtype=torch.float64).unsqueeze(0).unsqueeze(0)
        self.cv1.weight.data = torch.cat([sob_kernel1, sob_kernel2], dim=0).requires_grad_(False)

net = ConvNet()
raster_ts = torch.ones(1, 1, 128, 128, dtype=torch.float64)
print("unique value weight: ", torch.unique(net.cv1.weight.data))
print("unique value input: ", torch.unique(raster_ts))
out_ts = net(raster_ts)
print(out_ts.shape)
print("unique value output: ", torch.unique(out_ts))
Output:
unique value weight: tensor([-2., -1., 0., 1., 2.], dtype=torch.float64)
unique value input: tensor([1.], dtype=torch.float64)
torch.Size([1, 2, 126, 126])
unique value output: tensor([-0.0541, 2.2535], dtype=torch.float64, grad_fn=<Unique2Backward>)
Is this caused by floating-point precision?
Because I used two Sobel kernels to convolve an image whose values are all 1.0, there should be no edges in the image, so I expected the output feature maps to contain only zeros.
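To make my expectation concrete, here is a minimal sketch of the check I have in mind (separate from the code above, and the variable names are just for illustration): it applies the first kernel with torch.nn.functional.conv2d and an explicit bias=None, so only the kernel weights contribute to the result.

import torch
import torch.nn.functional as F

# one zero-sum 3x3 kernel, shaped (out_channels, in_channels, kH, kW)
sobel_x = torch.tensor([[-1., 0., 1.],
                        [-2., 0., 2.],
                        [-1., 0., 1.]], dtype=torch.float64).reshape(1, 1, 3, 3)
ones_img = torch.ones(1, 1, 128, 128, dtype=torch.float64)

# with no bias term, every output pixel is the sum of the kernel entries
# times 1.0, and that sum is exactly 0 for this kernel
out = F.conv2d(ones_img, sobel_x, bias=None)
print(torch.unique(out))  # expected: tensor([0.], dtype=torch.float64)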