dtype = torch.float
device = torch.device(“cpu”)
corrupt_img = torch.tensor(corrupt_img,dtype=dtype,device=device)
corrected_im = torch.randn(1,1,corrupt_img.shape[0],corrupt_img.shape[1],device=device,dtype=dtype,requires_grad=True)
gt = torch.tensor(img)
#corrected_im = corrected_im[None,None,:,:]
corrupt_img = corrupt_img[None,None,:,:]
kernel = torch.tensor(kernel,dtype=dtype,device=device)
kernel = kernel[None,None,:,:]
lr = 1e-5
epoch = 1000
for i in range(epoch):
conv_result = F.conv2d(corrected_im,kernel,padding=10)
loss = (conv_result - corrupt_img).pow(2).sum() + torch.abs(corrected_im).sum()
if i%10==0:
print(i,loss.item())
loss.backward()
with torch.no_grad():
corrected_im = corrected_im - lr*corrected_im.grad
corrected_im.grad.zeros_()
I am finding a solution of compressed sensing problem using pytorch.to solve a optimization
argmin_u || kernel*u -b||^2 + ||u||_1
I expressed the formula directly using F.conv2d ,and torch.tensor,but the backward doesn’t work for the u. the corrected_im.grad is a none type obeject is this code. However ,theoretically the autograd should work on this or is there any mistake in my convolution expression?