Is this the correct way to specify custom manipulation of the weights of a convolution layer?
class MaskedConv3d(nn.Module):
def __init__(self, channels, filter_mask):
super().__init__()
self.kernel_size = tuple(filter_mask.shape)
self.filter_mask = nn.Parameter(filter_mask) # tensor
self.conv = nn.Conv3d(
in_channels=channels,
out_channels=channels,
kernel_size=self.kernel_size,
)
def forward(self, x):
self._mask_conv_filter()
return self.conv(x)
def _mask_conv_filter(self):
with torch.no_grad():
self.conv.weight.data = self.conv.weight.data * self.filter_mask
Specifically, in the last line of code I’m using .data
attribute and not the tensors themselves since otherwise I’m getting the following error:
TypeError: cannot assign 'torch.FloatTensor' as parameter 'weight' (torch.nn.Parameter or None expected)
Original code:
with torch.no_grad():
self.conv.weight = self.conv.weight * self.filter_mask
Thanks
Barak