Hi, I'm trying out this WaveNet model and have been running into an issue with its ConstantPad1d function.
from torch.autograd import Function

class ConstantPad1d(Function):
    def __init__(self, target_size, dimension=0, value=0, pad_start=False):
        super(ConstantPad1d, self).__init__()
        self.target_size = target_size
        self.dimension = dimension
        self.value = value
        self.pad_start = pad_start

    @staticmethod
    def forward(self, input):
        self.num_pad = self.target_size - input.size(self.dimension)
        assert self.num_pad >= 0, 'target size has to be greater than input size'
        self.input_size = input.size()

        # allocate a value-filled tensor of the target size
        size = list(input.size())
        size[self.dimension] = self.target_size
        output = input.new(*tuple(size)).fill_(self.value)

        # copy the input into the unpadded region of the output
        c_output = output
        if self.pad_start:
            c_output = c_output.narrow(self.dimension, self.num_pad, c_output.size(self.dimension) - self.num_pad)
        else:
            c_output = c_output.narrow(self.dimension, 0, c_output.size(self.dimension) - self.num_pad)
        c_output.copy_(input)
        return output

    @staticmethod
    def backward(self, grad_output):
        # pass the gradient through only where the input was copied in
        grad_input = grad_output.new(*self.input_size).zero_()
        cg_output = grad_output
        if self.pad_start:
            cg_output = cg_output.narrow(self.dimension, self.num_pad, cg_output.size(self.dimension) - self.num_pad)
        else:
            cg_output = cg_output.narrow(self.dimension, 0, cg_output.size(self.dimension) - self.num_pad)
        grad_input.copy_(cg_output)
        return grad_input
Error:
RuntimeError: Legacy autograd function with non-static forward method is deprecated. Please use new-style autograd function with static forward method. (Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)
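From the error and the linked docs, my understanding is that forward and backward now have to be real @staticmethods whose first argument is a ctx object, with everything that __init__ used to store on self passed through apply() instead. Below is my attempt at converting it that way (an untested sketch, so the ctx bookkeeping and the extra None returns in backward may not be exactly right):

from torch.autograd import Function

class ConstantPad1d(Function):
    @staticmethod
    def forward(ctx, input, target_size, dimension, value, pad_start):
        # everything that used to live on self is stashed on ctx for backward()
        num_pad = target_size - input.size(dimension)
        assert num_pad >= 0, 'target size has to be greater than input size'
        ctx.num_pad = num_pad
        ctx.dimension = dimension
        ctx.pad_start = pad_start
        ctx.input_size = input.size()

        # allocate a value-filled tensor of the target size
        size = list(input.size())
        size[dimension] = target_size
        output = input.new_full(tuple(size), value)

        # copy the input into the unpadded region of the output
        if pad_start:
            output.narrow(dimension, num_pad, input.size(dimension)).copy_(input)
        else:
            output.narrow(dimension, 0, input.size(dimension)).copy_(input)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # the gradient flows through only the region that was copied from the input
        if ctx.pad_start:
            grad_input = grad_output.narrow(ctx.dimension, ctx.num_pad, ctx.input_size[ctx.dimension]).contiguous()
        else:
            grad_input = grad_output.narrow(ctx.dimension, 0, ctx.input_size[ctx.dimension]).contiguous()
        # backward must return one value per forward() argument;
        # only the input tensor gets a gradient, the rest get None
        return grad_input, None, None, None, None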
Can anyone please help me figure out what I still need to change here?
I have even opened an issue on their official GitHub, but I guess they are too busy.
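For completeness, here is how I think the call site would have to change if the static rewrite above is right. The tensor shape and the argument values below are made-up placeholders, not the repo's actual code:

import torch

# legacy-style call (roughly what triggers the deprecation error):
#   pad = ConstantPad1d(target_size, dimension)
#   y = pad(x)

# new-style call: no instantiation; the former constructor arguments
# are passed positionally to apply(), after the input tensor
x = torch.randn(16, 32, 900, requires_grad=True)   # placeholder shape
y = ConstantPad1d.apply(x, 1024, 2, 0, False)      # pad dim 2 up to length 1024
y.sum().backward()                                  # gradient flows back into x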