Hello everyone, I am trying to adapt my TensorFlow code to PyTorch. In this case I have the following custom layer:
import math

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


class LinearMax(nn.Module):
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: torch.Tensor

    def __init__(self, in_features: int, out_features: int,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
        # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
        # https://github.com/pytorch/pytorch/issues/57109
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Element-wise product with the transposed weight, then the global maximum.
        inputs_weighted = torch.mul(input, self.weight.T)
        maximum = torch.max(inputs_weighted).unsqueeze(dim=0)
        # print(f"input shape: {input.shape}, weight shape {self.weight.shape}")
        return maximum

    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
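For reference, a quick shape check of the layer in isolation (the sizes below are picked arbitrarily, just for illustration) shows that it always returns a single-element tensor, since torch.max without a dim argument reduces over the whole tensor:

layer_check = LinearMax(in_features=1, out_features=4)
x = torch.randn(32, 4)        # e.g. a batch of 32 windows of length 4
print(layer_check(x).shape)   # torch.Size([1])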
I also have the following model that uses this layer:
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.input_size = input_size
        self.max = layer.LinearMax(1, input_size)
        self.l1 = nn.Linear(1, 8)  # , dtype=torch.complex64)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(8, 8)
        self.l3 = nn.Linear(8, 1)  # , dtype=torch.complex64)

    def forward(self, x):
        maximum_out = self.max(x)
        out = self.l1(maximum_out)
        out = self.relu(out.real)  # + 1j * self.relu(out.imag)
        out = self.l2(out)
        out = self.relu(out)
        out = self.l3(out)
        return out
By running this part of my code:
import numpy as np
from torch.utils.data import Dataset


class MyDataset(Dataset):
    def __init__(self, data, rul, window):
        self.data = data
        self.rul = rul
        self.window = window

    def __getitem__(self, index):
        x = torch.from_numpy(self.data[index:index + self.window]).to(torch.float32)
        y = torch.tensor(np.min(self.rul[index:index + self.window])).to(torch.float32).unsqueeze(0)
        return x, y

    def __len__(self):
        return len(self.data) - self.window


def training_loop_pytorch(train_loader, model, num_epochs, learning_rate):
    n_total_steps = len(train_loader)
    # loss and optimizer
    criterion = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # training loop
    for epoch in range(num_epochs):
        # forward pass and loss
        for i, (vibration, rul) in enumerate(train_loader):
            X = vibration
            y = rul
            y_predicted = model(X)
            # print(y.shape)
            loss = criterion(y_predicted, y)
            # backward pass
            loss.backward()
            # update
            optimizer.step()
            optimizer.zero_grad()
            if (i + 1) % 100 == 0:
                print(f'epoch {epoch+1}/{num_epochs}, step {i+1}/{n_total_steps}, loss={loss.item():.4f}')
    return model
I get the following UserWarning:

UserWarning: Using a target size (torch.Size([500, 1])) that is different to the input size (torch.Size([1]))

The code still runs, but I would like to know how I can get rid of this warning. When I remove the custom layer the warning doesn't appear, so I assume the problem is in the custom layer (but I want to keep it).
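If it helps, I think the warning can be reproduced with just the loss function and the two shapes from the message (a minimal sketch with random tensors, not my actual data):

criterion = nn.L1Loss()
y_predicted = torch.randn(1)      # model output: torch.Size([1])
y = torch.randn(500, 1)           # target:       torch.Size([500, 1])
loss = criterion(y_predicted, y)  # emits the UserWarning; the tensors are broadcast before the loss is computed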
Thanks