Hi, I’m new to pytorch.

I’m developing a weak learner for an ensable approach, I defined a weak learner as a neural network with just one neuron and one weigth:

```
class BaseLearnerFST(nn.Module):
def __init__(self, leakyReluM: float = 0.02):
super().__init__()
# weight application
self._weightApplier: Linear = Linear(1, 1, bias=False)
# activation function
self._leakyRelu: LeakyReLU = nn.LeakyReLU(negative_slope=leakyReluM)
def forward(self, batch: Tensor) -> Tensor:
weightedInput: Tensor = self._weightApplier(batch)
return self._leakyRelu(weightedInput)
```

Given the fact that each sample in the dataset has a specific weight I developed a custom loss to handle the error computation:

```
class WeightedMeanSquaredError(nn.Module):
def __init__(self):
super().__init__()
self._errorMap: list[Tensor] = []
def forward(self, yTrue: Tensor, yPred: Tensor, weights: Tensor, save: bool = False) -> Tensor:
modelLabel: Tensor = gt(yPred, 1.0).float()
absolute_diff: Tensor = th.abs(modelLabel - yTrue)
weighted_absolute_diff = weights * absolute_diff
if save:
self._errorMap.append(absolute_diff)
return th.sum(weighted_absolute_diff, dtype=float32)
def getErrorMap(self) -> list[Tensor]:
return self._errorMap
```

I also developed a fit() method to train a BaseLearnerFST:

```
def fit(
self, xTrain: ndarray[float], yTrain: ndarray[int],
weights: ndarray[float], batchSize: int = 1, epochs: int = 10
) -> Tensor:
optimizer = SGD(self.parameters(), lr=0.05)
weightedLoss = WeightedMeanSquaredError()
dataset: CustomDataset = CustomDataset(xTrain, yTrain, weights)
batches: DataLoader = BaseLearnerFST.prepareDataset(dataset, batchSize)
self.train()
for epoch in range(epochs):
for batch in batches:
print("Weight: ", self._weightApplier.weight)
xBatch, (yBatch, weightsBatch) = batch
optimizer.zero_grad() # initialize gradient to zero
yPred: Tensor = self(xBatch) # forward pass
loss: Tensor = weightedLoss(
yBatch, yPred, weightsBatch,
save=True if epoch == epochs - 1 else False
)
loss.backward() # compute gradient
optimizer.step() # backpropagation
print(f"Epoch:{epoch} loss is {loss.item()}")
return tensor([]) # th.cat(weightedLoss.getErrorMap())
@staticmethod
def prepareDataset(dataset: Dataset, batchSize: int = 1):
return DataLoader(dataset, batchSize, shuffle=True)
```

The problem is that troughout the training process the single weight does not get updated, I know that the problem is inside the custom loss function that I made because, I tried to use a pytorch loss such as MSELoss and everything worked fine.

I read that if the training set tensor was not declared with the flag: requires_grad, the gradient do not get computed so here I attach also the class with which I handle the dataset before the training phase:

```
class CustomDataset(Dataset):
def __init__(self, data: ndarray[float], labels: ndarray[int], weights: ndarray[float]):
self._xTrain: Tensor = tensor(data, dtype=float32, requires_grad=True)
self._yTrain: Tensor = tensor(labels, dtype=float32, requires_grad=True)
self._weights: Tensor = tensor(weights, dtype=float32, requires_grad=True)
def __len__(self) -> int:
return len(self._xTrain)
def __getitem__(self, idx: int) -> tuple[Tensor, tuple[Tensor, Tensor]]:
return self._xTrain[idx], (self._yTrain[idx], self._weights[idx])
```