The trick is to parameterize the weights by their logarithms. The log weights can vary freely over the reals, and an exponential map converts them to strictly positive weights before they are applied to the input data.
Example code:
import torch
import torch.nn as nn

class PositiveLinear(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Store the logarithm of the weights; exponentiating keeps the
        # effective weights strictly positive, whatever values these take.
        self.log_weight = nn.Parameter(torch.empty(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.log_weight)

    def forward(self, input):
        # Exponentiate the log weights so the applied weights are positive.
        return nn.functional.linear(input, self.log_weight.exp())
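A minimal usage sketch (the shapes and names here are just for illustration): the exponentiated weights are strictly positive, and gradients flow back to log_weight through exp() as usual.

layer = PositiveLinear(4, 2)
x = torch.randn(8, 4)
y = layer(x)                                # output shape: (8, 2)
assert (layer.log_weight.exp() > 0).all()   # effective weights are positive
y.sum().backward()                          # gradients reach log_weight

One consequence of the initialization choice: since xavier_uniform_ draws log weights near zero, the initial effective weights cluster around exp(0) = 1 rather than around 0.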