How can I make the lambda trainable in Softshrink function?

You have to write your own Softshrink function that takes lambda as a tensor with requires_grad=True, so that autograd can compute a gradient for it. Example:

import torch

def softshrink(x, lambd):
    # Piecewise: x - lambd where x > lambd, x + lambd where x < -lambd, 0 otherwise.
    # The comparison masks are treated as constants by autograd, but lambd also
    # appears in the output arithmetic, so it receives a gradient on backward().
    mask1 = x > lambd
    mask2 = x < -lambd
    out = torch.zeros_like(x)
    out += mask1.float() * (x - lambd)
    out += mask2.float() * (x + lambd)
    return out
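As a quick sanity check (just to confirm the math, with a fixed threshold), this agrees with the built-in torch.nn.functional.softshrink:

import torch.nn.functional as F

x_check = torch.randn(4)
assert torch.allclose(softshrink(x_check, torch.tensor(0.5)), F.softshrink(x_check, 0.5))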

x = torch.randn(2, 2, 2, requires_grad=True)
lambd = torch.tensor([0.5], requires_grad=True)
out = softshrink(x, lambd)

# do things to out
y = out.sum()
y.backward()
x.grad      # exists
lambd.grad  # also exists, so lambd can be updated like any other weight
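If you want lambda learned during training, the same idea fits naturally into an nn.Module, with lambda stored as an nn.Parameter so any optimizer picks it up automatically. A minimal sketch (the class name and initial value are just illustrative):

import torch
import torch.nn as nn

class LearnableSoftshrink(nn.Module):
    def __init__(self, init_lambd=0.5):
        super().__init__()
        # Registering lambd as a Parameter makes it show up in
        # module.parameters(), so the optimizer will update it.
        self.lambd = nn.Parameter(torch.tensor(init_lambd))

    def forward(self, x):
        # Same piecewise definition as above, written with torch.where.
        return torch.where(x > self.lambd, x - self.lambd,
                           torch.where(x < -self.lambd, x + self.lambd,
                                       torch.zeros_like(x)))

shrink = LearnableSoftshrink()
opt = torch.optim.SGD(shrink.parameters(), lr=0.1)
out = shrink(torch.randn(2, 2, 2))
out.sum().backward()
opt.step()  # moves shrink.lambd along its gradient

Note that nothing here clamps the threshold, so a long training run could drive lambd negative; if that matters for your use case, you can reparameterize (e.g. store the log of the threshold) or clamp it in forward().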