I have a custom activation function:
import torch


class CustAct(torch.nn.Module):
    def __init__(self, degree=4, alphas=None):
        super(CustAct, self).__init__()
        if alphas is not None:
            self.alphas = torch.nn.Parameter(torch.Tensor(alphas))
        else:
            self.alphas = torch.nn.Parameter(torch.ones(degree + 1))
        # nn.Parameter sets requires_grad=True by default, so the
        # coefficients are already trainable

    def forward(self, x):
        val = torch.zeros(x.size())
        for d in range(len(self.alphas)):
            val += self.alphas[d] * x ** d  # d-th polynomial term
        return val
And in my model:
self.linear = torch.nn.Sequential(
    torch.nn.Linear(feature_dim, feature_dim),
    CustAct(alphas=[1, 2, 3, 4]),
    torch.nn.Linear(feature_dim, feature_dim),
    torch.nn.ReLU6()
)
I am running on a GPU, and when I use the custom activation function I get:

    val += self.alphas[d] * x ** d
RuntimeError: expected device cpu but got device cuda:0

I'm not sure why it expects a CPU tensor here.
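
The only tensor that forward allocates itself is val = torch.zeros(x.size()), and torch.zeros creates CPU tensors by default even when x lives on cuda:0, which would explain the device mismatch. A minimal sketch of a device-safe forward, using torch.zeros_like so the accumulator inherits x's device and dtype (assuming the polynomial form above is what's intended):

def forward(self, x):
    # zeros_like places val on the same device (and dtype) as x,
    # so the += below never mixes cpu and cuda tensors
    val = torch.zeros_like(x)
    for d in range(len(self.alphas)):
        val += self.alphas[d] * x ** d
    return val

Passing the device explicitly, as in torch.zeros(x.size(), device=x.device), should work just as well; zeros_like is simply the more compact spelling.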