Hi,
I implemented a custom neural network and defined the layers myself, but I'm running into an error saying that I didn't implement forward, even though I did define it. Here is my full code:
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.autograd as autograd
class Equi1(nn.Module):
    def __init__(self, length):
        super(Equi1, self).__init__()
        self.lambda1 = nn.Parameter(torch.randn(1))
        self.gamma = nn.Parameter(torch.randn(1))
        self.length = length

    def forward(self, x):
        # computes (lambda1 * I + gamma * J) @ x, where J is the all-ones matrix
        identity = torch.eye(self.length)
        x = x.view(len(x), 1).float()
        part1 = torch.mm(self.lambda1 * identity, x)
        part2 = torch.mm(self.gamma * torch.mm(torch.ones(self.length, 1), torch.ones(1, self.length)), x)
        output = part1 + part2
        return output

class Equi2(nn.Module):
    def __init__(self, length):
        super(Equi2, self).__init__()
        self.lambda1 = nn.Parameter(torch.randn(1))
        self.gamma = nn.Parameter(torch.randn(1))
        self.length = length

    def forward(self, x):
        length2 = int(np.sqrt(self.length))
        x = x.view(len(x), 1).float()
        identity = torch.eye(length2)
        # block = lambda1 * I + gamma * J, applied to length2 identity blocks tiled side by side
        block = self.lambda1 * identity + self.gamma * torch.mm(torch.ones(length2, 1), torch.ones(1, length2))
        w2 = torch.mm(block, torch.from_numpy(np.tile(np.identity(length2), length2)).float())
        output = torch.mm(w2, x)
        return output

class Symnet(nn.Module):
    def __init__(self, length, n1, n2, n3):
        super(Symnet, self).__init__()
        self.length = length
        self.n1 = n1
        self.n2 = n2
        self.n3 = n3
        self.w1 = nn.Parameter(torch.randn(1))
        self.w2 = nn.Parameter(torch.randn(1))
        self.w3 = nn.Parameter(torch.randn(1))
        self.phi1 = nn.ModuleList([Equi1(self.length)])
        self.phi2 = nn.ModuleList([Equi2(self.length * self.length)])
        self.phi3 = nn.ModuleList([Equi1(self.length)])
        for i in range(n1):
            self.phi1.append(nn.ReLU(inplace=True))
            self.phi1.append(Equi1(self.length))
        for i in range(n2):
            self.phi2.append(nn.ReLU(inplace=True))
            self.phi2.append(Equi1(self.length))
        for i in range(n3):
            self.phi3.append(nn.ReLU(inplace=True))
            self.phi3.append(Equi1(self.length))

    def forward(self, x):
        # split the input into three segments and run each through its own stack
        x1 = x[0:self.length]
        x2 = x[self.length:self.length * self.length + self.length]
        x3 = x[(self.length * self.length + self.length):(self.length * self.length + 2 * self.length)]
        out1 = self.phi1(x1)
        out2 = self.phi2(x2)
        out3 = self.phi3(x3)
        return out1

def clip_grad(model, max_norm):
    # manually rescale all gradients so their global L2 norm is at most max_norm
    total_norm = 0
    for p in model.parameters():
        param_norm = p.grad.data.norm(2)
        total_norm += param_norm ** 2
    total_norm = total_norm ** 0.5
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in model.parameters():
            p.grad.data.mul_(clip_coef)
    return total_norm

'''
X = np.array([5, 1, 3, 4, 2, 6])
x = Variable(torch.FloatTensor(X))
equ1 = Equi1(6)
m = equ1(x)  # ok
y = np.arange(120)
equ2 = Equi2()
y = Variable(torch.FloatTensor(y))
n = equ2(y)  # ok
'''
y = np.arange(120)
y = Variable(torch.FloatTensor(y))
sym = Symnet(10, 1, 1, 1)
sym.forward(y)  # <-- this is where the error happens
and the error is:
Traceback (most recent call last):
File "<ipython-input-12-50ace8e96454>", line 1, in <module>
sym.forward(y)
File "<ipython-input-3-cbe0e070605f>", line 27, in forward
out1=self.phi1(x1)
File "D:\python\anaconda\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "D:\python\anaconda\lib\site-packages\torch\nn\modules\module.py", line 83, in forward
raise NotImplementedError
NotImplementedError
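If it helps, the same NotImplementedError can be reproduced with just a bare nn.ModuleList (a minimal example I made up, not from my code), so it seems related to how I call self.phi1(x1):

import torch
import torch.nn as nn

# calling a ModuleList like a module raises the same NotImplementedError
ml = nn.ModuleList([nn.ReLU()])
out = ml(torch.randn(5))  # -> NotImplementedError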
Do you know what is going wrong? Thank you.
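P.S. In case it's relevant, this is roughly how I plan to use Symnet and clip_grad once forward works (just a sketch; the SGD optimizer, MSE loss, and random target are placeholders, not part of my actual code):

# hypothetical training step, for illustration only
optimizer = optim.SGD(sym.parameters(), lr=0.01)
criterion = nn.MSELoss()
target = torch.randn(10, 1)  # made-up target with the same shape as out1

optimizer.zero_grad()
out = sym(y)                  # the call that currently fails
loss = criterion(out, target)
loss.backward()
clip_grad(sym, 5.0)           # the helper above; assumes every parameter received a gradient
optimizer.step()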