import rpy2.robjects as robj
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
robj.r["source"]("input_matrix.R")
matrix = robj.r['input_matrix']
HIDDEN_LAYER = 5
LEARNING_RATE = 0.01
data_train = torch.tensor(matrix)
data_train = data_train.view(10,891)
print(data_train.size())
INP_DIM = data_train.size()[0] - 1
class Titanic(nn.Module):
    def __init__(self, d_in, h):
        super(Titanic, self).__init__()
        self.layer1 = nn.Linear(d_in, h)
        self.layer2 = nn.Linear(h, h)
        self.layer3 = nn.Linear(h, h)
        self.layer4 = nn.Linear(h, d_in)
        assert(d_in == data_train.size()[0] - 1)
        assert(h == HIDDEN_LAYER)

    def forward(self, *input):
        input = F.relu(self.layer1(input))
        input = F.relu(self.layer2(input))
        input = F.relu(self.layer3(input))
        input = self.layer4(input)
        return F.log_softmax(input)
net = Titanic(INP_DIM,HIDDEN_LAYER)
print(net)
optimizer = optim.SGD(net.parameters(), lr = LEARNING_RATE)
crit = nn.KLDivLoss()
print(INP_DIM)
for epoch in range(0, data_train.size()[1]):
    # each column is one sample; the first entry is the label, the rest are features
    vec = data_train[:, epoch]
    print(vec)
    target = vec[0]
    data = vec[1:]
    print(data)
    target, data = Variable(target), Variable(data)
    optimizer.zero_grad()
    net_out = net(data)  # error on this line
The error comes from the * in your forward method. Just remove it, or use input = F.relu(self.layer1(input[0])). May I ask the reason you are passing your input as *input?
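For reference, a minimal sketch of the forward method with the * removed, so net(data) passes the feature vector through as a single tensor (only the forward signature changes, nothing else in your model):

    def forward(self, input):
        # input is now the tensor passed to net(data), not a tuple of arguments
        input = F.relu(self.layer1(input))
        input = F.relu(self.layer2(input))
        input = F.relu(self.layer3(input))
        input = self.layer4(input)
        return F.log_softmax(input)

With *input, Python collects all positional arguments into a tuple, so layer1 receives a tuple instead of a tensor and the linear layer fails; with a plain input parameter (or input[0]), layer1 gets the tensor of size d_in it expects.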
Thanks, that worked.
I was a bit confused about how the input data should be passed, since I am new to PyTorch.