I am having an issue that I am not sure how to solve. I think it might be related to the input layer of the model, but I don't see how it relates to an input size of 182. I am building an RL agent with OpenAI Gym's Lunar Lander, which has an observation space of 8 values and an action space of 4 actions — hence the input of 8 in the first layer.
Input In [5], in test_model(agent)
8 score = 0
10 while not done:
---> 11 params = unpack(agent['params'])
12 probs = model(state,params)
13 action = torch.distributions.Categorical(probs=probs).sample()
Input In [1], in unpack(params, layers)
21 s,e = e,e+np.prod(l)
22 print("s,e",s,e)
---> 23 weights = params[s:e].view(l)
24 s,e= e,e+l[0]
25 bias = params[s:e]
Here is the code with the problem:
def model(x, params):
    """Three-layer MLP policy head: 8 -> 25 -> 10 -> 4.

    Args:
        x: observation tensor of size 8 (or a batch of shape (N, 8)).
        params: sequence of 6 tensors — (w1, b1, w2, b2, w3, b3) as
            produced by `unpack`, with shapes (25,8),(25,),(10,25),(10,),(4,10),(4,).

    Returns:
        Log-probabilities over the 4 actions, same leading shape as `x`.

    NOTE(review): this returns *log*-probabilities; the caller does
    `Categorical(probs=probs)`, which expects plain probabilities.
    Either sample with `Categorical(logits=...)` or exponentiate here.
    """
    w1, b1, w2, b2, w3, b3 = params
    y = torch.nn.functional.linear(x, w1, b1)
    y = torch.relu(y)
    y = torch.nn.functional.linear(y, w2, b2)
    y = torch.relu(y)
    y = torch.nn.functional.linear(y, w3, b3)
    # dim=-1 normalizes over the action axis for both a single state
    # (1-D) and a batch of states (2-D); dim=0 was only correct for 1-D.
    return torch.log_softmax(y, dim=-1)
def unpack(params, layers=((25, 8), (10, 25), (4, 10))):
    """Slice a flat parameter vector into per-layer weight and bias tensors.

    Args:
        params: 1-D tensor holding all weights and biases concatenated,
            laid out as [w1, b1, w2, b2, w3, b3].
        layers: (out_features, in_features) shape per linear layer.
            Default is a tuple, not a list, to avoid the mutable-default pitfall.

    Returns:
        List [w1, b1, w2, b2, ...] with w_i of shape `layers[i]` and
        b_i of length `layers[i][0]`.

    Raises:
        ValueError: if `params` does not have exactly the number of
            elements the layer shapes require. The default layout needs
            25*8+25 + 10*25+10 + 4*10+4 = 529 elements — a 182-element
            vector (as in the reported error) fails here with a clear
            message instead of an opaque `view` error.
    """
    expected = sum(int(np.prod(shape)) + shape[0] for shape in layers)
    if params.numel() != expected:
        raise ValueError(
            f"params has {params.numel()} elements but layers {layers} "
            f"require exactly {expected}"
        )
    unpacked = []
    end = 0
    for shape in layers:
        start, end = end, end + int(np.prod(shape))
        weights = params[start:end].view(shape)
        start, end = end, end + shape[0]
        bias = params[start:end]
        unpacked.extend([weights, bias])
    return unpacked