RuntimeError: expected stride to be a single integer value or a list of 1 values to match the convolution dimensions, but got stride=[1, 1]

Hello all,

I’m new to PyTorch and I’m trying to build a CNN model,
but I get this error:

Thank you very much for your help


RuntimeError Traceback (most recent call last)
in ()
----> 1 scores = dqn_improved(n_episodes=4000)
2 # plot the scores
3 fig = plt.figure()
4 ax = fig.add_subplot(111)
5 plt.plot(np.arange(len(scores)), scores)

in dqn_improved(n_episodes, max_t, eps_start, eps_end, eps_decay)
55 score += reward
56 #reward = np.tanh(reward)
—> 57 agent.step(state, action, reward, next_state, done)
58 state = next_state
59 if done:

in step(self, state, action, reward, next_state, done)
56 if len(self.memory) > self.batch_size:
57 experiences = self.memory.sample()
—> 58 self.learn(experiences, GAMMA)
59
60 def act(self, state, eps=0.):

in learn(self, experiences, gamma)
92 “*** YOUR CODE HERE ***”
93 #Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(0)
—> 94 Q_targets_next = self.qnetwork_target(next_states).unsqueeze()
95 Q_targets = rewards + (gamma * Q_targets_next * (1-dones))
96

/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
489 result = self._slow_forward(*input, **kwargs)
490 else:
→ 491 result = self.forward(*input, **kwargs)
492 for hook in self._forward_hooks.values():
493 hook_result = hook(self, input, result)

in forward(self, x)
34
35 # Convolution Layer 1
—> 36 x = self.conv1(x)
37 x = self.relu1(x)
38

/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
489 result = self._slow_forward(*input, **kwargs)
490 else:
→ 491 result = self.forward(*input, **kwargs)
492 for hook in self._forward_hooks.values():
493 hook_result = hook(self, input, result)

/usr/local/lib/python3.5/dist-packages/torch/nn/modules/conv.py in forward(self, input)
299 def forward(self, input):
300 return F.conv2d(input, self.weight, self.bias, self.stride,
→ 301 self.padding, self.dilation, self.groups)
302
303

RuntimeError: expected stride to be a single integer value or a list of 1 values to match the convolution dimensions, but got stride=[1, 1]

and the code
class CNN(nn.Module):
    """Convolutional Q-network: maps a stacked-frame observation to action values.

    Expects input of shape [batch_size, 4, 80, 80] (4 stacked frames).
    With 80x80 input: conv1 -> 76x76, conv2 -> 72x72, maxpool -> 36x36,
    so the flattened feature size is 30 * 36 * 36 = 38880, matching fc1.

    Args:
        action_size: number of discrete actions (size of the output layer).
        hidden_size_conv: number of output channels of the first convolution.
        hidden_size_fc: width of the first fully connected layer.
    """

    def __init__(self, action_size, hidden_size_conv, hidden_size_fc):
        super().__init__()

        # All Conv2d layers use the default padding=0 and stride=1.

        # Convolution layer 1: 4 input channels (stacked frames).
        self.conv1 = nn.Conv2d(4, hidden_size_conv, kernel_size=5)
        # NOTE(review): conv1_drop is constructed but never applied in
        # forward(); kept so existing state_dicts still load.
        self.conv1_drop = nn.Dropout2d(p=0.5)
        self.relu1 = nn.ReLU()

        # Convolution layer 2.
        self.conv2 = nn.Conv2d(hidden_size_conv, 30, kernel_size=5)
        self.conv2_drop = nn.Dropout2d(p=0.5)
        self.maxpool2 = nn.MaxPool2d(2)
        self.relu2 = nn.ReLU()

        # Fully connected head: 38880 = 30 channels * 36 * 36 spatial.
        self.fc1 = nn.Linear(38880, hidden_size_fc)
        self.fc2 = nn.Linear(hidden_size_fc, action_size)

    def forward(self, x):
        """Compute action values.

        Args:
            x: float tensor of shape [batch_size, 4, 80, 80]. A missing
               batch dimension triggers the misleading "expected stride"
               RuntimeError on older PyTorch versions.

        Returns:
            Tensor of shape [batch_size, action_size].
        """
        # Convolution layer 1.
        x = self.conv1(x)
        x = self.relu1(x)

        # Convolution layer 2.
        x = self.conv2(x)
        x = self.conv2_drop(x)
        x = self.maxpool2(x)
        x = self.relu2(x)

        # Flatten activation maps to vectors. Using x.size(0) keeps the
        # batch dimension intact and raises a clear error on a size
        # mismatch, instead of silently reshaping with view(-1, 38880).
        x = x.view(x.size(0), -1)

        # Fully connected layer 1. BUG FIX: the original passed
        # training=True, which applied dropout even in eval mode; tie it
        # to the module's train/eval state instead.
        x = self.fc1(x)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)

        # Fully connected layer 2 (raw action values, no activation).
        x = self.fc2(x)

        return x

Quoted reply:

This might be a misleading error message from an older version, where your input is missing the batch dimension.
It should be fixed since 0.4.1 as far as I know and result in a clearer error message.
Could you check the shape of x before passing it to your model?
It should be [batch_size, 4, h, w].

Thank you for answering these beginner questions.
I watched some good tutorials, and once you get used to it, it’s easier than TF.