IndexError: index 1 is out of bounds for dimension 0 with size 1

The error index 1 is out of bounds for dimension 0 with size 1 is raised in the following code. There are 6 labels for the output y (0-5), and each y is one-hot encoded, e.g. [1, 0, 0, 0, 0, 0]. Please tell me what is going wrong.

import torch
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
import numpy as np

time = 25
network = Network()

inpt = Input(n=64,shape=[1,64], sum_input=True) # n=64
middle = LIFNodes(n=40, trace=True, sum_input=True)
center = LIFNodes(n=40, trace=True, sum_input=True)
final = LIFNodes(n=40, trace=True, sum_input=True)
out = LIFNodes(n=6, sum_input=True) # n=6, same as the number of labels (0-5)

inpt_middle = Connection(source=inpt, target=middle, wmin=0, wmax=1e-1)
middle_center = Connection(source=middle, target=center, wmin=0, wmax=1e-1)
center_final = Connection(source=center, target=final, wmin=0, wmax=1e-1)
final_out = Connection(source=final, target=out, wmin=0, wmax=1e-1)

network.add_layer(inpt, name='A')
network.add_layer(middle, name='B')
network.add_layer(center, name='C')
network.add_layer(final,  name='D')
network.add_layer(out, name='E')

foward_connection = Connection(source=inpt, target=middle, w=0.05 + 0.1*torch.randn(inpt.n, middle.n))
network.add_connection(connection=foward_connection, source="A", target="B")
foward_connection = Connection(source=middle, target=center, w=0.05 + 0.1*torch.randn(middle.n, center.n))
network.add_connection(connection=foward_connection, source="B", target="C")
foward_connection = Connection(source=center, target=final, w=0.05 + 0.1*torch.randn(center.n, final.n))
network.add_connection(connection=foward_connection, source="C", target="D")
foward_connection = Connection(source=final, target=out, w=0.05 + 0.1*torch.randn(final.n, out.n))
network.add_connection(connection=foward_connection, source="D", target="E")
recurrent_connection = Connection(source=out, target=out, w=0.025*(torch.eye(out.n)-1),)
network.add_connection(connection=recurrent_connection, source="E", target="E")

inpt_monitor = Monitor(obj=inpt, state_vars=("s","v"), time=500,)
middle_monitor = Monitor(obj=inpt, state_vars=("s","v"), time=500,)
center_monitor = Monitor(obj=inpt, state_vars=("s","v"), time=500,)
final_monitor = Monitor(obj=inpt, state_vars=("s","v"), time=500,)
out_monitor = Monitor(obj=inpt, state_vars=("s","v"), time=500,)

network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)


npzfile = np.load("C:/Users/name/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x'] 
y = npzfile['y'] 
x = torch.from_numpy(x).clone() 
y = torch.from_numpy(y).clone()

grads = {}
lr, lr_decay = 1e-2, 0.95
criterion = torch.nn.CrossEntropyLoss() 
spike_ims, spike_axes, weight_im = None, None, None

for i,(x,y) in enumerate(zip(x.view(-1,64), y)):
    inputs = {'A': x.repeat(time, 1),'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    y = torch.tensor(y).long()
    spikes = {l: network.monitors[l].get('s') for l in network.layers}
    summed_inputs = {l: network.layers[l].summed for l in network.layers}
   
    output = spikes['E'].sum(-1).float().softmax(0).view(1,-1)
    predicted = output.argmax(1).item()
    
    grads['dl/df'] = summed_inputs['E'].softmax(0)
    grads['dl/df'][y] -= 1  # <-- error is raised here
    grads['dl/dw'] = torch.ger(summed_inputs['A'], grads['dl/df'])
    network.connections['A','B','C','D','E'].w -= lr*grads['dl/dw']
   
    if i > 0 and i % 300 == 0:
       lr = lr_decay
    network.reset_()

Could you post the error message with the complete stack trace?
It should point to the line of code which raises the error, and you could then check the shape of the tensor used there to make sure it really has more than a single dimension.

The error message is as follows.
Does it only work when the index is 0?
Could you tell me?

grads['dl/df'][y] -= 1
IndexError: index 1 is out of bounds for dimension 0 with size 1
print(y.size())
torch.Size([19573, 6])

In your code snippet it seems that grads['dl/df'] has the shape [1, *], so if you want to index it in dim0 you can only use an index of 0.
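
As a quick illustration, here is a minimal standalone sketch (independent of the bindsnet code above) of why a [1, 6] tensor raises exactly this error when it is indexed with a label value of 1, and how squeezing the leading dimension makes the label indices valid:

import torch

t = torch.randn(1, 6)      # same shape as grads['dl/df'] in the snippet
t0 = t[0]                  # index 0 is the only valid index in dim 0
# t[1] would raise: IndexError: index 1 is out of bounds for dimension 0 with size 1
t_squeezed = t.squeeze(0)  # shape [6]; now indices 0-5 are valid in dim 0
t_squeezed[1] -= 1         # works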

In other words, should I change the way grads['dl/df'] is written?

It depends on what you are trying to achieve, i.e. what exactly grads should contain and what y should index. At the moment the shapes and indices do not match, and thus the error is raised.

I wrote it to compute the gradient with respect to the input label y.
In this case, the gradient between the output of the softmax function in the last layer (named E) and the input label is calculated.

Should I index it by specifying a dim?

I would recommend checking the shape of grads['dl/df'] and making sure to index it in the right dimension with the right indices.
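
For example, a small debugging sketch you could drop into the training loop (the variable names follow your snippet; the example shapes in the comments are assumptions based on this thread):

print(grads['dl/df'].shape)      # e.g. torch.Size([1, 6])
print(summed_inputs['E'].shape)  # the tensor the softmax was taken over
print(y.shape, y)                # e.g. torch.Size([6]) for a one-hot label

With shapes like these, grads['dl/df'][y] indexes dim 0 (which has size 1) with the values stored in y, so any non-zero entry in y triggers the IndexError.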

I confirmed the shape of grads['dl/df'].
The shape was [1, 6]. Since the shape of y was [6], I executed grads['dl/df'] = torch.squeeze(grads['dl/df'], dim=0).
Thanks to your advice, this index error has been fixed.
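
For reference, a minimal sketch of how the fixed step could look inside the loop, under the assumptions from this thread (summed_inputs['E'] has shape [1, 6] and y is a one-hot label of shape [6]). Squeezing before the softmax and using argmax() to recover the class index are my own adjustments, not part of the original code:

# assumption: summed_inputs['E'] has shape [1, 6], y is one-hot with shape [6]
logits = torch.squeeze(summed_inputs['E'], dim=0)  # [1, 6] -> [6]
grads['dl/df'] = logits.softmax(0)                 # softmax over the 6 output neurons
target = y.argmax()                                # class index 0-5 from the one-hot label
grads['dl/df'][target] -= 1                        # softmax(z) - one_hot(y), the usual cross-entropy gradient; no IndexError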