"softmax" not implemented for 'Long'

Currently, I am creating a neural network using bindsnet, and I'm having trouble with the error "softmax" not implemented for 'Long'. I converted the 1x64 array x and the 1x6 array y to tensors, but I still get the error. Does anyone know the correct way to write this?

import torch
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
import numpy as np
time = 25  # simulation duration (timesteps) per sample

network = Network()


# layers: a 64-unit input layer, three 40-unit LIF hidden layers,
# and a 6-unit LIF output layer
inpt = Input(n=64, shape=[1, 64], sum_input=True)
middle = LIFNodes(n=40, trace=True, sum_input=True)
center = LIFNodes(n=40, trace=True, sum_input=True)
final = LIFNodes(n=40, trace=True, sum_input=True)
out = LIFNodes(n=6, sum_input=True)
# NOTE: these four Connection objects are never added to the network;
# the forward_connection objects below are the ones actually used
inpt_middle = Connection(source=inpt, target=middle, wmin=0, wmax=1e-1)
middle_center = Connection(source=middle, target=center, wmin=0, wmax=1e-1)
center_final = Connection(source=center, target=final, wmin=0, wmax=1e-1)
final_out = Connection(source=final, target=out, wmin=0, wmax=1e-1)


network.add_layer(inpt, name='A')
network.add_layer(middle, name='B')
network.add_layer(center, name='C')
network.add_layer(final,  name='D')
network.add_layer(out, name='E')

forward_connection = Connection(source=inpt, target=middle, w=0.05 + 0.1 * torch.randn(inpt.n, middle.n))
network.add_connection(connection=forward_connection, source="A", target="B")
forward_connection = Connection(source=middle, target=center, w=0.05 + 0.1 * torch.randn(middle.n, center.n))
network.add_connection(connection=forward_connection, source="B", target="C")
forward_connection = Connection(source=center, target=final, w=0.05 + 0.1 * torch.randn(center.n, final.n))
network.add_connection(connection=forward_connection, source="C", target="D")
forward_connection = Connection(source=final, target=out, w=0.05 + 0.1 * torch.randn(final.n, out.n))
network.add_connection(connection=forward_connection, source="D", target="E")
recurrent_connection = Connection(source=out, target=out, w=0.025 * (torch.eye(out.n) - 1))
network.add_connection(connection=recurrent_connection, source="E", target="E")


inpt_monitor = Monitor(obj=inpt, state_vars=("s", "v"), time=500)
middle_monitor = Monitor(obj=middle, state_vars=("s", "v"), time=500)
center_monitor = Monitor(obj=center, state_vars=("s", "v"), time=500)
final_monitor = Monitor(obj=final, state_vars=("s", "v"), time=500)
out_monitor = Monitor(obj=out, state_vars=("s", "v"), time=500)
# connect the monitors to the network
network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

# re-register per-layer spike monitors; these are added under the same
# names as above, so they replace the earlier monitors
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)


npzfile = np.load("C:/Users/name/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x']  # shape (1, 64)
y = npzfile['y']  # shape (1, 6)
# convert the NumPy arrays to tensors
x = torch.from_numpy(x).clone()
y = torch.from_numpy(y).clone()

grads = {}
lr, lr_decay = 1e-2, 0.95
criterion = torch.nn.CrossEntropyLoss()
spike_ims, spike_axes, weight_im = None, None, None

# present each sample for `time` timesteps and update the weights manually
for i, (x, y) in enumerate(zip(x.view(-1, 64), y)):
    inputs = {'A': x.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    y = y.long()
    spikes = {l: network.monitors[l].get('s') for l in network.layers} 
    summed_inputs = {l: network.layers[l].summed for l in network.layers}
    # softmax
    output = spikes['E'].sum(-1).softmax(0).view(1, -1)  # <- error is raised here
    predicted = output.argmax(1).item()
    grads['dl/df'] = summed_inputs['E'].softmax(0)
    grads['dl/df'][y] -= 1
    grads['dl/dw'] = torch.ger(summed_inputs['A'], grads['dl/df'])
    network.connections['A','E'].w -= lr*grads['dl/dw']
    if i > 0 and i % 500 == 0:
        lr *= lr_decay  # decay the learning rate
    network.reset_()

The softmax method is not implemented for LongTensors, since the result would have to be rounded back to integers (and thus wouldn't really make sense), so you should convert the tensor to a FloatTensor first via tensor = tensor.float().
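
For example, the failing line in your loop can be rewritten like this (a minimal sketch; spikes is the dictionary you build inside the training loop):

# summing an integer spike tensor yields a LongTensor,
# so cast to float before calling softmax
output = spikes['E'].sum(-1).float().softmax(0).view(1, -1)
predicted = output.argmax(1).item()

The same conversion applies anywhere softmax is called on an integer tensor, e.g. summed_inputs['E'].float().softmax(0) if the summed inputs are not already floating point.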

Thank you. After applying your fix, I was able to solve the problem.