Inputs must be non-negative

I used the code below to transform the data from the input (x), but the following error occurred. How can I resolve it?

Error message

Traceback (most recent call last):
  File "C:/Users/name/Desktop/myo-python-1.0.4/bindsnet-master/bindsnet/nextrsnn.py", line 106, in <module>
    for i, (datum, y_train) in enumerate(loader):
  File "C:\Python36\lib\site-packages\bindsnet\encoding\loaders.py", line 54, in poisson_loader
    yield poisson(datum=data[i], time=time, dt=dt)
  File "C:\Python36\lib\site-packages\bindsnet\encoding\encodings.py", line 104, in poisson
    assert (datum >= 0).all(), "Inputs must be non-negative"
AssertionError: Inputs must be non-negative

Code

import torch
import torch.nn as nn
import numpy as np
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
from sklearn.model_selection import train_test_split
from bindsnet.encoding import poisson_loader
import matplotlib.pyplot as plt

class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x,):
        x = x.view(-1, 64)
        return self.linear(x)

input_size = 64
num_classes = 6
time = 64     
num_epochs = 6
network = Network(dt=1.0)
_BATCH_SIZE = 300

inpt = Input(64, shape=(1, 64)); network.add_layer(inpt, name='A')
middle = LIFNodes(n=40, thresh=-52 + torch.randn(40)); network.add_layer(middle, name='B')
center = LIFNodes(n=40, thresh=-52 + torch.randn(40)); network.add_layer(center, name='C')
final = LIFNodes(n=40, thresh=-52 + torch.randn(40)); network.add_layer(final, name='D')
output = LIFNodes(n=40, thresh=-52 + torch.randn(40)); network.add_layer(output, name='E')
network.add_connection(Connection(inpt, middle, w=torch.randn(inpt.n, middle.n)), 'A', 'B')
network.add_connection(Connection(middle, center, w=torch.randn(middle.n, center.n)), 'B', 'C')
network.add_connection(Connection(center, final, w=torch.randn(center.n, final.n)), 'C', 'D')
network.add_connection(Connection(final, output, w=torch.randn(final.n, output.n)), 'D', 'E')
network.add_connection(Connection(output, output, w=torch.randn(output.n, output.n)), 'E', 'E')

inpt_monitor = Monitor(obj=inpt, state_vars=("s", "v"), time=500,)
middle_monitor = Monitor(obj=middle, state_vars=("s", "v"), time=500,)
center_monitor = Monitor(obj=center, state_vars=("s", "v"), time=500,)
final_monitor = Monitor(obj=final, state_vars=("s", "v"), time=500,)
out_monitor = Monitor(obj=output, state_vars=("s", "v"), time=500,)

network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)

npzfile = np.load("C:/Users/name/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x']
y = npzfile['y']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
x_train = torch.from_numpy(x_train).float()
y_train = torch.from_numpy(y_train).float()
loader = zip(poisson_loader(x_train * 0.20, time=20), iter(y_train))
training_pairs = []
for i, (datum, y_train) in enumerate(loader):
    network.run(inputs={'A': datum}, time=64)
    training_pairs.append([network.monitors['E'].get('s').sum(-1), y_train])
    network.reset_state_variables()
    if (i + 1) % 50 == 0: print('Train progress: (%d / 500)' % (i + 1))
    if (i + 1) == 500: print(); break

model = LogisticRegression(input_size, num_classes); criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(num_epochs):
    for i, (s, y_train) in enumerate(training_pairs):
        optimizer.zero_grad(); output = model(s.float())
        y_train = y_train.view(-1, 6)
        y_train = torch.argmax(y_train, dim=-1)
        loss = criterion(output, y_train.long())
        loss.backward(); optimizer.step()


x_test = torch.from_numpy(x_test).float()
y_test = torch.from_numpy(y_test).long()
test_pairs = []

for i, (x_test, y_test) in enumerate(zip(x_test.view(-1, 64), y_test)):
    inputs = {'A': x_test.repeat(time, 1)}
    network.run(inputs=inputs, time=time)
    test_pairs.append([network.monitors['E'].get('s').sum(-1), y_test])
    network.reset_state_variables()

    if (i + 1) % 50 == 0: print('Test progress: (%d / 500)' % (i + 1))
    if (i + 1) == 500: print(); break

correct, total = 0, 0
for s, y_test in test_pairs:
    output = model(s.float()); _, predicted = torch.max(output.data, 1)
    total += 1
    #print(y_test.long())
    y_test = torch.argmax(y_test, dim=-1)
    correct += int(predicted == y_test.long())
    accuracy = 100 * correct / total
    #print(y_test.long())

print('Accuracy of logistic regression on test examples: %.2f %%\n' % (100 * correct / total))
print("Epoch: {}. Loss: {}. Accuracy: {}.".format(epoch, loss.item(), accuracy))
torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss,
            }, "C:/Users/name/Desktop/myo-python-1.0.4/bindsnet-master/bindsnet/pytorchsession/snn")

The bindsnet Poisson encoder expects non-negative inputs because it treats every input value as a firing rate, and your input tensor apparently contains negative values.
You could clip the negative values at zero, or rescale the data so that every value is non-negative before encoding it.
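For example, here is a minimal sketch of both options, assuming x is the array loaded from your train_set.npz (the path below is just a placeholder):

import numpy as np
import torch
from bindsnet.encoding import poisson_loader

npzfile = np.load("train_set.npz")  # placeholder path; point this at your own file
x = npzfile['x']

# Option 1: clip negative values to zero.
x_nonneg = np.clip(x, 0, None)

# Option 2: min-max scale everything into [0, 1] instead
# (assumes x is not constant, i.e. x.max() > x.min()).
x_nonneg = (x - x.min()) / (x.max() - x.min())

x_train = torch.from_numpy(x_nonneg).float()

# Every entry is now >= 0, so the assertion in poisson() passes.
loader = poisson_loader(x_train * 0.20, time=20)

Which option is more appropriate depends on what your values represent: scaling preserves the relative differences between samples, while clipping simply discards the negative part of the signal.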