I want to know the cause of the low accuracy of a neural network based on logistic regression

I created a neural network using a logistic regression model implemented in PyTorch, but the accuracy is low.
I want to find the cause. Where should I check?

import torch
import torch.nn as nn
import numpy as np
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
from sklearn.model_selection import train_test_split
from bindsnet.encoding import poisson_loader
import matplotlib.pyplot as plt
# define the model
class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)
        self.dropout = nn.Dropout(0.5)  # note: defined but never applied in forward

    def forward(self, x):
        x = x.view(-1, 64)  # flatten to (batch, input_size)
        return self.linear(x)
input_size = 64
num_classes = 6
time = 64
num_epochs = 10
network = Network(dt=1.0)
_BATCH_SIZE = 300

inpt = Input(64, shape=(1,64)); network.add_layer(inpt, name='A')
middle = LIFNodes(900, thresh=-52 + torch.randn(900)); network.add_layer(middle, name='B')
center = LIFNodes(900, thresh=-52 + torch.randn(900)); network.add_layer(center, name='C')
final = LIFNodes(900, thresh=-52 + torch.randn(900)); network.add_layer(final, name='D')
output = LIFNodes(6, thresh=-52 + torch.randn(6)); network.add_layer(output, name='E')

# fixed random weights; no learning rule is attached, so these connections are never trained
network.add_connection(Connection(inpt, middle, w=torch.randn(inpt.n, middle.n)), 'A', 'B')
network.add_connection(Connection(middle, center, w=torch.randn(middle.n, center.n)), 'B', 'C')
network.add_connection(Connection(center, final, w=torch.randn(center.n, final.n)), 'C', 'D')
network.add_connection(Connection(final, output, w=torch.randn(final.n, output.n)), 'D', 'E')
network.add_connection(Connection(output, output, w=torch.randn(output.n, output.n)), 'E', 'E')

inpt_monitor = Monitor(obj=inpt, state_vars=("s", "v"), time=500)
middle_monitor = Monitor(obj=middle, state_vars=("s", "v"), time=500)
center_monitor = Monitor(obj=center, state_vars=("s", "v"), time=500)
final_monitor = Monitor(obj=final, state_vars=("s", "v"), time=500)
out_monitor = Monitor(obj=output, state_vars=("s", "v"), time=500)

# attach the monitors to the network
network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

# these spike-only monitors reuse the names 'A'..'E' and replace the monitors registered above
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)


npzfile = np.load("C:/Users/tazawa/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x']
y = npzfile['y']
# split x and y together so samples and labels stay aligned
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# convert to tensors
x_train = torch.from_numpy(x_train).float()
y_train = torch.from_numpy(y_train).float()
x_train = torch.clamp(x_train, min=0, max=100)
# encode the inputs as Poisson spike trains
loader = zip(poisson_loader(x_train * 0.20, time=20), iter(y_train))

training_pairs = []
for i, (datum, label) in enumerate(loader):
    inputs = {'A': datum.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    training_pairs.append([network.monitors['E'].get('s').sum(-1), label])
    network.reset_state_variables()
    if (i + 1) % 500 == 0: print('Train progress: (%d / 10000)' % (i + 1))
    if (i + 1) == 10000: print(); break

# create the classifier, loss, and optimizer once, after collecting the spike data
model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# train the classifier on (spikes, label) pairs
for epoch in range(num_epochs):
    for i, (s, label) in enumerate(training_pairs):
        optimizer.zero_grad()
        out = model(s.float().softmax(0))
        target = torch.argmax(label.view(-1, 6), dim=-1)  # one-hot label -> class index
        loss = criterion(out, target.long())
        loss.backward()
        optimizer.step()

x_test = torch.from_numpy(x_test).float()
y_test = torch.from_numpy(y_test).long()
x_test = torch.clamp(x_test, min=0, max=100)
loader = zip(poisson_loader(x_test * 0.20, time=20), iter(y_test))
test_pairs = []
model.eval()
for i, (datum, label) in enumerate(loader):
    inputs = {'A': datum.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    test_pairs.append([network.monitors['E'].get('s').sum(-1), label])
    network.reset_state_variables()

    if (i + 1) % 500 == 0: print('Test progress: (%d / 1000)' % (i + 1))
    if (i + 1) == 1000: print(); break

correct, total = 0, 0
for s, label in test_pairs:
    out = model(s.float().softmax(0))
    _, predicted = torch.max(out.data.softmax(0), 1)
    total += 1
    target = torch.argmax(label, dim=-1)
    correct += int(predicted == target.long())

accuracy = 100 * correct / total
print('Accuracy of logistic regression on test examples: %.2f %%\n' % accuracy)

You could try to overfit a small dataset, e.g. just 10 samples, by playing around with the hyperparameters, and check whether this simple model is able to learn the data. Once this works, you could scale the use case up again by using more data.
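For example, a minimal sanity check could look like this (a sketch reusing the names from the code above; the 10-sample slice and 500 epochs are arbitrary choices):

# overfitting sanity check: a working model should reach ~100% accuracy on 10 samples
small = training_pairs[:10]
model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(500):
    for s, label in small:
        optimizer.zero_grad()
        out = model(s.float())
        target = torch.argmax(label.view(-1, 6), dim=-1)
        loss = criterion(out, target.long())
        loss.backward()
        optimizer.step()

correct = sum(int(torch.argmax(model(s.float()), 1) == torch.argmax(label.view(-1, 6), dim=-1))
              for s, label in small)
print('train accuracy on 10 samples: %d / 10' % correct)

If this stays near chance level (1/6), the problem is in the data pipeline or the model itself rather than in the amount of data.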

I checked my code. I noticed that some values did not change despite the for loop, and that the first value of predicted was not the same as the next value of predicted.
I want to know how to fix it.
tensor([[-0.2721, -0.0676, 0.4114, 0.4027, -0.5927, 0.3291]],
       grad_fn=<AddmmBackward>)
tensor([2])
tensor([0, 1, 0, 0, 0, 0])
tensor([2])
tensor(1)
tensor([[-0.2721, -0.0676, 0.4114, 0.4027, -0.5927, 0.3291]],
       grad_fn=<AddmmBackward>)
tensor([2])
tensor([0, 0, 1, 0, 0, 0])
tensor([2])
tensor(2)
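One thing worth noting about the line marked below: output.data.softmax(0) in the original evaluation normalizes over the batch dimension, and for a (1, 6) output each column's softmax over its single element is 1.0, so torch.max(..., 1) sees identical values and always returns index 0. A quick illustration using the logits printed above:

# softmax over dim 0 of a (1, 6) tensor flattens every entry to 1.0
logits = torch.tensor([[-0.2721, -0.0676, 0.4114, 0.4027, -0.5927, 0.3291]])
print(logits.softmax(0))                # tensor([[1., 1., 1., 1., 1., 1.]])
print(torch.max(logits.softmax(0), 1))  # all values equal, so the index is always 0
print(torch.max(logits, 1))             # picks index 2, matching the prediction above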

correct, total = 0, 0
for s, y_test in test_pairs:
    output = model(s.float()); _, predicted = torch.max(output.data, 1)  # this line is different
    total += 1
    print(output)
    print(predicted)
    print(y_test)
    y_test = torch.argmax(y_test)
    correct += int(predicted == y_test.long())
    accuracy = 100 * correct / total
    print(predicted)
print('Accuracy of logistic regression on test examples: %.2f %%\n' % (100 * correct / total))

The accuracy is still in the 20% range. Is this normal?

No. If your current model and training routine cannot overfit the small dataset, then there might be some other issue in the code, or the model is not suitable for this use case and you would need to modify its architecture.
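As one possible direction (a hypothetical sketch, not code from this thread), the single linear layer could be replaced with a small MLP readout:

# hypothetical sketch: a small MLP readout instead of a single linear layer
class MLPReadout(nn.Module):
    def __init__(self, input_size, num_classes, hidden=128):
        super(MLPReadout, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, num_classes),
        )

    def forward(self, x):
        return self.net(x.view(-1, 64))  # flatten to (batch, input_size), as in the original model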


I noticed that the spikes could not fire because the value of thresh was too high, so I changed thresh from -52 to -60.
As a result, the accuracy rose to 40%.
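A quick way to verify that the layers actually fire after lowering thresh is to sum the recorded spikes per layer, e.g. (a sketch reusing the monitors registered earlier):

# check total spike counts per layer after a run; all-zero counts mean the threshold is still too high
for name in ['A', 'B', 'C', 'D', 'E']:
    spikes = network.monitors[name].get('s')
    print('%s: %d spikes' % (name, int(spikes.sum())))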