How to use batches in an LSTM

Hi everyone.
I am an absolute beginner with neural networks, and as a first exercise I would like to use an LSTM to predict the point that follows a noisy sine sequence.
However, I am confused about how to use mini-batches, because the prediction accuracy with batching is worse than without it.
With 100 training samples and 100 epochs, batching off (batch_size=1) reaches 100% accuracy at around epoch 50 (I count a prediction as correct when it is within 0.1 of the target), whereas batch_size=10 reaches only about 25% by epoch 100 at best.
On top of that, with 1000 training samples (still batch_size=10), the accuracy again reaches 100% at around epoch 50.
I think the reason is that the optimizer is then stepped 100 times per epoch, since 1000 samples divided by a batch size of 10 gives 100 updates.
But if that is true, then training without batches is simply better than training with them, and that confuses me.
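
To put concrete numbers on that reasoning, this is my own back-of-the-envelope count of optimizer.step() calls, not something I measured:

updates_per_epoch = training_size // batch_size
# 100 samples,  batch off (batch_size=1) -> 100 updates per epoch
# 100 samples,  batch_size=10            ->  10 updates per epoch (10x fewer)
# 1000 samples, batch_size=10            -> 100 updates per epoch (same as the no-batch run)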
So, could you give me some advice?

My simple code is shown below; batch_size is greater than 1 whenever batching is used.

import math

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim


class Net(nn.Module):
    """LSTM"""

    def __init__(self, inputDim, hiddenDim, outputDim):
        super(Net, self).__init__()
        self.rnn = nn.LSTM(input_size=inputDim,
                           hidden_size=hiddenDim,
                           batch_first=True)
        self.output_layer = nn.Linear(hiddenDim, outputDim)

    def forward(self, inputs, hidden0=None):
        output, (hidden, cell) = self.rnn(inputs, hidden0)
        # predict from the last time step only
        output = self.output_layer(output[:, -1, :])
        return output
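
For reference, this is how I understand the tensor shapes going through the model (a quick sanity check on my side; the sizes just mirror the values in main() below):

model = Net(1, 5, 1)         # inputDim=1, hiddenDim=5, outputDim=1
x = torch.zeros(10, 50, 1)   # (batch, seq_len, features) because batch_first=True
y = model(x)                 # y.shape == (10, 1): one prediction per sequence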

def mkDataSet(data_size, data_length=50, freq=60., noise=0.02):
    train_x = []
    train_t = []

    for offset in range(data_size):
        train_x.append([[math.sin(2 * math.pi * (offset + i) / freq)
                         + np.random.normal(loc=0.0, scale=noise)]
                        for i in range(data_length)])
        train_t.append([math.sin(2 * math.pi * (offset + data_length) / freq)])

    return train_x, train_t
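
If it helps, the structure I expect back from this, as plain Python lists before any tensors are made:

train_x, train_t = mkDataSet(100)
# len(train_x) == 100; each train_x[i] is a 50-step sequence of 1 noisy feature
# len(train_t) == 100; each train_t[i] holds the single clean sine value that follows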

def mkBatch(train_x, train_t, batch_size, step):
    # returns the step-th consecutive slice of size batch_size (no shuffling)
    batch_x = []
    batch_t = []

    for i in range(batch_size):
        idx = step * batch_size + i
        batch_x.append(train_x[idx])
        batch_t.append(train_t[idx])

    return torch.FloatTensor(batch_x), torch.FloatTensor(batch_t)
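
Each batch then comes out shaped like this, as far as I can tell:

train_x, train_t = mkDataSet(100)
data, label = mkBatch(train_x, train_t, batch_size=10, step=0)
# data.shape  == torch.Size([10, 50, 1])  -> matches batch_first=True
# label.shape == torch.Size([10, 1])
# note: step only selects the i-th consecutive slice, so batches are never shuffled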

def main():
    """main routine"""
    training_size = 100
    epochs_num = 100
    hidden_size = 5
    batch_size = 10

    train_x, train_t = mkDataSet(training_size)

    model = Net(1, hidden_size, 1)
    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(epochs_num):
        running_loss = 0.0
        training_accuracy = 0.0

        for i in range(training_size // batch_size):
            # initialize gradients
            optimizer.zero_grad()

            data, label = mkBatch(train_x, train_t, batch_size, i)
            output = model(data)

            loss = criterion(output, label)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # a prediction counts as correct when it is within 0.1 of the target
            training_accuracy += np.sum(
                np.abs((output - label).detach().numpy()) < 0.1)

        training_accuracy /= training_size
        print('epoch %d: loss = %.5f, accuracy = %.3f' %
              (epoch + 1, running_loss, training_accuracy))

    return 0


if __name__ == '__main__':
    main()
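
Separately, would something like the following be the more standard way to batch? This is only a sketch of what I think the idiomatic torch.utils.data approach looks like (shuffle=True is the part my mkBatch lacks), and I have not confirmed it fixes my accuracy problem:

from torch.utils.data import DataLoader, TensorDataset

train_x, train_t = mkDataSet(100)
dataset = TensorDataset(torch.FloatTensor(train_x), torch.FloatTensor(train_t))
# shuffle=True recomposes the batches every epoch, unlike my fixed consecutive slices
loader = DataLoader(dataset, batch_size=10, shuffle=True)

for data, label in loader:
    pass  # same training step as in main(): zero_grad, forward, loss, backward, step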