Why does my PyTorch convolutional NN always return the same output?

I am working through a tutorial on convolutional neural nets in PyTorch, using the MNIST dataset as an example. But I was getting the same output for all labels: the net always returns the same tensor as the result. I decided to reduce the complexity of the net and decrease the number of epochs, so here is my code:

from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

plt.ion()   # interactive mode


class DigitsDataset(Dataset):

    def __init__(self, csv_file, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file.

            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.digits_frame = pd.read_csv(csv_file)

        self.transform = transform

    def __len__(self):
        return len(self.digits_frame)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        label = self.digits_frame.iloc[idx, 0]
        digit_pixels = self.digits_frame.iloc[idx, 1:]
        digit_pixels = np.asarray(digit_pixels)
        digit_pixels = digit_pixels.astype('float').reshape(28, 28)
        sample = {'label' : label, 'image' : digit_pixels}
        if self.transform:
            sample['image'] = self.transform(sample['image'])

        return sample


class GrayScaleTransform:
    '''Rescale pixel intensity from [0, 255] to [new_min, new_max].'''
    def __init__(self, new_min, new_max):
        self.new_min = new_min
        self.new_max = new_max

    def __call__(self, x):
        return x * (self.new_max - self.new_min) / 255 + self.new_min


min_max_transform = GrayScaleTransform(new_min=0, new_max=1)

train_dataset = DigitsDataset(csv_file='data/train.csv', transform=min_max_transform)
test_dataset = DigitsDataset(csv_file='data/test.csv', transform=min_max_transform)

train_loader = DataLoader(train_dataset)
test_loader = DataLoader(test_dataset)

learning_rate = 0.1
num_epochs = 2

from torch import nn

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 4, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(4, 8, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.drop_out = nn.Dropout()
        # two 2x2 max-pools shrink 28x28 to 7x7, with 8 channels from layer2
        self.fc1 = nn.Linear(7 * 7 * 8, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.drop_out(out)
        out = self.fc1(out)
        return out

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = ConvNet()
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

total_step = len(train_loader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
    for i, sample in enumerate(train_loader):
        # Forward pass
        img = sample['image'].view(-1, 1, 28, 28).float().to(device) 
        label = sample['label'].to(device)
        output = model(img)
        loss = criterion(output, label)
        loss_list.append(loss.item())

        # Backpropagation and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

But I am still getting the same tensor as output. What am I doing wrong? Why am I always getting the same output?

I would recommend playing around with the learning rate, e.g. lowering it, to see if your model is stuck from the beginning.
It looks like you are working on the MNIST dataset?
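
For instance, a minimal sketch of that change, keeping the rest of your setup as-is (1e-3 is just Adam's common default here, not a tuned value):

# Assumption: same model, criterion, and loaders as in the question;
# only the learning rate changes. 0.1 is very high for Adam.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Quick sanity check after retraining: print the logits for a few
# different inputs; if they differ, the output is no longer constant.
model.eval()
with torch.no_grad():
    for i, sample in enumerate(train_loader):
        img = sample['image'].view(-1, 1, 28, 28).float().to(device)
        print(model(img))
        if i == 2:
            break

If the outputs start to differ, the original learning rate was likely just too large, pushing the weights into a state that maps every input to the same logits.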