import torch
from torch import nn, optim
import torch.nn.functional as F
import torchvision
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])), batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])), batch_size=1000, shuffle=True)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, kernel_size=3)
        self.conv1_bn = nn.BatchNorm2d(8)
        self.conv2 = nn.Conv2d(8, 16, kernel_size=3)
        self.conv2_bn = nn.BatchNorm2d(16)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
        self.conv3_bn = nn.BatchNorm2d(32)
        self.fc1 = nn.Linear(1, 100)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1_bn(self.conv1(x)), 2))
        x = F.relu(F.max_pool2d(self.conv2_bn(self.conv2(x)), 2))
        x = F.relu(F.max_pool2d(self.conv3_bn(self.conv3(x)), 2))
        x = F.relu(self.fc1(x))
        return self.fc2(x)
net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
for epoch in range(3):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
When I run the training loop, I get the following RuntimeError: only batches of spatial targets supported (3D tensors) but got targets of dimension: 1.
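This error typically means the model's output is 4D rather than (batch, num_classes): the conv feature map is never flattened, so self.fc1 (declared with in_features=1) is applied to a (batch, 32, 1, 1) tensor, the output stays 4D, and nn.CrossEntropyLoss then interprets it as a spatial (segmentation-style) prediction that needs 3D targets instead of the 1D label vector. A minimal sketch of how the model could be adjusted, assuming standard 28x28 MNIST inputs (after the three conv+pool stages the feature map is 32 x 1 x 1, i.e. 32 features); the exact in_features value is an assumption tied to that input size:

import torch
from torch import nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, kernel_size=3)
        self.conv1_bn = nn.BatchNorm2d(8)
        self.conv2 = nn.Conv2d(8, 16, kernel_size=3)
        self.conv2_bn = nn.BatchNorm2d(16)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
        self.conv3_bn = nn.BatchNorm2d(32)
        # assumption: 28x28 input -> 13x13 -> 5x5 -> 1x1 after conv/pool, 32 channels = 32 features
        self.fc1 = nn.Linear(32, 100)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1_bn(self.conv1(x)), 2))
        x = F.relu(F.max_pool2d(self.conv2_bn(self.conv2(x)), 2))
        x = F.relu(F.max_pool2d(self.conv3_bn(self.conv3(x)), 2))
        x = x.view(x.size(0), -1)   # flatten to (batch, 32) before the linear layers
        x = F.relu(self.fc1(x))
        return self.fc2(x)          # shape (batch, 10), which CrossEntropyLoss expects with 1D labels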