my data is[1,7*7].when I run my code with resnet.
RuntimeError: Need input.size[1] == 1 but got 3 instead.
Exception ignored in: <function WeakValueDictionary.init..remove at 0x7f3821373a60>
Traceback (most recent call last):
File “/home/zw/ztpytorch/pytorch/lib/python3.5/weakref.py”, line 117, in remove
TypeError: ‘NoneType’ object is not callable
my code like this:
import torchvision
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import math
EPOCH=5
BATCH_SIZE=256
train_data = torchvision.datasets.ImageFolder(’/home/zw/ztfd_data/delay_0_1/train’,
transform=transforms.ToTensor()
)
test_data = torchvision.datasets.ImageFolder(’/home/zw/ztfd_data/delay_0_1/test’,
transform=transforms.ToTensor()
)
#print(len(train_data))
#print(len(test_data))
train_loader=torch.utils.data.DataLoader(train_data,batch_size=256,shuffle=True)
test_loader=torch.utils.data.DataLoader(test_data,batch_size=256,shuffle=True)
#print(len(train_loader))
#print(len(train_loader))
class Bottleneck(nn.Module):
def init(self, nChannels, growthRate):
super(Bottleneck, self).init()
interChannels = 4*growthRate
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, interChannels, kernel_size=1,stride=1,padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(interChannels)
self.conv2 = nn.Conv2d(interChannels, growthRate, kernel_size=3,stride=1,
padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat((x, out), 1)
return out
class SingleLayer(nn.Module):
def init(self, nChannels, growthRate):
super(SingleLayer, self).init()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3,stride=1,
padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def init(self, nChannels, nOutChannels):
super(Transition, self).init()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1,stride=1,padding=0,
bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def init(self, growthRate=12, depth=16, reduction=0.5, nClasses=5, bottleneck=True):
super(DenseNet, self).init()
nDenseBlocks = (depth-4) // 3
if bottleneck:
nDenseBlocks //= 2
nChannels = 2*growthRate
self.conv1 = nn.Conv2d(1, nChannels, kernel_size=3, padding=1,stride=1,
bias=False)
self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
nOutChannels = int(math.floor(nChannels*reduction))
self.trans1 = Transition(nChannels, nOutChannels)
nChannels = nOutChannels
self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
nOutChannels = int(math.floor(nChannels*reduction))
self.trans2 = Transition(nChannels, nOutChannels)
nChannels = nOutChannels
self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
self.bn1 = nn.BatchNorm2d(nChannels)
self.fc = nn.Linear(nChannels, nClasses)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
layers = []
for i in range(int(nDenseBlocks)):
if bottleneck:
layers.append(Bottleneck(nChannels, growthRate))
else:
layers.append(SingleLayer(nChannels, growthRate))
nChannels += growthRate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.dense3(out)
out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 3))
out = F.log_softmax(self.fc(out))
return out
densenet=DenseNet(growthRate=12, depth=16, reduction=0.5, nClasses=5, bottleneck=True).cuda()
print(densenet)
print(‘Number of model parameters: {}’.format(
sum([p.data.nelement() for p in densenet.parameters()])))
optimizer = optim.SGD(densenet.parameters(), lr=0.1, momentum=0.9,weight_decay=1e-4,nesterov=False)
criterion = torch.nn.CrossEntropyLoss()
def train(densenet, train_loader, EPOCH):
densenet.train()
for epoch in range(EPOCH):
for step, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = densenet(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if step % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, step * len(data), len(train_loader.dataset),
100. * step / len(train_loader), loss.data[0]))
def test(densenet, test_loader):
densenet.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = densenet(data)
test_loss += criterion(output, target).data[0] # Variable.data
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
test_loss = test_loss
test_loss /= len(test_loader) # loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
if name == “main”:
train(densenet, train_loader, EPOCH=5)
test(densenet, test_loader)
PLEASE HELP ME!
Thank you very much!!!