# Augmentation for training; plain conversion for validation.
# Input images are 28x28x1 uint8 arrays (see Dataset_load), so both pipelines
# must produce 28x28 tensors for the model.
data_transforms = {
    'train': transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomRotation(15),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]),
    # BUG FIX: removed CenterCrop(224) — the images are 28x28, so a 224 crop
    # either fails or zero-pads to 224x224 (torchvision-version dependent) and
    # would feed the model geometry that training never sees.
    'valid': transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
    ]),
}
class Dataset_load(Dataset):
    """CSV-backed image dataset.

    Each CSV row holds the label in column 0 followed by 784 pixel values,
    reshaped to a 28x28 single-channel uint8 image. An optional transform
    is applied to the image before it is returned.
    """

    def __init__(self, file_path, transform=None):
        self.data = pd.read_csv(file_path)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        row = self.data.iloc[index]
        label = row.iloc[0]
        # Columns 1.. are flat pixel values; rebuild the HxWxC image array.
        image = row.iloc[1:].values.astype(np.uint8).reshape((28, 28, 1))
        if self.transform is not None:
            image = self.transform(image)
        return image, label
# Build datasets and loaders from the CSV paths defined elsewhere in the file.
train_dataset = Dataset_load(train_dir, transform=data_transforms['train'])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = Dataset_load(test_dir, transform=data_transforms['valid'])
# BUG FIX: evaluation data must not be shuffled — shuffle=True on the test
# loader adds nondeterminism for no benefit and makes per-sample comparison
# across runs impossible.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
import torch.nn.functional as F
class Net(nn.Module):
    """CNN classifier for 28x28 single-channel images with 24 classes.

    Three 3x3 conv layers (1->16->32->64 channels, padding 1 keeps spatial
    size), 2x2 max-pooling after the first two, adaptive max-pooling to 4x4
    after the third, then four fully-connected layers with dropout.
    forward() returns per-class log-probabilities.
    """

    def __init__(self):
        super(Net, self).__init__()
        # input depth, output depth, 3x3 kernel; padding 1 preserves H and W
        self.conv1 = nn.Conv2d(1, 16, kernel_size=(3, 3), padding=(1, 1), stride=(1, 1))
        self.conv2 = nn.Conv2d(16, 32, kernel_size=(3, 3), padding=(1, 1), stride=(1, 1))
        self.conv3 = nn.Conv2d(32, 64, kernel_size=(3, 3), padding=(1, 1), stride=(1, 1))
        # adaptive pooling pins the final feature map at 4x4 regardless of input size
        self.adapt = nn.AdaptiveMaxPool2d((4, 4))
        # 2x2 pooling halves spatial dimensions
        self.pool = nn.MaxPool2d(2, 2)
        # dropout between fully-connected layers
        self.drop = nn.Dropout(p=0.2)
        # fc head: 64*4*4 flattened features -> 24 class scores
        self.fc1 = nn.Linear(64 * 4 * 4, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 24)

    def forward(self, x):
        x = self.pool(F.leaky_relu(self.conv1(x)))
        x = self.pool(F.leaky_relu(self.conv2(x)))
        x = self.adapt(F.leaky_relu(self.conv3(x)))
        # flatten to (batch, 64*4*4)
        x = x.view(x.size(0), -1)
        x = self.drop(x)
        x = F.leaky_relu(self.fc1(x))
        x = self.drop(x)
        x = F.leaky_relu(self.fc2(x))
        x = self.drop(x)
        x = F.leaky_relu(self.fc3(x))
        x = self.drop(x)
        x = self.fc4(x)
        # BUG FIX: pass dim=1 explicitly — the implicit dim is deprecated
        # (the UserWarning in the captured output) and log-probabilities must
        # be taken over the class axis.
        return F.log_softmax(x, dim=1)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# BUG FIX: the model outputs log_softmax values and the loader yields integer
# class indices of shape [batch] (see the printed target.shape in the captured
# output), so the correct loss is NLLLoss. BCEWithLogitsLoss expects targets
# shaped like the logits ([batch, 24]) and raised the ValueError in the
# captured traceback.
criterion = nn.NLLLoss()
# decay LR by 10x every 4 epochs (must be stepped once per epoch in train())
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
def train(n_epochs=100):
    """Train the module-level `model` on `train_loader` for n_epochs.

    Uses the module-level optimizer, criterion and exp_lr_scheduler, and
    prints the average per-sample training loss after each epoch.
    """
    for epoch in range(1, n_epochs + 1):
        # running sum of per-sample losses for this epoch
        train_loss = 0.0
        model.train()
        for data, target in train_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: gradients of the loss w.r.t. model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # loss.item() is the batch mean; scale back to a per-sample sum
            train_loss += loss.item() * data.size(0)
        # BUG FIX: the loss was accumulated per sample, so normalize by the
        # number of samples (len of the dataset), not the number of batches.
        train_loss = train_loss / len(train_loader.dataset)
        # BUG FIX: step the scheduler once per epoch — it was created at
        # module level but never stepped, so StepLR never took effect.
        exp_lr_scheduler.step()
        # BUG FIX: the original format string ended in '}}', which printed a
        # stray literal '}' after the loss value.
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
train(10)
# --- Captured notebook output (commented out so the file stays valid Python) ---
# torch.Size([32, 1, 28, 28])
# torch.Size([32])
# /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:61: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
# ---------------------------------------------------------------------------
# ValueError                                Traceback (most recent call last)
# <ipython-input-73-c813bd3a9aca> in <module>()
#       1 from torch.autograd import Variable
# ----> 2 train(10)
# 3 frames
# /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
#    2159
#    2160     if not (target.size() == input.size()):
# -> 2161         raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
#    2162
#    2163     return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
# ValueError: Target size (torch.Size([32])) must be the same as input size (torch.Size([32, 24]))
# NOTE(review): root cause — BCEWithLogitsLoss requires targets shaped like the
# logits; with integer class labels and log_softmax outputs, use nn.NLLLoss.