```
import time as t
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from random import randint
import numpy as np
data = np.load('cvd.npy',allow_pickle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.mp = nn.MaxPool2d(2)
self.fc = nn.Linear(1620, 2)
def forward(self, x):
in_size = x.size(0)
x = F.relu(self.mp(self.conv1(x)))
x = F.relu(self.mp(self.conv2(x)))
x = x.view(in_size, -1) # flatten the tensor
x = self.fc(x)
return F.log_softmax(x,dim = 1)
net = Net()
optimizer = optim.Adam(net.parameters(), lr=(1.0e-2))
counter = 0
losses = []
for epoch in range(1):
err = []
for d in data:
x,y = d
x = x/255 # normalizing the i/p image
optimizer.zero_grad()
X = torch.Tensor([[x]]) # a 1x50x50 tensor
Y = torch.Tensor([[y]]) # a 1x1 tensor
Y = Y.long()
output = net(X)
loss = F.nll_loss(output, Y[0])
losses.append(float(loss))
counter += 1
err.append(loss)
if counter%20 == 0: # backprop every 20 iters
k = torch.stack(err).mean()
counter = 0
err = []
k.backward()
optimizer.step()
print(loss)
```

I wrote this cnn to solve the cats v dogs dataset. The loss remains more or less constant for @ ~0.7.

This is my first crack at CNNs. I’m just training in a batch size of one initially. You can find the numpy file here -> https://drive.google.com/file/d/1WF_Bti7x2K93AmUrIje8RGVmrBco71_S/view?usp=sharing