Hi, I am trying to train a CNN on a synthetic dataset of fairly simple random data points. Below is the snippet that generates the data, followed by the CNN model and its training code, and at the end the error message I receive. Whenever I change the batch size, it changes the input size the model says it expected, and I don't understand how. Kindly guide me through!
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from sklearn.datasets import make_classification

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

X1, Y1 = make_classification(n_samples=4000, n_features=2, n_redundant=0, n_informative=2, n_clusters_per_class=1, n_classes=2)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1, s=25, edgecolor='k')
x = torch.from_numpy(X1).to(device, dtype=torch.float)
y = torch.from_numpy(Y1).to(device, dtype=torch.long)
y = y.unsqueeze(1).repeat(1, 1)  # shape (4000, 1); repeat(1, 1) is a no-op here
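For reference, the shapes at this point as I understand them (just a quick sanity check, not part of the model):

print(x.shape)  # torch.Size([4000, 2]) -- 4000 samples, 2 features
print(y.shape)  # torch.Size([4000, 1]) -- labels after unsqueeze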
class Net(nn.Module):
    def __init__(self, Bias):
        super(Net, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, kernel_size=1, stride=1)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(2*1, 16, bias=Bias)
        self.fc2 = nn.Linear(16, 1, bias=Bias)

    def forward(self, x):
        x = self.relu(self.conv1(x))   # conv over the feature axis
        x = x.reshape(x.size(0), -1)   # flatten for the linear layers
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x

def dataloader(x, y):
    x = x.float()
    y = y.float()
    data_train = torch.cat((x, y), dim=1)
    train_size = int(0.8 * len(data_train))
    test_size = len(data_train) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(data_train, [train_size, test_size])
    train_dataset = data_train[train_dataset.indices, :]
    test_dataset = data_train[test_dataset.indices, :]
    trainx = train_dataset[:, 0:train_dataset.shape[1] - 1]
    trainy = train_dataset[:, train_dataset.shape[1] - 1]
    testx = test_dataset[:, 0:test_dataset.shape[1] - 1]
    testy = test_dataset[:, test_dataset.shape[1] - 1]
    return trainx, trainy, testx, testy
def train(model, criterion, x, y, alpha, epochs, batchsize):
    costs = []
    optimizer = torch.optim.SGD(model.parameters(), lr=alpha)
    trainx, trainy, testx, testy = dataloader(x, y)
    x = trainx.float()
    y = trainy.float().unsqueeze(1)
    data_train = torch.cat((x, y), dim=1)
    data_train_loader = DataLoader(data_train, batch_size=batchsize, shuffle=True)
    model.train()
    j = 0
    for i in range(epochs):
        for index, samples in enumerate(data_train_loader):
            j += 1
            x1 = samples[:, 0:2]                      # features: shape (batchsize, 2)
            y1 = samples[:, 2].long().reshape(-1, 1)  # labels
            if j % 50 == 0:
                model.eval()
                acc = accuracy(model, testx, testy)
                print(f'Test accuracy is #{acc:.2f} , Iteration number is = {j}')
                model.train()
            cost = criterion(model(x1), y1.long())
            optimizer.zero_grad()
            cost.backward()
            optimizer.step()
            costs.append(float(cost))
    return model

def accuracy(model, x, y):
    prediction = model(x).detach().cpu().numpy()
    return (np.sum(np.argmax(prediction, axis=1).reshape(-1, 1) == np.array(y.cpu().numpy()).reshape(-1, 1)) / len(y.cpu().numpy())) * 100

biases = False
epochs = 50
alpha = 1e-2
batch_size = 16
criterion = nn.CrossEntropyLoss()
model = Net(biases).to(device)
model = train(model.to(device), criterion, x, y, alpha, epochs, batch_size)
RuntimeError: Given groups=1, weight of size [16, 1, 1], expected input[1, 16, 2] to have 1 channels, but got 16 channels instead
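To show what I mean about the batch size, here is a minimal, standalone sketch of just the shapes (the values are random and don't matter). If I'm reading the Conv1d docs right, it wants input of shape (batch, channels, length), and my 2-D batch seems to get read as an unbatched (channels, length) tensor instead:

import torch
import torch.nn as nn

conv = nn.Conv1d(1, 16, kernel_size=1, stride=1)  # weight shape: [16, 1, 1]
batch = torch.randn(16, 2)                        # what my loop feeds in: (batch_size, n_features)
# conv(batch) reproduces the error above: the 2-D tensor is treated as an
# unbatched (channels=16, length=2) input, so the "channels" equal my batch size.
out = conv(batch.unsqueeze(1))                    # (16, 1, 2): batch of 16, 1 channel, length 2
print(out.shape)                                  # torch.Size([16, 16, 2]) -- this runs fine

So presumably I need to add a channel dimension somewhere, but I'd like to understand whether unsqueezing x1 in the training loop is the right fix.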