CNN binary classifier training isn't working (validation accuracy stuck at 50%)

Hello! I am trying to build a binary classifier using a convolutional neural network. I have finished writing the model, the dataset, and the training and validation loops. Everything seems fine when I train it: the parameters in the network are registered correctly, predictions are being generated, etc. However, my validation accuracy does not budge from 50%, i.e. chance. This is strange, as I'm training and validating on a cats-vs-dogs dataset that I know should work for this architecture. Any ideas what may be causing this?
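
For what it's worth, here is roughly how I verified that the parameters are registered (a sketch that just prints each registered parameter's name and shape):

for (const auto& p : cnn->named_parameters()){
	cout << p.key() << " " << p.value().sizes() << endl;
}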

CNN architecture:

class ConvNet : public torch::nn::Module {
	public:
		ConvNet(){
			register_module("layer1", layer1);
			register_module("layer2", layer2);
			register_module("fc", fc);
		}

		torch::Tensor forward(torch::Tensor x){
			x = layer1->forward(x);
			x = layer2->forward(x);
			// flatten to (batch, channels * height * width) for the fully connected head
			x = x.view({-1, x.size(1) * x.size(2) * x.size(3)});
			x = fc->forward(x);
			return x;
		}

	private:
		// conv 3->64, 3x3, stride 1, padding 1; then ReLU and 2x2 max pool
		torch::nn::Sequential layer1{
			torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 64, 3).stride(1).padding(1)),
			torch::nn::ReLU(),
			torch::nn::MaxPool2d(torch::nn::MaxPool2dOptions({2, 2}).stride(2))
		};

		// conv 64->64, 3x3, stride 1, padding 1; then ReLU and 2x2 max pool
		torch::nn::Sequential layer2{
			torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 64, 3).stride(1).padding(1)),
			torch::nn::ReLU(),
			torch::nn::MaxPool2d(torch::nn::MaxPool2dOptions({2, 2}).stride(2))
		};

		// fully connected head ending in a single sigmoid output for BCE
		torch::nn::Sequential fc{
			torch::nn::Linear(torch::nn::LinearOptions(64 * 50 * 125, 64)),
			torch::nn::ReLU(),
			torch::nn::Linear(torch::nn::LinearOptions(64, 1)),
			torch::nn::Sigmoid()
		};
};
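
(For context: the fc input size of 64 * 50 * 125 assumes 200×500 RGB inputs. The stride-1, padding-1 convolutions preserve the spatial size and each of the two pooling layers halves it, so 200×500 → 100×250 → 50×125, with 64 channels.)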
Training loop:

void trainModel(std::shared_ptr<ConvNet> cnn, std::string folder1, std::string folder2){
	cout << "training.." << endl;
	cnn->train();

	double learning_rate = 0.01;
	torch::optim::Adam optimizer(cnn->parameters(), torch::optim::AdamOptions(learning_rate));
	auto lossFunction = torch::nn::BCELoss();

	int batch_size = 1;
	auto dataset = CustomDataset(folder1, folder2).map(torch::data::transforms::Stack<>());
	cout << "dataset made" << endl;
	auto data_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(dataset, batch_size);

	int counter = 0;
	for (auto& batch : *data_loader){
		optimizer.zero_grad();
		torch::Tensor outputs = cnn->forward(batch.data);
		auto loss = lossFunction(outputs, batch.target);
		loss.backward();
		optimizer.step();
		// print the loss roughly every 20 iterations
		if (counter < 20){
			counter++;
		}
		else{
			cout << loss << endl;
			counter = 0;
		}
	}

	torch::save(cnn, "cnn.pt");
}

Validation loop:

void testModel(std::shared_ptr<ConvNet> cnn, std::string folder1, std::string folder2){
	cout << "testing.." << endl;
	auto dataset = CustomDataset(folder1, folder2).map(torch::data::transforms::Stack<>());
	auto data_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(dataset, 1 /*batch size*/);
	cout << "dataset made" << endl;

	cnn->eval();
	double correct = 0;
	size_t total = dataset.size().value();
	for (auto& batch : *data_loader){
		torch::Tensor outputs = cnn->forward(batch.data);
		correct += outputs.eq(batch.target).sum().item<int64_t>();
	}

	double percentage = correct / total;
	cout << "size of validation sample: " << total << endl;
	cout << "validation accuracy: " << percentage << endl;
}

Is your training loss going down? LR of 0.01 is generally a bit high for Adam in my experience.
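
For example, assuming the rest of your setup stays unchanged, I would start with something like:

torch::optim::Adam optimizer(cnn->parameters(), torch::optim::AdamOptions(1e-3));

and watch whether the printed loss trends downward over the first few hundred batches.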

I edited my loss logging to follow the algorithm from the “Training a Classifier” example and changed the LR to 0.001. Alas, the loss still does not seem to be decreasing systematically.
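
For reference, the loss reporting in my training loop now looks roughly like this (a sketch; the 200-batch reporting interval is an arbitrary choice on my part):

double running_loss = 0.0;
int i = 0;
for (auto& batch : *data_loader){
	optimizer.zero_grad();
	torch::Tensor outputs = cnn->forward(batch.data);
	auto loss = lossFunction(outputs, batch.target);
	loss.backward();
	optimizer.step();

	// accumulate the scalar loss and print a running average, as in the tutorial
	running_loss += loss.item<double>();
	if (++i % 200 == 0){
		cout << "avg loss over last 200 batches: " << running_loss / 200 << endl;
		running_loss = 0.0;
	}
}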