Can you help me?
I'm trying to write code for disease diagnosis from retinal (OCT) images. I've tried a couple of versions, but each one fails with a different error.
First code:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.optim as optim
import numpy as np
train_transforms = transforms.Compose([transforms.Grayscale(),
                                       transforms.Resize(255),
                                       transforms.RandomRotation(38),
                                       transforms.RandomResizedCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.5], [0.5])])
test_transforms = transforms.Compose([transforms.Grayscale(),
                                      transforms.Resize(255),
                                      transforms.CenterCrop(244),
                                      transforms.ToTensor()])
train_dataset = torchvision.datasets.ImageFolder(root='C:/OCT2017/train', transform=train_transforms)
test_dataset = torchvision.datasets.ImageFolder(root='C:/OCT2017/test', transform=test_transforms)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)
class NN(nn.Module):
    def __init__(self, input_size, num_classes):
        super(NN, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return x
class OCTModel(nn.Module):
    def __init__(self, in_channels=1, num_classes=4):
        super(OCTModel, self).__init__()
        # Convolution 1
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=1)
        self.relu1 = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2)
        self.dropout1 = nn.Dropout(p=0.2)
        self.relu2 = nn.ReLU()
        self.fc1 = nn.Linear(256, num_classes)

    def forward(self, x):
        # Convolution 1
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.relu2(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
in_channels = 1
num_classes = 4
learning_rate = 0.001
batch_size = 10
num_epochs = 1
model = OCTModel().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(train_loader, 0):
        data = data.to(device=device)
        targets = targets.to(device=device)
        scores = model(data)
        loss = criterion(scores, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def check_accuracy(loader, model):
    if loader.dataset.train:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on test data")
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x)
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
        print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')
    model.train()

check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
It fails with:
RuntimeError: CUDA out of memory. Tried to allocate 152.00 MiB (GPU 0; 2.00 GiB total capacity; 1.20 GiB already allocated; 121.74 MiB free; 9.86 MiB cached)
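My guess is that the memory error comes from pushing batches of 100 grayscale 224x224 images through the conv layer on a 2 GB GPU, so I also tried smaller batches like this (batch_size=16 is just a number I picked, not something I'm sure about):

# Smaller batches so the 2 GB GPU has room for the conv activations.
# batch_size=16 is an arbitrary guess on my part; I also switched
# shuffle=True for training, which I think is usually recommended.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=16, shuffle=False)

But I don't know whether this is the real fix or whether the model itself needs smaller activations.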
Second code:
class OCTModel(nn.Module):
    def __init__(self, in_channels=1, num_classes=4):
        super(OCTModel, self).__init__()
        # Convolution 1
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=1)
        self.relu1 = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2)
        self.dropout1 = nn.Dropout(p=0.2)
        self.relu2 = nn.ReLU()
        self.fc1 = nn.Linear(256, num_classes)

    def forward(self, x):
        # Convolution 1
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.relu2(x)
        x = self.fc1(x)
        return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_size = 784
num_classes = 4
learning_rate = 0.001
batch_size = 32
num_epochs = 1
train_transforms = transforms.Compose([transforms.Grayscale(),
                                       transforms.Resize(255),
                                       transforms.RandomRotation(38),
                                       transforms.RandomResizedCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.5], [0.5])])
test_transforms = transforms.Compose([transforms.Grayscale(),
                                      transforms.Resize(255),
                                      transforms.CenterCrop(244),
                                      transforms.ToTensor()])
train_dataset = torchvision.datasets.ImageFolder(root='C:/OCT2017/train', transform=train_transforms)
test_dataset = torchvision.datasets.ImageFolder(root='C:/OCT2017/test', transform=test_transforms)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
model = OCTModel().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(train_loader, 0):
        data = data.to(device=device)
        targets = targets.to(device=device)
        data = data.reshape(data.shape[0], -1)
        scores = model(data)
        loss = criterion(scores, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def check_accuracy(loader, model):
    if loader.dataset.train:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on test data")
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            x = x.reshape(x.shape[0], -1)
            scores = model(x.unsqueeze(dim=1))
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
        print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')
    model.train()

check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
This one fails with:
RuntimeError: Expected 4-dimensional input for 4-dimensional weight 32 1, but got 2-dimensional input of size [32, 50176] instead
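My guess here is that data.reshape(data.shape[0], -1) in the training loop flattens each image to a vector (32 x 50176, where 50176 = 224*224) before it reaches conv1, which expects 4-D input of shape (batch, channels, height, width). I tried leaving the images in their 4-D shape like this, but I'm not sure the rest of the model is consistent with that (in particular I suspect nn.Linear(256, num_classes) is not the right size for 224x224 inputs):

for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(train_loader, 0):
        data = data.to(device=device)      # stays (N, 1, 224, 224)
        targets = targets.to(device=device)
        # no reshape here, so conv1 gets the 4-D tensor it expects
        scores = model(data)
        loss = criterion(scores, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

Is that the right direction, or am I misunderstanding what the error means?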