Hi,
I'm getting an error in my code. How can I fix it?
My code:

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import torchvision.transforms as transforms

torch.manual_seed(1)
# Hyper-parameters
EPOCH = 1
BATCH_SIZE = 500
LR = 0.001
path = '/Users/lionardo/Desktop/data/train'
# traindir = os.path.join(path, 'train')
# load our dataset
train_dataset = torchvision.datasets.ImageFolder(root=path, transform=torchvision.transforms.ToTensor())
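One thing to check at this step: ImageFolder usually decodes images as 3-channel RGB, while conv1 below expects in_channels=1. A minimal sketch of a transform pipeline that forces single-channel input (assuming the images can be treated as grayscale):

# minimal sketch: force single-channel input to match in_channels=1 (assumes grayscale data)
transform = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),  # RGB -> single channel
    transforms.ToTensor(),                        # PIL image -> float tensor in [0, 1]
])
train_dataset = torchvision.datasets.ImageFolder(root=path, transform=transform)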
# define test data
n = len(train_dataset)  # total number of examples
# train_data_set = transforms.ToPILImage()(train_dataset)  # note: ToPILImage converts one tensor at a time, not a whole dataset
# train_dataset = torchvision.transforms.functional.to_tensor(train_dataset)
n_test = int(0.1 * n)  # take ~10% for test
test_x = torch.utils.data.Subset(train_dataset, range(n_test))  # take the first 10%
#torch.unsqueeze(test_x, dim=1).type(torch.FloatTensor)[:200]/255.
#Make the dataset iterable
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
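As a side note, taking the first 10% of an ImageFolder with Subset usually yields images from a single class, because ImageFolder sorts samples by class folder. A minimal sketch of a random split instead, assuming the same 90/10 ratio is wanted:

# minimal sketch of a random 90/10 split (uses n and train_dataset from above)
from torch.utils.data import random_split

n_test = int(0.1 * n)
train_subset, test_subset = random_split(
    train_dataset,
    [n - n_test, n_test],
    generator=torch.Generator().manual_seed(1),  # make the split reproducible
)
train_loader = Data.DataLoader(dataset=train_subset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = Data.DataLoader(dataset=test_subset, batch_size=BATCH_SIZE, shuffle=False)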
#define the network
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # mri lf layer 1
        self.conv1 = nn.Sequential(      # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,           # input channels
                out_channels=16,         # n_filters
                kernel_size=5,           # filter size
                stride=1,                # filter movement/step
                padding=2,
            ))
        self.batch1 = nn.BatchNorm2d(16)  # no trailing comma: a comma here makes this a tuple, not a layer
        # output shape (16, 28, 28)
        self.relu1 = nn.LeakyReLU(0.2, True)  # activation
        # mri hf layer 1
        self.conv2 = nn.Sequential(      # input shape (16, 28, 28)
            nn.Conv2d(
                in_channels=16,
                out_channels=32,
                kernel_size=5,
                stride=1,
                padding=2,
            ))
        self.batch2 = nn.BatchNorm2d(32)  # output shape (32, 28, 28)
        self.relu2 = nn.LeakyReLU(0.2, True)
        self.conv3 = nn.Sequential(      # input shape (32, 28, 28)
            nn.Conv2d(
                in_channels=32,
                out_channels=64,
                kernel_size=5,
                stride=1,
                padding=2,
            ))
        self.batch3 = nn.BatchNorm2d(64)  # output shape (64, 28, 28)
        self.relu3 = nn.LeakyReLU(0.2, True)
        self.conv4 = nn.Sequential(      # input shape (64, 28, 28)
            nn.Conv2d(
                in_channels=64,
                out_channels=32,
                kernel_size=5,
                stride=1,
                padding=2,
            ))
        self.batch4 = nn.BatchNorm2d(32)  # output shape (32, 28, 28)
        self.relu4 = nn.LeakyReLU(0.2, True)
        self.conv5 = nn.Sequential(      # input shape (32, 28, 28)
            nn.Conv2d(
                in_channels=32,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,
            ))
        self.batch5 = nn.BatchNorm2d(16)  # output shape (16, 28, 28)
        self.relu5 = nn.LeakyReLU(0.2, True)
        self.conv6 = nn.Sequential(      # input shape (16, 28, 28)
            nn.Conv2d(
                in_channels=16,
                out_channels=1,
                kernel_size=5,
                stride=1,
                padding=2,
            ))
        self.batch6 = nn.BatchNorm2d(1)   # output shape (1, 28, 28)
        self.relu6 = nn.LeakyReLU(0.2, True)
    def forward(self, x, y):
        # x = x.reshape(-1, 1, 28, 28)  # careful: adjust to your own input shape
        # branch 1: run x through all six blocks, chaining x1 (not the original x)
        x1 = self.conv1(x)
        x1 = self.batch1(x1)
        x1 = self.relu1(x1)   # "slef" -> "self"
        x1 = self.conv2(x1)
        x1 = self.batch2(x1)
        x1 = self.relu2(x1)
        x1 = self.conv3(x1)
        x1 = self.batch3(x1)
        x1 = self.relu3(x1)
        x1 = self.conv4(x1)
        x1 = self.batch4(x1)
        x1 = self.relu4(x1)
        x1 = self.conv5(x1)
        x1 = self.batch5(x1)
        x1 = self.relu5(x1)
        x1 = self.conv6(x1)
        x1 = self.batch6(x1)
        x1 = self.relu6(x1)
        # branch 2: run y through the same (shared-weight) layers;
        # no "return x1" here, or everything below becomes dead code
        x2 = self.conv1(y)
        x2 = self.batch1(x2)
        x2 = self.relu1(x2)
        x2 = self.conv2(x2)
        x2 = self.batch2(x2)
        x2 = self.relu2(x2)
        x2 = self.conv3(x2)
        x2 = self.batch3(x2)
        x2 = self.relu3(x2)
        x2 = self.conv4(x2)
        x2 = self.batch4(x2)
        x2 = self.relu4(x2)
        x2 = self.conv5(x2)
        x2 = self.batch5(x2)
        x2 = self.relu5(x2)
        x2 = self.conv6(x2)
        x2 = self.batch6(x2)
        x2 = self.relu6(x2)
        x = torch.cat((x1, x2), 1)  # fuse x1 and x2 along the channel dimension -> (N, 2, 28, 28)
        x = torch.tanh(x)
        # print(x.size())  # debug
        x = x.reshape(-1)  # note: flattening breaks SSIM below, which expects (N, C, H, W) input
        return x
# instantiate the network
cnn = CNN()
print(cnn)
# define the optimizer and loss function
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)  # optimize all cnn parameters
loss_func = SSIM()  # SSIM is not defined in torch itself; see the sketch below
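Note that PyTorch ships no built-in SSIM, so SSIM() raises a NameError unless it is defined or imported from somewhere. A minimal sketch, assuming the third-party pytorch-msssim package (pip install pytorch-msssim); since SSIM is a similarity (higher is better), the usual trick is to minimize 1 - SSIM:

# minimal sketch, assuming the pytorch-msssim package is installed
from pytorch_msssim import SSIM

# data_range=1.0 matches ToTensor's [0, 1] scale; channel must match your tensors' channel count
ssim_module = SSIM(data_range=1.0, channel=1)

def loss_func(pred, target):
    # both inputs must be 4-D (N, C, H, W); SSIM is a similarity, so use 1 - SSIM as the loss
    return 1.0 - ssim_module(pred, target)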
# training loop
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        # b_x = b_x.reshape(-1)
        # b_y = b_y.reshape(-1)
        # careful: ImageFolder yields (image, label), so b_y is a class-index tensor, not a second image
        output = cnn(b_x, b_y)                                   # cnn output
        loss = loss_func(output, b_x) + loss_func(output, b_y)
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()        # backpropagation, compute gradients
        optimizer.step()       # apply gradients
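To separate model bugs from data-pipeline bugs, it can help to push dummy tensors through the network first. A quick sanity check, assuming the (1, 28, 28) input shape from the comments above:

# quick shape sanity check with dummy inputs (assumes the (1, 28, 28) shape from the comments)
dummy_x = torch.randn(4, 1, 28, 28)  # batch of 4 single-channel 28x28 images
dummy_y = torch.randn(4, 1, 28, 28)
out = cnn(dummy_x, dummy_y)
print(out.shape)  # torch.Size([6272]) == 4 * 2 * 28 * 28 after the concat and flatten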