Hi guys,
i’d make a chess board using CNN
I Understood input, and filter feature
… and I wrote some code, I don’t know how to decide conv2d input, output size !
I guess that i should be used a chess board size ( 8 * 8 ) as input size
but, output_size … really dont’ know
so,
I’d got some questions
- how to decide kernel_size and input size …?
- What is Output_size ???
- I make a1… d4 following youtube
but, i don’t understand how it works… would you tell me how it work ?
I used kernel_size as default,
I used input size by 8 cuz its one lane of chess board ( 8*8 )
but, I got this error “Calculated padded input size per channel: (1 x 1). Kernel size: (8 x 140273631887368). Kernel size can’t be greater than actual input size”
# My Dataset shape ( 0,1,2,3 = binary section, 4 = it's turn who it is )
# Loaded (15030, 5, 8, 8) (15030,)
# My code
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch
import torch.optim as optim
import torch.nn.functional as F
class CustomDataset(Dataset):
“”“Face Landmarks dataset.”""
def __init__(self):
loaded = np.load("process/dataset_2M.npz")
self.X = loaded["arr_0"]
self.Y = loaded["arr_1"]
print("Loaded ", self.X.shape, self.Y.shape)
# print(self.X)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], self.Y[idx]
# pass
# Loaded (20055, 5, 8, 8) (20055,) <-- DataSet Size
# Loaded (15030, 5, 8, 8) (15030,)
class Net(nn.Module):
def init(self):
super(Net, self).init()
# (in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)
self.a1 = nn.Conv2d(5, 64, kernel_size=8)
self.a2 = nn.Conv2d(64, 64, kernel_size=8)
self.a8 = nn.Conv2d(64, 64, kernel_size=8)
self.a4 = nn.Conv2d(64, 64, kernel_size=8, padding=1)
self.b1 = nn.Conv2d(16, 16, kernel_size=3)
self.b2 = nn.Conv2d(16, 16, kernel_size=3)
self.b3 = nn.Conv2d(16, 16, kernel_size=3)
self.b4 = nn.Conv2d(16, 24, kernel_size=3)
self.c1 = nn.Conv2d(24, 24, kernel_size=3)
self.c2 = nn.Conv2d(24, 24, kernel_size=3)
self.c3 = nn.Conv2d(24, 24, kernel_size=3)
self.c4 = nn.Conv2d(24, 32, kernel_size=3)
self.d1 = nn.Conv2d(32, 32, kernel_size=3)
self.d2 = nn.Conv2d(32, 32, kernel_size=3)
self.d3 = nn.Conv2d(32, 32, kernel_size=3)
self.d4 = nn.Conv2d(32, 40, kernel_size=3)
self.e1 = nn.Conv2d(40, 40, kernel_size=3)
self.e2 = nn.Conv2d(40, 40, kernel_size=3)
self.e3 = nn.Conv2d(40, 40, kernel_size=3)
self.e4 = nn.Conv2d(40, 40, kernel_size=3)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.relu(self.a1(x))
x = F.relu(self.a2(x))
x = F.relu(self.a3(x))
x = F.relu(self.a4(x))
x = F.relu(self.b1(x))
x = F.relu(self.b2(x))
x = F.relu(self.b3(x))
x = F.relu(self.b4(x))
x = F.relu(self.c1(x))
x = F.relu(self.c2(x))
x = F.relu(self.c3(x))
x = F.relu(self.c4(x))
return x
if name == “main”:
# Device configuration
device = 'cpu'
# DataSet Loading
custom = CustomDataset()
net = Net()
train_dataset = torch.utils.data.DataLoader(custom)
iterator = iter(train_dataset)
model = Net()
# Define a Loss function and optimizer
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
# Train the model
optimizer.step()
model.train()
# total_step = len(train_dataset)
for epoch in range(1):
for i, (data, labels) in enumerate(iterator):
print(data.shape)
print(labels.shape)
data = data.float()
# Forward pass
outputs = model(data)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, 1, i+1, total_step, loss.item()))
# print('%d Finished Training', total_step)