Hi everyone, I designed my NN but I get an error about mismatched sizes: my training set has shape [77, 768] while my validation set has shape [77, 1, 3]. How can I fix this problem?
My model and training loop are:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Module(nn.Module):
    def __init__(self, D_in, H1, H2, D_out):
        super().__init__()
        # Three fully connected layers: D_in -> H1 -> H2 -> D_out
        self.linear1 = nn.Linear(D_in, H1)
        self.linear2 = nn.Linear(H1, H2)
        self.linear3 = nn.Linear(H2, D_out)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)  # no activation on the output layer
        return x
model = Module(768, 600, 360, 256)
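To be clear, the model itself seems fine with [batch, 768] inputs; a quick check with a made-up batch should come back as [batch, 256] (the batch size 4 below is arbitrary, not my real data):

dummy = torch.randn(4, 768)      # arbitrary batch of 4 samples, 768 features each
with torch.no_grad():
    print(model(dummy).shape)    # should print torch.Size([4, 256])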
for e in range(epochs):
    running_loss = 0.0
    running_corrects = 0.0
    val_running_loss = 0.0
    val_running_corrects = 0.0
    for inputs, out in train_generator:
        # inputs = torch.squeeze(inputs)  # tried this as well
        # flatten the inputs to [batch_size, 768]
        inputs = inputs.view(inputs.shape[0], -1)
        output = model(inputs)
        print(inputs.size())
        # flatten the targets the same way instead of hard-coding the batch size 77
        out = out.view(out.shape[0], -1)
        loss = criterion(output, out)
        # torch.max returns (values, indices); keep the indices as predictions,
        # matching the validation loop below
        _, preds = torch.max(output, 1)
        outputss.append(preds.max().detach().numpy())
        losses.append(loss.item())  # store the scalar value, not the graph
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # outputss.append(output.detach().numpy())
        # print(loss.item())
    # for/else: this block runs once the training loop finishes each epoch
    else:
        with torch.no_grad():
            for val_inputs, val_labels in valid_generator:
                val_inputs = val_inputs.view(val_inputs.shape[0], -1)
                val_outputs = model(val_inputs)
                val_loss = criterion(val_outputs, val_labels)
                _, val_preds = torch.max(val_outputs, 1)
                val_running_loss += val_loss.item()
                val_running_corrects += torch.sum(val_preds == val_labels.data)
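What I plan to do next is print one batch from each generator to see exactly where the shapes diverge (this assumes train_generator and valid_generator are ordinary DataLoaders that can be iterated like this):

train_x, train_y = next(iter(train_generator))
val_x, val_y = next(iter(valid_generator))
print("train batch:", train_x.shape, train_y.shape)
print("valid batch:", val_x.shape, val_y.shape)
print("after flattening:",
      train_x.view(train_x.shape[0], -1).shape,
      val_x.view(val_x.shape[0], -1).shape)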