Hello Mr. @ptrblck, I have same problem,
This is my program
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data as Data
import matplotlib.pyplot as plt
import sklearn.metrics as sm
from torchstat import stat
torch.cuda.set_device(1)
n_gpu = torch.cuda.device_count()
print(n_gpu)
train_x = np.load(‘Thermal_data/np/np_thermal_3224_train_x.npy’)
train_y = np.load(‘Thermal_data/np/np_thermal_3224_train_y.npy’)
test_x = np.load(‘Thermal_data/np/np_thermal_3224_test_x.npy’)
test_y = np.load(‘Thermal_data/np/np_thermal_3224_test_y.npy’)
print("\nShape of train_x:",train_x.shape,
“\nShape of train_y:”,train_y.shape,
“\nShape of test_x:”,test_x.shape,
“\nShape of test_y:”,test_y.shape,)
train_x = np.reshape(train_x, [-1, 1, 768, 1])
test_x = np.reshape(test_x, [-1, 1, 768, 1])
train_x = torch.from_numpy(train_x)
train_y = torch.from_numpy(train_y)
test_x = torch.from_numpy(test_x)
test_y = torch.from_numpy(test_y)
print("\nShape of train_x:",train_x.shape,
“\nShape of train_y:”,train_y.shape,
“\nShape of test_x:”,test_x.shape,
“\nShape of test_y:”,test_y.shape,)
batchSize = 64
torch_dataset = Data.TensorDataset(train_x,train_y)
train_loader = Data.DataLoader(dataset=torch_dataset,batch_size=batchSize,shuffle=True,num_workers=0)
class Stacked_CNN_Net(nn.Module):
def init(self):
super(Stacked_CNN_Net, self).init()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(6, 1)),
nn.BatchNorm2d(64),
nn.ReLU(True),
)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 1)),
nn.BatchNorm2d(64),
nn.ReLU(True),
)
self.layer3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(6, 1)),
nn.BatchNorm2d(128),
nn.ReLU(True),
)
self.layer4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 1)),
nn.BatchNorm2d(128),
nn.ReLU(True),
)
self.fc = nn.Sequential(
nn.Linear(96512, 14)
)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
print(x.shape)
x = x.view(x.size(0), -1)
print(x.shape)
x = self.fc(x)
return x
lr_list = []
LR = 0.0001
net = Stacked_CNN_Net().cuda(2)
opt = torch.optim.Adam(net.parameters(),lr=LR)
loss_func = nn.CrossEntropyLoss().cuda(2)
params = list(net.parameters())
k = 0
epoch_list = []
accuracy_list = []
loss_list = []
def flat(data):
data=np.argmax(data,axis=-1)
return data
for epoch in range(50):
net.train()
for step,(x,y) in enumerate(train_loader):
x = x.type(torch.FloatTensor)
x,y=x.cuda(2),y
output = net(x)
y = flat(y).cuda(2)
loss = loss_func(output,y.long())
net.zero_grad()
opt.zero_grad()
loss.backward()
opt.step()
if epoch%1==0:
net.eval()
test_x = test_x.type(torch.FloatTensor)
test_out = net(test_x.cuda(2))
pred_y = torch.max(test_out,1)[1].data.squeeze().cuda(2)
lr_list.append(opt.state_dict()[‘param_groups’][0][‘lr’])
accuracy = (torch.sum(pred_y == flat(test_y.float()).cuda(2)).type(torch.FloatTensor) / test_y.size(0)).cuda(2)
print(‘Epoch: ‘, epoch, ‘| test accuracy: %.4f’ % accuracy,’|loss:%.4f’%loss,’| params:’,str(k))
epoch_list.append(epoch)
accuracy_list.append(accuracy.item())
loss_list.append(loss.item())
print(‘Epoch_list:’,epoch_list,‘Accuracy_list:’,accuracy_list,‘Loss_list:’,loss_list)
And this is my error
3
Shape of train_x: (10232, 768)
Shape of train_y: (10232,)
Shape of test_x: (4386, 768)
Shape of test_y: (4386,)
Shape of train_x: torch.Size([10232, 1, 768, 1])
Shape of train_y: torch.Size([10232])
Shape of test_x: torch.Size([4386, 1, 768, 1])
Shape of test_y: torch.Size([4386])
torch.Size([64, 128, 754, 1])
torch.Size([64, 96512])
ValueError Traceback (most recent call last)
/tmp/ipykernel_783793/3164811453.py in
93 output = net(x)
94 y = flat(y).cuda(2)
—> 95 loss = loss_func(output,y.long())
96 net.zero_grad()
97 opt.zero_grad()
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
1148
1149 def forward(self, input: Tensor, target: Tensor) → Tensor:
→ 1150 return F.cross_entropy(input, target, weight=self.weight,
1151 ignore_index=self.ignore_index, reduction=self.reduction,
1152 label_smoothing=self.label_smoothing)
/usr/local/lib/python3.8/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
2844 if size_average is not None or reduce is not None:
2845 reduction = _Reduction.legacy_get_string(size_average, reduce)
→ 2846 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
2847
2848
ValueError: Expected input batch_size (64) to match target batch_size (0).
How to solve this error?
Thank you