I saw these kind of topics a lot in forum but generally batch dimension is missing in their tensors. But in my case channel dimension is missing and I don’t know how to put it. Here is my code

class MyMatDataset(Dataset):

```
def __init__(self, distFunc_paths, flowfield_paths):
self.distFunc_paths = distFunc_paths
self.flowfield_paths = flowfield_paths
def __getitem__(self, index):
x = scipy.io.loadmat(self.distFunc_paths[index])
x = torch.from_numpy(x['MD'])
y = scipy.io.loadmat(self.flowfield_paths[index])
y = torch.from_numpy(y['z1'])
return x, y
def __len__(self):
return len(self.distFunc_paths)
```

root = “./Data/”

distFunc_paths = []

flowfield_paths = []

for r, d, f in os.walk(root):

for files in f:

if “_distFunc.mat” in files:

distFunc_paths.append(osp.join(r, files))

if “_flowfield.mat” in files:

flowfield_paths.append(osp.join(r,files))

dataset = MyMatDataset(distFunc_paths = distFunc_paths, flowfield_paths = flowfield_paths)

train_loader = torch.utils.data.DataLoader(

dataset, batch_size=1, shuffle=True

)

class CFD_CNN(nn.Module):

def **init**(self, out_ch=500):

super(CFD_CNN, self).**init**()

self.encoder = nn.Sequential(

nn.Conv2d(1, out_ch, kernel_size=5, stride=5, padding=0),

nn.ReLU(True),

nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=5, padding=0),

nn.ReLU(True),

nn.Conv2d(out_ch, out_ch, kernel_size=2, stride=2, padding=0),

nn.ReLU(True),

)

```
self.decoder = nn.Sequential(
nn.ConvTranspose2d(out_ch, out_ch, kernel_size=2, stride=2, padding=0),
nn.ReLU(True),
nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=5, padding=0),
nn.ReLU(True),
nn.ConvTranspose2d(out_ch, 1, kernel_size=5, stride=5, padding=0),
nn.ReLU(True)
)
def forward(self, x):
x = self.encoder(x)
print(x.size())
print(x.size())
x = self.decoder(x)
print(x.size())
return x
```

net = CFD_CNN()

criterion = nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):

```
running_loss = 0.0
for i, data in enumerate(train_loader,0):
distfunc, flowfields = data
optimizer.zero_grad()
outputs = net(distfunc)
loss = criterion(outputs, flowfields)
loss.backward()
optimizer.step()
running_loss += loss.item()
```

print(“Finished Training”)

Thanks in advance.