Given groups=1, weight of size [128, 1, 5], expected input[1, 32, 187] to have 1 channels, but got 32 channels instead

I get this error and I am new to PyTorch. Please help, I would be really grateful. Thanks in advance!
Here is the error, copied verbatim:

RuntimeError Traceback (most recent call last)
in
4 # specify optimizer
5 optimizer = optim.Adam(model_1.parameters(), lr=0.001)
----> 6 model_1_validation_losses = train_by_model_and_custom_loader(model_1, train_loader_1, valid_loader_1, criterion, optimizer, 'model_ecg_heartbeat_categorization_1.pt', num_epochs, train_on_gpu)

in train_by_model_and_custom_loader(model, train_loader, valid_loader, criterion, optimizer, best_model_name, n_epochs, train_on_gpu)
32 optimizer.zero_grad()
33 # forward pass: compute predicted outputs by passing inputs to the model
---> 34 output = model(data.float())
35 #print("o/pT")
36 #print(output.shape)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

in forward(self, input)
33 #input = input.view(32, -1, 187)
34 input = input.unsqueeze(0)
---> 35 x = self.conv1(input)
36 x = self.conv2(x)
37 x = self.conv3(x)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

in forward(self, input)
50
51 def forward(self, input):
---> 52 conv1 = self.conv_1(input)
53 x = self.normalization_1(conv1)
54 x = self.swish_1(x)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
257 _single(0), self.dilation, self.groups)
258 return F.conv1d(input, self.weight, self.bias, self.stride,
--> 259 self.padding, self.dilation, self.groups)
260
261

RuntimeError: Given groups=1, weight of size [128, 1, 5], expected input[1, 32, 187] to have 1 channels, but got 32 channels instead

The following is the code that I used. I instantiated the model with:

model_1 = CNN(num_classes=5, hid_size=128).to(device)

The model code is:

class ConvNormPool(nn.Module):
    def __init__(
        self,
        input_size,
        hidden_size,
        kernel_size,
        norm_type='bachnorm'
    ):
        super().__init__()

        self.kernel_size = kernel_size
        self.conv_1 = nn.Conv1d(
            in_channels=input_size,
            out_channels=hidden_size,
            kernel_size=kernel_size
        )
        self.conv_2 = nn.Conv1d(
            in_channels=hidden_size,
            out_channels=hidden_size,
            kernel_size=kernel_size
        )
        self.conv_3 = nn.Conv1d(
            in_channels=hidden_size,
            out_channels=hidden_size,
            kernel_size=kernel_size
        )
        self.swish_1 = Swish()
        self.swish_2 = Swish()
        self.swish_3 = Swish()
        if norm_type == 'group':
            self.normalization_1 = nn.GroupNorm(
                num_groups=8,
                num_channels=hidden_size
            )
            self.normalization_2 = nn.GroupNorm(
                num_groups=8,
                num_channels=hidden_size
            )
            self.normalization_3 = nn.GroupNorm(
                num_groups=8,
                num_channels=hidden_size
            )
        else:
            self.normalization_1 = nn.BatchNorm1d(num_features=hidden_size)
            self.normalization_2 = nn.BatchNorm1d(num_features=hidden_size)
            self.normalization_3 = nn.BatchNorm1d(num_features=hidden_size)

        self.pool = nn.MaxPool1d(kernel_size=2)

    def forward(self, input):
        conv1 = self.conv_1(input)
        x = self.normalization_1(conv1)
        x = self.swish_1(x)
        x = F.pad(x, pad=(self.kernel_size - 1, 0))

        x = self.conv_2(x)
        x = self.normalization_2(x)
        x = self.swish_2(x)
        x = F.pad(x, pad=(self.kernel_size - 1, 0))

        conv3 = self.conv_3(x)
        x = self.normalization_3(conv1 + conv3)
        x = self.swish_3(x)
        x = F.pad(x, pad=(self.kernel_size - 1, 0))

        x = self.pool(x)
        return x

class CNN(nn.Module):
    def __init__(
        self,
        input_size=1,
        hid_size=256,
        kernel_size=5,
        num_classes=5,
    ):
        super().__init__()

        self.conv1 = ConvNormPool(
            input_size=input_size,
            hidden_size=hid_size,
            kernel_size=kernel_size,
        )
        self.conv2 = ConvNormPool(
            input_size=hid_size,
            hidden_size=hid_size//2,
            kernel_size=kernel_size,
        )
        self.conv3 = ConvNormPool(
            input_size=hid_size//2,
            hidden_size=hid_size//4,
            kernel_size=kernel_size,
        )
        self.avgpool = nn.AdaptiveAvgPool1d((1))
        self.fc = nn.Linear(in_features=hid_size//4, out_features=num_classes)

    def forward(self, input):
        #print("INPUT SHAPE is ")
        #print(input.shape)
        #input = input.view(32, -1, 187)
        #input = input.unsqueeze(0)
        x = self.conv1(input)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.avgpool(x)
        # print(x.shape) # num_features * num_channels
        x = x.view(-1, x.size(1) * x.size(2))
        x = F.softmax(self.fc(x), dim=1)
        return x

def train_by_model_and_custom_loader(model, train_loader, valid_loader, criterion, optimizer, best_model_name, n_epochs, train_on_gpu):
    model = model.float()
    # move tensors to GPU if CUDA is available
    if train_on_gpu:
        model.cuda()
    valid_loss_min = np.Inf  # track change in validation loss
    valid_losses = []

    for epoch in range(1, n_epochs+1):

        # keep track of training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for data, target in train_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()

                ###print(data)
                ###print(data.shape)
                ###print("FLOAT")
                ###print(data.float().shape)
                ###print(target.shape)
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data.float())
            #print("o/pT")
            #print(output.shape)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss
            train_loss += loss.item()*data.size(0)

        ######################
        # validate the model #
        ######################
        model.eval()
        for data, target in valid_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data.float())
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)

        # calculate average losses
        train_loss = train_loss/len(train_loader.dataset)
        valid_loss = valid_loss/len(valid_loader.dataset)

        valid_losses.append(valid_loss)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))

        # save model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), best_model_name)
            valid_loss_min = valid_loss

    return valid_losses

It is because you are reshaping the image into a different shape that is not compatible. What is the original shape of the data and why are you reshaping it?

Your code is a bit hard to follow because it did not paste correctly, but I think I found your error. When you define your CNN module, you give the input_size parameter a default value of 1, as seen below:

class CNN(nn.Module):
    def __init__(
        self,
        input_size=1,
        hid_size=256,
        kernel_size=5,
        num_classes=5,
    ):

Then, when you initialize your model, you don't tell it what input_size you want, so it uses the default, which is 1.
Hence your model expected 1 input channel in the first Conv1d, but it got 32.

Change your CNN initialization to

model_1 = CNN(num_classes=5, hid_size=128, input_size = 32).to(device)

Hope that helps!
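
If your data is actually a single-channel signal (for example one ECG beat of 187 samples per row, so the loader yields batches of shape (32, 187); that shape is a guess on my part), the other option is to keep input_size=1 and add the channel dimension inside the training loop instead. A rough sketch of that idea:

# hypothetical: if the loader yields batches of shape (batch, length) = (32, 187)
data = data.unsqueeze(1)        # -> (32, 1, 187): (batch, channels, length)
output = model(data.float())    # the first Conv1d now sees 1 input channel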

@nicofish Thanks! That was one of the hints that helped me get to the root cause: the shape of the data coming out of the dataloader was not correct.
Thank you so much!

One more thing: could you please tell me from the above code whether my model design is correct, i.e. are all the layers in place?
Thanks again in advance!

@Dwight_Foster Thanks! The problem was the shape of the data returned by the dataloader; it was not correct.
Thank you so much!
Also, I hope my model layers are properly in place; I'm pretty new to this stuff.

Superficially everything seems fine to me. Hope you were able to get everything working.
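
If you want to double-check that the layers line up, one option is to push a dummy batch through the model and look at the output shape. A rough sketch (this assumes single-channel beats of length 187 and that your Swish module is already defined):

# sanity check with random data of shape (batch, channels, length)
model_1 = CNN(num_classes=5, hid_size=128, input_size=1)
dummy = torch.randn(32, 1, 187)
print(model_1(dummy).shape)   # should print torch.Size([32, 5])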

Hi! How did you fix this? I am pretty new at GANs and was wondering if we could have a chat if you are free? I am facing a similar error: RuntimeError: Given transposed=1, weight of size [35, 16, 1], expected input[32, 16, 2] to have 35 channels, but got 16 channels instead.

noise_size = 35   # z_dim should be the number of features of the input data
batch_size = 32

# features_in = 35

# output data: values between 0 and 1

class Generator(nn.Module):
    def __init__(self, features_in, features):
        super().__init__()
        self.features = features
        self.gen = nn.Sequential(
            # input has features_in=35 channels, going into the first convolution
            # with kernel_size=1, stride=2, padding=1, bias=False
            nn.Conv1d(features_in, 128, kernel_size=1, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv1d(128, 64, kernel_size=1, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv1d(64, 32, kernel_size=1, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv1d(32, 16, kernel_size=1, stride=2, padding=1, bias=False),
        )

    def forward(self, x):
        x = torch.unsqueeze(x, 2)
        return self.gen(x)

class Discriminator(nn.Module):
    def __init__(self, features_in, features):
        super().__init__()
        self.features = features
        self.disc = nn.Sequential(
            nn.ConvTranspose1d(features_in, 16, 1, 2, 1, bias=False),
            nn.BatchNorm1d(16),
            nn.ReLU(True),

            nn.ConvTranspose1d(16, 32, 1, 2, 1, bias=False),
            nn.BatchNorm1d(32),
            nn.ReLU(True),

            nn.ConvTranspose1d(32, 64, 1, 2, 1, bias=False),
            nn.BatchNorm1d(64),
            nn.ReLU(True),

            nn.ConvTranspose1d(64, 128, 1, 2, 1, bias=False),
            nn.BatchNorm1d(128),
            nn.ReLU(True),

            nn.ConvTranspose1d(64, 1, 1, 2, 1, bias=False),
            # nn.Tanh()
        )

    def forward(self, x):
        return self.disc(x)

This is my model, and below is the data processing for it:

f = open('GANS-for-MediSci-main\VLOAD_HIV_dataset_10_years.pickle', 'rb')
#f = open('dynamic_dataset_cd4_7_years (1).pickle', 'rb')
dynamic_dataset = pickle.load(f)

# processing for the vload dataset
n_train_data, n_test_data, train_dataloader, test_dataloader, scaler, pred_samples = to_gan_input(dynamic_dataset, batch_size)

#Definition of hyperparameters
num_epochs = 50
lr = 0.01
betas = (0.5, 0.999)
features=8
#we take noise inputs to be able to transform them into training data inputs
#len_seq=35
len_seq=dynamic_dataset.shape[-1]

#for 1CNN GAN
generator = Generator(len_seq,features)
optimizer_g = torch.optim.Adam(generator.parameters(), lr=lr, betas=betas)
discriminator = Discriminator(len_seq,features)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=betas)
criterion = nn.BCELoss()

"""TRAIN"""

running_g_loss=0.0
running_d_loss=0.0
currentStep = 0
g_loss_array = []
d_loss_array = []

TRAINING_AMOUNT = 49 # low to test for now
SAVE_STEPS_AMOUNT = 1000 # testing for now

for epoch in range(num_epochs):
    for i, data in enumerate(train_dataloader):
        real_samples = data
        loss_g, loss_d, acc_d = train_batch(real_samples, generator, discriminator, optimizer_g, optimizer_d, criterion, noise_size)
        running_g_loss += loss_g
        running_d_loss += loss_d
        aux = len(train_dataloader)

        if (i+1) % len(train_dataloader) == 0:
            g_loss_array.append(running_g_loss/aux)
            d_loss_array.append(running_d_loss/aux)
            print(f"\nEpoch: {epoch+1}/{num_epochs}, batch: {i+1}/{len(train_dataloader)}, G_mean_batch_loss: {running_g_loss/aux}, D_mean_batch_loss: {running_d_loss/aux}, D_accu: {acc_d}")
            running_g_loss = 0.0
            running_d_loss = 0.0
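
A quick way to see where the channel mismatch comes from is to push a dummy noise batch through both models and print the shapes. A minimal sketch along those lines (it assumes noise_size = 35, batch_size = 32, and features = 8 as defined above):

# hypothetical shape check: feed random noise through the generator,
# then hand its output to the discriminator
gen = Generator(features_in=35, features=8)
disc = Discriminator(features_in=35, features=8)

z = torch.randn(32, 35)      # one batch of noise vectors
fake = gen(z)
print(fake.shape)            # torch.Size([32, 16, 2]) -> only 16 channels
out = disc(fake)             # raises the error: the first ConvTranspose1d expects 35 channels

In other words, the generator's last Conv1d outputs 16 channels, while the discriminator's first ConvTranspose1d is built with in_channels=features_in (35), so the two ends don't match.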