ValueError: Expected input batch_size (2) to match target batch_size (64)

I have this problem; can you help me, please? I don't know why the batch size changes from 64 to 2.
I have checked the input and the target, which gave the following:
input_var = input
target_var = target
print("input var")
print(input.shape)
print("target")
print(target.shape)

Result:
input var
torch.Size([64, 3, 224, 224])
target
torch.Size([64])

After this line:
output = model(input_var)
the output is torch.Size([2, 1]), but it should be [64, 1].
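For reference, here is the view step from forward (shown below) in isolation; with new_length = 8 (an assumed value that matches the shapes I print further down) it already folds the batch of 64 down to 8:

import torch

x = torch.randn(64, 3, 224, 224)
new_length = 8                                   # assumed; matches the input181 printout below
x = x.view((-1, new_length, 3) + x.size()[-2:])
print(x.shape)                                   # torch.Size([8, 8, 3, 224, 224]): batch of 64 is now 8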
This is the forward function that takes the input and returns the output:
def forward(self, input):
    print("inputforward167")
    print(input.shape)

    if self.modality == 'RGBDiff':
        sample_len = 3 * self.new_length
        input = self._get_diff(input)

    if not self.slow_testing:
        # fold the frames into the batch dimension, then move channels in front of time
        input = input.view((-1, self.new_length, 3) + input.size()[-2:])
        input = input.permute(0, 2, 1, 3, 4).contiguous()
        print("input185")
        print(input.shape)
    else:
        input = input.view((-1, 3, self.new_length) + input.size()[-2:])
        print("inputbase")
        print(input.shape)

    base_out = self.base_model(input)

    if self.gtsn:
        # integer division, otherwise view() fails with a float size
        base_out = base_out.view(base_out.shape[0] // self.num_segments, -1)

    if self.dropout > 0:
        base_out = self.new_fc(base_out)

    if not self.gtsn:
        if self.reshape:
            # split the batch into (batch, num_segments, ...) for the consensus
            base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:])
        base_out = self.consensus(base_out).squeeze(1)
        if self.slow_testing:
            base_out = self.softmax(base_out)
        print("base_out")
        print(base_out)
        print(base_out.shape)
        return base_out
    else:
        return base_out
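To trace where the 2 could come from: after the first view with new_length = 8 the batch is 8 (see the printouts below), and the reshape-plus-consensus step divides it by num_segments again. A standalone sketch of just that shape logic, using the assumed value num_segments = 4, which would reproduce the observed [2, 1]:

import torch

base_out = torch.randn(8, 1)        # stand-in for the base_model + new_fc output (batch 8)
num_segments = 4                    # assumed; this value would reproduce [2, 1]
base_out = base_out.view((-1, num_segments) + base_out.size()[1:])
print(base_out.shape)               # torch.Size([2, 4, 1])
print(base_out.mean(dim=1).shape)   # stand-in for the consensus: torch.Size([2, 1])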

And this is how the base model is created:
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base_model = I3ResNet(resnet50, self.new_length, num_class, test_mode=test_mode,
                           num_segments=self.num_segments,
                           fast_implementation=fast_implementation)
self.new_fc = nn.Linear(2048, num_class)
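For reference, in_features = 2048 matches the pooled feature size of a standard ResNet-50, which can be checked directly:

import torchvision

resnet50 = torchvision.models.resnet50(pretrained=True)
print(resnet50.fc.in_features)  # 2048: the feature size a replacement fc must accept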

I changed it to
self.new_fc = nn.Linear(3*224*224, num_class)
but I got the following error:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (8x2048 and 150528x1)
because after these lines the input looks like this:
input = input.view((-1, self.new_length, 3) + input.size()[-2:])
print("input181")
print(input.shape)
input = input.permute(0, 2, 1, 3, 4).contiguous()
print("input185")
print(input.shape)

input181
torch.Size([8, 8, 3, 224, 224])
input185
torch.Size([8, 3, 8, 224, 224])
base193
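As far as I understand, the mat1/mat2 error is easy to reproduce in isolation: the base model outputs 2048 features per sample, so a linear layer with in_features = 3*224*224 = 150528 cannot consume them. A minimal sketch of the mismatch:

import torch
import torch.nn as nn

feat = torch.randn(8, 2048)               # shape of the base_model output here
fc_bad = nn.Linear(3 * 224 * 224, 1)      # in_features = 150528 -> mat1/mat2 mismatch
fc_ok = nn.Linear(2048, 1)                # in_features must equal feat.size(1)
print(fc_ok(feat).shape)                  # torch.Size([8, 1])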

Replace the view operations with input = input.view(input.size(0), your_shapes or -1) to keep the batch size equal.
If this approach raises shape mismatches in e.g. linear layers, adapt the in_features to the expected values.
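For example, with your input shape:

import torch

x = torch.randn(64, 3, 224, 224)
print(x.view(x.size(0), -1).shape)     # torch.Size([64, 150528]): flattened, batch kept
print(x.view(x.size(0), 3, -1).shape)  # torch.Size([64, 3, 50176]): keeps more dims explicitly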

When I tried
input = input.view(input.size(0), -1)
which outputs
torch.Size([64, 150528])
before
input = input.permute(1, 2, 0, 3, 4).contiguous()
I got the following error:
input = input.permute(1, 2, 0, 3, 4).contiguous()
RuntimeError: number of dims don't match in permute
since input is (64, 3, 224, 224) with only 4 dims, and I need to expand it to match the (64, 3, 1, 7, 7) kernel.

If you need to keep more dimensions, you shouldn’t use the -1 in the second argument to view, but provide the desired shapes instead.
The important part is to keep the batch dimension equal via input.view(input.size(0), ...), since this is currently raising the shape mismatch error.
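If you need a 5-dimensional input for a (64, 3, 1, 7, 7) conv kernel, you could also add the missing temporal dimension with unsqueeze instead of permute. A sketch, where the Conv3d is only a stand-in for your actual inflated ResNet stem:

import torch
import torch.nn as nn

x = torch.randn(64, 3, 224, 224)      # 4-D image batch
x5d = x.unsqueeze(2)                  # -> (64, 3, 1, 224, 224): adds a time dim of size 1
conv = nn.Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3))
print(conv(x5d).shape)                # torch.Size([64, 64, 1, 112, 112]): batch stays 64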
