How to feed the output of one model as input to another model in PyTorch?

I have split my model into model A and model B. But during inference, when I feed model A's output into model B, I get the error below:

output = input.matmul(weight.t())
RuntimeError: mat1 and mat2 shapes cannot be multiplied (2400x75 and 180000x6)
My working code:
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image

class ConvNet(nn.Module):

    def __init__(self, num_classes=6):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(num_features=12)
        self.relu1 = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=20, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU()
        self.conv3 = nn.Conv2d(in_channels=20, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(num_features=32)
        self.relu3 = nn.ReLU()
        self.fc = nn.Linear(in_features=75 * 75 * 32, out_features=num_classes)

    def forward(self, input):
        output = self.conv1(input)
        output = self.bn1(output)
        output = self.relu1(output)
        output = self.pool(output)
        output = self.conv2(output)
        output = self.relu2(output)
        output = self.conv3(output)
        output = self.bn3(output)
        output = self.relu3(output)
        output = output.view(-1, 75 * 75 * 32)  # flatten before the linear layer
        output = self.fc(output)
        return output

model_path = 'model2.pt'
model = ConvNet()
model.load_state_dict(torch.load(model_path))
model.eval()
print(model)

new_model1 = nn.Sequential(*list(model.children())[:3])
print(new_model1)
new_model2 = nn.Sequential(*list(model.children())[3:])
print(new_model2)
# Inference with the split models
image_path = 'cat.4672.jpg'

def predict_image(image_path):
    print("Prediction in progress")
    image = Image.open(image_path)
    transformer = transforms.Compose([
        transforms.Resize((150, 150)),
        transforms.RandomHorizontalFlip(),  # note: random flips are usually dropped at inference time
        transforms.ToTensor(),              # 0-255 to 0-1, numpy to tensors
        transforms.Normalize([0.5, 0.5, 0.5],  # 0-1 to [-1, 1], formula (x - mean) / std
                             [0.5, 0.5, 0.5]),
    ])
    # Preprocess the image
    image_tensor = transformer(image).float()
    # Add an extra batch dimension since PyTorch treats all images as batches
    image_tensor = image_tensor.unsqueeze_(0)
    if torch.cuda.is_available():
        image_tensor = image_tensor.cuda()  # .cuda() is not in-place; the models would need moving to the GPU as well
    new_model1.eval()
    output1 = new_model1(image_tensor)      # Variable is deprecated; plain tensors work
    new_model2.eval()
    output2 = new_model2(output1)           # <-- raises the RuntimeError: fc receives an unflattened 4-D tensor
    index = output2.data.numpy().argmax()
    return output1.size()

print(predict_image(image_path))

It seems like the shapes don't match. What is the output shape from model A, and what input shape does model B expect? One thing you can do to debug is add print statements in the forward pass and make sure each shape is what you expect. In your error, 2400x75 is the (1, 32, 75, 75) conv output viewed as a matrix of 75-feature rows, while fc expects 180000 = 32 * 75 * 75 input features, so the tensor is reaching nn.Linear without being flattened.
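
For example (a minimal check, assuming the 150x150 RGB input used in predict_image; the expected shapes follow from the layer definitions, not from running your checkpoint):

output1 = new_model1(image_tensor)
print(output1.shape)  # torch.Size([1, 12, 150, 150]) after conv1 -> bn1 -> relu1
# new_model2 then runs pool ... relu3, giving (1, 32, 75, 75), and passes that
# 4-D tensor straight into fc, which expects 180000 flattened features.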

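The underlying problem is that nn.Sequential(*model.children()) keeps only the registered submodules: anything done functionally inside forward(), such as flattening the conv output before self.fc, is silently dropped. A minimal sketch of one way to rebuild the second half with an explicit nn.Flatten() (the slice indices assume the child order shown by print(model) above):

children = list(model.children())
new_model1 = nn.Sequential(*children[:3])  # conv1 -> bn1 -> relu1
new_model2 = nn.Sequential(
    *children[3:-1],                       # pool -> conv2 -> relu2 -> conv3 -> bn3 -> relu3
    nn.Flatten(),                          # (1, 32, 75, 75) -> (1, 180000)
    children[-1],                          # fc: in_features = 75 * 75 * 32
)

With the flatten in place, new_model2(new_model1(image_tensor)) produces the (1, 6) class scores.
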
How can I get the output of a specific layer and feed it as input to another model in PyTorch?
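
One option that avoids slicing the model by hand is a forward hook. A sketch, assuming the full model's forward() runs end to end, where other_model is a hypothetical second model that accepts the (1, 12, 150, 150) activation:

features = {}

def save_output(name):
    def hook(module, inp, out):
        features[name] = out.detach()
    return hook

# Capture the output of relu1 (any registered submodule works the same way).
model.relu1.register_forward_hook(save_output('relu1'))
_ = model(image_tensor)                  # one full forward pass fills the dict
result = other_model(features['relu1'])  # other_model is hypothetical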