How to use the model without the GAT module?

I have the following model (ResNet-18 + GAT):

import numpy as np
import torch
import torch.nn as nn
from sklearn.feature_extraction.image import grid_to_graph

class AntispoofModel(nn.Module):
    def __init__(self, device="cpu", **kwargs):
        super().__init__()
        # ResNet-18 backbone without its avgpool/fc layers -> [B, 512, 7, 7] feature maps
        resnet = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
        self.resnet = nn.Sequential(*list(resnet.children())[:-2]).to(device)
        # Freeze the backbone
        for param in self.resnet.parameters():
            param.requires_grad = False
        self.gat = GAT(**kwargs).to(device)  # GAT is defined elsewhere
        self.device = device
        # Adjacency of the 7x7 spatial grid (one node per feature-map cell)
        self.adj = torch.tensor(grid_to_graph(7, 7, return_as=np.ndarray)).to(device)

    def forward(self, x):
        x = self.resnet(x.to(self.device))  # [B, 512, 7, 7]
        x = x.view(-1, 49, 512)             # [B, 49, 512]
        #adj = torch.stack([self.adj for i in range(x.shape[0])]).to(self.device)
        x = self.gat(x, self.adj)
        return torch.sigmoid(x)
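
For context, the truncated backbone produces a 512×7×7 feature map (assuming 224×224 RGB inputs), which is where the view(-1, 49, 512) and the 7×7 grid_to_graph adjacency come from; a quick check:

import torch
from torchvision.models import resnet18

# Quick shape check of the truncated backbone (assumes 224x224 RGB inputs)
backbone = torch.nn.Sequential(*list(resnet18(pretrained=True).children())[:-2])
with torch.no_grad():
    feats = backbone(torch.randn(2, 3, 224, 224))
print(feats.shape)  # torch.Size([2, 512, 7, 7]) -> 7*7 = 49 nodes of dim 512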

I’m trying to do the same thing, but without the GAT, so I rewrote the code like this:

import torch.nn as nn
from torchvision.models.resnet import ResNet, BasicBlock

class AntispoofModel(nn.Module):

    def __init__(self, device="cpu", **kwargs):
        super().__init__()
        # Same ResNet-18 layout ([2, 2, 2, 2] BasicBlocks), built layer by layer, without the GAT
        model_resnet = ResNet(BasicBlock, [2, 2, 2, 2])
        self.conv1 = model_resnet.conv1
        self.bn1 = model_resnet.bn1
        self.relu = model_resnet.relu
        self.maxpool = model_resnet.maxpool
        self.layer1 = model_resnet.layer1
        self.layer2 = model_resnet.layer2
        self.layer3 = model_resnet.layer3
        self.layer4 = model_resnet.layer4
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)

    def forward(self, x):
        feature = self.conv1(x)
        feature = self.bn1(feature)
        feature = self.relu(feature)
        feature = self.maxpool(feature)
        feature = self.layer1(feature)
        feature = self.layer2(feature)
        feature = self.layer3(feature)
        feature = self.layer4(feature)
        out = self.avgpool(feature)       # [B, 512, 1, 1]
        out = out.view(out.size(0), -1)   # [B, 512]
        return out

But I get the following error: Using a target size (torch.Size([64, 1])) that is different to the input size (torch.Size([64, 512])) is deprecated. Please ensure they have the same size.

I understand what it means. But I don’t understand how to fix it…
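
If I understand it correctly, the model now returns the raw 512-dimensional pooled feature, while the loss receives one label per image ([64, 1]). Is the missing piece a classification head like the sketch below? (Hypothetical and untested: the class name and the extra nn.Linear(512, 1) are my own guess, not taken from the original code.)

import torch
import torch.nn as nn
from torchvision.models.resnet import ResNet, BasicBlock

class AntispoofModelNoGat(nn.Module):  # hypothetical name
    def __init__(self, device="cpu"):
        super().__init__()
        resnet = ResNet(BasicBlock, [2, 2, 2, 2])
        # Everything up to and including layer4; avgpool/fc are replaced below
        self.backbone = nn.Sequential(*list(resnet.children())[:-2]).to(device)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512, 1).to(device)  # 512-d feature -> 1 logit per image
        self.device = device

    def forward(self, x):
        feature = self.backbone(x.to(self.device))  # [B, 512, 7, 7]
        out = self.avgpool(feature)                 # [B, 512, 1, 1]
        out = out.view(out.size(0), -1)             # [B, 512]
        out = self.fc(out)                          # [B, 1], matches the [64, 1] targets
        return torch.sigmoid(out)                   # probabilities, like the GAT version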