RuntimeError: Error(s) in loading state_dict for ResNet (PSPNet)

Hello, I'm trying to run PSPNet in PyTorch. As a first step, I use ResNet as the encoder.
ResNet:
import math
from collections import OrderedDict

import torch.nn as nn
import torch.utils.model_zoo as model_zoo


def load_weights_sequential(target, source_state):
    # Pair parameters by position, then load them under the target's key names.
    new_dict = OrderedDict()
    for (k1, v1), (k2, v2) in zip(target.state_dict().items(), source_state.items()):
        new_dict[k1] = v2
    target.load_state_dict(new_dict)

'''
Implementation of dilated ResNet-101 with deep supervision. Downsampling is changed to 8x.
'''
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}

def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, bias=False)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation,
                               padding=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

class ResNet(nn.Module):
    def __init__(self, block, layers=(3, 4, 23, 3)):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x_3 = self.layer3(x)
        x = self.layer4(x_3)

        return x, x_3

def resnet152(pretrained=True):
    model = ResNet(Bottleneck, [3, 8, 36, 3])
    if pretrained:
        load_weights_sequential(model, model_zoo.load_url(model_urls['resnet152']))
    return model
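
For reference, this is a minimal sketch of how I exercise the encoder on its own (my own quick check, with an arbitrary input size, not part of the repository code):

import torch

# Quick sanity check; pretrained=False so no checkpoint is downloaded, 224x224 is arbitrary.
encoder = resnet152(pretrained=False)
feats, aux = encoder(torch.randn(1, 3, 224, 224))
print(feats.shape)  # layer4 output: [1, 2048, 28, 28] given the 8x downsampling
print(aux.shape)    # layer3 output used for deep supervision: [1, 1024, 28, 28]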

Then I want to use PSPNet as a decoder.
PSPNet:

import torch
from torch import nn
from torch.nn import functional as F

import extractors

class PSPModule(nn.Module):
    def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):
        super().__init__()
        self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
        self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
        self.relu = nn.ReLU()

    def _make_stage(self, features, size):
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
        return nn.Sequential(prior, conv)

    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear') for stage in self.stages] + [feats]
        bottle = self.bottleneck(torch.cat(priors, 1))
        return self.relu(bottle)

class PSPUpsample(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.PReLU()
        )

    def forward(self, x):
        h, w = 2 * x.size(2), 2 * x.size(3)
        p = F.upsample(input=x, size=(h, w), mode='bilinear')
        return self.conv(p)

class PSPNet(nn.Module):
def init(self, n_classes=18, sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=‘resnet34’,
pretrained=True):
super().init()
self.feats = getattr(extractors, backend)(pretrained)
self.psp = PSPModule(psp_size, 1024, sizes)
self.drop_1 = nn.Dropout2d(p=0.3)

    self.up_1 = PSPUpsample(1024, 256)
    self.up_2 = PSPUpsample(256, 64)
    self.up_3 = PSPUpsample(64, 64)

    self.drop_2 = nn.Dropout2d(p=0.15)
    self.final = nn.Sequential(
        nn.Conv2d(64, n_classes, kernel_size=1),
        nn.LogSoftmax()
    )

    self.classifier = nn.Sequential(
        nn.Linear(deep_features_size, 256),
        nn.ReLU(),
        nn.Linear(256, n_classes)
    )

def forward(self, x):
    f, class_f = self.feats(x) 
    p = self.psp(f)
    p = self.drop_1(p)

    p = self.up_1(p)
    p = self.drop_2(p)

    p = self.up_2(p)
    p = self.drop_2(p)

    p = self.up_3(p)
    p = self.drop_2(p)

    auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))

    return self.final(p), self.classifier(auxiliary)
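
This is roughly the call that triggers the error below. The backend name 'resnet152' and psp_size=2048 are my choices to match the Bottleneck encoder above (layer4 outputs 512 * 4 = 2048 channels), and I assume the ResNet code lives in extractors.py so that getattr(extractors, 'resnet152') resolves to the factory:

# Sketch of how I build the full network; the other arguments are the class defaults.
net = PSPNet(n_classes=18, psp_size=2048, deep_features_size=1024,
             backend='resnet152', pretrained=True)
out, aux = net(torch.randn(1, 3, 224, 224))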

But I get the following error. Can you please help?

RuntimeError: Error(s) in loading state_dict for ResNet:
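
In case it helps, here is an untested diagnostic sketch I plan to run inside extractors.py (it mirrors the positional zip used in load_weights_sequential) to see which parameter pairs disagree in name or shape:

# Hypothetical check: compare the model's parameters against the downloaded
# checkpoint in the same positional order that load_weights_sequential uses.
model = ResNet(Bottleneck, [3, 8, 36, 3])
source_state = model_zoo.load_url(model_urls['resnet152'])

for (k1, v1), (k2, v2) in zip(model.state_dict().items(), source_state.items()):
    if v1.shape != v2.shape:
        print(k1, tuple(v1.shape), '<-', k2, tuple(v2.shape))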