I have a fastai resnet50 trained model saved as a PTH weights file.
I try to open it in pytorch so I can convert it to ONNX format.
I get a state_dict load error telling me that a whole set of keys are missing and also that a whole set of keys are unexpected.
I guess this is because the state_dict keys for the fastai model are named differently. Is there a way of overcoming this? I have already tried converting the GPU model keys to CPU keys.
See attached errors:
Phil
model_state_dict = torch.load(f'/Users/culverhouse/PIA/PlanktonAnalytics/Projects/Classifiers/planktoshare/models/{modelname}.pth', map_location='cpu', weights_only=False, strict=False)['model']
model.load_state_dict(model_state_dict)

Traceback (most recent call last):
File “”, line 1, in
File “/Users/culverhouse/PIA/PlanktonAnalytics/Projects/Classifiers/openvino/lib/python3.11/site-packages/torch/nn/modules/module.py”, line 2593, in load_state_dict
raise RuntimeError(
RuntimeError: Error(s) in loading state_dict for ResNet:
Missing key(s) in state_dict: “conv1.weight”, “bn1.weight”, “bn1.bias”, “bn1.running_mean”, “bn1.running_var”, “layer1.0.conv1.weight”, “layer1.0.bn1.weight”, “layer1.0.bn1.bias”, “layer1.0.bn1.running_mean”, “layer1.0.bn1.running_var”, “layer1.0.conv2.weight”, “layer1.0.bn2.weight”, “layer1.0.bn2.bias”, “layer1.0.bn2.running_mean”, “layer1.0.bn2.running_var”, “layer1.0.conv3.weight”, “layer1.0.bn3.weight”, “layer1.0.bn3.bias”, “layer1.0.bn3.running_mean”, “layer1.0.bn3.running_var”, “layer1.0.downsample.0.weight”, “layer1.0.downsample.1.weight”, “layer1.0.downsample.1.bias”, “layer1.0.downsample.1.running_mean”, “layer1.0.downsample.1.running_var”, “layer1.1.conv1.weight”, “layer1.1.bn1.weight”, “layer1.1.bn1.bias”, “layer1.1.bn1.running_mean”, “layer1.1.bn1.running_var”, “layer1.1.conv2.weight”, “layer1.1.bn2.weight”, “layer1.1.bn2.bias”, “layer1.1.bn2.running_mean”, “layer1.1.bn2.running_var”, “layer1.1.conv3.weight”, “layer1.1.bn3.weight”, “layer1.1.bn3.bias”, “layer1.1.bn3.running_mean”, “layer1.1.bn3.running_var”, “layer1.2.conv1.weight”, “layer1.2.bn1.weight”, “layer1.2.bn1.bias”, “layer1.2.bn1.running_mean”, “layer1.2.bn1.running_var”, “layer1.2.conv2.weight”, “layer1.2.bn2.weight”, “layer1.2.bn2.bias”, “layer1.2.bn2.running_mean”, “layer1.2.bn2.running_var”, “layer1.2.conv3.weight”, “layer1.2.bn3.weight”, “layer1.2.bn3.bias”, “layer1.2.bn3.running_mean”, “layer1.2.bn3.running_var”, “layer2.0.conv1.weight”, “layer2.0.bn1.weight”, “layer2.0.bn1.bias”, “layer2.0.bn1.running_mean”, “layer2.0.bn1.running_var”, “layer2.0.conv2.weight”, “layer2.0.bn2.weight”, “layer2.0.bn2.bias”, “layer2.0.bn2.running_mean”, “layer2.0.bn2.running_var”, “layer2.0.conv3.weight”, “layer2.0.bn3.weight”, “layer2.0.bn3.bias”, “layer2.0.bn3.running_mean”, “layer2.0.bn3.running_var”, “layer2.0.downsample.0.weight”, “layer2.0.downsample.1.weight”, “layer2.0.downsample.1.bias”, “layer2.0.downsample.1.running_mean”, “layer2.0.downsample.1.running_var”, “layer2.1.conv1.weight”, 
“layer2.1.bn1.weight”, “layer2.1.bn1.bias”, “layer2.1.bn1.running_mean”, “layer2.1.bn1.running_var”, “layer2.1.conv2.weight”, “layer2.1.bn2.weight”, “layer2.1.bn2.bias”, “layer2.1.bn2.running_mean”, “layer2.1.bn2.running_var”, “layer2.1.conv3.weight”, “layer2.1.bn3.weight”, “layer2.1.bn3.bias”, “layer2.1.bn3.running_mean”, “layer2.1.bn3.running_var”, “layer2.2.conv1.weight”, “layer2.2.bn1.weight”, “layer2.2.bn1.bias”, “layer2.2.bn1.running_mean”, “layer2.2.bn1.running_var”, “layer2.2.conv2.weight”, “layer2.2.bn2.weight”, “layer2.2.bn2.bias”, “layer2.2.bn2.running_mean”, “layer2.2.bn2.running_var”, “layer2.2.conv3.weight”, “layer2.2.bn3.weight”, “layer2.2.bn3.bias”, “layer2.2.bn3.running_mean”, “layer2.2.bn3.running_var”, “layer2.3.conv1.weight”, “layer2.3.bn1.weight”, “layer2.3.bn1.bias”, “layer2.3.bn1.running_mean”, “layer2.3.bn1.running_var”, “layer2.3.conv2.weight”, “layer2.3.bn2.weight”, “layer2.3.bn2.bias”, “layer2.3.bn2.running_mean”, “layer2.3.bn2.running_var”, “layer2.3.conv3.weight”, “layer2.3.bn3.weight”, “layer2.3.bn3.bias”, “layer2.3.bn3.running_mean”, “layer2.3.bn3.running_var”, “layer3.0.conv1.weight”, “layer3.0.bn1.weight”, “layer3.0.bn1.bias”, “layer3.0.bn1.running_mean”, “layer3.0.bn1.running_var”, “layer3.0.conv2.weight”, “layer3.0.bn2.weight”, “layer3.0.bn2.bias”, “layer3.0.bn2.running_mean”, “layer3.0.bn2.running_var”, “layer3.0.conv3.weight”, “layer3.0.bn3.weight”, “layer3.0.bn3.bias”, “layer3.0.bn3.running_mean”, “layer3.0.bn3.running_var”, “layer3.0.downsample.0.weight”, “layer3.0.downsample.1.weight”, “layer3.0.downsample.1.bias”, “layer3.0.downsample.1.running_mean”, “layer3.0.downsample.1.running_var”, “layer3.1.conv1.weight”, “layer3.1.bn1.weight”, “layer3.1.bn1.bias”, “layer3.1.bn1.running_mean”, “layer3.1.bn1.running_var”, “layer3.1.conv2.weight”, “layer3.1.bn2.weight”, “layer3.1.bn2.bias”, “layer3.1.bn2.running_mean”, “layer3.1.bn2.running_var”, “layer3.1.conv3.weight”, “layer3.1.bn3.weight”, “layer3.1.bn3.bias”, 
“layer3.1.bn3.running_mean”, “layer3.1.bn3.running_var”, “layer3.2.conv1.weight”, “layer3.2.bn1.weight”, “layer3.2.bn1.bias”, “layer3.2.bn1.running_mean”, “layer3.2.bn1.running_var”, “layer3.2.conv2.weight”, “layer3.2.bn2.weight”, “layer3.2.bn2.bias”, “layer3.2.bn2.running_mean”, “layer3.2.bn2.running_var”, “layer3.2.conv3.weight”, “layer3.2.bn3.weight”, “layer3.2.bn3.bias”, “layer3.2.bn3.running_mean”, “layer3.2.bn3.running_var”, “layer3.3.conv1.weight”, “layer3.3.bn1.weight”, “layer3.3.bn1.bias”, “layer3.3.bn1.running_mean”, “layer3.3.bn1.running_var”, “layer3.3.conv2.weight”, “layer3.3.bn2.weight”, “layer3.3.bn2.bias”, “layer3.3.bn2.running_mean”, “layer3.3.bn2.running_var”, “layer3.3.conv3.weight”, “layer3.3.bn3.weight”, “layer3.3.bn3.bias”, “layer3.3.bn3.running_mean”, “layer3.3.bn3.running_var”, “layer3.4.conv1.weight”, “layer3.4.bn1.weight”, “layer3.4.bn1.bias”, “layer3.4.bn1.running_mean”, “layer3.4.bn1.running_var”, “layer3.4.conv2.weight”, “layer3.4.bn2.weight”, “layer3.4.bn2.bias”, “layer3.4.bn2.running_mean”, “layer3.4.bn2.running_var”, “layer3.4.conv3.weight”, “layer3.4.bn3.weight”, “layer3.4.bn3.bias”, “layer3.4.bn3.running_mean”, “layer3.4.bn3.running_var”, “layer3.5.conv1.weight”, “layer3.5.bn1.weight”, “layer3.5.bn1.bias”, “layer3.5.bn1.running_mean”, “layer3.5.bn1.running_var”, “layer3.5.conv2.weight”, “layer3.5.bn2.weight”, “layer3.5.bn2.bias”, “layer3.5.bn2.running_mean”, “layer3.5.bn2.running_var”, “layer3.5.conv3.weight”, “layer3.5.bn3.weight”, “layer3.5.bn3.bias”, “layer3.5.bn3.running_mean”, “layer3.5.bn3.running_var”, “layer4.0.conv1.weight”, “layer4.0.bn1.weight”, “layer4.0.bn1.bias”, “layer4.0.bn1.running_mean”, “layer4.0.bn1.running_var”, “layer4.0.conv2.weight”, “layer4.0.bn2.weight”, “layer4.0.bn2.bias”, “layer4.0.bn2.running_mean”, “layer4.0.bn2.running_var”, “layer4.0.conv3.weight”, “layer4.0.bn3.weight”, “layer4.0.bn3.bias”, “layer4.0.bn3.running_mean”, “layer4.0.bn3.running_var”, “layer4.0.downsample.0.weight”, 
“layer4.0.downsample.1.weight”, “layer4.0.downsample.1.bias”, “layer4.0.downsample.1.running_mean”, “layer4.0.downsample.1.running_var”, “layer4.1.conv1.weight”, “layer4.1.bn1.weight”, “layer4.1.bn1.bias”, “layer4.1.bn1.running_mean”, “layer4.1.bn1.running_var”, “layer4.1.conv2.weight”, “layer4.1.bn2.weight”, “layer4.1.bn2.bias”, “layer4.1.bn2.running_mean”, “layer4.1.bn2.running_var”, “layer4.1.conv3.weight”, “layer4.1.bn3.weight”, “layer4.1.bn3.bias”, “layer4.1.bn3.running_mean”, “layer4.1.bn3.running_var”, “layer4.2.conv1.weight”, “layer4.2.bn1.weight”, “layer4.2.bn1.bias”, “layer4.2.bn1.running_mean”, “layer4.2.bn1.running_var”, “layer4.2.conv2.weight”, “layer4.2.bn2.weight”, “layer4.2.bn2.bias”, “layer4.2.bn2.running_mean”, “layer4.2.bn2.running_var”, “layer4.2.conv3.weight”, “layer4.2.bn3.weight”, “layer4.2.bn3.bias”, “layer4.2.bn3.running_mean”, “layer4.2.bn3.running_var”, “fc.weight”, “fc.bias”.
Unexpected key(s) in state_dict: “0.0.weight”, “0.1.weight”, “0.1.bias”, “0.1.running_mean”, “0.1.running_var”, “0.1.num_batches_tracked”, “0.4.0.conv1.weight”, “0.4.0.bn1.weight”, “0.4.0.bn1.bias”, “0.4.0.bn1.running_mean”, “0.4.0.bn1.running_var”, “0.4.0.bn1.num_batches_tracked”, “0.4.0.conv2.weight”, “0.4.0.bn2.weight”, “0.4.0.bn2.bias”, “0.4.0.bn2.running_mean”, “0.4.0.bn2.running_var”, “0.4.0.bn2.num_batches_tracked”, “0.4.0.conv3.weight”, “0.4.0.bn3.weight”, “0.4.0.bn3.bias”, “0.4.0.bn3.running_mean”, “0.4.0.bn3.running_var”, “0.4.0.bn3.num_batches_tracked”, “0.4.0.downsample.0.weight”, “0.4.0.downsample.1.weight”, “0.4.0.downsample.1.bias”, “0.4.0.downsample.1.running_mean”, “0.4.0.downsample.1.running_var”, “0.4.0.downsample.1.num_batches_tracked”, “0.4.1.conv1.weight”, “0.4.1.bn1.weight”, “0.4.1.bn1.bias”, “0.4.1.bn1.running_mean”, “0.4.1.bn1.running_var”, “0.4.1.bn1.num_batches_tracked”, “0.4.1.conv2.weight”, “0.4.1.bn2.weight”, “0.4.1.bn2.bias”, “0.4.1.bn2.running_mean”, “0.4.1.bn2.running_var”, “0.4.1.bn2.num_batches_tracked”, “0.4.1.conv3.weight”, “0.4.1.bn3.weight”, “0.4.1.bn3.bias”, “0.4.1.bn3.running_mean”, “0.4.1.bn3.running_var”, “0.4.1.bn3.num_batches_tracked”, “0.4.2.conv1.weight”, “0.4.2.bn1.weight”, “0.4.2.bn1.bias”, “0.4.2.bn1.running_mean”, “0.4.2.bn1.running_var”, “0.4.2.bn1.num_batches_tracked”, “0.4.2.conv2.weight”, “0.4.2.bn2.weight”, “0.4.2.bn2.bias”, “0.4.2.bn2.running_mean”, “0.4.2.bn2.running_var”, “0.4.2.bn2.num_batches_tracked”, “0.4.2.conv3.weight”, “0.4.2.bn3.weight”, “0.4.2.bn3.bias”, “0.4.2.bn3.running_mean”, “0.4.2.bn3.running_var”, “0.4.2.bn3.num_batches_tracked”, “0.5.0.conv1.weight”, “0.5.0.bn1.weight”, “0.5.0.bn1.bias”, “0.5.0.bn1.running_mean”, “0.5.0.bn1.running_var”, “0.5.0.bn1.num_batches_tracked”, “0.5.0.conv2.weight”, “0.5.0.bn2.weight”, “0.5.0.bn2.bias”, “0.5.0.bn2.running_mean”, “0.5.0.bn2.running_var”, “0.5.0.bn2.num_batches_tracked”, “0.5.0.conv3.weight”, “0.5.0.bn3.weight”, “0.5.0.bn3.bias”, 
“0.5.0.bn3.running_mean”, “0.5.0.bn3.running_var”, “0.5.0.bn3.num_batches_tracked”, “0.5.0.downsample.0.weight”, “0.5.0.downsample.1.weight”, “0.5.0.downsample.1.bias”, “0.5.0.downsample.1.running_mean”, “0.5.0.downsample.1.running_var”, “0.5.0.downsample.1.num_batches_tracked”, “0.5.1.conv1.weight”, “0.5.1.bn1.weight”, “0.5.1.bn1.bias”, “0.5.1.bn1.running_mean”, “0.5.1.bn1.running_var”, “0.5.1.bn1.num_batches_tracked”, “0.5.1.conv2.weight”, “0.5.1.bn2.weight”, “0.5.1.bn2.bias”, “0.5.1.bn2.running_mean”, “0.5.1.bn2.running_var”, “0.5.1.bn2.num_batches_tracked”, “0.5.1.conv3.weight”, “0.5.1.bn3.weight”, “0.5.1.bn3.bias”, “0.5.1.bn3.running_mean”, “0.5.1.bn3.running_var”, “0.5.1.bn3.num_batches_tracked”, “0.5.2.conv1.weight”, “0.5.2.bn1.weight”, “0.5.2.bn1.bias”, “0.5.2.bn1.running_mean”, “0.5.2.bn1.running_var”, “0.5.2.bn1.num_batches_tracked”, “0.5.2.conv2.weight”, “0.5.2.bn2.weight”, “0.5.2.bn2.bias”, “0.5.2.bn2.running_mean”, “0.5.2.bn2.running_var”, “0.5.2.bn2.num_batches_tracked”, “0.5.2.conv3.weight”, “0.5.2.bn3.weight”, “0.5.2.bn3.bias”, “0.5.2.bn3.running_mean”, “0.5.2.bn3.running_var”, “0.5.2.bn3.num_batches_tracked”, “0.5.3.conv1.weight”, “0.5.3.bn1.weight”, “0.5.3.bn1.bias”, “0.5.3.bn1.running_mean”, “0.5.3.bn1.running_var”, “0.5.3.bn1.num_batches_tracked”, “0.5.3.conv2.weight”, “0.5.3.bn2.weight”, “0.5.3.bn2.bias”, “0.5.3.bn2.running_mean”, “0.5.3.bn2.running_var”, “0.5.3.bn2.num_batches_tracked”, “0.5.3.conv3.weight”, “0.5.3.bn3.weight”, “0.5.3.bn3.bias”, “0.5.3.bn3.running_mean”, “0.5.3.bn3.running_var”, “0.5.3.bn3.num_batches_tracked”, “0.6.0.conv1.weight”, “0.6.0.bn1.weight”, “0.6.0.bn1.bias”, “0.6.0.bn1.running_mean”, “0.6.0.bn1.running_var”, “0.6.0.bn1.num_batches_tracked”, “0.6.0.conv2.weight”, “0.6.0.bn2.weight”, “0.6.0.bn2.bias”, “0.6.0.bn2.running_mean”, “0.6.0.bn2.running_var”, “0.6.0.bn2.num_batches_tracked”, “0.6.0.conv3.weight”, “0.6.0.bn3.weight”, “0.6.0.bn3.bias”, “0.6.0.bn3.running_mean”, “0.6.0.bn3.running_var”, 
“0.6.0.bn3.num_batches_tracked”, “0.6.0.downsample.0.weight”, “0.6.0.downsample.1.weight”, “0.6.0.downsample.1.bias”, “0.6.0.downsample.1.running_mean”, “0.6.0.downsample.1.running_var”, “0.6.0.downsample.1.num_batches_tracked”, “0.6.1.conv1.weight”, “0.6.1.bn1.weight”, “0.6.1.bn1.bias”, “0.6.1.bn1.running_mean”, “0.6.1.bn1.running_var”, “0.6.1.bn1.num_batches_tracked”, “0.6.1.conv2.weight”, “0.6.1.bn2.weight”, “0.6.1.bn2.bias”, “0.6.1.bn2.running_mean”, “0.6.1.bn2.running_var”, “0.6.1.bn2.num_batches_tracked”, “0.6.1.conv3.weight”, “0.6.1.bn3.weight”, “0.6.1.bn3.bias”, “0.6.1.bn3.running_mean”, “0.6.1.bn3.running_var”, “0.6.1.bn3.num_batches_tracked”, “0.6.2.conv1.weight”, “0.6.2.bn1.weight”, “0.6.2.bn1.bias”, “0.6.2.bn1.running_mean”, “0.6.2.bn1.running_var”, “0.6.2.bn1.num_batches_tracked”, “0.6.2.conv2.weight”, “0.6.2.bn2.weight”, “0.6.2.bn2.bias”, “0.6.2.bn2.running_mean”, “0.6.2.bn2.running_var”, “0.6.2.bn2.num_batches_tracked”, “0.6.2.conv3.weight”, “0.6.2.bn3.weight”, “0.6.2.bn3.bias”, “0.6.2.bn3.running_mean”, “0.6.2.bn3.running_var”, “0.6.2.bn3.num_batches_tracked”, “0.6.3.conv1.weight”, “0.6.3.bn1.weight”, “0.6.3.bn1.bias”, “0.6.3.bn1.running_mean”, “0.6.3.bn1.running_var”, “0.6.3.bn1.num_batches_tracked”, “0.6.3.conv2.weight”, “0.6.3.bn2.weight”, “0.6.3.bn2.bias”, “0.6.3.bn2.running_mean”, “0.6.3.bn2.running_var”, “0.6.3.bn2.num_batches_tracked”, “0.6.3.conv3.weight”, “0.6.3.bn3.weight”, “0.6.3.bn3.bias”, “0.6.3.bn3.running_mean”, “0.6.3.bn3.running_var”, “0.6.3.bn3.num_batches_tracked”, “0.6.4.conv1.weight”, “0.6.4.bn1.weight”, “0.6.4.bn1.bias”, “0.6.4.bn1.running_mean”, “0.6.4.bn1.running_var”, “0.6.4.bn1.num_batches_tracked”, “0.6.4.conv2.weight”, “0.6.4.bn2.weight”, “0.6.4.bn2.bias”, “0.6.4.bn2.running_mean”, “0.6.4.bn2.running_var”, “0.6.4.bn2.num_batches_tracked”, “0.6.4.conv3.weight”, “0.6.4.bn3.weight”, “0.6.4.bn3.bias”, “0.6.4.bn3.running_mean”, “0.6.4.bn3.running_var”, “0.6.4.bn3.num_batches_tracked”, “0.6.5.conv1.weight”, “0.6.5.bn1.weight”, 
“0.6.5.bn1.bias”, “0.6.5.bn1.running_mean”, “0.6.5.bn1.running_var”, “0.6.5.bn1.num_batches_tracked”, “0.6.5.conv2.weight”, “0.6.5.bn2.weight”, “0.6.5.bn2.bias”, “0.6.5.bn2.running_mean”, “0.6.5.bn2.running_var”, “0.6.5.bn2.num_batches_tracked”, “0.6.5.conv3.weight”, “0.6.5.bn3.weight”, “0.6.5.bn3.bias”, “0.6.5.bn3.running_mean”, “0.6.5.bn3.running_var”, “0.6.5.bn3.num_batches_tracked”, “0.7.0.conv1.weight”, “0.7.0.bn1.weight”, “0.7.0.bn1.bias”, “0.7.0.bn1.running_mean”, “0.7.0.bn1.running_var”, “0.7.0.bn1.num_batches_tracked”, “0.7.0.conv2.weight”, “0.7.0.bn2.weight”, “0.7.0.bn2.bias”, “0.7.0.bn2.running_mean”, “0.7.0.bn2.running_var”, “0.7.0.bn2.num_batches_tracked”, “0.7.0.conv3.weight”, “0.7.0.bn3.weight”, “0.7.0.bn3.bias”, “0.7.0.bn3.running_mean”, “0.7.0.bn3.running_var”, “0.7.0.bn3.num_batches_tracked”, “0.7.0.downsample.0.weight”, “0.7.0.downsample.1.weight”, “0.7.0.downsample.1.bias”, “0.7.0.downsample.1.running_mean”, “0.7.0.downsample.1.running_var”, “0.7.0.downsample.1.num_batches_tracked”, “0.7.1.conv1.weight”, “0.7.1.bn1.weight”, “0.7.1.bn1.bias”, “0.7.1.bn1.running_mean”, “0.7.1.bn1.running_var”, “0.7.1.bn1.num_batches_tracked”, “0.7.1.conv2.weight”, “0.7.1.bn2.weight”, “0.7.1.bn2.bias”, “0.7.1.bn2.running_mean”, “0.7.1.bn2.running_var”, “0.7.1.bn2.num_batches_tracked”, “0.7.1.conv3.weight”, “0.7.1.bn3.weight”, “0.7.1.bn3.bias”, “0.7.1.bn3.running_mean”, “0.7.1.bn3.running_var”, “0.7.1.bn3.num_batches_tracked”, “0.7.2.conv1.weight”, “0.7.2.bn1.weight”, “0.7.2.bn1.bias”, “0.7.2.bn1.running_mean”, “0.7.2.bn1.running_var”, “0.7.2.bn1.num_batches_tracked”, “0.7.2.conv2.weight”, “0.7.2.bn2.weight”, “0.7.2.bn2.bias”, “0.7.2.bn2.running_mean”, “0.7.2.bn2.running_var”, “0.7.2.bn2.num_batches_tracked”, “0.7.2.conv3.weight”, “0.7.2.bn3.weight”, “0.7.2.bn3.bias”, “0.7.2.bn3.running_mean”, “0.7.2.bn3.running_var”, “0.7.2.bn3.num_batches_tracked”, “1.2.weight”, “1.2.bias”, “1.2.running_mean”, “1.2.running_var”, “1.2.num_batches_tracked”, “1.4.weight”, 
“1.6.weight”, “1.6.bias”, “1.6.running_mean”, “1.6.running_var”, “1.6.num_batches_tracked”, “1.8.weight”.

model.eval()
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): Bottleneck(
(conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer2): Sequential(
(0): Bottleneck(
(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(3): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer3): Sequential(
(0): Bottleneck(
(conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(3): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(4): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(5): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer4): Sequential(
(0): Bottleneck(
(conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=2048, out_features=49, bias=True)
)