How can I use pre-trained weights with a model that takes 5 input channels?

def _adapt_conv_in_channels(param, target_shape):
    """Adapt a pretrained Conv2d weight tensor to a new number of input channels.

    Args:
        param: checkpoint weight of shape (out_ch, in_ckpt, kH, kW).
        target_shape: desired shape (out_ch, in_model, kH, kW); only the
            input-channel dimension (dim 1) may differ from ``param.shape``.

    Returns:
        A tensor of ``target_shape``: the checkpoint channels are kept as-is,
        extra channels are filled with the mean over the pretrained channels
        (so the extra channel starts with a sensible, pretrained-scale init),
        or surplus checkpoint channels are sliced off.

    Raises:
        ValueError: if any dimension other than dim 1 differs.
    """
    if param.dim() != 4 or tuple(param.shape[:1]) != tuple(target_shape[:1]) \
            or tuple(param.shape[2:]) != tuple(target_shape[2:]):
        raise ValueError(
            f"cannot adapt weight of shape {tuple(param.shape)} "
            f"to {tuple(target_shape)}: only input channels may differ"
        )
    in_ckpt, in_model = param.shape[1], target_shape[1]
    if in_ckpt == in_model:
        return param
    if in_ckpt > in_model:
        # Checkpoint has more input channels: keep the first in_model ones.
        return param[:, :in_model].clone()
    # Checkpoint has fewer input channels: pad with the channel-wise mean.
    extra = param.mean(dim=1, keepdim=True).expand(
        -1, in_model - in_ckpt, -1, -1
    )
    return torch.cat([param, extra], dim=1)


if args.init_weights:
    model_dict = model.state_dict()
    snapshot = torch.load(args.init_weights, map_location='cpu')
    snapshot = snapshot['model']
    # Copy only parameters that fit the current model; adapt the stem conv
    # (conv1_scene.weight) across differing input-channel counts instead of
    # letting load_state_dict raise a size-mismatch error.
    compatible = {}
    for name, param in snapshot.items():
        if name not in model_dict:
            continue  # key absent from this architecture — skip silently
        target = model_dict[name]
        if param.shape == target.shape:
            compatible[name] = param
        elif name == 'conv1_scene.weight':
            compatible[name] = _adapt_conv_in_channels(param, target.shape)
        # else: incompatible shape — keep the model's fresh initialization
    model_dict.update(compatible)
    model.load_state_dict(model_dict)

RuntimeError: Error(s) in loading state_dict for ModelSpatioTemporalTCN:
size mismatch for conv1_scene.weight: copying a param of torch.Size([64, 5, 7, 7]) from checkpoint, where the shape is torch.Size([64, 4, 7, 7]) in current model.

    self.conv1_scene = nn.Conv2d(5, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1_scene = nn.BatchNorm2d(64)
    self.layer1_scene = self._make_layer_scene(block, 64, layers_scene[0])
    self.layer2_scene = self._make_layer_scene(block, 128, layers_scene[1], stride=2)
    self.layer3_scene = self._make_layer_scene(block, 256, layers_scene[2], stride=2)
    self.layer4_scene = self._make_layer_scene(block, 512, layers_scene[3], stride=2)
    self.layer5_scene = self._make_layer_scene(block, 256, layers_scene[4], stride=1)

I need the first convolution to accept 5 input channels while still reusing the pretrained checkpoint — how can I resolve this size mismatch?