Use Grad-Cam pytorch for two input into model

Hello, I was having a problem with my project. I wanna draw a heat map from “grad_cam” library.
My model have 2 input:

class MetaMelanoma(nn.Module):
    def __init__(self,out_dim=9,n_meta_features=0,n_meta_dim=[512, 128],network='efficientnet_b0'):
            super(MetaMelanoma,self).__init__()
            self.enet = timm.create_model(network,pretrained=True)
            self.n_meta_features = n_meta_features
            self.dropouts = nn.ModuleList([
            nn.Dropout(0.2) for _ in range(3)])
            in_ch = self.enet.classifier.in_features
            if n_meta_features > 0:
                self.meta = nn.Sequential(
                    nn.Linear(n_meta_features, n_meta_dim[0]),
                    nn.BatchNorm1d(n_meta_dim[0]),
                    Swish_Module(),
                    nn.Dropout(p=0.3),
                    nn.Linear(n_meta_dim[0], n_meta_dim[1]),
                    nn.BatchNorm1d(n_meta_dim[1]),
                    Swish_Module(),
                )
                in_ch += n_meta_dim[1]
            self.myfc = nn.Linear(in_ch, out_dim)
            self.enet.classifier = nn.Identity()
    def extract(self, x):
        x = self.enet(x)
        return x
    def forward(self, x,x_meta):

          x = self.extract(x).squeeze(-1).squeeze(-1)
                  if self.n_meta_features > 0:
                      x_meta = self.meta(x_meta)
                      x = torch.cat((x, x_meta), dim=1)
                  for i, dropout in enumerate(self.dropouts):
                      if i == 0:
                          out = self.myfc(dropout(x))
                      else:
                          out += self.myfc(dropout(x))
                  out /= len(self.dropouts)
                  return out

And when use ‘grad-cam’ library:

input_tensor = preprocess_image(image,
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])

    with cam_algorithm(model=model,
                        target_layers=target_layers,
                        use_cuda=False) as cam:
            cam.batch_size = 30
            grayscale_cam = cam(input_tensor=(input_tensor.float(),meta_features.float()),
                                targets=None,
                                aug_smooth=False,
                                eigen_smooth=False)
            grayscale_cam = grayscale_cam[0, :]
            cam_image = show_cam_on_image(image, grayscale_cam, use_rgb=True)

And this is error:


Traceback (most recent call last):
  File "/home/dhsang/miniconda3/envs/st.cloud/lib/python3.9/site-packages/streamlit/scriptrunner/script_runner.py", line 554, in _run_script
    exec(code, module.__dict__)
  File "streamlit_app.py", line 222, in <module>
    image,image_ori,image_scale = heatmap(selected_box,crop_image,Cam=EigenGradCAM,meta_features=features) #GradCAM, \
  File "/home/dhsang/argparse/streamlit-skin-diseases-classifications-cloud/src/utils.py", line 242, in heatmap
    cam_image, image_scale = back_heatmap_meta(model,image,512,target_layers,Cam,meta_features)
  File "/home/dhsang/argparse/streamlit-skin-diseases-classifications-cloud/src/utils.py", line 286, in back_heatmap_meta
    grayscale_cam = cam(input_tensor=zip(input_tensor.float(),meta_features.float()),
  File "/home/dhsang/miniconda3/envs/st.cloud/lib/python3.9/site-packages/pytorch_grad_cam/base_cam.py", line 184, in __call__
    return self.forward(input_tensor,
  File "/home/dhsang/miniconda3/envs/st.cloud/lib/python3.9/site-packages/pytorch_grad_cam/base_cam.py", line 74, in forward
    outputs = self.activations_and_grads(input_tensor)
  File "/home/dhsang/miniconda3/envs/st.cloud/lib/python3.9/site-packages/pytorch_grad_cam/activations_and_gradients.py", line 42, in __call__
    return self.model(x)
  File "/home/dhsang/miniconda3/envs/st.cloud/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'x_meta'

I think the library did not design for 2 input_tensor