How to avoid TracerWarning when converting PyTorch to ONNX?

Hello there. Recently I had to convert a PyTorch-trained network to ONNX for inference, so I used the script below to convert it:

import torch  # YOLOv3_LargeScale, input_size, path and output_path are defined elsewhere in my script

model = YOLOv3_LargeScale(input_size=input_size, num_classes=16, rfb=True).cuda().eval()
model.load_state_dict(torch.load(path))

dummy_input = torch.zeros((3, input_size, input_size)).cuda().unsqueeze(0)
onnx_f = output_path
torch.onnx.export(model, dummy_input, onnx_f, verbose=True)
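
The exported model is then meant to be run for inference, roughly along these lines (just a sketch using onnxruntime, not my exact code; the input/output handling is an assumption):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession(onnx_f, providers=["CPUExecutionProvider"])  # onnx_f is the file written by torch.onnx.export above
dummy = np.zeros((1, 3, input_size, input_size), dtype=np.float32)       # same NCHW shape as the dummy input
outputs = sess.run(None, {sess.get_inputs()[0].name: dummy})             # None = return all outputs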

However, during conversion the script emits a warning:

pytorch2onnx.py:83: TracerWarning: Converting a tensor to a Python index might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  crop_tensor = x[..., h_index_list[i]:h_index_list[i + 1], w_index_list[j]:w_index_list[j + 1]]

I guess this is because I use some Python data structures (i.e. lists) to hold a few tensors; the related code is pasted below.
(If I have guessed wrong, please tell me.)

class DarkNet53_LargeScale(nn.Module):
    def __init__(self, input_size, crop_idx=4):
        super().__init__()
        self.darknet = DarkNet53(local_input_size=input_size//crop_idx)
        self.crop_idx = crop_idx

    def get_local_input(self, x):
        # get cropped tensors
        # crop order: left to right, top to bottom
        device = x.device
        dtype = x.dtype
        size = x.shape[2:]
        crop_tensor_list = []
        h_index_list = [size[0] // self.crop_idx * i for i in range(self.crop_idx + 1)]
        w_index_list = [size[1] // self.crop_idx * i for i in range(self.crop_idx + 1)]
        for i in range(self.crop_idx):
            for j in range(self.crop_idx):
                crop_tensor = x[..., h_index_list[i]:h_index_list[i + 1], w_index_list[j]:w_index_list[j + 1]]
                crop_tensor_list.append(crop_tensor)
        return crop_tensor_list, h_index_list, w_index_list, device, dtype

    def forward_backbone(self, crop_tensor_list):
        # forward each cropped tensor and get the output
        route_layers_list = []
        x_list = []
        for tensor in crop_tensor_list:
            route_layers, x = self.darknet(tensor)
            route_layers_list.append(route_layers)
            x_list.append(x)
        return route_layers_list, x_list

    def forward_post_process(self, route_layers_list, x_list, device, dtype):
        # concatenate each cropped tensor's output following their spatial order to reconstruct feature maps
        n = x_list[0].shape[0]  # batch size
        c_x, h_x, w_x = x_list[0].shape[1:]
        c_0, h_0, w_0 = route_layers_list[0][0].shape[1:]  # num of channels, height, width
        c_1, h_1, w_1 = route_layers_list[0][1].shape[1:]
        c_2, h_2, w_2 = route_layers_list[0][2].shape[1:]
        c_3, h_3, w_3 = route_layers_list[0][3].shape[1:]
        c_4, h_4, w_4 = route_layers_list[0][4].shape[1:]
        # route_layers_list contains (crop_idx * crop_idx) route_layers; each route_layers contains 5 tensors
        # x_list contains (crop_idx * crop_idx) tensors
        x_out = torch.zeros((n, c_x, h_x * self.crop_idx, w_x * self.crop_idx)).to(dtype).to(device)
        route_layers_out_0 = torch.zeros((n, c_0, h_0 * self.crop_idx, w_0 * self.crop_idx)).to(dtype).to(device)
        route_layers_out_1 = torch.zeros((n, c_1, h_1 * self.crop_idx, w_1 * self.crop_idx)).to(dtype).to(device)
        route_layers_out_2 = torch.zeros((n, c_2, h_2 * self.crop_idx, w_2 * self.crop_idx)).to(dtype).to(device)
        route_layers_out_3 = torch.zeros((n, c_3, h_3 * self.crop_idx, w_3 * self.crop_idx)).to(dtype).to(device)
        route_layers_out_4 = torch.zeros((n, c_4, h_4 * self.crop_idx, w_4 * self.crop_idx)).to(dtype).to(device)

        for i, x in enumerate(x_list):
            h_index = i // self.crop_idx
            w_index = i % self.crop_idx
            x_out[..., (h_index * h_x):((h_index + 1) * h_x), (w_index * w_x):((w_index + 1) * w_x)] = x
            route_layers_out_0[..., (h_index * h_0):((h_index + 1) * h_0), (w_index * w_0):((w_index + 1) * w_0)] = \
                route_layers_list[i][0]
            route_layers_out_1[..., (h_index * h_1):((h_index + 1) * h_1), (w_index * w_1):((w_index + 1) * w_1)] = \
                route_layers_list[i][1]
            route_layers_out_2[..., (h_index * h_2):((h_index + 1) * h_2), (w_index * w_2):((w_index + 1) * w_2)] = \
                route_layers_list[i][2]
            route_layers_out_3[..., (h_index * h_3):((h_index + 1) * h_3), (w_index * w_3):((w_index + 1) * w_3)] = \
                route_layers_list[i][3]
            route_layers_out_4[..., (h_index * h_4):((h_index + 1) * h_4), (w_index * w_4):((w_index + 1) * w_4)] = \
                route_layers_list[i][4]
        return [route_layers_out_0, route_layers_out_1, route_layers_out_2, route_layers_out_3, route_layers_out_4], x_out

    def forward(self, x):
        crop_tensor_list, h_index_list, w_index_list, device, dtype = self.get_local_input(x)
        route_layers_list, x_list = self.forward_backbone(crop_tensor_list)
        route_layers, x = self.forward_post_process(route_layers_list, x_list, device, dtype)
        return route_layers, x.to(device)
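
For reference, here is a stripped-down module (a sketch, not part of my network) that I believe hits the same kind of warning, in case something minimal is easier to look at (the output filename is just a placeholder):

import torch
import torch.nn as nn

class CropHalf(nn.Module):
    def forward(self, x):
        h = x.shape[2] // 2   # under tracing the shape entry is recorded as a tensor
        return x[..., :h, :]  # using it as a slice bound converts it to a Python index

torch.onnx.export(CropHalf().eval(), torch.zeros(1, 3, 8, 8), "crop_half.onnx")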

I think using a few Python lists inside the forward method is what triggers the warning above, but how can I deal with it without rewriting the whole forward pass? I have to crop the original input into patches and concatenate the results at the end for downstream tasks, so temporarily holding the cropped tensors in a list was the first idea that came to mind. Is there any other way out?

Instead of storing the indices in separate lists, use Python's range to get the patches:

patchwidth = imagewidth // cropidx
patchheight = imageheight // cropidx
patches = []
for row in range(0, imageheight, patchheight):        # top to bottom
    for col in range(0, imagewidth, patchwidth):      # left to right
        patches.append(image[row:row + patchheight, col:col + patchwidth])
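
The point, as I understand it, is that the patch bounds stay plain Python ints instead of values pulled from the traced x.shape, so the tracer never has to convert a tensor into a slice index. A rough sketch of how this could look inside your get_local_input (assuming you store input_size on the module in __init__; the return value is simplified to just the patch list plus device/dtype):

def get_local_input(self, x):
    # patch size from plain Python ints, not from the traced x.shape
    patch = self.input_size // self.crop_idx  # assumes self.input_size was saved in __init__
    crop_tensor_list = []
    for row in range(0, self.input_size, patch):      # top to bottom
        for col in range(0, self.input_size, patch):  # left to right
            crop_tensor_list.append(x[..., row:row + patch, col:col + patch])
    return crop_tensor_list, x.device, x.dtype

Alternatively, torch.chunk splits a tensor into equal parts along one dimension as a single traced op, so something like this should avoid the manual index math entirely (a sketch, I have not checked every opset):

rows = torch.chunk(x, self.crop_idx, dim=2)  # split along height, top to bottom
crop_tensor_list = [p for r in rows for p in torch.chunk(r, self.crop_idx, dim=3)]  # then along width, left to right

Note that forward_post_process uses the same pattern (sizes taken from .shape and used as slice bounds), so it may warn there as well.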