Module initialization error: An error occurred (403) when calling the HeadObject operation: Forbidden

Hello, everyone. I am trying to deploy a trained text-to-speech model on AWS using a Lambda function.
I get this error while loading my model from an S3 bucket. I am also including the code of the Lambda function I have created; the error occurs while testing it with a test configuration. Please help me resolve this issue.

try:
    # unzip_requirements is the helper provided by the serverless-python-requirements
    # plugin when packaged dependencies are shipped as a zip
    import unzip_requirements
except ImportError:
    print('Unable to import unzip_requirements')
import json
import urllib.parse
import boto3

print('Loading function')
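# boto3 uses the Lambda execution role's credentials for all S3 calls below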

s3 = boto3.client('s3')
import torch
import numpy as np
print(np.__version__)
print(np.__file__)
#from scipy.io.wavfile import write
from os import path
print(torch.__version__)
print(torch.cuda.is_available())

# Download the trained checkpoint from S3 at module import time
# (this is where the HeadObject/403 error is raised)
my_bucket = 'modeltts'
orig_file = 'TTS_Nigerian'
# Lambda only allows writing under /tmp, so save the checkpoint there
dest_file = '/tmp/TTS_Nigerian'

s3 = boto3.resource('s3')
s3.Bucket(my_bucket).download_file(orig_file, dest_file)

model_path= dest_file
# Load the Tacotron 2 architecture from torch.hub and restore the trained weights
tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2')
# Lambda has no GPU, so fall back to CPU when CUDA is unavailable
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tacotron2.load_state_dict(torch.load(model_path, map_location=device)['state_dict'])
tacotron2 = tacotron2.to(device)
tacotron2.eval()

# WaveGlow vocoder used in the handler below to turn mel spectrograms into audio
waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow')
waveglow = waveglow.to(device)
waveglow.eval()

def lambda_handler(event, context):
    #print("Received event: " + json.dumps(event, indent=2))

    # Get the object from the event and show its content type
    #bucket = event['Records'][0]['s3']['bucket']['name']
    #key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    #try:
        #response = s3.get_object(Bucket=bucket, Key=key)
        #print("CONTENT TYPE: " + response['ContentType'])
        #return response['ContentType']
    #except Exception as e:
        #print(e)
        #print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        #raise e
    
    #s3 = boto3.resource('s3')
    #with open('TTS_Nigerian', 'wb') as tacotron2:
        #s3.Bucket(bucket).download_file(key, tacotron2)          
    #tacotron2 = load_model()
    text = "Dear User, Your guider got closed unexpectedly. Would you like to continue from where you left off."

    # Text preprocessing utilities from the same torch.hub repo
    utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')
    sequence, lengths = utils.prepare_input_sequence([text])

    with torch.no_grad():
        mel_output_postnet, _, alignment = tacotron2.infer(sequence, lengths)
        audio = waveglow.infer(mel_output_postnet)
    
        #plot_data((mel_output_postnet.float().data.cpu().numpy()[0],
                    #alignment.float().data.cpu().numpy()[0].T))
    audio_numpy = audio[0].data.cpu().numpy()
    sampling_rate = 22050
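    # IPython.display.Audio only renders inside a notebook; in Lambda the waveform would
    # need to be written out (e.g. with scipy.io.wavfile.write) or uploaded somewhere instead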
    
    from IPython.display import Audio
    Audio(audio_numpy, rate=sampling_rate)

@ptrblck please acknowledge

I don’t see anything PyTorch-specific in your error, so it might be helpful to confirm that the AWS bucket you’re using is public, that the object in that bucket is public, and that it’s in the same region as the one where you’re deploying your cloud function.
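
One way to narrow this down is to reproduce the failing call outside the model code. Here is a rough sketch (the bucket and key names are taken from your snippet; run it with the same credentials the Lambda uses, i.e. its execution role). boto3's download_file issues a HeadObject request first, which is exactly the call your error message points at.

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client('s3')
bucket, key = 'modeltts', 'TTS_Nigerian'

# Check which region the bucket lives in (a mismatch with the Lambda's region is easy to miss)
print(s3.get_bucket_location(Bucket=bucket))

try:
    # This is the same HeadObject request that download_file makes before downloading
    resp = s3.head_object(Bucket=bucket, Key=key)
    print('Object reachable, size:', resp['ContentLength'])
except ClientError as e:
    # A 403 here means the credentials in use lack s3:GetObject on this bucket/key;
    # note that S3 also returns 403 instead of 404 for a missing key when s3:ListBucket is denied
    print('Access check failed:', e.response['Error'])

If this works with your own credentials but fails from the Lambda, attaching a policy that grants the execution role s3:GetObject on that bucket is an alternative to making the object public.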