NotImplementedError when trying to load and train the data

Hi guys, I am trying to train a densenet161 model on a custom dataset that generates triangle pieces juxtaposed on a black background, but I keep getting a NotImplementedError.
Error:
5 board_writer,
6 device,
----> 7 batches_per_epoch=500)

in train_it(no_of_epochs, starting_epoch, model_name, model, loss_criterion, optimizer, batch_size, allTheDataloaders, board_writer, device, batches_per_epoch, is_best, min_validation_loss)
33 no_of_batches_in_this_epoch = 0
34 train_correct_in_this_epoch = 0
---> 35 for train_batch_data, train_batch_labels in allTheDataloaders["Training"]:
36 train_batch_data, train_batch_labels = train_batch_data.to(device), train_batch_labels.to(device)
37 no_of_batches_in_this_epoch+= 1

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py in __iter__(self)
353 return self._iterator
354 else:
--> 355 return self._get_iterator()
356
357 @property

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py in _get_iterator(self)
296 def _get_iterator(self) -> '_BaseDataLoaderIter':
297 if self.num_workers == 0:
--> 298 return _SingleProcessDataLoaderIter(self)
299 else:
300 self.check_worker_number_rationality()

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py in __init__(self, loader)
551
552 self._dataset_fetcher = _DatasetKind.create_fetcher(
--> 553 self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
554
555 def _next_data(self):

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py in create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last)
49 return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
50 else:
---> 51 return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
52
53

/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/fetch.py in __init__(self, dataset, auto_collation, collate_fn, drop_last)
19 def __init__(self, dataset, auto_collation, collate_fn, drop_last):
20 super(_IterableDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
---> 21 self.dataset_iter = iter(dataset)
22
23 def fetch(self, possibly_batched_index):

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataset.py in __iter__(self)
145
146 def __iter__(self) -> Iterator[T_co]:
--> 147 raise NotImplementedError
148
149 def add(self, other: Dataset[T_co]):

NotImplementedError:

The custom dataset class:
import os
from random import sample

import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from torch.utils.data import IterableDataset
from torchvision import transforms


def shuffle_buffer_iterator(actual_iterator, size_of_buffer):
    shuffle = []

    while True:
        size = 0
        while size < size_of_buffer:
            try:
                shuffle.append(next(actual_iterator))
                size += 1
            except StopIteration:
                # Source exhausted: shuffle and drain whatever is left.
                shuffle = sample(shuffle, len(shuffle))
                for s in shuffle:
                    yield s
                return

        shuffle = sample(shuffle, len(shuffle))
        for s in shuffle:
            yield s
        shuffle = []
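
For reference, the buffer generator itself can be sanity-checked in isolation; the plain `range` iterator below is just an illustration, not part of my pipeline:

# Hypothetical standalone check of shuffle_buffer_iterator
it = shuffle_buffer_iterator(iter(range(10)), size_of_buffer=4)
print(list(it))  # all of 0..9, shuffled within each buffer of 4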

class triangle_pieces_generator(IterableDataset):
    def __init__(self, root_dir, puzzle_piece_dim, size_of_buffer, model_dim):
        super().__init__()
        self.root_dir = root_dir
        self.puzzle_piece_dim = puzzle_piece_dim
        self.size_of_buffer = size_of_buffer
        self.model_dim = model_dim

    def visualize_the_triangle_image_sliced(self, image_path):
        image_current = Image.open(image_path).convert("RGBA")
        widthOfImage = image_current.size[0]
        heightOfImage = image_current.size[1]
        newImageLength = int(self.puzzle_piece_dim // 2)
        widthOfThePuzzlePiece = newImageLength
        heightOfThePuzzlePiece = newImageLength

        # Number of rows and columns that fit in the puzzle grid.
        rows = heightOfImage // heightOfThePuzzlePiece
        columns = widthOfImage // widthOfThePuzzlePiece

        image_height_new = rows * heightOfThePuzzlePiece
        image_width_new = columns * widthOfThePuzzlePiece

        image_current = image_current.resize((image_width_new, image_height_new))
        cropped_pieces = []
        coordinates = []
        square_coordinates = []
        for i in range(rows):
            for j in range(columns):
                square_coordinates.append((i, j))
                coordinates.extend([(i, j), (i + 1, j), (i + 1, j + 1), (i, j + 1)])

                # Cut one square piece out of the resized image.
                croppedImage = image_current.crop((j * widthOfThePuzzlePiece,
                                                   i * heightOfThePuzzlePiece,
                                                   (j + 1) * widthOfThePuzzlePiece,
                                                   (i + 1) * heightOfThePuzzlePiece))
                cropped_pieces.append(croppedImage)

        print("square coordinates", square_coordinates)
        print(coordinates)
        my_dpi = 300
        fig_cropped = plt.figure(dpi=my_dpi)
        plt.title("Cropped puzzle pieces")
        plt.axis('off')
        for k, croppedPiece in enumerate(cropped_pieces):
            ax = fig_cropped.add_subplot(rows, columns, k + 1)
            ax.imshow(croppedPiece)
            ax.axis('off')
        plt.show()
        return coordinates, cropped_pieces

    def draw_triangle(self):
        newImageLength = int(self.puzzle_piece_dim // 2)
        widthOfThePuzzlePiece = newImageLength
        heightOfThePuzzlePiece = newImageLength

        create_triangle_image = Image.new('L', size=(widthOfThePuzzlePiece, heightOfThePuzzlePiece), color=0)
        draw = ImageDraw.Draw(create_triangle_image)
        draw.polygon(((newImageLength + 1, newImageLength + 1), (0, newImageLength), (0, 0)), fill=255, outline=0)
        create_triangle_image.show()
        return create_triangle_image


    def draw_triangle_part2(self):
        newImageLength_2 = int(self.puzzle_piece_dim // 2)
        widthOfThePuzzlePiece = newImageLength_2
        heightOfThePuzzlePiece = newImageLength_2
        create_triangle_image_two = Image.new('L', size=(widthOfThePuzzlePiece, heightOfThePuzzlePiece), color=0)
        draw_two = ImageDraw.Draw(create_triangle_image_two)
        draw_two.polygon(((0, 0), (newImageLength_2 + 1, 0), (newImageLength_2 + 1, newImageLength_2 + 1)), fill=255, outline=0)
        create_triangle_image_two.show()
        return create_triangle_image_two

    def image_alpha(self, image_current):
        newImageLength_alpha = int(self.puzzle_piece_dim // 2)
        image_another = image_current.copy()
        image_new = image_another.resize((newImageLength_alpha, newImageLength_alpha))
        image_new.putalpha(self.draw_triangle())
        return image_new

    def image_alpha_triangle_2(self, image_current):
        newImageLength_alpha_2 = int(self.puzzle_piece_dim // 2)
        image_another_two = image_current.copy()
        image_new_2 = image_another_two.resize((newImageLength_alpha_2, newImageLength_alpha_2))
        image_new_2.putalpha(self.draw_triangle_part2())
        return image_new_2

    def generate_triangles(self):
        # Reset per image so pieces from previous images don't accumulate.
        self.triangles_one = []
        for i in range(len(self.puzzle_pieces)):
            tri_image = self.puzzle_pieces[i]
            print(tri_image)
            tri_image_1 = self.image_alpha(tri_image)
            self.triangles_one.append(tri_image_1)
        return self.triangles_one

    def generate_triangles_two(self):
        self.triangles_90 = []
        for i in range(len(self.puzzle_pieces)):
            tri_image_two = self.puzzle_pieces[i]
            print(tri_image_two)
            tri_image_2 = self.image_alpha_triangle_2(tri_image_two)
            self.triangles_90.append(tri_image_2)
        return self.triangles_90

    def get_concat_v(self, image_RGBA_new, imageTwo_RGBA_v):  # called for pieces i and i+2
        dst_new = Image.new('RGBA', (image_RGBA_new.width,
                                     image_RGBA_new.height + imageTwo_RGBA_v.height))
        dst_new.paste(image_RGBA_new, (0, 0))
        dst_new.paste(imageTwo_RGBA_v, (0, image_RGBA_new.height))
        return dst_new

    def vertical_concat_on_black_image(self, image_RGBA_new, imageTwo_RGBA_v):
        new_v_image = self.get_concat_v(image_RGBA_new, imageTwo_RGBA_v)
        juxtaposed_black_background = Image.new("RGB", (self.puzzle_piece_dim, self.puzzle_piece_dim), (0, 0, 0))
        juxtaposed_black_background_copy_v = juxtaposed_black_background.copy()
        juxtaposed_black_background_copy_v.paste(new_v_image, (30, 1), mask=new_v_image)
        return transforms.ToTensor()(juxtaposed_black_background_copy_v)

    def get_concat_h(self, image_RGBA_new, imageTwo_RGBA_new):
        dst_new_another = Image.new('RGBA', (imageTwo_RGBA_new.width + image_RGBA_new.width,
                                             imageTwo_RGBA_new.height))
        dst_new_another.paste(imageTwo_RGBA_new, (0, 0))
        dst_new_another.paste(image_RGBA_new, (imageTwo_RGBA_new.width, 0))
        return dst_new_another

    def horizonal_concat_on_black_image(self, image_RGBA_new, imageTwo_RGBA_new):
        new_h_image = self.get_concat_h(image_RGBA_new, imageTwo_RGBA_new)
        juxtaposed_black_background = Image.new("RGB", (self.puzzle_piece_dim, self.puzzle_piece_dim), (0, 0, 0))
        juxtaposed_black_background_copy_h = juxtaposed_black_background.copy()
        juxtaposed_black_background_copy_h.paste(new_h_image, (30, 1), mask=new_h_image)
        return transforms.ToTensor()(juxtaposed_black_background_copy_h)

    def join_triangles_positive(self, x, y):
        mask_image = self.draw_triangle()
        joined_image = Image.composite(x, y, mask_image)
        return joined_image

    def paste_image_positive_black(self, x, y):
        mirror = self.join_triangles_positive(x, y)
        juxtaposed_black_background_m = Image.new("RGB", (self.puzzle_piece_dim, self.puzzle_piece_dim * 2), (0, 0, 0))
        juxtaposed_black_background_copy_m = juxtaposed_black_background_m.copy()
        juxtaposed_black_background_copy_m.paste(mirror, (30, 1), mask=mirror)
        return transforms.ToTensor()(juxtaposed_black_background_copy_m)

    def slice_and_Create(self):
        for folder in sample(os.listdir(self.root_dir), len(os.listdir(self.root_dir))):
            folder_path = self.root_dir + "/" + folder
            print(folder_path)
            for image in sample(os.listdir(folder_path), len(os.listdir(folder_path))):
                piece_coordinates, self.puzzle_pieces = self.visualize_the_triangle_image_sliced(folder_path + "/" + image)
                listImages = self.generate_triangles()
                listImagesTwo = self.generate_triangles_two()
                positive_label = 1
                negative_label = 0
                for k in range(len(listImages)):
                    for l in range(len(listImagesTwo)):
                        image_RGBA_new = listImages[k]
                        imageTwo_RGBA_v = listImagesTwo[l]
                        positive_past_torch = self.paste_image_positive_black(listImages[k], listImagesTwo[l])
                        vertical_torch_neg = self.vertical_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                        horizontal_torch_neg = self.horizonal_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                        yield (positive_past_torch, positive_label)
                        yield (horizontal_torch_neg, negative_label)
                        yield (vertical_torch_neg, negative_label)
                        if l + 2 < len(listImagesTwo):
                            image_RGBA_new = listImages[k]
                            imageTwo_RGBA_v = listImagesTwo[l + 2]
                            vertical_torch = self.vertical_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                            horizontal_torch_neg = self.horizonal_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                            positive_torch_neg = self.paste_image_positive_black(listImages[k], listImagesTwo[l])
                            yield (vertical_torch, positive_label)
                            yield (horizontal_torch_neg, negative_label)
                            yield (positive_torch_neg, negative_label)
                        if k + 1 < len(listImages):
                            image_RGBA_new = listImages[k + 1]
                            imageTwo_RGBA_v = listImagesTwo[l]
                            horizontal_torch = self.horizonal_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                            vertical_torch_neg = self.vertical_concat_on_black_image(image_RGBA_new, imageTwo_RGBA_v)
                            positive_torch_neg = self.paste_image_positive_black(listImages[k], listImagesTwo[l])
                            yield (vertical_torch_neg, negative_label)
                            yield (positive_torch_neg, negative_label)
                            yield (horizontal_torch, positive_label)

    def _iter_(self):
        iterator = self.slice_and_Create()
        return shuffle_buffer_iterator(iterator, self.size_of_buffer)

The DenseNet model and training code:

import math
from time import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import models

def retrieve_the_dataset_input():
    puzzle_piece_dim = int(input("Enter puzzle_piece_dim "))
    size_of_buffer = int(input("Enter size_of_shuffle_buffer "))
    model_dim = int(input("Enter model_dim "))
    batch_size = int(input("Enter batch_size "))
    return puzzle_piece_dim, size_of_buffer, model_dim, batch_size

def set_the_dataset_input(default=True):
    if default:
        puzzle_piece_dim = 100
        size_of_buffer = 1000
        model_dim = 224
        batch_size = 20
    else:
        puzzle_piece_dim, size_of_buffer, model_dim, batch_size = retrieve_the_dataset_input()
    return puzzle_piece_dim, size_of_buffer, model_dim, batch_size

def initialise_dataloader(root_dir, val_dir, puzzle_piece_dim, size_of_buffer, model_dim, batch_size):
    training_dataset = triangles_pieces_dataset.triangle_pieces_generator(root_dir, puzzle_piece_dim, size_of_buffer, model_dim)
    train_dataset_dataloader = DataLoader(training_dataset, batch_size)

    validation_dataset = triangles_pieces_dataset.triangle_pieces_generator(val_dir, puzzle_piece_dim, size_of_buffer, model_dim)
    validation_dataset_dataloader = DataLoader(validation_dataset, batch_size)

    allTheDataloaders = {'Training': train_dataset_dataloader, 'Validation': validation_dataset_dataloader}
    return allTheDataloaders

def save_the_models_at_the_best_checkpoint(check_point_save, is_best, checkpoint_path, best_model_path):
    torch.save(check_point_save, checkpoint_path)
    if is_best:
        torch.save(check_point_save, best_model_path)

def load_the_models_at_the_best_checkpoint(new_path, model, optimizer):
    checkpoint = torch.load(new_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    min_validation_loss = checkpoint['min_validation_loss']
    return model, optimizer, checkpoint['epoch'], min_validation_loss

def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

def initialise_the_model(model_name, numOfClasses, feature_extract, use_pretrained=True):
    model = None
    if model_name == 'densenet':
        model = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model, feature_extract)
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, numOfClasses)
    return model

def create_optimizer(given_model_parameters, learning_rate, momentum):
    optimizer = optim.SGD(given_model_parameters, lr=learning_rate, momentum=momentum)
    return optimizer

model_names = ["Densenet"]

def get_model_details():
    i = int(input("Press 0 for Densenet "))
    model_name = model_names[i]
    if i == 0:
        j = int(input("Press 0 for FineTuning and 1 for FeatureExtracting "))
        feature_extracting = (j == 1)
    else:
        feature_extracting = False
    print("************")
    print(f"Using {model_name}")
    print(f"feature_extracting : {feature_extracting}")
    return model_name, feature_extracting

def reshape_denseNet(no_of_classes, feature_extract, use_pretrained=True):
    model_dense = models.densenet161(pretrained=use_pretrained)
    set_parameter_requires_grad(model_dense, feature_extract)
    no_of_features = model_dense.classifier.in_features
    model_dense.classifier = nn.Linear(no_of_features, no_of_classes)
    return model_dense

def parameters_to_update(model_name, model, feature_extract=False):
    params = list(model.parameters())
    if model_name == "Densenet":
        if feature_extract:
            print("Feature extracting from Densenet - expect fewer parameters to learn!")
            params = []
            for name, param in model.named_parameters():
                if param.requires_grad:
                    params.append(param)
                    print("\t", name)
        else:
            print("Fine tuning Densenet - expect more parameters to learn!")
            for name, param in model.named_parameters():
                if param.requires_grad:
                    print("\t", name)
    print(f"No_of_parameters to learn : {len(params)}")
    return params

def make_loss_criterion(model_name):
    loss_criterion = nn.CrossEntropyLoss()
    return loss_criterion

def make_model_lc_optimizer(model_name, learning_rate, momentum,
                            feature_extract=False, no_of_classes=2):
    model = reshape_denseNet(no_of_classes, feature_extract, use_pretrained=True)
    params_to_update = parameters_to_update(model_name, model, feature_extract)
    loss_criterion = make_loss_criterion(model_name)
    optimizer = create_optimizer(params_to_update, learning_rate, momentum)
    return model, loss_criterion, optimizer

def get_hyperparameters(default=True):
    if default:
        learning_rate = 0.001
        momentum = 0.9
    else:
        learning_rate = float(input("Enter learning rate "))
        momentum = float(input("Enter momentum "))
    return learning_rate, momentum

#Helper function from pytorch

def train_it(no_of_epochs, starting_epoch,
             model_name, model, loss_criterion, optimizer,
             batch_size, allTheDataloaders, board_writer, device, batches_per_epoch=100,
             is_best=False, min_validation_loss=math.inf):

    last_checkpoint_path = f"./last_checkpoint_for_{model_name}.pt"
    best_model_path = f"./best_model_for_{model_name}.pt"

    for epoch in range(starting_epoch, starting_epoch + no_of_epochs):
        print(f"Epoch : {epoch}")
        start_time = time()

        model.train()
        t_labels = []
        t_predictions = []

        print("Training")
        train_loss_in_this_epoch = 0
        no_of_batches_in_this_epoch = 0
        train_correct_in_this_epoch = 0
        for train_batch_data, train_batch_labels in allTheDataloaders["Training"]:
            train_batch_data, train_batch_labels = train_batch_data.to(device), train_batch_labels.to(device)
            no_of_batches_in_this_epoch += 1
            optimizer.zero_grad()
            train_batch_outputs = model(train_batch_data)
            # Compute loss for this batch
            train_batch_loss = loss_criterion(train_batch_outputs, train_batch_labels)
            train_loss_in_this_batch = train_batch_loss.item()
            train_loss_in_this_epoch += train_loss_in_this_batch
            train_batch_loss.backward()
            optimizer.step()
            with torch.no_grad():
                train_score, train_predictions = torch.max(train_batch_outputs, dim=1)
                train_correct_in_this_batch = torch.sum(train_predictions == train_batch_labels.data).item()
                train_correct_in_this_epoch += train_correct_in_this_batch
                train_batch_labels = train_batch_labels.detach().cpu().numpy()
                train_score = train_score.detach().cpu().numpy()
                # Append the values for the later metric calculation
                for i in range(len(train_batch_labels)):
                    t_labels.append(train_batch_labels[i])
                    t_predictions.append(train_score[i])

            if (no_of_batches_in_this_epoch % max(1, batches_per_epoch // 10)) == 0:
                print(f"Training #{no_of_batches_in_this_epoch} Batch Acc : {train_correct_in_this_batch}/{batch_size}, Batch Loss: {train_loss_in_this_batch}")
            if no_of_batches_in_this_epoch == batches_per_epoch:
                print(f"Epoch : {epoch}, Training Batch: {no_of_batches_in_this_epoch}")
                break

        board_writer.add_scalar('Training/Loss/Average', train_loss_in_this_epoch / no_of_batches_in_this_epoch, epoch)
        board_writer.add_scalar('Training/Accuracy/Average', train_correct_in_this_epoch / (no_of_batches_in_this_epoch * batch_size), epoch)
        board_writer.add_scalar('Training/TimeTakenInMinutes', (time() - start_time) / 60, epoch)
        board_writer.flush()
        print(f"Training average accuracy : {train_correct_in_this_epoch / (no_of_batches_in_this_epoch * batch_size)}")
        print(f"Training average loss : {train_loss_in_this_epoch / no_of_batches_in_this_epoch}")

        # create f measure
        model.eval()
        print("Validation")
        val_loss_in_this_epoch = 0
        no_of_batches_in_this_epoch = 0
        val_correct_in_this_epoch = 0
        with torch.no_grad():
            for val_batch_data, val_batch_labels in allTheDataloaders["Validation"]:
                val_batch_data, val_batch_labels = val_batch_data.to(device), val_batch_labels.to(device)
                no_of_batches_in_this_epoch += 1
                val_batch_outputs = model(val_batch_data)
                # Compute loss for this batch
                val_batch_loss = loss_criterion(val_batch_outputs, val_batch_labels)
                val_loss_in_this_batch = val_batch_loss.item()
                val_loss_in_this_epoch += val_loss_in_this_batch
                val_score, val_predictions = torch.max(val_batch_outputs, dim=1)
                val_correct_in_this_batch = torch.sum(val_predictions == val_batch_labels.data).item()
                val_correct_in_this_epoch += val_correct_in_this_batch
                if (no_of_batches_in_this_epoch % max(1, batches_per_epoch // 10)) == 0:
                    print(f"Validation #{no_of_batches_in_this_epoch} Batch Acc : {val_correct_in_this_batch}/{batch_size}, Batch Loss: {val_loss_in_this_batch}")
                if no_of_batches_in_this_epoch == batches_per_epoch:
                    print(f"Epoch : {epoch}, Validation Batch: {no_of_batches_in_this_epoch}")
                    break
            board_writer.add_scalar('Validation/Loss/Average', val_loss_in_this_epoch / no_of_batches_in_this_epoch, epoch)
            board_writer.add_scalar('Validation/Accuracy/Average', val_correct_in_this_epoch / (no_of_batches_in_this_epoch * batch_size), epoch)
            board_writer.add_scalar('Validation/TimeTakenInMinutes', (time() - start_time) / 60, epoch)
            board_writer.flush()
            print(f"Validation average accuracy : {val_correct_in_this_epoch / (no_of_batches_in_this_epoch * batch_size)}")
            print(f"Validation average loss : {val_loss_in_this_epoch / no_of_batches_in_this_epoch}")
            if min_validation_loss >= val_loss_in_this_epoch:
                is_best = True
                min_validation_loss = min(min_validation_loss, val_loss_in_this_epoch)
                checkpoint = {
                    'epoch': epoch + 1,
                    'min_validation_loss': min_validation_loss,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                save_the_models_at_the_best_checkpoint(checkpoint, is_best, last_checkpoint_path, best_model_path)
                print(f"In epoch number {epoch}, average validation loss decreased to {val_loss_in_this_epoch / no_of_batches_in_this_epoch}")
            last_checkpoint = {
                'epoch': epoch + 1,
                'min_validation_loss': min_validation_loss,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            save_the_models_at_the_best_checkpoint(last_checkpoint, False, last_checkpoint_path, best_model_path)

    board_writer.close()

## Variables
root_dir = os.getenv("MY_ROOT_DIR")
val_dir = os.getenv("MY_VAL_DIR")

# Change this to False if you want to set the variables instead of using the defaults
default_setting_for_dataset = True

puzzle_piece_dim, size_of_buffer, model_dim, batch_size = set_the_dataset_input(default_setting_for_dataset)

print(f"my_puzzle_piece_dim = {puzzle_piece_dim}")
print(f"my_size_of_buffer = {size_of_buffer}")
print(f"my_model_dim = {model_dim}")
print(f"my_batch_size = {batch_size}")

my_dataloaders = initialise_dataloader(root_dir, val_dir, puzzle_piece_dim, size_of_buffer, model_dim, batch_size)

model_name, feature_extract = get_model_details()

default_setting_for_hyperparameters = True

learning_rate,momentum = get_hyperparameters(default_setting_for_hyperparameters)

# Change the number of epochs here
no_of_epochs = 3

model, loss_criterion, optimizer = make_model_lc_optimizer(model_name, learning_rate, momentum, feature_extract)

if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("Running on the GPU")
    # put the model on the GPU
    model.to(device)
else:
    device = torch.device("cpu")
    print("Running on the CPU")

tensorboard_dir = f"Training_{model_name}"
board_writer = SummaryWriter(tensorboard_dir)

train_it(no_of_epochs, 0,
         model_name, model, loss_criterion, optimizer,
         batch_size,
         my_dataloaders,
         board_writer,
         device,
         batches_per_epoch=500)

%load_ext tensorboard
%tensorboard --logdir="$tensorboard_dir"

Any guidance would be appreciated, as I am super lost. (Sorry about the formatting of this question.)

The issue is most likely caused by a typo in:

def _iter_(self):

since two underscores are expected on each side:

def __iter__(self):

Because `_iter_` doesn't override the parent method, the DataLoader falls through to the base `IterableDataset.__iter__`, which simply raises `NotImplementedError` (that's exactly the last frame in your stack trace).
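
For comparison, here is a minimal self-contained sketch (the `ToyStream` class and the tensor shapes are made up purely for illustration) showing that once `__iter__` is defined with double underscores, the DataLoader iterates without the error:

import torch
from torch.utils.data import IterableDataset, DataLoader

class ToyStream(IterableDataset):  # hypothetical toy dataset
    def __iter__(self):  # double underscores on both sides
        # Yield (sample, label) pairs, like your slice_and_Create generator does.
        return iter([(torch.randn(3), 0), (torch.randn(3), 1)])

loader = DataLoader(ToyStream(), batch_size=2)
for data, labels in loader:
    print(data.shape, labels)  # torch.Size([2, 3]) tensor([0, 1])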

PS: you can post code snippets by wrapping them into three backticks ```, which makes debugging easier. :wink:
