SRGAN model training error


import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from custom_dataset import CustomDataset
import RRDBNet_arch as arch
from PIL import Image 

torch.cuda.empty_cache()
# Define your dataset paths
hr_folder = 'resized/*.png'
lr_folder = 'lr_folder/*.png'
val_hr_folder = 'val_hr_folder/*.png'
val_lr_folder = 'val_lr_folder/*.png'

# Hyperparameters
batch_size = 2
learning_rate = 0.0002
num_epochs = 50
target_size = 256   # both HR and LR images are resized to this size
scale_factor = 4    # intended upscaling factor (not used below)

# Entry-point guard (required for DataLoader workers with num_workers > 0 on Windows)
if __name__ == '__main__':
    # Create instances of the dataset and DataLoader
    train_dataset = CustomDataset(hr_folder, lr_folder, transform=transforms.Compose([
        transforms.Resize((target_size, target_size), Image.BICUBIC),
        transforms.ToTensor(),
    ]))
    
    val_dataset = CustomDataset(val_hr_folder, val_lr_folder, transform=transforms.Compose([
        transforms.Resize((target_size, target_size), Image.BICUBIC),
        transforms.ToTensor(),
    ]))

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

    # Create your SRGAN model
    device = torch.device('cuda')
    model = arch.RRDBNet(3, 3, 64, 23, gc=32).to(device)

    # Define loss function and optimizer
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop with validation
    for epoch in range(num_epochs):
        model.train()
        for hr_batch, lr_batch in train_loader:
            hr_batch, lr_batch = hr_batch.to(device), lr_batch.to(device)

            optimizer.zero_grad()
            sr_batch = model(lr_batch)
            loss = criterion(sr_batch, hr_batch)
            loss.backward()
            optimizer.step()

        # Validation loop
        model.eval()
        with torch.no_grad():
            for val_hr_batch, val_lr_batch in val_loader:
                val_hr_batch, val_lr_batch = val_hr_batch.to(device), val_lr_batch.to(device)
                val_sr_batch = model(val_lr_batch)
                val_loss = criterion(val_sr_batch, val_hr_batch)

        # Optionally, print or log the loss for monitoring training progress
        print(f'Epoch [{epoch+1}/{num_epochs}], Training Loss: {loss.item()}, Validation Loss: {val_loss.item()}')

    # Save the trained model
    torch.save(model.state_dict(), 'models/trained_model.pth')

When I run this, training fails with:

RuntimeError: The size of tensor a (1024) must match the size of tensor b (256) at non-singleton dimension 3

Your code is not properly formatted, and it does not include the model definition or the traceback showing which line fails.
The error points to a shape mismatch, so isolate the failing line and check the shapes of all tensors involved in that operation.
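For example, a quick debugging sketch (assuming the failure is in the loss call inside your training loop, which the 1024-vs-256 sizes suggest) is to print the shapes just before computing the loss:

# Debugging sketch: print tensor shapes right before the loss is computed.
# Assumes criterion(sr_batch, hr_batch) is the line that raises the error.
sr_batch = model(lr_batch)
print('lr_batch:', lr_batch.shape)  # e.g. [2, 3, 256, 256] after your Resize transform
print('sr_batch:', sr_batch.shape)  # network output
print('hr_batch:', hr_batch.shape)  # must match sr_batch exactly for nn.MSELoss
loss = criterion(sr_batch, hr_batch)

If sr_batch turns out to be 4x larger than hr_batch, then your Resize transform is shrinking the HR targets to the same size as the LR inputs, which would explain the mismatch.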

Sorry, my bad, I'm using this platform for the first time (the code is formatted correctly now).
By the way, I already tried resizing the images and similar fixes, but the tensor size mismatch is still there.
Please help me with this.