Dataloader for two concatenated models

Hello everyone,
I have two CNN models (without the FC layers), and a third model that does the concatenation and the FC. Here is the model:
import torch
import torch.nn as nn
import torch.nn.functional

class block(nn.Module):
    def __init__(
        self, in_channels, intermediate_channels, identity_downsample=None, stride=1
    ):
        super(block, self).__init__()
        self.expansion = 4
        self.conv1 = nn.Conv2d(
            in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0
        )
        self.bn1 = nn.BatchNorm2d(intermediate_channels)
        # BatchNorm applies a transformation that keeps the output mean close
        # to 0 and the output standard deviation close to 1.
        self.conv2 = nn.Conv2d(
            intermediate_channels,
            intermediate_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
        )
        self.bn2 = nn.BatchNorm2d(intermediate_channels)
        self.conv3 = nn.Conv2d(
            intermediate_channels,
            intermediate_channels * self.expansion,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        self.bn3 = nn.BatchNorm2d(intermediate_channels * self.expansion)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample
        self.stride = stride

    def forward(self, x):
        identity = x.clone()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        if self.identity_downsample is not None:
            identity = self.identity_downsample(identity)
        x += identity
        x = self.relu(x)
        return x

class ResNet(nn.Module):
    def __init__(self, block, layers, image_channels, num_classes):
        super(ResNet, self).__init__()
        self.in_channels = 4
        self.conv1 = nn.Conv2d(image_channels, 4, kernel_size=3, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(4)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax = nn.Softmax(dim=1)
        # Essentially the entire ResNet architecture is in these lines below
        self.layer1 = self._make_layer(block, layers[0], intermediate_channels=55, stride=1)
        self.layer2 = self._make_layer(block, layers[1], intermediate_channels=64, stride=2)
        self.layer3 = self._make_layer(block, layers[2], intermediate_channels=128, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        return x

    def _make_layer(self, block, num_residual_blocks, intermediate_channels, stride):
        identity_downsample = None
        layers = []
        if stride != 1 or self.in_channels != intermediate_channels * 4:
            identity_downsample = nn.Sequential(
                nn.Conv2d(
                    self.in_channels,
                    intermediate_channels * 4,
                    kernel_size=1,
                    stride=stride,
                ),
                nn.BatchNorm2d(intermediate_channels * 4),
            )
        layers.append(
            block(self.in_channels, intermediate_channels, identity_downsample, stride)
        )
        self.in_channels = intermediate_channels * 4
        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, intermediate_channels))
        return nn.Sequential(*layers)

def ResNet50(img_channel=4, num_classes=2):
    return ResNet(block, [3, 4, 3], img_channel, num_classes)

net1 = ResNet50(img_channel=4, num_classes=2)
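As a quick sanity check, net1 maps a 4-channel image to a 512-dimensional feature vector (128 * expansion of 4 from layer3). A sketch with a dummy input, where the 64x64 spatial size is only an assumption:

dummy = torch.randn(2, 4, 64, 64)  # batch of 2 fake 4-channel images
print(net1(dummy).shape)           # torch.Size([2, 512])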

###############################################################################
# Second feature extractor (net2). The block class is identical to the one
# defined above and is reused; only the ResNet definition changes.


class ResNet(nn.Module):
    def __init__(self, block, layers, image_channels, num_classes):
        super(ResNet, self).__init__()
        self.in_channels = 40
        self.conv1 = nn.Conv2d(image_channels, 40, kernel_size=3, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(40)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax = nn.Softmax(dim=1)
        self.layer1 = self._make_layer(block, layers[0], intermediate_channels=32, stride=1)
        self.layer2 = self._make_layer(block, layers[1], intermediate_channels=64, stride=2)
        self.layer3 = self._make_layer(block, layers[2], intermediate_channels=128, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        return x

    def _make_layer(self, block, num_residual_blocks, intermediate_channels, stride):
        identity_downsample = None
        layers = []
        if stride != 1 or self.in_channels != intermediate_channels * 4:
            identity_downsample = nn.Sequential(
                nn.Conv2d(
                    self.in_channels,
                    intermediate_channels * 4,
                    kernel_size=1,
                    stride=stride,
                ),
                nn.BatchNorm2d(intermediate_channels * 4),
            )
        layers.append(
            block(self.in_channels, intermediate_channels, identity_downsample, stride)
        )
        self.in_channels = intermediate_channels * 4
        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, intermediate_channels))
        return nn.Sequential(*layers)

def ResNet50(img_channel=40, num_classes=2):
    return ResNet(block, [3, 4, 3], img_channel, num_classes)

net2 = ResNet50(img_channel=40, num_classes=2)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.feature1 = net1
        self.feature2 = net2
        # each ResNet branch outputs 128 * 4 = 512 features, so the
        # concatenated vector has 2 * 512 = 1024 features
        self.fc = nn.Linear(128 * 4 * 2, 2)

    def forward(self, x, y):
        x1 = self.feature1(x)
        x2 = self.feature2(y)
        x3 = torch.cat((x1, x2), 1)
        x3 = x3.view(x3.size(0), -1)
        x3 = self.fc(x3)
        return x3

net = Net()
loss_fn = nn.BCEWithLogitsLoss()

import torch.optim as optim

net.train()
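To see what the combined model expects per batch, here is a quick forward-pass sketch with dummy inputs; the 64x64 spatial size is an assumption, only the channel counts (4 and 40) are fixed by the two branches:

xa = torch.randn(2, 4, 64, 64)   # branch 1: 4-channel input for net1
xb = torch.randn(2, 40, 64, 64)  # branch 2: 40-channel input for net2
print(net(xa, xb).shape)         # torch.Size([2, 2])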


I would like to build a data loader for each model and one for the third model. Please help me.

This is my data loader for one model:

import torch
import numpy as np
import glob
import scipy
import scipy.io as sio
from ModelResNet import *
from Preper import *
from torch.utils.data import Dataset, DataLoader

class ImageDataset(Dataset):
    def __init__(self, alldata, transform=None):
        # data loaded from .mat files (A and Y come from Preper)
        self.alldata = alldata
        alldata_olivier = A
        alldata_non_olivier = Y

        # build the list of labels
        self.labels = []
        for i in range(5027):  # number of 'non olive' samples
            self.labels.append(0)
        for i in range(4990):  # number of 'olive' samples
            self.labels.append(1)

        # shuffle data using sklearn
        self.numdata = 10017
        self.transform = transform

    def __len__(self):
        return self.numdata

    def __getitem__(self, idx):
        # return a single sample and its label as torch tensors
        image = self.alldata[idx]
        label = self.labels[idx]
        imageTensor = torch.from_numpy(np.asarray(image))
        labelTensor = torch.tensor(label)
        return imageTensor, labelTensor

if __name__ == '__main__':
    k = ImageDataset(z)
    print(k[60])

import sklearn.model_selection as model_selection

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    alldata, labels, train_size=0.8, test_size=0.2
)

class trainData(Dataset):
    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)

train_data = trainData(torch.FloatTensor(X_train), torch.FloatTensor(y_train))

class testData(Dataset):
    def __init__(self, X_data):
        self.X_data = X_data

    def __getitem__(self, index):
        return self.X_data[index]

    def __len__(self):
        return len(self.X_data)

train_loader = DataLoader(dataset=train_data, batch_size=2, shuffle=True)
test_data = testData(torch.FloatTensor(X_test))

# the device must be defined before the model is moved onto it
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

EPOCHS = 21
LEARNING_RATE = 0.001

def binary_acc(y_pred, y_testt):
    y_pred_tag = torch.round(y_pred)
    correct_results_sum = (y_pred_tag[:, 0] == y_testt).sum().float()
    acc = correct_results_sum / y_testt.shape[0]
    acc = torch.round(acc * 100)
    return acc

# note: no optimizer was defined in the original code; Adam is an assumption
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)

net.train()
print('start of training:')

for e in range(1, EPOCHS + 1):
    epoch_loss = 0
    epoch_acc = 0
    for X_batch, y_batch in train_loader:
        optimizer.zero_grad()

        # move channels to dim 1 (NHWC -> NCWH) for the conv layers
        X_batch = X_batch.permute(0, 3, 2, 1).float()
        X_batch, y_batch = X_batch.to(device), y_batch.to(device)

        # note: Net.forward expects two inputs (x, y), but only one is passed
        # here; this is exactly where the two-dataloader question arises
        y_pred = net(X_batch)

        acc = binary_acc(y_pred, y_batch)
        loss = loss_fn(y_pred[:, 0], y_batch.float())

        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        epoch_acc += acc.item()

    print(f'Epoch {e:03}: | Loss: {epoch_loss/len(train_loader):.5f} | Acc: {epoch_acc/len(train_loader):.3f}')

Hi Adam

There’s a fair bit of code here. Are you experiencing any errors or anything you weren’t expecting?

From a quick look through, it looks as though you have a single model (Net) that contains two separate ResNet models. So it sounds as though you may want to pass two datasets into the same model using one dataloader? If so, following something like this will be helpful.
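For example, here is a minimal sketch of a dataset whose __getitem__ returns both inputs plus the label, so a single DataLoader can feed both branches in one forward pass. The names (PairedDataset, X1_train, X2_train) are placeholders for your own arrays:

from torch.utils.data import Dataset, DataLoader

class PairedDataset(Dataset):
    # returns one sample for each branch plus the shared label
    def __init__(self, data1, data2, labels):
        assert len(data1) == len(data2) == len(labels)
        self.data1 = data1    # samples for net1 (4 channels)
        self.data2 = data2    # samples for net2 (40 channels)
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.data1[idx], self.data2[idx], self.labels[idx]

paired_loader = DataLoader(PairedDataset(X1_train, X2_train, y_train),
                           batch_size=2, shuffle=True)

for x1_batch, x2_batch, y_batch in paired_loader:
    y_pred = net(x1_batch, x2_batch)  # both branches in one forward pass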

Let me know how you get on or if I’ve misunderstood.