Multiple forward passes in PyTorch Lightning

Hi all, I am very new to Lightning. I have a PyTorch script that I want to convert to Lightning, and I would appreciate some help. I have converted most of the steps, but the one problem I am facing is that my PyTorch code performs multiple forward passes, as shown in the loop at the end of the snippet below, and I don't know how to express this in PyTorch Lightning. Please help me with that.

import random
import time
import torch
import torch.nn as nn
import torch.nn.functional as F

class Made(nn.Module):
    def __init__(self, m):
        super(Made, self).__init__()
        self.m = m
        self.neural_made1 = nn.Linear(self.m, self.m)
        self.neural_made2 = nn.Linear(self.m, self.m)
        self.neural_made3 = nn.Linear(self.m, self.m)
        self.prelu1 = nn.PReLU()  # learnable activation function
        self.prelu2 = nn.PReLU()
        self.sig = nn.Sigmoid()

    def forward(self, x):
        self.hidden1 = self.prelu1(self.neural_made1(x))
        self.hidden2 = self.prelu2(self.neural_made2(self.hidden1))
        self.final = self.sig(self.neural_made3(self.hidden2))
        return self.final


dyn_var = 256
Net = pf.Made(dyn_var).to(dev)  # pf is the module that contains Made; dev is my CUDA/CPU device

I_Par = torch.zeros((bs, dyn_var)).to(dev)
for i in range(dyn_var):  # autoregressive property: one forward pass per input variable
    rand = torch.rand(bs).to(dev)
    with torch.no_grad():
        Net_1 = Net(I_Par)  # forward pass of the neural network, no gradients
        I_Par[:, i] = torch.where(Net_1[:, i] > rand, -1, 1)
    assert not I_Par.requires_grad
Net_2 = Net(I_Par)  # final forward pass, this one tracks gradients
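
For context, in my full script this sampling loop sits inside the training loop, roughly like the sketch below (simplified, not my exact code; compute_loss is just a stand-in for my actual loss):

```
optimizer = torch.optim.SGD(Net.parameters(), lr=1e-3)
for epoch in range(epochs):
    I_Par = torch.zeros((bs, dyn_var), device=dev)
    for i in range(dyn_var):
        rand = torch.rand(bs, device=dev)
        with torch.no_grad():                  # sampling passes, no gradients needed
            Net_1 = Net(I_Par)
            I_Par[:, i] = torch.where(Net_1[:, i] > rand, -1.0, 1.0)
    Net_2 = Net(I_Par)                         # final pass, gradients enabled
    loss = compute_loss(Net_2, I_Par)          # stand-in for my actual loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```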

Could you describe what the issue is and which error you are seeing?

Hi ptrblck, I want to convert my PyTorch code to Lightning, but I am having some trouble with it. So far I have written the script given below, but I am not sure what to put in training_step(self, ...). I need to move the code below from my torch code into it; this code performs multiple forward passes of the Made() network. Please help me with that.

I_Par = torch.zeros((bs, dyn_var)).to(dev)
for i in range(dyn_var):  # autoregressive property: one forward pass per input variable
    rand = torch.rand(bs).to(dev)
    with torch.no_grad():
        Net_1 = Net(I_Par)  # forward pass of the neural network
        I_Par[:, i] = torch.where(Net_1[:, i] > rand, -1, 1)
    assert not I_Par.requires_grad
Net_2 = Net(I_Par)
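
My guess is that training_step would end up looking something like the sketch below (using Lightning's self.device instead of my dev, and with a stand-in for how Prob is actually computed), but I am not sure whether calling self(...) repeatedly inside training_step is the right way to do this in Lightning:

```
def training_step(self, batch, batch_idx):
    bs = batch.shape[0]
    # start from an all-zero configuration and fill it in autoregressively
    I_Par = torch.zeros((bs, self.m), device=self.device)
    for i in range(self.m):
        rand = torch.rand(bs, device=self.device)
        with torch.no_grad():               # sampling passes, no gradients
            out = self(I_Par)               # one forward pass per variable
            I_Par[:, i] = torch.where(out[:, i] > rand, -1.0, 1.0)
    out = self(I_Par)                       # final forward pass, gradients enabled
    Prob = out                              # stand-in: whatever per-sample quantity my loss expects
    loss = self._compute_kl_loss(Prob, I_Par)
    return loss
```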


import random
import time
from torch.optim import SGD
from pytorch_lightning.utilities.types import STEP_OUTPUT
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import ising_fun as fi
import pytorch_lightning as pl


class ZerosDataset(Dataset):

    def __init__(self, batch_size, var_size):
        self.batch_size = batch_size
        self.var_size = var_size

    def __len__(self):
        return self.batch_size

    def __getitem__(self, idx):
        return torch.zeros(self.var_size)


class Made(pl.LightningModule):  # LightningModule so the training hooks below are picked up

    def __init__(self, m):
        super(Made, self).__init__()
        self.m = m
        self.neural_made1 = nn.Linear(self.m, self.m)
        self.neural_made2 = nn.Linear(self.m, self.m)
        self.neural_made3 = nn.Linear(self.m, self.m)
        self.prelu1 = nn.PReLU()  # learnable activation function
        self.prelu2 = nn.PReLU()
        self.sig = nn.Sigmoid()

    def forward(self, x):
        self.hidden1 = self.prelu1(self.neural_made1(x))
        self.hidden2 = self.prelu2(self.neural_made2(self.hidden1))
        self.final = self.sig(self.neural_made3(self.hidden2))
        return self.final

    def train_dataloader(self):
        dataset = ZerosDataset(batch_size=1000, var_size=1)  # using ZerosDataset here
        return DataLoader(dataset, batch_size=1000)

    def training_step(self, batch, batch_idx):
        # TODO: this is where the multiple forward passes from my torch code should go
        return

    def _compute_kl_loss(self, Prob, I_Par):
        with torch.no_grad():
            Mu1 = torch.zeros((bs, m, m), dtype=torch.int64)
            Mu = fi.matrix_ising(bs, I_Par, I_Val_2, Mu1)
            Energy = fi.energy_ising(bs, m, Mu, I_Val_2, J1)
            loss = Prob + Energy

        assert not Energy.requires_grad
        assert not loss.requires_grad
        loss_ren = torch.mean((loss - loss.mean()) * Prob)
        return loss_ren

    def configure_optimizers(self):
        return SGD(self.parameters(), lr=1e-3)


if __name__ == '__main__':

    J1 = 0.6
    bs = 2500  # batch size
    m = 10
    dyn_var = m * m
    epochs = 20000

    I_Val_2 = fi.ising_linear(m)
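
Once training_step is filled in, I am planning to launch the training roughly like this (a sketch, not tested); as far as I understand, Lightning takes care of the device placement and the optimizer loop itself:

```
    model = Made(dyn_var)                      # dyn_var = m * m, as above
    trainer = pl.Trainer(max_epochs=epochs)    # epochs as defined above
    trainer.fit(model)                         # uses the train_dataloader() defined on the module
```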