How to do batch training when you have multiple neural networks and must select which network each input is passed through

(Yashaswi Pathak) #1

class NN(torch.nn.Module):
    """Small two-layer MLP mapping a 1-dim input to a 1-dim output.

    Architecture: Linear(1 -> H) -> ReLU -> Linear(H -> 1) -> ReLU.

    Args:
        H: width of the hidden layer.
    """

    def __init__(self, H):
        # Bug fix: the original used `init` instead of `__init__`, so the
        # constructor was never called and the layers were never registered.
        super(NN, self).__init__()
        self.nn_1 = nn.Linear(1, H)
        self.nn_2 = nn.Linear(H, 1)

    def forward(self, x):
        """Run x (shape (..., 1)) through both layers; output is >= 0
        because the final activation is also a ReLU."""
        hidden = F.relu(self.nn_1(x))
        out = F.relu(self.nn_2(hidden))
        return out

class TwoLayerNet(torch.nn.Module):
    """Mixture of six sub-networks with a per-element hard selector.

    Each element of the input is routed through exactly one of six `NN`
    sub-networks, chosen by a parallel selector tensor, and the routed
    outputs are summed per sample.
    """

    def __init__(self):
        # Bug fix: the original used `init` instead of `__init__`.
        super(TwoLayerNet, self).__init__()
        self.nn1 = NN(4)
        self.nn2 = NN(4)
        self.nn3 = NN(4)
        self.nn4 = NN(4)
        self.nn5 = NN(4)
        self.nn6 = NN(4)

    def forward(self, x, n):
        """Route every element of x through the sub-network chosen by n.

        Args:
            x: input tensor; assumed shape (batch, seq, 1) — matches the
               `values` tensor used in this thread; TODO confirm for other
               callers.
            n: selector tensor of shape (batch, seq); each entry is a
               1-based index (1..6) choosing which sub-network processes
               the corresponding element of x.

        Returns:
            Tensor of shape (batch, 1): per-sample sum of the selected
            sub-network outputs (so it lines up with a (batch, 1) target).

        Raises:
            ValueError: if a selector value is outside 1..6 (the original
            elif-chain silently skipped such entries, which hides bugs).
        """
        # Tuple-indexed dispatch replaces the original six-way elif chain.
        nets = (self.nn1, self.nn2, self.nn3, self.nn4, self.nn5, self.nn6)
        per_sample = []
        for b in range(x.size(0)):
            acc = torch.zeros(1)
            for j in range(x.size(1)):
                idx = int(n[b, j].item())  # selector is 1-based
                if not 1 <= idx <= len(nets):
                    raise ValueError(f"selector value {idx} out of range 1..{len(nets)}")
                acc = acc + nets[idx - 1](x[b, j])
            per_sample.append(acc)
        # Stack into (batch, 1) so the loss can compare against y directly.
        return torch.stack(per_sample)

# --- Toy training loop demonstrating the per-element network selection ---
model = TwoLayerNet()

# `size_average=False` is deprecated; reduction='sum' is the exact equivalent.
criterion = nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Two samples of four sub-inputs each; `nnselector` says which sub-network
# (1..6) each sub-input is routed through.
values = torch.Tensor([[[1.0], [2.2], [3.1], [4.1]],
                       [[1.5], [2.4], [3.1], [4.5]]])
y = torch.Tensor([[10], [24]])
nnselector = torch.Tensor([[1, 1, 1, 1], [2, 3, 4, 1]])

# Bug fix: the loop body was not indented in the original, which is a
# syntax error in Python.
for i in range(10):
    y_pred = model(values, nnselector)
    print(y_pred)
    loss = criterion(y_pred, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
#2

@Yashaswi_Pathak Did you find a solution for this network?