Batch normalization in eval mode

I am evaluating a network in eval mode. When I pass a value through the network, I find that I am still not able to pass a single state (a batch of size 1). The __init__ and forward functions look like this:

def __init__(self, stateSize, actionSize, layers=[10, 5], activations=[F.tanh, F.tanh], batchNormalization = True, lr=0.01 ):
	'''This is a Q network with discrete actions
	
	This takes a state and returns a Q function for each action. Hence, the
	input is a state and the output is a set of Q values, one for each action
	in the action space. The action space is assumed to be discrete, i.e. a ``1``
	at the position of the desired action.
	
	Parameters
	----------
	stateSize : {int}
		Dimension of the state vector that the network takes as input.
	actionSize : {int}
		Number of discrete actions, i.e. the number of Q values returned.
	layers : {list}, optional
		Sizes of the hidden fully connected layers (the default is [10, 5])
	activations : {list}, optional
		Activation functions applied after each hidden layer (the default is
		[F.tanh, F.tanh])
	batchNormalization : {bool}, optional
		Whether batch normalization is to be used (the default is True)
	lr : {float}, optional
		Learning rate for the Adam optimizer (the default is 0.01)
	'''


	try:
		super(qNetworkDiscrete, self).__init__()
		self.stateSize           = stateSize
		self.actionSize          = actionSize
		self.layers              = layers
		self.activations         = activations
		self.batchNormalization  = batchNormalization

		# Generate the fully connected layer functions
		self.fcLayers = []
		self.bns      = []

		oldN = stateSize
		for i, layer in enumerate(layers):
			self.fcLayers.append( nn.Linear(oldN, layer) )
			self.bns.append( nn.BatchNorm1d( num_features = layer, track_running_stats=True ) )
			oldN = layer

		# ------------------------------------------------------
		# The final layer will only need to supply a quality
		# function. This is a single value for an action 
		# provided. Ideally, you would want to provide a 
		# OHE action sequence for most purposes ...
		# ------------------------------------------------------
		self.fcFinal = nn.Linear( oldN, actionSize )

		# we shall put this in eval mode and only use
		# the train mode when we need to train the
		# model
		self.optimizer = optim.Adam(
			self.parameters(), lr=lr)

	
	except Exception as e:
		print(f'Unable to generate the Q network ... : {e}')


	return

def forward(self, x):
	'''forward function that is called during the forward pass
	
	This is the forward function that will be called during a 
	forward pass. It takes the states and gives the Q value
	corresponding to each of the actions that are
	associated with that state.
	
	Parameters
	----------
	x : {tensor}
		This is a 2D tensor. 
	
	Returns
	-------
	tensor
		This represents the Q value of the function
	'''

	try:
		print('[qNNetwork] [i] -> ', x.shape)

		for i, (bn, fc, a) in enumerate(zip(self.bns, self.fcLayers, self.activations)):
			print(f'[qNNetwork] [i]-> ', x.shape)
			x = a(bn(fc(x)))

		print('[qNNetwork] [f1]-> ', x.shape)
		x = self.fcFinal( x )
		print('[qNNetwork] [f2]-> ', x.shape)
	except Exception as e:
		print( '### in Training mode:', self.training )
		print(f'#### ERROR in the qNetwork: {e}')

	return x

I see that even in eval mode this is not able to produce a result, which is a bit strange …

Part of the output:

[qNNetwork] [i] ->  torch.Size([1, 37])
[qNNetwork] [i]->  torch.Size([1, 37])
### in Training mode: False
#### ERROR in the qNetwork: Expected more than 1 value per channel when training, got input size torch.Size([1, 50])

Any thoughts on this?

What is self.bns? If it is a plain Python list (as in the code above) rather than an nn.ModuleList, calling .eval() on the network never reaches those BatchNorm layers, so they stay in training mode.
Add bn.eval() in your loop.
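
Something along these lines, as a minimal sketch (it reuses the names from the code you posted; the bn.eval() call is the only change to your forward):

def forward(self, x):
	# The BatchNorm layers live in a plain Python list, so net.eval()
	# never switches them out of training mode. Forcing bn.eval() here
	# makes them use their running statistics, which works even for a
	# batch of size 1.
	for bn, fc, a in zip(self.bns, self.fcLayers, self.activations):
		bn.eval()
		x = a(bn(fc(x)))
	return self.fcFinal(x)

A cleaner long-term fix is to declare self.fcLayers = nn.ModuleList() and self.bns = nn.ModuleList() in __init__, so that .train(), .eval(), .to(device) and self.parameters() all pick up these layers automatically.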

Thanks, I shall try that!