Should I create multiple instances of activation functions and other stateless modules like torch.nn.Upsample, or just one shared instance? In other words, should I do this:
class model(nn.Module):
    """Two-layer MLP with a separate ReLU instance per layer.

    nn.ReLU holds no parameters or buffers, so using one instance per layer
    is functionally identical to sharing a single instance; the only
    difference is that each activation appears as its own named submodule
    in the module tree (e.g. in ``print(model)``).

    NOTE(review): ``x, y, z, w`` are assumed to be module-level ints giving
    the layer dimensions — confirm they are defined before instantiation.
    """

    def __init__(self):
        # Python 3 zero-argument form; equivalent to super(model, self).__init__()
        super().__init__()
        self.layer1 = nn.Linear(x, y)
        self.layer2 = nn.Linear(z, w)
        # One ReLU per layer: stateless, so behavior matches a shared instance.
        self.activation1 = nn.ReLU()
        self.activation2 = nn.ReLU()

    def forward(self, input):
        """Apply layer1 -> ReLU -> layer2 -> ReLU and return the result."""
        out = self.layer1(input)
        out = self.activation1(out)
        out = self.layer2(out)
        out = self.activation2(out)
        return out
Or should I do this instead:
class model(nn.Module):
    """Two-layer MLP that reuses a single shared ReLU instance.

    Because nn.ReLU is stateless (no parameters, no buffers), reusing one
    instance after each layer produces exactly the same computation as
    creating a separate instance per layer; it just registers one fewer
    submodule on the module.

    NOTE(review): ``x, y, z, w`` are assumed to be module-level ints giving
    the layer dimensions — confirm they are defined before instantiation.
    """

    def __init__(self):
        # Python 3 zero-argument form; equivalent to super(model, self).__init__()
        super().__init__()
        self.layer1 = nn.Linear(x, y)
        self.layer2 = nn.Linear(z, w)
        # Single shared activation; safe to reuse because ReLU has no state.
        self.activation = nn.ReLU()

    def forward(self, input):
        """Apply layer1 -> ReLU -> layer2 -> ReLU and return the result."""
        out = self.layer1(input)
        out = self.activation(out)
        out = self.layer2(out)
        out = self.activation(out)
        return out