I am trying to solve a multi-task regression problem.
Input shape: 200 × 60000, Output shape: 200 × 3 (200 = total number of data points, 60000 = number of features per data point)
So, for each data point, I have to predict 3 values (continuous).
Sample Code:
class Classifier(nn.Module):
    """Multi-task regression network: a shared trunk plus three
    task-specific heads, each emitting one continuous value per sample.

    Args:
        input_nodes: number of input features per sample.

    forward(x) -> (out1, out2, out3), each of shape (batch, 1).
    """

    def __init__(self, input_nodes):
        super().__init__()
        self.input_nodes = input_nodes
        # Shared representation: input -> 300 -> 100 with ReLU + Dropout.
        self.sharedlayer = nn.Sequential(
            nn.Linear(input_nodes, 300),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(300, 100),
            nn.ReLU(),
            nn.Dropout(),
        )
        # The three heads are architecturally identical; build them with
        # one helper instead of repeating the definition three times.
        self.att1 = self._make_head()
        self.att2 = self._make_head()
        self.att3 = self._make_head()

    @staticmethod
    def _make_head():
        """One task head: 100 -> 40 -> 20 -> 1 with ReLU + Dropout."""
        return nn.Sequential(
            nn.Linear(100, 40),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(40, 20),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(20, 1),
        )

    def forward(self, x):
        """Run the shared trunk once, then each head on its output."""
        h_shared = self.sharedlayer(x)
        out1 = self.att1(h_shared)
        out2 = self.att2(h_shared)
        out3 = self.att3(h_shared)
        return out1, out2, out3
criterion = nn.MSELoss()
# lr=0.01 is far too aggressive for Adam with a 60000-dimensional input:
# the first few steps kill the ReLU units / drive the heads to predict the
# per-task mean, which is exactly the "same value for every test sample"
# symptom described below. Start much lower.
optimizer = optim.Adam(model.parameters(), lr=1e-4)

for epoch in range(n_epochs):
    running_loss = 0.0
    model.train()
    # enumerate replaces the manual `i = 0; i = i + 1` counter.
    for i, (data, label) in enumerate(trainloader, start=1):
        out1, out2, out3 = model(data)
        # One MSE term per task head; label column k targets head k.
        l1 = criterion(out1, label[:, 0].view(-1, 1))
        l2 = criterion(out2, label[:, 1].view(-1, 1))
        l3 = criterion(out3, label[:, 2].view(-1, 1))
        loss = l1 + l2 + l3
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # BUG in the original: running_loss was initialized but never
        # accumulated, so the epoch loss could not be monitored.
        running_loss += loss.item()

# NOTE: before predicting on test data, call model.eval() so that the
# Dropout layers are disabled; with torch.no_grad() around inference.
Problem: The model always produces the same value for all test data.
Example: suppose there are 3 test data points:
For output1: 3.5 3.5 3.5
For output2: 9.5 9.5 9.5
For output3: 0.2 0.2 0.2
Can you please help me to figure out what is the problem here?
Why does it generate the same value for all test data?