Is it possible to generate an input that the model considers ideal, by backpropagating through the model itself?
Here is the architecture I have for classifying sequences into 2 categories:
class SDNet(nn.Module):
    """Binary sequence classifier: single-layer LSTM -> linear head -> sigmoid.

    The network consumes a sequence of `input_size`-dimensional steps and
    returns a single probability-like scalar in (0, 1) taken from the LSTM
    output at the *last* time step.

    NOTE(review): `self.hidden_cell` is carried across `forward` calls unless
    `init_hidden()` is called first — stale state from a previous sequence
    will leak into the next one. `evaluate()` resets it; raw `forward()`
    callers must do so themselves (passing `None` makes nn.LSTM use zeros).
    """

    def __init__(self, input_size=2, hidden_layer_size=10, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        self.sigmoid = nn.Sigmoid()
        # (h, c) state tuple; None means "zero state" to nn.LSTM.
        self.hidden_cell = None

    def forward(self, input_seq):
        """Run the sequence through the LSTM and return sigmoid(last step).

        Args:
            input_seq: tensor reshapeable to (seq_len, 1, input_size) —
                i.e. batch size is fixed at 1.

        Returns:
            1-element tensor with the probability for the positive class.
        """
        # view -> (seq_len, batch=1, input_size), the layout nn.LSTM expects
        # when batch_first=False (the default).
        lstm_out, self.hidden_cell = self.lstm(
            input_seq.view(len(input_seq), 1, -1), self.hidden_cell
        )
        # Keep only the last time step's output before the sigmoid.
        out = self.linear(lstm_out.view(len(input_seq), -1))[-1]
        out = self.sigmoid(out)
        return out

    def init_hidden(self):
        """Reset the LSTM state to zeros (shape: (num_layers=1, batch=1, H))."""
        self.hidden_cell = (
            torch.zeros(1, 1, self.hidden_layer_size),
            torch.zeros(1, 1, self.hidden_layer_size),
        )

    def evaluate(self, seq):
        """Score one sequence, starting from a fresh zero state.

        Args:
            seq: array-like of shape (seq_len, input_size); converted to a
                float tensor internally.

        Returns:
            float: the model's probability for the positive class.
        """
        self.init_hidden()  # do not leak state from a previous sequence
        seq = torch.tensor(seq, dtype=torch.float)
        with torch.no_grad():  # inference only — no autograd graph needed
            y_pred = self(seq)
        return y_pred.item()
Can I pick a target output — for example 1 or 0 (or values near them, since the activation is a sigmoid) — and generate an input for which the model produces that prediction?