I’m implementing a model that requires a custom dataset, using torchdyn as a dependency.
**Dataset**
```python
class LotkaVolterraDataset(torch.utils.data.Dataset):
    def __init__(self, X):
        self.t, X = X  # odeint returns a (t_eval, solution) pair
        self.x, self.y = np.array([]), np.array([])
        # split the solution into separate x and y arrays
        for val in X:
            # print(type(val[0]), val[0].requires_grad)
            val[0].requires_grad = True
            val[1].requires_grad = True
            self.x = np.append(self.x, val[0])
            self.y = np.append(self.y, val[1])

    def __len__(self):
        return len(self.t)

    def __getitem__(self, idx):
        return self.t[idx], self.x[idx], self.y[idx]


res = odeint(derivative, X0, t_span, solver='tsit5')
dataset = LotkaVolterraDataset(res)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=1000)
data_iter = iter(train_loader)
```
where `derivative` is a function computing the Lotka-Volterra vector field.
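A minimal sketch of such a function, compatible with the `odeint` call above; the coefficient values here are illustrative placeholders, not the ones from the actual experiment:

```python
# Sketch of a Lotka-Volterra vector field; the coefficients below are
# illustrative placeholders, not the values used in the real run.
def derivative(t, X):
    x, y = X[0], X[1]                       # prey and predator populations
    alpha, beta, gamma, delta = 1.1, 0.4, 0.4, 0.1
    dx = alpha * x - beta * x * y           # prey: growth minus predation
    dy = delta * x * y - gamma * y          # predator: reproduction minus death
    return torch.stack([dx, dy])
```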
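`X0` and `t_span` are assumed to be defined beforehand, along these lines; the values are hypothetical, and only the 1000 evaluation points are implied by the batch shapes in the output below:

```python
from torchdyn.numerics import odeint      # assumed source of odeint

X0 = torch.tensor([10., 5.])              # hypothetical initial populations
t_span = torch.linspace(0., 50., 1000)    # 1000 points, matching batch_size below
```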
My model looks like this:
```python
class f(nn.Module):
    def __init__(self, dim):
        super(f, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(dim, 124),
            nn.ReLU(),
            nn.Linear(124, 124),
            nn.ReLU(),
            nn.Linear(124, dim),
            nn.Tanh()
        )

    def forward(self, t, x):
        x = x.float()
        return self.model(x)
```
```python
from torchdyn.core import NeuralODE

class DifferentialEquation(nn.Module):
    def __init__(self,
                 network: nn.Module = f(dim=1000),
                 t_span=torch.Tensor([0., 1.]),
                 solver='tsit5',
                 order=1,
                 atol=0.001,
                 rtol=0.001,
                 sensitivity='autograd',
                 atol_adjoint=0.0001,
                 rtol_adjoint=0.0001,
                 interpolator=None,
                 integral_loss=None,
                 seminorm=False,
                 return_t_eval=True,
                 optimizable_params={}) -> None:
        super(DifferentialEquation, self).__init__()
        # self.input_layer = nn.Linear(1000, 124)
        self.model = NeuralODE(vector_field=network,
                               solver=solver,
                               order=order,
                               atol=atol,
                               rtol=rtol,
                               sensitivity=sensitivity,
                               atol_adjoint=atol_adjoint,
                               rtol_adjoint=rtol_adjoint,
                               interpolator=interpolator,
                               integral_loss=integral_loss,
                               seminorm=seminorm,
                               return_t_eval=return_t_eval,
                               optimizable_params=optimizable_params)
        # self.output_layer = nn.Linear(124, 1000)
        self.t_span = t_span

    def forward(self, t):
        z = self.model(t, self.t_span)
        # print(z.shape)
        return z
```
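`predator` and `prey` in the training loop below are two instances of this class, created roughly like so (their exact construction is an assumption):

```python
# Assumption: two independent DifferentialEquation instances, one per species.
predator = DifferentialEquation(network=f(dim=1000), t_span=t_span)
prey = DifferentialEquation(network=f(dim=1000), t_span=t_span)
```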
And finally, the training code:
```python
N_EPOCH = 5
optimizer = torch.optim.Adam(predator.parameters(), lr=0.005)
lossFn = nn.L1Loss()
torch.set_grad_enabled(True)
running_loss = 0.0
data_iter = iter(train_loader)
for epoch in tqdm.tqdm(range(N_EPOCH)):
    x_train, y_train, t_train = data_iter.next()
    print(type(x_train), type(y_train), type(t_train))
    x_out, _ = predator(t_train)
    y_out, _ = prey(t_train)
    print(x_train.shape, x_out.shape)
    print("X_Grads", x_train.requires_grad, x_out.requires_grad)
    loss_predator = lossFn(x_out, x_train)
    print(y_train.shape, y_out.shape)
    print("Y_Grads", y_train.requires_grad, y_out.requires_grad)
    loss_prey = lossFn(y_out, y_train)
    optimizer.zero_grad()
    loss_predator.backward()
    loss_prey.backward()
    optimizer.step()
    running_loss += loss_predator
    running_loss += loss_prey
```
**Output**
```
  0%|          | 0/5 [00:00<?, ?it/s]
<class 'torch.Tensor'> <class 'torch.Tensor'> <class 'torch.Tensor'>
 20%|██        | 1/5 [00:07<00:30, 7.51s/it]
torch.Size([1000]) torch.Size([1000])
X_Grads True False
torch.Size([1000]) torch.Size([1000])
Y_Grads True False

StopIteration                             Traceback (most recent call last)
Demonstrating_neuralode_api.ipynb Cell 20 in <cell line: 13>()
     12 predator.train()
     13 for epoch in tqdm.tqdm(range(N_EPOCH)):
     14     #try:
---> 15     x_train, y_train, t_train = data_iter.next()
     16     print(type(x_train), type(y_train), type(t_train))
     18     x_train.requires_grad = True

File c:\Users\shiva\.conda\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py:681, in _BaseDataLoaderIter.__next__(self)
    678 if self._sampler_iter is None:
    679     # TODO(https://github.com/pytorch/pytorch/issues/76750)
    680     self._reset()  # type: ignore[call-arg]
--> 681 data = self._next_data()
    682 self._num_yielded += 1
    683 if self._dataset_kind == _DatasetKind.Iterable and \
    684         self._IterableDataset_len_called is not None and \
    685         self._num_yielded > self._IterableDataset_len_called:

File c:\Users\shiva\.conda\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py:720, in _SingleProcessDataLoaderIter._next_data(self)
    719 def _next_data(self):
--> 720     index = self._next_index()  # may raise StopIteration
    721     data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    722     if self._pin_memory:

...

File c:\Users\shiva\.conda\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py:671, in _BaseDataLoaderIter._next_index(self)
    670 def _next_index(self):
--> 671     return next(self._sampler_iter)

StopIteration:
```
My questions:
- Why do the tensors coming out of the dataset not carry gradients, even though I’m explicitly setting `requires_grad = True`?
- How do I eliminate the `StopIteration` exception, and why am I getting it in the first place?