Model optimization with Adam not working

Hi everyone,
I need to optimize the parameters of an electric circuit by minimizing the mean squared error between the predicted inlet pressure and the known (measured) inlet pressure. I would like to use the Adam optimizer, but I always get the same error and I don't know how to fix it.
Below is the code I wrote, followed by the error I get:
import torch
import numpy as np

class AortaModel(torch.nn.Module):
    def __init__(self, fixed_values, dt):
        super(AortaModel, self).__init__()
        self.Rp_sup = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))
        self.Rd_sup = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))  # R is optimizable
        self.Rp_out = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))  # R is optimizable
        self.Rd_out = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))  # R is optimizable
        self.Csup = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))  # C is optimizable
        self.Cout = torch.nn.Parameter(torch.tensor(1.0, dtype=torch.float32))  # C is optimizable

        # Fixed values
        self.a = fixed_values['a']
        self.RAA = fixed_values['RAA']
        self.RDA = fixed_values['RDA']
        self.b = fixed_values['b']
        self.c = fixed_values['c']
        self.dt = dt

    def forward(self, Qin_next, Pin_prev, Qin_prev, Qda_prev):
        Qsup_prev = Qin_prev - Qda_prev
        Parco_prev = Pin_prev - self.a * (Qin_prev ** 2) - (self.b + self.RAA) * Qin_prev - self.c
        Pout_prev = Parco_prev - self.a * (Qda_prev ** 2) - (self.b + self.RDA) * Qda_prev - self.c

        beq = self.b + self.RDA + self.Rp_out + self.Rp_sup
        ceq = - ((self.dt / self.Csup) * (Qsup_prev - ((Parco_prev - self.Rp_sup * Qsup_prev) / self.Rd_sup))) + (
                (self.dt / self.Cout) * (
                    Qda_prev - (Pout_prev - self.Rp_out * Qda_prev) / self.Rd_out)) + self.Rp_sup * (
                      Qsup_prev - Qin_next) - self.a * (Qin_prev ** 2) - (self.b + self.RDA + self.Rp_out) * Qda_prev

        with torch.no_grad():  # to ensure no gradient computation on these parts
            sqrt_term = torch.sqrt(beq ** 2 - 4 * self.a * ceq)

        Qda_next = (-beq + torch.sqrt(beq ** 2 - 4 * self.a * ceq)) / (2 * self.a)
        Qsup_next = Qin_next - Qda_next

        Pin_next = self.a * (Qin_next ** 2) + (self.b + self.RAA) * Qin_next + self.c + \
                   (self.dt / self.Csup) * (Qsup_prev - ((Parco_prev - (self.Rp_sup * Qsup_prev)) / self.Rd_sup)) + \
                   Parco_prev + self.Rd_sup * (Qsup_next - Qsup_prev)

        return Qda_next, Pin_next

# Data definition

num_points = 322
t = np.linspace(0, 0.8, num_points)
Qin = np.sin(t)  # inlet flow
Qda = np.zeros(num_points)  # example Qda values
Pin_real = np.sin(t)  # example ground-truth pressure values

# Convert the data to PyTorch tensors

Qin_tensor = torch.tensor(Qin, dtype=torch.float32)
Qda_tensor = torch.tensor(Qda, dtype=torch.float32)
Pin_real_tensor = torch.tensor(Pin_real, dtype=torch.float32)
Qda_var = torch.zeros_like(Qda_tensor)

# Fixed parameters

fixed_values = {
    'a': 1.0,
    'RAA': 2.0,
    'RDA': 3.0,
    'b': 4.0,
    'c': 5.0
}
dt = t[1] - t[0]

# Instantiate the model

model = AortaModel(fixed_values, dt)

# Define the loss function (MSE)

# def loss_fn(Pin_pred, Pin_real):
#     return torch.mean((Pin_pred - Pin_real) ** 2)

criterion = torch.nn.MSELoss(reduction='sum')

# Define the optimizer (Adam)

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop

num_epochs = 1000

# Initialize the pressure from the previous iteration

Pin_prev = torch.zeros_like(Pin_real_tensor)
# Pin_var = torch.zeros_like(Pin_real_tensor, requires_grad=True)  # start from initial pressure values, e.g. zero
torch.autograd.set_detect_anomaly(True)

loss_Adam = []
for epoch in range(num_epochs):
    if not epoch == 0:
        tensore_detached = Qda_new.detach()
        tensore_detached2 = Pin_predicted.detach()
    # Initialize Pin_predicted as an empty tensor
    Pin_predicted = torch.zeros(len(Qin_tensor), dtype=torch.float32)
    Qda_new = Qda_tensor.clone()

    for i in range(len(Qin_tensor) - 1):
        # Compute the pressure at time step i
        Qda_next, Pin_i = model(Qin_tensor[i + 1], Pin_predicted[i], Qin_tensor[i], Qda_new[i])

        # Update the values for the next iteration without using in-place ops
        Qda_new[i + 1] = Qda_next
        Pin_predicted[i + 1] = Pin_i
    print('fine for')

    # Compute the loss
    loss = criterion(Pin_predicted, Pin_real_tensor)
    loss_Adam.append(loss.item())

    # Zero the gradients
    optimizer.zero_grad()
    # Backward pass
    # Update the parameters
    optimizer.step()

    if epoch % 100 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item()}')

optimized_Rp_sup = model.Rp_sup.item()
optimized_Rd_sup = model.Rd_sup.item()
optimized_Rp_out = model.Rp_out.item()
optimized_Rd_out = model.Rd_out.item()
optimized_Csup = model.Csup.item()
optimized_Cout = model.Cout.item()

The error is this:

C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\autograd\graph.py:768: UserWarning: Error detected in MulBackward0. Traceback of forward call that caused the error:
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 2199, in
main()
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 2181, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 1493, in run
return self._exec(is_module, entry_point_fn, module_name, file, globals, locals)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 1500, in _exec
pydev_imports.execfile(file, globals, locals)  # execute the script
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:\Users\Ugo\Desktop\Ione\Progetto NIH\Codici ottimizzazione\Circuit_opt.py", line 94, in
loss_Adam = []
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 1160, in do_wait_suspend
self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 1174, in _do_wait_suspend
self.process_internal_commands()
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\pydevd.py", line 873, in process_internal_commands
int_cmd.do_it(self)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_comm.py", line 1850, in do_it
result, exception_occurred = pydevd_console_integration.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_console_integration.py", line 236, in console_exec
Exec(code, updated_globals, updated_globals)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "", line 15, in
File "C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\Ugo\Desktop\Ione\Progetto NIH\Codici ottimizzazione\Circuit_opt.py", line 35, in forward
Qsup_prev - Qin_next) - self.a * (Qin_prev ** 2) - (self.b + self.RDA + self.Rp_out) * Qda_prev
(Triggered internally at C:\actions-runner\_work\pytorch\pytorch\builder\windows\pytorch\torch\csrc\autograd\python_anomaly_mode.cpp:116.)
return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
Traceback (most recent call last):
File "C:\Program Files\JetBrains\PyCharm Community Edition 2023.2.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "", line 30, in
File "C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\_tensor.py", line 521, in backward
torch.autograd.backward(
File "C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\autograd\__init__.py", line 289, in backward
_engine_run_backward(
File "C:\Users\Ugo\anaconda3\envs\opt\Lib\site-packages\torch\autograd\graph.py", line 768, in _engine_run_backward
return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor ], which is output 0 of AsStridedBackward0, is at version 321; expected version 320 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
How can I solve this? Thanks

Ione

Hi Ione!

As you note in your comment, writing into Pin_predicted with
an index is an inplace modification and could well be the
cause of the error you are seeing.
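
Something along these lines should avoid the indexed writes entirely
(this is just an untested sketch that reuses the names from your posted
code): collect the per-step results in ordinary python lists and stack
them once at the end, so that no tensor autograd needs is ever modified
in place:

Pin_list = [torch.zeros((), dtype=torch.float32)]  # Pin at t = 0, as in your zero initialization
Qda_list = [Qda_tensor[0]]                         # Qda at t = 0

for i in range(len(Qin_tensor) - 1):
    # pass the previous step's values directly instead of indexing into a preallocated tensor
    Qda_next, Pin_next = model(Qin_tensor[i + 1], Pin_list[-1],
                               Qin_tensor[i], Qda_list[-1])
    Qda_list.append(Qda_next)
    Pin_list.append(Pin_next)

Pin_predicted = torch.stack(Pin_list)  # shape (num_points,), still part of the graph
loss = criterion(Pin_predicted, Pin_real_tensor)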

The code you posted doesn’t show any call to .backward(),
so it’s not possible to track down where things are going wrong.
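
When you do add it, the usual per-iteration sequence is the following
(a generic sketch of the standard pattern, nothing specific to your model):

optimizer.zero_grad()  # clear gradients left over from the previous epoch
loss.backward()        # populate .grad on every torch.nn.Parameter of the model
optimizer.step()       # let Adam update the parameters from those gradients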

You can find some suggestions for locating and fixing
inplace-modification errors in this post:

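In the meantime, one small trick for locating these errors (an
illustrative example, not taken from your code): every tensor carries a
version counter, tensor._version, and that counter is exactly what the
error message is quoting ("is at version 321; expected version 320").
Printing it before and after the operations you suspect shows which
write is the inplace one:

x = torch.zeros(5)
print(x._version)  # 0
x[0] = 1.0         # indexed assignment modifies x in place
print(x._version)  # 1
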
Good luck!

K. Frank