I am solving a constrained optimization problem. The difficulty is that I have two kinds of constraints: box constraints on the solution x and linear constraints on coeff_matrix.mv(x). The code below minimizes the objective without taking the constraints into account. Please tell me how to modify it so that the constraints are respected. Here are my constraints:
lower_bounds <= x <= upper_bounds,
lower_constr_bounds < coeff_matrix.mv(x) <= upper_constr_bounds
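To make the semantics precise (note the strict inequality on the lower constraint bounds), this is how I would check feasibility of a candidate x. This is just a sketch using the tensors defined in my code below, not part of the code itself:

def is_feasible(x):
    # box constraints on x, both bounds inclusive
    box_ok = torch.all(lower_bounds <= x) and torch.all(x <= upper_bounds)
    # linear constraints: strict lower bound, inclusive upper bound
    y = coeff_matrix.mv(x)
    lin_ok = torch.all(lower_constr_bounds < y) and torch.all(y <= upper_constr_bounds)
    return bool(box_ok and lin_ok)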
Here is my code:
import numpy as np
import torch
from torch.nn import Parameter
import torch.optim as optim
A = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float64)
def func_cond(x):
    # reshape the flat parameter vector to A's shape; note that x * A is an
    # elementwise (Hadamard) product, not a matrix product
    x = x.reshape(A.shape[1], A.shape[0])
    return torch.linalg.cond(x * A)
n = A.numel()
bounds = []
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
bounds.append([…, …])
lower_bounds = torch.tensor([…, …, …, …, …, …, …, …, …], dtype=torch.float64)
upper_bounds = torch.tensor([…, …, …, …, …, …, …, …, …], dtype=torch.float64)
# random starting point drawn inside the box bounds
x_start = np.zeros(n)
for i in range(n):
    x_start[i] = np.random.uniform(low=bounds[i][0], high=bounds[i][1])
x = Parameter(torch.tensor(x_start), requires_grad=True)
coeff_matrix = torch.tensor([[…, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …],
                             […, …, …, …, …, …, …, …, …]], dtype=torch.float64)
lower_constr_bounds = torch.tensor([-np.inf] * 10, dtype=torch.float64)
upper_constr_bounds = torch.tensor([…, …, …, …, …, …, …, …, …, …], dtype=torch.float64)
optimizer = optim.LBFGS([x])  # LBFGS requires the list of parameters to optimize

def closure():
    optimizer.zero_grad()
    loss = func_cond(x)
    loss.backward()
    return loss
for _ in range(100):
    optimizer.step(closure)
min_cond = func_cond(x)
print(x)
print(min_cond)
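The only direction I have come up with so far is to replace closure with a penalized version that adds a quadratic penalty for violated constraints to the loss. A rough sketch (the penalty_weight value is an arbitrary, untuned guess, and the strict lower inequality is only enforced softly):

penalty_weight = 1e3  # arbitrary, untuned

def penalized_closure():
    optimizer.zero_grad()
    loss = func_cond(x)
    # penalty for violating the box constraints on x
    loss = loss + penalty_weight * torch.sum(torch.relu(lower_bounds - x) ** 2)
    loss = loss + penalty_weight * torch.sum(torch.relu(x - upper_bounds) ** 2)
    # penalty for violating the linear constraints on coeff_matrix.mv(x);
    # the -inf entries of lower_constr_bounds contribute nothing, since relu(-inf) == 0
    y = coeff_matrix.mv(x)
    loss = loss + penalty_weight * torch.sum(torch.relu(lower_constr_bounds - y) ** 2)
    loss = loss + penalty_weight * torch.sum(torch.relu(y - upper_constr_bounds) ** 2)
    loss.backward()
    return loss

I also considered clamping x back into its box after each optimizer step (x.clamp_ with tensor bounds under torch.no_grad()), but that does nothing for the coeff_matrix.mv(x) constraints. Is a penalty like this the right approach with LBFGS, or is there a proper way to handle bound and linear constraints in PyTorch?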