Parametric sensitivities with linear least-squares cost by FreyJo · Pull Request #1362 · acados/acados · GitHub

Parametric sensitivities with linear least-squares cost #1362


Merged
merged 8 commits into from
Nov 18, 2024
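Since the pull request carries no description text, here is a minimal, hypothetical usage sketch of what it enables: solution sensitivities with respect to p_global for a formulation whose cost is LINEAR_LS instead of EXTERNAL. The AcadosOcp object `ocp`, its model, constraints and p_global values are assumed to be defined elsewhere; only the options and the sensitivity call are the point, and the solver settings are chosen merely to satisfy the sanity checks appearing in the diff below.

# Hypothetical usage sketch (not part of the PR): parametric solution
# sensitivities with a linear least-squares cost. `ocp` is an AcadosOcp
# assumed to be fully defined elsewhere (model, p_global, constraints).
from acados_template import AcadosOcpSolver

ocp.cost.cost_type_0 = "LINEAR_LS"
ocp.cost.cost_type = "LINEAR_LS"                       # now accepted by make_consistent
ocp.cost.cost_type_e = "LINEAR_LS"
ocp.solver_options.qp_solver = "FULL_CONDENSING_HPIPM"
ocp.solver_options.hessian_approx = "EXACT"            # chosen to satisfy the sanity checks
ocp.solver_options.integrator_type = "DISCRETE"
ocp.solver_options.with_solution_sens_wrt_params = True

solver = AcadosOcpSolver(ocp)
solver.solve()

# forward sensitivities of the stage-0 solution w.r.t. p_global
sens_x, sens_u = solver.eval_solution_sensitivity(0, "p_global")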
86 changes: 31 additions & 55 deletions acados/ocp_nlp/ocp_nlp_cost_ls.c
@@ -82,80 +82,39 @@ void *ocp_nlp_cost_ls_dims_assign(void *config_, void *raw_memory)



static void ocp_nlp_cost_ls_set_nx(void *config_, void *dims_, int *nx)
{
/// Initialize the dimensions struct of the
/// ocp_nlp_cost_ls module
///
/// \param[in] config_ structure containing configuration of ocp_nlp_cost module
/// \param[in] nx number of states
/// \param[out] dims_
/// \return []

ocp_nlp_cost_ls_dims *dims = (ocp_nlp_cost_ls_dims *) dims_;
dims->nx = *nx;
}


static void ocp_nlp_cost_ls_set_nz(void *config_, void *dims_, int *nz)
{
ocp_nlp_cost_ls_dims *dims = (ocp_nlp_cost_ls_dims *) dims_;
dims->nz = *nz;
}

static void ocp_nlp_cost_ls_set_nu(void *config_, void *dims_, int *nu)
{
ocp_nlp_cost_ls_dims *dims = (ocp_nlp_cost_ls_dims *) dims_;
dims->nu = *nu;
}



static void ocp_nlp_cost_ls_set_ny(void *config_, void *dims_, int *ny)
{
ocp_nlp_cost_ls_dims *dims = (ocp_nlp_cost_ls_dims *) dims_;
dims->ny = *ny;
}



static void ocp_nlp_cost_ls_set_ns(void *config_, void *dims_, int *ns)
void ocp_nlp_cost_ls_dims_set(void *config_, void *dims_, const char *field, int* value)
{
ocp_nlp_cost_ls_dims *dims = (ocp_nlp_cost_ls_dims *) dims_;
dims->ns = *ns;
}



void ocp_nlp_cost_ls_dims_set(void *config_, void *dims_, const char *field, int* value)
{
if (!strcmp(field, "nx"))
{
ocp_nlp_cost_ls_set_nx(config_, dims_, value);
dims->nx = *value;
}
else if (!strcmp(field, "nz"))
{
ocp_nlp_cost_ls_set_nz(config_, dims_, value);
dims->nz = *value;
}
else if (!strcmp(field, "nu"))
{
ocp_nlp_cost_ls_set_nu(config_, dims_, value);
dims->nu = *value;
}
else if (!strcmp(field, "ny"))
{
ocp_nlp_cost_ls_set_ny(config_, dims_, value);
dims->ny = *value;
}
else if (!strcmp(field, "ns"))
{
ocp_nlp_cost_ls_set_ns(config_, dims_, value);
dims->ns = *value;
}
else if (!strcmp(field, "np"))
{
// np dimension not needed
}
else if (!strcmp(field, "np_global"))
{
// np_global dimension not needed
dims->np_global = *value;
}
else
{
@@ -635,6 +594,15 @@ void ocp_nlp_cost_ls_memory_set_dzdux_tran_ptr(struct blasfeo_dmat *dzdux_tran,
}


void ocp_nlp_cost_ls_memory_set_jac_lag_stat_p_global_ptr(struct blasfeo_dmat *jac_lag_stat_p_global, void *memory_)
{
// do nothing -- ls cost can not depend on p_global, as it is not parametric

// ocp_nlp_cost_ls_memory *memory = memory_;
// memory->jac_lag_stat_p_global = jac_lag_stat_p_global;
}



////////////////////////////////////////////////////////////////////////////////
// workspace //
@@ -1014,16 +982,24 @@ void ocp_nlp_cost_ls_compute_fun(void *config_, void *dims_, void *model_, void
}


void ocp_nlp_cost_ls_compute_jac_p(void *config_, void *dims, void *model_, void *opts_, void *memory_, void *work_)
void ocp_nlp_cost_ls_compute_jac_p(void *config_, void *dims_, void *model_,
void *opts_, void *memory_, void *work_)
{
printf("ocp_nlp_cost_ls_compute_jac_p: not implemented.\n");
exit(1);
// printf("in ocp_nlp_cost_ls_compute_jac_p\n");
// adds contribution to stationarity jacobian:
// jac_lag_stat_p_global += scaling * cost_grad_params_jac

// do nothing -- ls cost can not depend on p_global, as it is not parametric

return;
}

void ocp_nlp_cost_ls_eval_grad_p(void *config_, void *dims, void *model_, void *opts_, void *memory_, void *work_, struct blasfeo_dvec *out)

void ocp_nlp_cost_ls_eval_grad_p(void *config_, void *dims_, void *model_, void *opts_, void *memory_, void *work_, struct blasfeo_dvec *out)
{
printf("ocp_nlp_cost_ls_eval_grad_p: not implemented.\n");
exit(1);
ocp_nlp_cost_ls_dims *dims = dims_;

blasfeo_dvecse(dims->np_global, 0.0, out, 0);
}


@@ -1042,7 +1018,6 @@ void ocp_nlp_cost_ls_set_external_fun_workspaces(void *config_, void *dims_, voi
}



void ocp_nlp_cost_ls_config_initialize_default(void *config_, int stage)
{
ocp_nlp_cost_config *config = config_;
@@ -1068,6 +1043,7 @@ void ocp_nlp_cost_ls_config_initialize_default(void *config_, int stage)
config->memory_set_dzdux_tran_ptr = &ocp_nlp_cost_ls_memory_set_dzdux_tran_ptr;
config->memory_set_RSQrq_ptr = &ocp_nlp_cost_ls_memory_set_RSQrq_ptr;
config->memory_set_Z_ptr = &ocp_nlp_cost_ls_memory_set_Z_ptr;
config->memory_set_jac_lag_stat_p_global_ptr = &ocp_nlp_cost_ls_memory_set_jac_lag_stat_p_global_ptr;
config->workspace_calculate_size = &ocp_nlp_cost_ls_workspace_calculate_size;
config->get_external_fun_workspace_requirement = &ocp_nlp_cost_ls_get_external_fun_workspace_requirement;
config->set_external_fun_workspaces = &ocp_nlp_cost_ls_set_external_fun_workspaces;
1 change: 1 addition & 0 deletions acados/ocp_nlp/ocp_nlp_cost_ls.h
@@ -69,6 +69,7 @@ typedef struct
int nu; // number of inputs
int ny; // number of outputs
int ns; // number of slacks
int np_global;
} ocp_nlp_cost_ls_dims;


6 changes: 3 additions & 3 deletions interfaces/acados_template/acados_template/acados_ocp.py
@@ -847,8 +847,8 @@ def make_consistent(self, is_mocp_phase=False) -> None:
if opts.with_solution_sens_wrt_params:
if dims.np_global == 0:
raise Exception('with_solution_sens_wrt_params is only compatible if global parameters `p_global` are provided. Sensitivities wrt parameters have been refactored to use p_global instead of p in https://github.com/acados/acados/pull/1316. Got emty p_global.')
if cost.cost_type != "EXTERNAL" or cost.cost_type_0 != "EXTERNAL" or cost.cost_type_e != "EXTERNAL":
raise Exception('with_solution_sens_wrt_params is only compatible with EXTERNAL cost_type.')
if any([cost_type not in ["EXTERNAL", "LINEAR_LS"] for cost_type in [cost.cost_type, cost.cost_type_0, cost.cost_type_e]]):
raise Exception(f'with_solution_sens_wrt_params is only compatible with EXTERNAL and LINEAR_LS cost_type, got cost_types {cost.cost_type_0, cost.cost_type, cost.cost_type_e}.')
if opts.integrator_type != "DISCRETE":
raise Exception('with_solution_sens_wrt_params is only compatible with DISCRETE dynamics.')
for horizon_type, constraint in bgp_type_constraint_pairs:
@@ -861,7 +861,7 @@ def make_consistent(self, is_mocp_phase=False) -> None:
if opts.with_value_sens_wrt_params:
if dims.np_global == 0:
raise Exception('with_value_sens_wrt_params is only compatible if global parameters `p_global` are provided. Sensitivities wrt parameters have been refactored to use p_global instead of p in https://github.com/acados/acados/pull/1316. Got emty p_global.')
if cost.cost_type != "EXTERNAL" or cost.cost_type_0 != "EXTERNAL" or cost.cost_type_e != "EXTERNAL":
if any([cost_type not in ["EXTERNAL", "LINEAR_LS"] for cost_type in [cost.cost_type, cost.cost_type_0, cost.cost_type_e]]):
raise Exception('with_value_sens_wrt_params is only compatible with EXTERNAL cost_type.')
if opts.integrator_type != "DISCRETE":
raise Exception('with_value_sens_wrt_params is only compatible with DISCRETE dynamics.')
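For context on the relaxed check above, a hedged sketch of a LINEAR_LS cost block that now passes make_consistent together with with_solution_sens_wrt_params; the dimensions and weights are placeholders, not values from the PR.

# Placeholder LINEAR_LS cost definition (ny = nx + nu tracking cost); the field
# names are the standard acados_template ones, the numbers are illustrative only.
import numpy as np

nx, nu = 4, 1
ny = nx + nu
ocp.cost.cost_type = "LINEAR_LS"
ocp.cost.cost_type_e = "LINEAR_LS"
ocp.cost.Vx = np.vstack((np.eye(nx), np.zeros((nu, nx))))    # (ny, nx)
ocp.cost.Vu = np.vstack((np.zeros((nx, nu)), np.eye(nu)))    # (ny, nu)
ocp.cost.Vx_e = np.eye(nx)
ocp.cost.W = np.diag([1.0] * nx + [1e-1] * nu)               # (ny, ny) weight
ocp.cost.W_e = np.eye(nx)                                    # terminal weight on states
ocp.cost.yref = np.zeros(ny)
ocp.cost.yref_e = np.zeros(nx)
ocp.solver_options.with_solution_sens_wrt_params = True      # accepted with LINEAR_LS now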
interfaces/acados_template/acados_template/acados_ocp_batch_solver.py
@@ -125,6 +125,7 @@ def eval_adjoint_solution_sensitivity(self,
The stage is the stage at which the seed_vec is applied, and seed_vec is the seed for the controls at that stage with shape (N_batch, nu, n_seeds)
:param with_respect_to : string in ["p_global"]
:param sanity_checks : bool - whether to perform sanity checks, turn off for minimal overhead, default: True
:returns : np.ndarray of shape (N_batch, n_seeds, np_global)
"""

if seed_x is None:
@@ -160,7 +161,7 @@
N_horizon = self.__ocp_solvers[0].acados_ocp.solver_options.N_horizon

for n in range(self.N_batch):
self.__ocp_solvers[n].sanity_check_parametric_sensitivities()
self.__ocp_solvers[n]._sanity_check_solution_sensitivities()

nx = self.__acados_lib.ocp_nlp_dims_get_from_attr(
self.__ocp_solvers[0].nlp_config, self.__ocp_solvers[0].nlp_dims, self.__ocp_solvers[0].nlp_out, 0, "x".encode('utf-8'))
@@ -190,8 +191,7 @@

t1 = time.time()

grad = np.zeros((self.N_batch, n_seeds, np_global))
grad_p = np.ascontiguousarray(grad, dtype=np.float64)
grad_p = np.zeros((self.N_batch, n_seeds, np_global), order="C", dtype=np.float64)
offset = n_seeds*np_global

for i_seed in range(n_seeds):
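A hedged sketch of the batch adjoint call whose docstring gained the :returns: line above; the seed format is assumed to be a list of (stage, seed_array) pairs, and the shapes follow the docstring.

# Hypothetical call on an existing, already solved AcadosOcpBatchSolver
# `batch_solver`; per the docstring, seed arrays have shape (N_batch, nu, n_seeds)
# and the seed argument is assumed to be a list of (stage, seed_array) pairs.
import numpy as np

N_batch, nu, n_seeds = 8, 1, 1
seed_u0 = np.ones((N_batch, nu, n_seeds))     # seed on the stage-0 controls

grad_p = batch_solver.eval_adjoint_solution_sensitivity(
    seed_x=None, seed_u=[(0, seed_u0)], with_respect_to="p_global")
# grad_p has shape (N_batch, n_seeds, np_global)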
47 changes: 25 additions & 22 deletions interfaces/acados_template/acados_template/acados_ocp_solver.py
@@ -574,9 +574,8 @@ def eval_and_get_optimal_value_gradient(self, with_respect_to: str = "initial_st

field = "p_global".encode('utf-8')
t0 = time.time()
grad = np.zeros((np_global,))
grad_p = np.ascontiguousarray(grad, dtype=np.float64)
c_grad_p = cast(grad_p.ctypes.data, POINTER(c_double))
grad = np.zeros((np_global,), dtype=np.float64, order='C')
c_grad_p = cast(grad.ctypes.data, POINTER(c_double))
self.__acados_lib.ocp_nlp_eval_lagrange_grad_p(self.nlp_solver, self.nlp_in, field, c_grad_p)
self.time_value_grad = time.time() - t0

@@ -590,7 +589,7 @@ def get_optimal_value_gradient(self, with_respect_to: str = "initial_state") ->
return self.eval_and_get_optimal_value_gradient(with_respect_to)


def sanity_check_parametric_sensitivities(self):
def _sanity_check_solution_sensitivities(self, parametric=True) -> None:
if not (self.acados_ocp.solver_options.qp_solver == 'FULL_CONDENSING_HPIPM' or
self.acados_ocp.solver_options.qp_solver == 'PARTIAL_CONDENSING_HPIPM'):
raise Exception("Parametric sensitivities are only available with HPIPM as QP solver.")
@@ -609,6 +608,9 @@ def sanity_check_parametric_sensitivities(self):
):
raise Exception("Parametric sensitivities are only correct if an exact Hessian is used!")

if parametric and not self.acados_ocp.solver_options.with_solution_sens_wrt_params:
raise Exception("Parametric sensitivities are only available if with_solution_sens_wrt_params is set to True.")


def eval_solution_sensitivity(self, stages: Union[int, List[int]], with_respect_to: str) \
-> Tuple[Union[List[np.ndarray], np.ndarray], Union[List[np.ndarray], np.ndarray]]:
@@ -642,8 +644,6 @@ def eval_solution_sensitivity(self, stages: Union[int, List[int]], with_respect_
print("Deprecation warning: 'params_global' is deprecated and has been renamed to 'p_global'.")
with_respect_to = "p_global"

self.sanity_check_parametric_sensitivities()

stages_is_list = isinstance(stages, list)
stages_ = stages if stages_is_list else [stages]

@@ -660,11 +660,15 @@ def eval_solution_sensitivity(self, stages: Union[int, List[int]], with_respect_
nx = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, 0, "x".encode('utf-8'))
ngrad = nx
field = "ex"
self._sanity_check_solution_sensitivities(parametric=False)


elif with_respect_to == "p_global":
np_global = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, 0, "p_global".encode('utf-8'))
ngrad = np_global
field = "p_global"
self._sanity_check_solution_sensitivities()


# compute jacobians wrt params in all modules
t0 = time.time()
@@ -753,7 +757,7 @@ def eval_adjoint_solution_sensitivity(self,

if sanity_checks:
N_horizon = self.acados_ocp.solver_options.N_horizon
self.sanity_check_parametric_sensitivities()
self._sanity_check_solution_sensitivities()
nx = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, 0, "x".encode('utf-8'))
nu = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, 0, "u".encode('utf-8'))

@@ -772,8 +776,7 @@

nparam = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, 0, field)

grad = np.zeros((n_seeds, nparam))
grad_p = np.ascontiguousarray(grad, dtype=np.float64)
grad_p = np.zeros((n_seeds, nparam), order='C', dtype=np.float64)

# compute jacobian wrt params
t0 = time.time()
@@ -820,7 +823,7 @@ def eval_param_sens(self, index: int, stage: int=0, field="ex"):

print("WARNING: eval_param_sens() is deprecated. Please use eval_solution_sensitivity() instead!")

self.sanity_check_parametric_sensitivities()
self._sanity_check_solution_sensitivities(False)

field = field.encode('utf-8')

@@ -889,7 +892,7 @@ def get(self, stage_: int, field_: str):

dims = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out, stage_, field)

out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
out = np.zeros((dims,), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))

if field_ in in_fields:
@@ -914,7 +917,7 @@ def get_flat(self, field_: str) -> np.ndarray:

dims = self.__acados_lib.ocp_nlp_dims_get_total_from_attr(self.nlp_config, self.nlp_dims, field)

out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
out = np.zeros((dims,), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))

self.__acados_lib.ocp_nlp_get_all(self.nlp_solver, self.nlp_in, self.nlp_out, field, out_data)
@@ -1387,14 +1390,14 @@ def get_stats(self, field_: str) -> Union[int, float, np.ndarray]:
stat_m = self.get_stats("stat_m")
stat_n = self.get_stats("stat_n")
min_size = min([stat_m, nlp_iter+1])
out = np.ascontiguousarray(np.zeros((stat_n+1, min_size)), dtype=np.float64)
out = np.zeros((stat_n+1, min_size), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))
self.__acados_lib.ocp_nlp_get(self.nlp_solver, field, out_data)
return out

elif field_ == 'primal_step_norm':
nlp_iter = self.get_stats("nlp_iter")
out = np.ascontiguousarray(np.zeros((nlp_iter,)), dtype=np.float64)
out = np.zeros((nlp_iter,), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))
self.__acados_lib.ocp_nlp_get(self.nlp_solver, field, out_data)
return out
@@ -1484,7 +1487,7 @@ def get_cost(self) -> float:
self.__acados_lib.ocp_nlp_eval_cost(self.nlp_solver, self.nlp_in, self.nlp_out)

# create output array
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
out = np.zeros((1,), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))

# call getter
@@ -1509,7 +1512,7 @@ def get_residuals(self, recompute=False):
self.__acados_lib.ocp_nlp_eval_residuals(self.nlp_solver, self.nlp_in, self.nlp_out)

# create output array
out = np.ascontiguousarray(np.zeros((4, 1)), dtype=np.float64)
out = np.zeros((4, 1), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))

# call getters
@@ -1656,7 +1659,7 @@ def cost_set(self, stage_: int, field_: str, value_, api='warn'):
field = field_.encode('utf-8')
stage = c_int(stage_)

dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims = np.zeros((2,), dtype=np.intc, order="C")
dims_data = cast(dims.ctypes.data, POINTER(c_int))

self.__acados_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
@@ -1723,7 +1726,7 @@ def constraints_set(self, stage_, field_, value_, api='warn'):
field = field_.encode('utf-8')
stage = c_int(stage_)

dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims = np.zeros((2,), dtype=np.intc, order="C")
dims_data = cast(dims.ctypes.data, POINTER(c_int))

self.__acados_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
@@ -1815,17 +1818,17 @@ def get_from_qp_in(self, stage_: int, field_: str):
stage = c_int(stage_)

# get dims
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims = np.zeros((2,), dtype=np.intc, order="C")
dims_data = cast(dims.ctypes.data, POINTER(c_int))

self.__acados_lib.ocp_nlp_qp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)

# create output data
if field_ in self.__qp_constraint_int_fields:
out = np.ascontiguousarray(np.zeros((np.prod(dims),)), dtype=np.int32)
out = np.zeros((np.prod(dims),), dtype=np.int32, order="C")
else:
out = np.ascontiguousarray(np.zeros((np.prod(dims),)), dtype=np.float64)
out = np.zeros((np.prod(dims),), dtype=np.float64, order="C")
out = out.reshape(dims[0], dims[1], order='F')

out_data = cast(out.ctypes.data, POINTER(c_double))
@@ -1848,7 +1851,7 @@ def __ocp_nlp_get_from_iterate(self, iteration_, stage_, field_):
dim = self.__acados_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, self.nlp_dims, self.nlp_out,
stage, field)

out = np.ascontiguousarray(np.zeros((dim,)), dtype=np.float64)
out = np.zeros((dim,), dtype=np.float64, order="C")
out_data = cast(out.ctypes.data, POINTER(c_double))
out_data_p = cast((out_data), c_void_p)
self.__acados_lib.ocp_nlp_get_from_iterate(self.nlp_solver, iteration, stage, field, out_data_p)
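Many of the getter changes in this file replace np.ascontiguousarray(np.zeros(...)) with a direct C-ordered allocation before the ctypes cast. A small stand-alone check (not from the PR) illustrating why the two allocations are interchangeable here:

# np.zeros already returns a C-contiguous buffer, so the ascontiguousarray wrapper
# was a redundant pass-through; allocating with order="C" yields the same layout
# for the subsequent ctypes pointer cast used by the getters.
import numpy as np
from ctypes import POINTER, c_double, cast

old = np.ascontiguousarray(np.zeros((4, 1)), dtype=np.float64)
new = np.zeros((4, 1), dtype=np.float64, order="C")
assert old.flags['C_CONTIGUOUS'] and new.flags['C_CONTIGUOUS']

out_data = cast(new.ctypes.data, POINTER(c_double))   # same pattern as in get_residuals()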