diff --git a/pyomo/contrib/doe/__init__.py b/pyomo/contrib/doe/__init__.py
index 0295d6eed79..68355a47ab6 100644
--- a/pyomo/contrib/doe/__init__.py
+++ b/pyomo/contrib/doe/__init__.py
@@ -10,6 +10,7 @@
# ___________________________________________________________________________
from .doe import DesignOfExperiments, ObjectiveLib, FiniteDifferenceStep
from .utils import rescale_FIM
+from .grey_box_utilities import FIMExternalGreyBox
# Deprecation errors for old Pyomo.DoE interface classes and structures
from pyomo.common.deprecation import deprecated
diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py
index ab68ae19814..889fdceaf1c 100644
--- a/pyomo/contrib/doe/doe.py
+++ b/pyomo/contrib/doe/doe.py
@@ -38,12 +38,19 @@
pandas as pd,
pathlib,
matplotlib as plt,
+ scipy_available,
)
-from pyomo.common.modeling import unique_component_name
+
+from pyomo.common.errors import DeveloperError
from pyomo.common.timing import TicTocTimer
from pyomo.contrib.sensitivity_toolbox.sens import get_dsdp
+if numpy_available and scipy_available:
+ from pyomo.contrib.doe.grey_box_utilities import FIMExternalGreyBox
+
+ from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock
+
import pyomo.environ as pyo
from pyomo.contrib.doe.utils import (
check_FIM,
@@ -58,6 +65,7 @@ class ObjectiveLib(Enum):
determinant = "determinant"
trace = "trace"
minimum_eigenvalue = "minimum_eigenvalue"
+ condition_number = "condition_number"
zero = "zero"
@@ -74,6 +82,7 @@ def __init__(
fd_formula="central",
step=1e-3,
objective_option="determinant",
+ use_grey_box_objective=False,
scale_constant_value=1.0,
scale_nominal_param_value=False,
prior_FIM=None,
@@ -81,7 +90,9 @@ def __init__(
fim_initial=None,
L_diagonal_lower_bound=1e-7,
solver=None,
+ grey_box_solver=None,
tee=False,
+ grey_box_tee=False,
get_labeled_model_args=None,
logger_level=logging.WARNING,
_Cholesky_option=True,
@@ -97,39 +108,51 @@ def __init__(
Parameters
----------
experiment:
- Experiment object that holds the model and labels all the components. The object
- should have a ``get_labeled_model`` where a model is returned with the following
- labeled sets: ``unknown_parameters``, ``experimental_inputs``, ``experimental_outputs``
+            Experiment object that holds the model and labels all the components.
+            The object should have a ``get_labeled_model`` method that returns a
+            model with the following labeled sets: ``unknown_parameters``,
+            ``experiment_inputs``, ``experiment_outputs``, and ``measurement_error``
fd_formula:
- Finite difference formula for computing the sensitivity matrix. Must be one of
- [``central``, ``forward``, ``backward``], default: ``central``
+ Finite difference formula for computing the sensitivity matrix. Must be
+ one of [``central``, ``forward``, ``backward``], default: ``central``
step:
Relative step size for the finite difference formula.
default: 1e-3
objective_option:
- String representation of the objective option. Current available options are:
- ``determinant`` (for determinant, or D-optimality) and ``trace`` (for trace or
- A-optimality)
+            String representation of the objective option. Currently available options
+            are: ``determinant`` (for determinant, or D-optimality),
+            ``trace`` (for trace, or A-optimality), ``minimum_eigenvalue`` (for
+            E-optimality), or ``condition_number`` (for ME-optimality).
+            Note: E-optimality and ME-optimality are only supported when using the
+            grey box objective (i.e., ``use_grey_box_objective`` is True)
+            default: ``determinant``
+ use_grey_box_objective:
+ Boolean of whether or not to use the grey-box version of the objective
+ function. True to use grey box, False to use standard.
+ Default: False (do not use grey box)
scale_constant_value:
- Constant scaling for the sensitivity matrix. Every element will be multiplied by this
- scaling factor.
+ Constant scaling for the sensitivity matrix. Every element will be
+ multiplied by this scaling factor.
default: 1
scale_nominal_param_value:
- Boolean for whether or not to scale the sensitivity matrix by the nominal parameter
- values. Every column of the sensitivity matrix will be divided by the respective
- nominal parameter value.
+ Boolean for whether or not to scale the sensitivity matrix by the
+ nominal parameter values. Every column of the sensitivity matrix
+ will be divided by the respective nominal parameter value.
default: False
prior_FIM:
- 2D numpy array representing information from prior experiments. If no value is given,
- the assumed prior will be a matrix of zeros. This matrix will be assumed to be scaled
- as the user has specified (i.e., if scale_nominal_param_value is true, we will assume
- the FIM provided here has been scaled by the parameter values)
+ 2D numpy array representing information from prior experiments. If
+ no value is given, the assumed prior will be a matrix of zeros. This
+ matrix will be assumed to be scaled as the user has specified (i.e.,
+ if scale_nominal_param_value is true, we will assume the FIM provided
+ here has been scaled by the parameter values)
jac_initial:
2D numpy array as the initial values for the sensitivity matrix.
fim_initial:
2D numpy array as the initial values for the FIM.
L_diagonal_lower_bound:
- Lower bound for the values of the lower triangular Cholesky factorization matrix.
+ Lower bound for the values of the lower triangular Cholesky factorization
+ matrix.
default: 1e-7
solver:
A ``solver`` object specified by the user, default=None.
@@ -137,16 +160,17 @@ def __init__(
tee:
Solver option to be passed for verbose output.
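+        grey_box_solver:
+            A ``solver`` object for solving the grey-box formulation,
+            default=None. If None, a default ``cyipopt`` solver is configured.
+        grey_box_tee:
+            Solver option to be passed for verbose output of the grey-box
+            solver, default=False.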
get_labeled_model_args:
- Additional arguments for the ``get_labeled_model`` function on the Experiment object.
+ Additional arguments for the ``get_labeled_model`` function on the
+ Experiment object.
_Cholesky_option:
- Boolean value of whether or not to use the cholesky factorization to compute the
- determinant for the D-optimality criteria. This parameter should not be changed
- unless the user intends to make performance worse (i.e., compare an existing tool
- that uses the full FIM to this algorithm)
+ Boolean value of whether or not to use the cholesky factorization to
+ compute the determinant for the D-optimality criteria. This parameter
+ should not be changed unless the user intends to make performance worse
+ (i.e., compare an existing tool that uses the full FIM to this algorithm)
_only_compute_fim_lower:
- If True, only the lower triangle of the FIM is computed. This parameter should not
- be changed unless the user intends to make performance worse (i.e., compare an
- existing tool that uses the full FIM to this algorithm)
+ If True, only the lower triangle of the FIM is computed. This parameter
+ should not be changed unless the user intends to make performance worse
+ (i.e., compare an existing tool that uses the full FIM to this algorithm)
logger_level:
Specify the level of the logger. Change to logging.DEBUG for all messages.
"""
@@ -168,6 +192,7 @@ def __init__(
# Set the objective type and scaling options:
self.objective_option = ObjectiveLib(objective_option)
+ self.use_grey_box = use_grey_box_objective
self.scale_constant_value = scale_constant_value
self.scale_nominal_param_value = scale_nominal_param_value
@@ -194,6 +219,17 @@ def __init__(
self.solver = solver
self.tee = tee
+ self.grey_box_tee = grey_box_tee
+
+ if grey_box_solver:
+ self.grey_box_solver = grey_box_solver
+ else:
+ grey_box_solver = pyo.SolverFactory("cyipopt")
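+            # Default grey-box solver: cyipopt with the HSL ma57 linear solver
+            # (requires an Ipopt build with HSL). Pass a configured solver via
+            # ``grey_box_solver`` to override these options.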
+ grey_box_solver.config.options["linear_solver"] = "ma57"
+ grey_box_solver.config.options['tol'] = 1e-4
+ grey_box_solver.config.options['mu_strategy'] = "monotone"
+
+ self.grey_box_solver = grey_box_solver
# Set get_labeled_model_args as an empty dict if no arguments are passed
if get_labeled_model_args is None:
@@ -250,17 +286,21 @@ def run_doe(self, model=None, results_file=None):
else:
# TODO: Add safe naming when a model is passed by the user.
# doe_block = pyo.Block()
- # doe_block_name = unique_component_name(model, "design_of_experiments_block")
+ # doe_block_name = unique_component_name(model,
+ # "design_of_experiments_block")
# model.add_component(doe_block_name, doe_block)
pass
- # ToDo: potentially work with this for more complicated models
+ # TODO: potentially work with this for more complicated models
# Create the full DoE model (build scenarios for F.D. scheme)
if not self._built_scenarios:
self.create_doe_model(model=model)
# Add the objective function to the model
- self.create_objective_function(model=model)
+ if self.use_grey_box:
+ self.create_grey_box_objective_function(model=model)
+ else:
+ self.create_objective_function(model=model)
# Track time required to build the DoE model
build_time = sp_timer.toc(msg=None)
@@ -268,9 +308,12 @@ def run_doe(self, model=None, results_file=None):
"Successfully built the DoE model.\nBuild time: %0.1f seconds" % build_time
)
- # Solve the square problem first to initialize the fim and
- # sensitivity constraints
- # Deactivate objective expression and objective constraints (on a block), and fix design variables
+        # Solve the square problem first to initialize the FIM and
+        # sensitivity constraints. First, deactivate the objective
+        # expression and objective constraints (on a block), and fix
+        # the design variables.
model.objective.deactivate()
model.obj_cons.deactivate()
for comp in model.scenario_blocks[0].experiment_inputs:
@@ -282,15 +325,18 @@ def run_doe(self, model=None, results_file=None):
# if pyo.check_optimal_termination(res):
# model.load_solution(res)
# else:
- # # The solver was unsuccessful, might want to warn the user or terminate gracefully, etc.
+ # # The solver was unsuccessful, might want to warn the user
+ # # or terminate gracefully, etc.
model.dummy_obj = pyo.Objective(expr=0, sense=pyo.minimize)
self.solver.solve(model, tee=self.tee)
# Track time to initialize the DoE model
initialization_time = sp_timer.toc(msg=None)
self.logger.info(
- "Successfully initialized the DoE model.\nInitialization time: %0.1f seconds"
- % initialization_time
+ (
+ "Successfully initialized the DoE model."
+ "\nInitialization time: %0.1f seconds" % initialization_time
+ )
)
model.dummy_obj.deactivate()
@@ -301,6 +347,33 @@ def run_doe(self, model=None, results_file=None):
model.objective.activate()
model.obj_cons.activate()
+ if self.use_grey_box:
+            # Initialize the grey box inputs to the current FIM values
+ for i in model.parameter_names:
+ for j in model.parameter_names:
+ if list(model.parameter_names).index(i) >= list(
+ model.parameter_names
+ ).index(j):
+ model.obj_cons.egb_fim_block.inputs[(j, i)].set_value(
+ pyo.value(model.fim[(i, j)])
+ )
+ # Set objective value
+ if self.objective_option == ObjectiveLib.trace:
+ # Do safe inverse here?
+ trace_val = 1 / np.trace(np.array(self.get_FIM()))
+ model.obj_cons.egb_fim_block.outputs["A-opt"].set_value(trace_val)
+ elif self.objective_option == ObjectiveLib.determinant:
+ det_val = np.linalg.det(np.array(self.get_FIM()))
+ model.obj_cons.egb_fim_block.outputs["log-D-opt"].set_value(
+ np.log(det_val)
+ )
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ eig, _ = np.linalg.eig(np.array(self.get_FIM()))
+ model.obj_cons.egb_fim_block.outputs["E-opt"].set_value(np.min(eig))
+ elif self.objective_option == ObjectiveLib.condition_number:
+ cond_number = np.linalg.cond(np.array(self.get_FIM()))
+ model.obj_cons.egb_fim_block.outputs["ME-opt"].set_value(cond_number)
+
# If the model has L, initialize it with the solved FIM
if hasattr(model, "L"):
# Get the FIM values
@@ -313,7 +386,8 @@ def run_doe(self, model=None, results_file=None):
(len(model.parameter_names), len(model.parameter_names))
)
- # Need to compute the full FIM before initializing the Cholesky factorization
+ # Need to compute the full FIM before
+ # initializing the Cholesky factorization
if self.only_compute_fim_lower:
fim_np = fim_np + fim_np.T - np.diag(np.diag(fim_np))
@@ -323,7 +397,8 @@ def run_doe(self, model=None, results_file=None):
min_eig = np.min(np.linalg.eigvals(fim_np))
if min_eig < _SMALL_TOLERANCE_DEFINITENESS:
- # Raise the minimum eigenvalue to at least _SMALL_TOLERANCE_DEFINITENESS
+ # Raise the minimum eigenvalue to at
+ # least _SMALL_TOLERANCE_DEFINITENESS
jitter = np.min(
[
-min_eig + _SMALL_TOLERANCE_DEFINITENESS,
@@ -346,20 +421,26 @@ def run_doe(self, model=None, results_file=None):
model.determinant.value = np.linalg.det(np.array(self.get_FIM()))
# Solve the full model, which has now been initialized with the square solve
- res = self.solver.solve(model, tee=self.tee)
+ if self.use_grey_box:
+ res = self.grey_box_solver.solve(model, tee=self.grey_box_tee)
+ else:
+ res = self.solver.solve(model, tee=self.tee)
# Track time used to solve the DoE model
solve_time = sp_timer.toc(msg=None)
self.logger.info(
- "Successfully optimized experiment.\nSolve time: %0.1f seconds" % solve_time
+ (
+ "Successfully optimized experiment."
+ "\nSolve time: %0.1f seconds" % solve_time
+ )
)
self.logger.info(
"Total time for build, initialization, and solve: %0.1f seconds"
% (build_time + initialization_time + solve_time)
)
- #
+ # Avoid accidental carry-over of FIM information
fim_local = self.get_FIM()
# Make sure stale results don't follow the DoE object instance
@@ -367,6 +448,11 @@ def run_doe(self, model=None, results_file=None):
self.results["Solver Status"] = res.solver.status
self.results["Termination Condition"] = res.solver.termination_condition
+        # Solver messages may be returned as str or bytes
+        if isinstance(res.solver.message, bytes):
+            results_message = res.solver.message.decode("utf-8")
+        else:
+            results_message = str(res.solver.message)
+        self.results["Termination Message"] = results_message
# Important quantities for optimal design
self.results["FIM"] = fim_local
@@ -412,8 +498,8 @@ def run_doe(self, model=None, results_file=None):
self.results["Finite Difference Step"] = self.step
self.results["Nominal Parameter Scaling"] = self.scale_nominal_param_value
- # ToDo: Add more useful fields to the results object?
- # ToDo: Add MetaData from the user to the results object? Or leave to the user?
+ # TODO: Add more useful fields to the results object?
+ # TODO: Add MetaData from the user to the results object? Or leave to the user?
# If the user specifies to save the file, do it here as a json
if results_file is not None:
@@ -453,7 +539,8 @@ def compute_FIM(self, model=None, method="sequential"):
else:
# TODO: Add safe naming when a model is passed by the user.
# doe_block = pyo.Block()
- # doe_block_name = unique_component_name(model, "design_of_experiments_block")
+ # doe_block_name = unique_component_name(model,
+ # "design_of_experiments_block")
# model.add_component(doe_block_name, doe_block)
# self.compute_FIM_model = model
pass
@@ -485,8 +572,9 @@ def compute_FIM(self, model=None, method="sequential"):
self._computed_FIM = self.kaug_FIM
else:
raise ValueError(
- "The method provided, {}, must be either `sequential` or `kaug`".format(
- method
+ (
+ "The method provided, {}, must be either `sequential` "
+ "or `kaug`".format(method)
)
)
@@ -513,7 +601,8 @@ def _sequential_FIM(self, model=None):
model.del_component(model.parameter_scenarios)
model.parameter_scenarios = pyo.Suffix(direction=pyo.Suffix.LOCAL)
- # Populate parameter scenarios, and scenario inds based on finite difference scheme
+ # Populate parameter scenarios, and scenario
+ # inds based on finite difference scheme
if self.fd_formula == FiniteDifferenceStep.central:
model.parameter_scenarios.update(
(2 * ind, k) for ind, k in enumerate(model.unknown_parameters.keys())
@@ -532,8 +621,9 @@ def _sequential_FIM(self, model=None):
)
model.scenarios = range(len(model.unknown_parameters) + 1)
else:
- raise AttributeError(
- "Finite difference option not recognized. Please contact the developers as you should not see this error."
+ raise DeveloperError(
+ "Finite difference option not recognized. Please "
+ "contact the developers as you should not see this error."
)
# Fix design variables
@@ -573,13 +663,17 @@ def _sequential_FIM(self, model=None):
res = self.solver.solve(model, tee=self.tee)
pyo.assert_optimal_termination(res)
except:
- # TODO: Make error message more verbose, i.e., add unknown parameter values so the
- # user can try to solve the model instance outside of the pyomo.DoE framework.
+ # TODO: Make error message more verbose,
+ # (i.e., add unknown parameter values so the user
+ # can try to solve the model instance outside of
+ # the pyomo.DoE framework)
raise RuntimeError(
- "Model from experiment did not solve appropriately. Make sure the model is well-posed."
+ "Model from experiment did not solve appropriately."
+ " Make sure the model is well-posed."
)
- # Reset value of parameter to default value before computing finite difference perturbation
+ # Reset value of parameter to default value
+ # before computing finite difference perturbation
param.set_value(model.unknown_parameters[param])
# Extract the measurement values for the scenario and append
@@ -600,7 +694,8 @@ def _sequential_FIM(self, model=None):
# Counting variable for loop
i = 0
- # Loop over parameter values and grab correct columns for finite difference calculation
+ # Loop over parameter values and grab correct
+ # columns for finite difference calculation
for k, v in model.unknown_parameters.items():
curr_step = v * self.step
@@ -616,7 +711,8 @@ def _sequential_FIM(self, model=None):
col_1 = 0
col_2 = i
- # If scale_nominal_param_value is active, scale by nominal parameter value (v)
+ # If scale_nominal_param_value is active, scale
+ # by nominal parameter value (v)
scale_factor = (1.0 / curr_step) * self.scale_constant_value
if self.scale_nominal_param_value:
scale_factor *= v
@@ -629,8 +725,10 @@ def _sequential_FIM(self, model=None):
# Increment the count
i += 1
- # ToDo: As more complex measurement error schemes are put in place, this needs to change
- # Add independent (non-correlated) measurement error for FIM calculation
+ # TODO: As more complex measurement error schemes
+ # are put in place, this needs to change
+ # Add independent (non-correlated) measurement
+ # error for FIM calculation
cov_y = np.zeros((len(model.measurement_error), len(model.measurement_error)))
count = 0
for k, v in model.measurement_error.items():
@@ -728,7 +826,7 @@ def _kaug_FIM(self, model=None):
cov_y[count, count] = 1 / v
count += 1
- # ToDo: need to add a covariance matrix for measurements (sigma inverse)
+ # TODO: need to add a covariance matrix for measurements (sigma inverse)
# i.e., cov_y = self.cov_y or model.cov_y
# Still deciding where this would be best.
@@ -756,19 +854,23 @@ def create_doe_model(self, model=None):
else:
# TODO: Add safe naming when a model is passed by the user.
# doe_block = pyo.Block()
- # doe_block_name = unique_component_name(model, "design_of_experiments_block")
+ # doe_block_name = unique_component_name(model,
+ # "design_of_experiments_block")
# model.add_component(doe_block_name, doe_block)
pass
- # Developer recommendation: use the Cholesky decomposition for D-optimality
- # The explicit formula is available for benchmarking purposes and is NOT recommended
+ # Developer recommendation: use the Cholesky
+ # decomposition for D-optimality. The explicit
+ # formula is available for benchmarking purposes
+ # and is NOT recommended.
if (
self.only_compute_fim_lower
and self.objective_option == ObjectiveLib.determinant
and not self.Cholesky_option
):
raise ValueError(
- "Cannot compute determinant with explicit formula if only_compute_fim_lower is True."
+ "Cannot compute determinant with explicit formula "
+ "if only_compute_fim_lower is True."
)
# Generate scenarios for finite difference formulae
@@ -807,7 +909,8 @@ def identity_matrix(m, i, j):
dict_jac_initialize = {}
for i, bu in enumerate(model.output_names):
for j, un in enumerate(model.parameter_names):
- # Jacobian is a numpy array, rows are experimental outputs, columns are unknown parameters
+ # Jacobian is a numpy array, rows are experimental
+ # outputs, columns are unknown parameters
dict_jac_initialize[(bu, un)] = self.jac_initial[i][j]
# Initialize the Jacobian matrix
@@ -817,8 +920,10 @@ def initialize_jac(m, i, j):
return dict_jac_initialize[(i, j)]
# Otherwise initialize to 0.1 (which is an arbitrary non-zero value)
else:
- raise AttributeError(
- "Jacobian being initialized when the jac_initial attribute is None. Please contact the developers as you should not see this error."
+ raise DeveloperError(
+ "Jacobian being initialized when the jac_initial attribute "
+ "is None. Please contact the developers as you should not "
+ "see this error."
)
model.sensitivity_jacobian = pyo.Var(
@@ -923,7 +1028,9 @@ def read_prior(m, i, j):
model.parameter_names, model.parameter_names, rule=read_prior
)
- # Off-diagonal elements are symmetric, so only half of the off-diagonal elements need to be specified.
+ # Off-diagonal elements are symmetric, so only
+ # half of the off-diagonal elements need to be
+ # specified.
def fim_rule(m, p, q):
"""
m: Pyomo model
@@ -1008,7 +1115,8 @@ def _generate_scenario_blocks(self, model=None):
if self.n_measurement_error != self.n_experiment_outputs:
raise ValueError(
- "Number of experiment outputs, {}, and length of measurement error, {}, do not match. Please check model labeling.".format(
+ "Number of experiment outputs, {}, and length of measurement error, "
+ "{}, do not match. Please check model labeling.".format(
self.n_experiment_outputs, self.n_measurement_error
)
)
@@ -1029,10 +1137,12 @@ def _generate_scenario_blocks(self, model=None):
else:
self.jac_initial = np.eye(self.n_experiment_outputs, self.n_parameters)
- # Make a new Suffix to hold which scenarios are associated with parameters
+ # Make a new Suffix to hold which scenarios
+ # are associated with parameters
model.parameter_scenarios = pyo.Suffix(direction=pyo.Suffix.LOCAL)
- # Populate parameter scenarios, and scenario inds based on finite difference scheme
+ # Populate parameter scenarios, and scenario
+ # inds based on finite difference scheme
if self.fd_formula == FiniteDifferenceStep.central:
model.parameter_scenarios.update(
(2 * ind, k)
@@ -1053,14 +1163,11 @@ def _generate_scenario_blocks(self, model=None):
)
model.scenarios = range(len(model.base_model.unknown_parameters) + 1)
else:
- raise AttributeError(
- "Finite difference option not recognized. Please contact the developers as you should not see this error."
+ raise DeveloperError(
+ "Finite difference option not recognized. Please contact "
+ "the developers as you should not see this error."
)
- # TODO: Allow Params for `unknown_parameters` and `experiment_inputs`
- # May need to make a new converter Param to Var that allows non-string names/references to be passed
- # Waiting on updates to the parmest params_to_vars utility function.....
-
# Run base model to get initialized model and check model function
for comp in model.base_model.experiment_inputs:
comp.fix()
@@ -1071,7 +1178,8 @@ def _generate_scenario_blocks(self, model=None):
self.logger.info("Model from experiment solved.")
except:
raise RuntimeError(
- "Model from experiment did not solve appropriately. Make sure the model is well-posed."
+ "Model from experiment did not solve appropriately. "
+ "Make sure the model is well-posed."
)
for comp in model.base_model.experiment_inputs:
@@ -1083,7 +1191,8 @@ def build_block_scenarios(b, s):
m = b.model()
b.transfer_attributes_from(m.base_model.clone())
- # Forward/Backward difference have a stationary case (s == 0), no parameter to perturb
+ # Forward/Backward difference have a stationary
+ # case (s == 0), no parameter to perturb
if self.fd_formula in [
FiniteDifferenceStep.forward,
FiniteDifferenceStep.backward,
@@ -1103,7 +1212,7 @@ def build_block_scenarios(b, s):
elif self.fd_formula == FiniteDifferenceStep.forward:
diff = self.step # Forward always positive
else:
- # To-Do: add an error message for this as not being implemented yet
+ # TODO: add an error message for this as not being implemented yet
diff = 0
pass
@@ -1124,8 +1233,8 @@ def build_block_scenarios(b, s):
model.scenario_blocks = pyo.Block(model.scenarios, rule=build_block_scenarios)
- # To-Do: this might have to change if experiment inputs have
- # a different value in the Suffix (currently it is the CUID)
+ # TODO: this might have to change if experiment inputs have
+ # a different value in the Suffix (currently it is the CUID)
design_vars = [k for k, v in model.scenario_blocks[0].experiment_inputs.items()]
# Add constraints to equate block design with global design:
@@ -1148,7 +1257,7 @@ def global_design_fixing(m, s):
# Clean up the base model used to generate the scenarios
model.del_component(model.base_model)
- # ToDo: consider this logic? Multi-block systems need something more fancy
+ # TODO: consider this logic? Multi-block systems need something more fancy
self._built_scenarios = True
# Create objective function
@@ -1174,13 +1283,15 @@ def create_objective_function(self, model=None):
ObjectiveLib.trace,
ObjectiveLib.zero,
]:
- raise AttributeError(
- "Objective option not recognized. Please contact the developers as you should not see this error."
+ raise DeveloperError(
+ "Objective option not recognized. Please contact the "
+ "developers as you should not see this error."
)
if not hasattr(model, "fim"):
raise RuntimeError(
- "Model provided does not have variable `fim`. Please make sure the model is built properly before creating the objective."
+ "Model provided does not have variable `fim`. Please make "
+ "sure the model is built properly before creating the objective."
)
small_number = 1e-10
@@ -1203,7 +1314,8 @@ def create_objective_function(self, model=None):
# Calculate the eigenvalues of the FIM matrix
eig = np.linalg.eigvals(fim)
- # If the smallest eigenvalue is (practically) negative, add a diagonal matrix to make it positive definite
+ # If the smallest eigenvalue is (practically) negative,
+ # add a diagonal matrix to make it positive definite
small_number = 1e-10
if min(eig) < small_number:
fim = fim + np.eye(len(model.parameter_names)) * (
@@ -1259,12 +1371,14 @@ def determinant_general(b):
for i in range(len(list_p)):
name_order = []
x_order = list_p[i]
- # sigma_i is the value in the i-th position after the reordering \sigma
+ # sigma_i is the value in the i-th
+ # position after the reordering \sigma
for x in range(len(x_order)):
for y, element in enumerate(m.parameter_names):
if x_order[x] == y:
name_order.append(element)
- # det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i})
+ # det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) *
+ # \Prod_{i=1}^n a_{i,\sigma_i})
det_perm = sum(
self._sgn(list_p[d])
* math.prod(
@@ -1285,7 +1399,8 @@ def determinant_general(b):
)
elif self.objective_option == ObjectiveLib.determinant:
- # if not cholesky but determinant, calculating det and evaluate the OBJ with det
+ # if not Cholesky but determinant, calculating
+ # det and evaluate the OBJ with det
model.determinant = pyo.Var(
initialize=np.linalg.det(fim), bounds=(small_number, None)
)
@@ -1295,17 +1410,88 @@ def determinant_general(b):
)
elif self.objective_option == ObjectiveLib.trace:
- # if not determinant or cholesky, calculating the OBJ with trace
+ # if not determinant or Cholesky, calculating
+ # the OBJ with trace
model.trace = pyo.Var(initialize=np.trace(fim), bounds=(small_number, None))
model.obj_cons.trace_rule = pyo.Constraint(rule=trace_calc)
model.objective = pyo.Objective(
expr=pyo.log10(model.trace), sense=pyo.maximize
)
+ # TODO: Add warning (should be unreachable) if the user calls
+ # the grey box objectives with the standard model
elif self.objective_option == ObjectiveLib.zero:
# add dummy objective function
model.objective = pyo.Objective(expr=0)
+ def create_grey_box_objective_function(self, model=None):
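+        """
+        Add the grey-box objective to the model by attaching an
+        ``ExternalGreyBoxBlock`` (built from ``FIMExternalGreyBox``) and
+        equating its inputs to the model's FIM entries.
+
+        Parameters
+        ----------
+        model:
+            model to add the grey-box objective to, default: None (self.model)
+        """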
+ # Add external grey box block to a block named ``obj_cons`` to
+ # reuse material for initializing the objective-free square model
+ if model is None:
+            model = self.model
+
+ # TODO: Make this naming convention robust
+ model.obj_cons = pyo.Block()
+
+ # Create FIM External Grey Box object
+ grey_box_FIM = FIMExternalGreyBox(
+ doe_object=self,
+ objective_option=self.objective_option,
+ logger_level=self.logger.getEffectiveLevel(),
+ )
+
+ # Attach External Grey Box Model
+ # to the model as an External
+ # Grey Box Block
+ model.obj_cons.egb_fim_block = ExternalGreyBoxBlock(external_model=grey_box_FIM)
+
+        # Add constraints equating all grey box input values to the FIM values
+ def FIM_egb_cons(m, p1, p2):
+ """
+
+ m: Pyomo model
+ p1: parameter 1
+ p2: parameter 2
+
+ """
+            # Only the upper triangle of the FIM is passed as grey-box
+            # inputs (for better numerics); map those inputs to the
+            # corresponding entries of ``model.fim``.
+ if list(model.parameter_names).index(p1) >= list(
+ model.parameter_names
+ ).index(p2):
+ return model.fim[(p1, p2)] == m.egb_fim_block.inputs[(p2, p1)]
+ else:
+ return pyo.Constraint.Skip
+
+ # Add the FIM and External Grey
+ # Box inputs constraints
+ model.obj_cons.FIM_equalities = pyo.Constraint(
+ model.parameter_names, model.parameter_names, rule=FIM_egb_cons
+ )
+
+ # Add objective based on user provided
+ # type within ObjectiveLib
+ if self.objective_option == ObjectiveLib.trace:
+ model.objective = pyo.Objective(
+ expr=model.obj_cons.egb_fim_block.outputs["A-opt"], sense=pyo.minimize
+ )
+ elif self.objective_option == ObjectiveLib.determinant:
+ model.objective = pyo.Objective(
+ expr=model.obj_cons.egb_fim_block.outputs["log-D-opt"],
+ sense=pyo.maximize,
+ )
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ model.objective = pyo.Objective(
+ expr=model.obj_cons.egb_fim_block.outputs["E-opt"], sense=pyo.maximize
+ )
+ elif self.objective_option == ObjectiveLib.condition_number:
+ model.objective = pyo.Objective(
+ expr=model.obj_cons.egb_fim_block.outputs["ME-opt"], sense=pyo.minimize
+ )
+        # No else branch is needed for spurious objective options, as
+        # the error will already have been raised when creating the
+        # FIMExternalGreyBox block
+
# Check to see if the model has all the required suffixes
def check_model_labels(self, model=None):
"""
@@ -1370,23 +1556,26 @@ def check_model_FIM(self, model=None, FIM=None):
if FIM.shape != (self.n_parameters, self.n_parameters):
raise ValueError(
- "Shape of FIM provided should be n parameters by n parameters, or {} by {}, FIM provided has shape {} by {}".format(
+ "Shape of FIM provided should be n parameters by n parameters, "
+ "or {} by {}, FIM provided has shape {} by {}".format(
self.n_parameters, self.n_parameters, FIM.shape[0], FIM.shape[1]
)
)
- # Check FIM is positive definite and symmetric
check_FIM(FIM)
self.logger.info(
- "FIM provided matches expected dimensions from model and is approximately positive (semi) definite."
+ "FIM provided matches expected dimensions from model "
+ "and is approximately positive (semi) definite."
)
# Check the jacobian shape against what is expected from the model.
def check_model_jac(self, jac=None):
if jac.shape != (self.n_experiment_outputs, self.n_parameters):
raise ValueError(
- "Shape of Jacobian provided should be n experiment outputs by n parameters, or {} by {}, Jacobian provided has shape {} by {}".format(
+ "Shape of Jacobian provided should be n experiment outputs "
+ "by n parameters, or {} by {}, Jacobian provided has "
+ "shape {} by {}".format(
self.n_experiment_outputs,
self.n_parameters,
jac.shape[0],
@@ -1419,7 +1608,8 @@ def update_FIM_prior(self, model=None, FIM=None):
if not hasattr(model, "fim"):
raise RuntimeError(
- "``fim`` is not defined on the model provided. Please build the model first."
+ "``fim`` is not defined on the model provided. "
+ "Please build the model first."
)
self.check_model_FIM(model=model, FIM=FIM)
@@ -1431,14 +1621,15 @@ def update_FIM_prior(self, model=None, FIM=None):
self.logger.info("FIM prior has been updated.")
- # TODO: Add an update function for the parameter values? --> closed loop parameter estimation?
- # Or leave this to the user?????
+ # TODO: Add an update function for the parameter values?
+ # Closed loop parameter estimation?
def update_unknown_parameter_values(self, model=None, param_vals=None):
raise NotImplementedError(
"Updating unknown parameter values not yet supported."
)
- # Evaluates FIM and statistics for a full factorial space (same as run_grid_search)
+ # Evaluates FIM and statistics for a
+ # full factorial space (same as run_grid_search)
def compute_FIM_full_factorial(
self, model=None, design_ranges=None, method="sequential"
):
@@ -1628,23 +1819,32 @@ def draw_factorial_figure(
log_scale=True,
):
"""
- Extract results needed for drawing figures from the results dictionary provided by
- the ``compute_FIM_full_factorial`` function.
+ Extract results needed for drawing figures from
+ the results dictionary provided by the
+ ``compute_FIM_full_factorial`` function.
Draw either the 1D sensitivity curve or 2D heatmap.
Parameters
----------
- results: dictionary, results dictionary from ``compute_FIM_full_factorial``, default: None (self.fim_factorial_results)
+ results: dictionary, results dictionary from ``compute_FIM_full_factorial``
+ default: None (self.fim_factorial_results)
sensitivity_design_variables: a list, design variable names to draw sensitivity
- fixed_design_variables: a dictionary, keys are the design variable names to be fixed, values are the value of it to be fixed.
+ fixed_design_variables: a dictionary, keys are the design variable names to be
+ fixed, values are the value of it to be fixed.
full_design_variable_names: a list, all the design variables in the problem.
title_text: a string, name for the figure
- xlabel_text: a string, label for the x-axis of the figure (default: last design variable name)
- In a 1D sensitivity curve, it should be design variable by which the curve is drawn.
- In a 2D heatmap, it should be the second design variable in the design_ranges
- ylabel_text: a string, label for the y-axis of the figure (default: None (1D); first design variable name (2D))
- A 1D sensitivity curve does not need it. In a 2D heatmap, it should be the first design variable in the dv_ranges
+ xlabel_text: a string, label for the x-axis of the figure
+ default: last design variable name
+ In a 1D sensitivity curve, it should be design variable by
+ which the curve is drawn
+ In a 2D heatmap, it should be the second design variable
+ in the design_ranges
+ ylabel_text: a string, label for the y-axis of the figure
+ default: None (1D); first design variable name (2D)
+ A 1D sensitivity curve does not need it.
+ In a 2D heatmap, it should be the first
+ design variable in the dv_ranges
figure_file_name: string or Path, path to save the figure as
font_axes: axes label font size
font_tick: tick label font size
@@ -1654,7 +1854,8 @@ def draw_factorial_figure(
if results is None:
if not hasattr(self, "fim_factorial_results"):
raise RuntimeError(
- "Results must be provided or the compute_FIM_full_factorial function must be run."
+ "Results must be provided or the "
+ "compute_FIM_full_factorial function must be run."
)
results = self.fim_factorial_results
full_design_variable_names = [
@@ -1663,13 +1864,14 @@ def draw_factorial_figure(
else:
if full_design_variable_names is None:
raise ValueError(
- "If results object is provided, you must include all the design variable names."
+ "If results object is provided, you must "
+ "include all the design variable names."
)
des_names = full_design_variable_names
# Inputs must exist for the function to do anything
- # ToDo: Put in a default value function?????
+ # TODO: Put in a default value function?????
if sensitivity_design_variables is None:
raise ValueError("``sensitivity_design_variables`` must be included.")
@@ -1686,14 +1888,16 @@ def draw_factorial_figure(
if not check_des_vars:
raise ValueError(
- "Fixed design variables do not all appear in the results object keys."
+ "Fixed design variables do not all appear "
+ "in the results object keys."
)
if not check_sens_vars:
raise ValueError(
- "Sensitivity design variables do not all appear in the results object keys."
+ "Sensitivity design variables do not all appear "
+ "in the results object keys."
)
- # ToDo: Make it possible to plot pair-wise sensitivities for all variables
+ # TODO: Make it possible to plot pair-wise sensitivities for all variables
# e.g. a curve like low-dimensional posterior distributions
if len(sensitivity_design_variables) > 2:
raise NotImplementedError(
@@ -1704,7 +1908,8 @@ def draw_factorial_figure(
sensitivity_design_variables
) != len(des_names):
raise ValueError(
- "Error: All design variables that are not used to generate sensitivity plots must be fixed."
+ "Error: All design variables that are not used to "
+ "generate sensitivity plots must be fixed."
)
if type(results) is dict:
@@ -1712,7 +1917,8 @@ def draw_factorial_figure(
else:
results_pd = results
- # generate a combination of logic sentences to filter the results of the DOF needed.
+ # generate a combination of logic to
+ # filter the results of the DOF needed.
# an example filter: (self.store_all_results_dataframe["CA0"]==5).
if len(fixed_design_variables.keys()) != 0:
filter = ""
@@ -1759,7 +1965,7 @@ def draw_factorial_figure(
log_scale=log_scale,
figure_file_name=figure_file_name,
)
- # ToDo: Add the multidimensional plotting
+ # TODO: Add the multidimensional plotting
else:
pass
@@ -1779,7 +1985,8 @@ def _curve1D(
----------
title_text: name of the figure, a string
xlabel_text: x label title, a string.
- In a 1D sensitivity curve, it is the design variable by which the curve is drawn.
+ In a 1D sensitivity curve, it is the design
+ variable by which the curve is drawn.
font_axes: axes label font size
font_tick: tick label font size
figure_file_name: string or Path, path to save the figure as
@@ -1787,7 +1994,7 @@ def _curve1D(
Returns
--------
- 4 Figures of 1D sensitivity curves for each criteria
+ 4 Figures of 1D sensitivity curves for each criterion
"""
if figure_file_name is not None:
show_fig = False
@@ -1912,9 +2119,11 @@ def _heatmap(
----------
title_text: name of the figure, a string
xlabel_text: x label title, a string.
- In a 2D heatmap, it should be the second design variable in the design_ranges
+ In a 2D heatmap, it should be the second
+ design variable in the design_ranges
ylabel_text: y label title, a string.
- In a 2D heatmap, it should be the first design variable in the dv_ranges
+ In a 2D heatmap, it should be the first
+ design variable in the dv_ranges
font_axes: axes label font size
font_tick: tick label font size
figure_file_name: string or Path, path to save the figure as
@@ -1922,7 +2131,7 @@ def _heatmap(
Returns
--------
- 4 Figures of 2D heatmap for each criteria
+ 4 Figures of 2D heatmap for each criterion
"""
if figure_file_name is not None:
show_fig = False
@@ -2093,7 +2302,8 @@ def get_FIM(self, model=None):
if not hasattr(model, "fim"):
raise RuntimeError(
- "Model provided does not have variable `fim`. Please make sure the model is built properly before calling `get_FIM`"
+ "Model provided does not have variable `fim`. Please make sure "
+ "the model is built properly before calling `get_FIM`"
)
fim_vals = [
@@ -2133,7 +2343,9 @@ def get_sensitivity_matrix(self, model=None):
if not hasattr(model, "sensitivity_jacobian"):
raise RuntimeError(
- "Model provided does not have variable `sensitivity_jacobian`. Please make sure the model is built properly before calling `get_sensitivity_matrix`"
+ "Model provided does not have variable `sensitivity_jacobian`. "
+ "Please make sure the model is built properly before calling "
+ "`get_sensitivity_matrix`"
)
Q_vals = [
@@ -2169,7 +2381,9 @@ def get_experiment_input_values(self, model=None):
if not hasattr(model, "experiment_inputs"):
if not hasattr(model, "scenario_blocks"):
raise RuntimeError(
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_experiment_input_values`"
+ "Model provided does not have expected structure. "
+ "Please make sure model is built properly before "
+ "calling `get_experiment_input_values`"
)
d_vals = [
@@ -2194,7 +2408,8 @@ def get_unknown_parameter_values(self, model=None):
Returns
-------
- theta: 1D list of unknown parameter values at which this experiment was designed
+ theta: 1D list of unknown parameter values at which
+ this experiment was designed
"""
if model is None:
@@ -2203,7 +2418,9 @@ def get_unknown_parameter_values(self, model=None):
if not hasattr(model, "unknown_parameters"):
if not hasattr(model, "scenario_blocks"):
raise RuntimeError(
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_unknown_parameter_values`"
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_unknown_parameter_values`"
)
theta_vals = [
@@ -2237,7 +2454,9 @@ def get_experiment_output_values(self, model=None):
if not hasattr(model, "experiment_outputs"):
if not hasattr(model, "scenario_blocks"):
raise RuntimeError(
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_experiment_output_values`"
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_experiment_output_values`"
)
y_hat_vals = [
@@ -2249,7 +2468,7 @@ def get_experiment_output_values(self, model=None):
return y_hat_vals
- # ToDo: For more complicated error structures, this should become
+ # TODO: For more complicated error structures, this should become
# get cov_y, or so, and this method will be deprecated
# Gets the measurement error values from an existing model
def get_measurement_error_values(self, model=None):
@@ -2273,7 +2492,9 @@ def get_measurement_error_values(self, model=None):
if not hasattr(model, "measurement_error"):
if not hasattr(model, "scenario_blocks"):
raise RuntimeError(
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_measurement_error_values`"
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_measurement_error_values`"
)
sigma_vals = [
diff --git a/pyomo/contrib/doe/examples/reactor_experiment.py b/pyomo/contrib/doe/examples/reactor_experiment.py
index 34a67a50f95..6ef32af3d28 100644
--- a/pyomo/contrib/doe/examples/reactor_experiment.py
+++ b/pyomo/contrib/doe/examples/reactor_experiment.py
@@ -156,6 +156,7 @@ def finalize_model(self):
for t in m.t:
if t in control_points:
cv = control_points[t]
+ m.T[t].fix()
m.T[t].setlb(self.data["T_bounds"][0])
m.T[t].setub(self.data["T_bounds"][1])
m.T[t] = cv
diff --git a/pyomo/contrib/doe/examples/result.json b/pyomo/contrib/doe/examples/result.json
index 7e1b1a79a1b..23b3a05a10d 100644
--- a/pyomo/contrib/doe/examples/result.json
+++ b/pyomo/contrib/doe/examples/result.json
@@ -1 +1 @@
-{"CA0": 5.0, "CA_bounds": [1.0, 5.0], "CB0": 0.0, "CC0": 0.0, "t_range": [0, 1], "control_points": {"0": 500, "0.125": 300, "0.25": 300, "0.375": 300, "0.5": 300, "0.625": 300, "0.75": 300, "0.875": 300, "1": 300}, "T_bounds": [300, 700], "A1": 84.79, "A2": 371.72, "E1": 7.78, "E2": 15.05}
\ No newline at end of file
+{"CA0": 5.0, "CA_bounds": [1.0, 5.0], "CB0": 0.0, "CC0": 0.0, "t_range": [0, 1], "control_points": {"0": 500, "0.125": 300, "0.25": 300, "0.375": 300, "0.5": 300, "0.625": 300, "0.75": 300, "0.875": 300, "1": 300}, "T_bounds": [300, 700], "A1": 84.79, "A2": 371.72, "E1": 7.78, "E2": 15.05}
diff --git a/pyomo/contrib/doe/examples/rooney_biegler_example.py b/pyomo/contrib/doe/examples/rooney_biegler_example.py
new file mode 100644
index 00000000000..954f0eed435
--- /dev/null
+++ b/pyomo/contrib/doe/examples/rooney_biegler_example.py
@@ -0,0 +1,148 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+"""
+Rooney Biegler model, based on Rooney, W. C. and Biegler, L. T. (2001). Design for
+model parameter uncertainty using nonlinear confidence regions. AIChE Journal,
+47(8), 1794-1804.
+"""
+from pyomo.common.dependencies import numpy as np
+
+from pyomo.contrib.doe.examples.rooney_biegler_experiment import (
+ RooneyBieglerExperimentDoE,
+)
+from pyomo.contrib.doe import DesignOfExperiments
+
+import pyomo.environ as pyo
+
+import sys
+
+
+# Example comparing Cholesky factorization
+# (standard solve) with grey box objective
+# solve for the log-determinant of the FIM
+# (D-optimality)
+def run_rooney_biegler_doe():
+ # Create a RooneyBiegler Experiment
+ experiment = RooneyBieglerExperimentDoE(data={'hour': 2, 'y': 10.3})
+
+ # Use a central difference, with step size 1e-3
+ fd_formula = "central"
+ step_size = 1e-3
+
+ # Use the determinant objective with scaled sensitivity matrix
+ objective_option = "determinant"
+ scale_nominal_param_value = True
+
+ data = [[1, 8.3], [7, 19.8], [2, 10.3], [5, 15.6], [3, 19.0], [4, 16.0]]
+ FIM_prior = np.zeros((2, 2))
+ # Calculate prior using existing experiments
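+    # The first command-line argument (assumed to be an integer) selects the
+    # index of the last existing data point folded into the prior FIM.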
+ for i in range(len(data)):
+ if i > int(sys.argv[1]):
+ break
+ prev_experiment = RooneyBieglerExperimentDoE(
+ data={'hour': data[i][0], 'y': data[i][1]}
+ )
+ doe_obj = DesignOfExperiments(
+ prev_experiment,
+ fd_formula=fd_formula,
+ step=step_size,
+ objective_option=objective_option,
+ scale_nominal_param_value=scale_nominal_param_value,
+ prior_FIM=None,
+ tee=False,
+ )
+
+ FIM_prior += doe_obj.compute_FIM(method='sequential')
+
+    if int(sys.argv[1]) == 0:
+ FIM_prior[0][0] += 1e-6
+ FIM_prior[1][1] += 1e-6
+
+ # Create the DesignOfExperiments object
+ # We will not be passing any prior information in this example
+ # and allow the experiment object and the DesignOfExperiments
+ # call of ``run_doe`` perform model initialization.
+ doe_obj = DesignOfExperiments(
+ experiment,
+ fd_formula=fd_formula,
+ step=step_size,
+ objective_option=objective_option,
+ scale_constant_value=1,
+ scale_nominal_param_value=scale_nominal_param_value,
+ prior_FIM=FIM_prior,
+ jac_initial=None,
+ fim_initial=None,
+ L_diagonal_lower_bound=1e-7,
+ solver=None,
+ tee=False,
+ get_labeled_model_args=None,
+ _Cholesky_option=True,
+ _only_compute_fim_lower=True,
+ )
+
+ # Begin optimal DoE
+ ####################
+ doe_obj.run_doe()
+
+ # Print out a results summary
+ print("Optimal experiment values: ")
+ print(
+ "\tOptimal measurement time: {:.2f}".format(
+ doe_obj.results["Experiment Design"][0]
+ )
+ )
+ print("FIM at optimal design:\n {}".format(np.array(doe_obj.results["FIM"])))
+ print(
+ "Objective value at optimal design: {:.2f}".format(
+ pyo.value(doe_obj.model.objective)
+ )
+ )
+
+ print(doe_obj.results["Experiment Design Names"])
+
+ ###################
+ # End optimal DoE
+
+ # Begin optimal greybox DoE
+ ############################
+ doe_obj_gb = DesignOfExperiments(
+ experiment,
+ fd_formula=fd_formula,
+ step=step_size,
+ objective_option=objective_option,
+ use_grey_box_objective=True,
+ scale_nominal_param_value=scale_nominal_param_value,
+ prior_FIM=FIM_prior,
+ tee=False,
+ )
+
+ doe_obj_gb.run_doe()
+
+ print("Optimal experiment values: ")
+ print(
+ "\tOptimal measurement time: {:.2f}".format(
+ doe_obj_gb.results["Experiment Design"][0]
+ )
+ )
+ print("FIM at optimal design:\n {}".format(np.array(doe_obj_gb.results["FIM"])))
+ print(
+ "Objective value at optimal design: {:.2f}".format(
+ np.log10(np.exp(pyo.value(doe_obj_gb.model.objective)))
+ )
+ )
+
+ ############################
+ # End optimal greybox DoE
+
+
+if __name__ == "__main__":
+ run_rooney_biegler_doe()
diff --git a/pyomo/contrib/doe/examples/rooney_biegler_experiment.py b/pyomo/contrib/doe/examples/rooney_biegler_experiment.py
new file mode 100644
index 00000000000..4918100b78d
--- /dev/null
+++ b/pyomo/contrib/doe/examples/rooney_biegler_experiment.py
@@ -0,0 +1,100 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+"""
+Rooney Biegler model, based on Rooney, W. C. and Biegler, L. T. (2001). Design for
+model parameter uncertainty using nonlinear confidence regions. AIChE Journal,
+47(8), 1794-1804.
+"""
+
+from pyomo.common.dependencies import pandas as pd
+import pyomo.environ as pyo
+from pyomo.contrib.parmest.experiment import Experiment
+
+
+class RooneyBieglerExperimentDoE(Experiment):
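+    """
+    Rooney-Biegler experiment for Pyomo.DoE: a single measurement time
+    (``hour``) is the experimental decision and ``y`` is the measured response.
+    """
+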
+ def __init__(self, data=None, theta=None):
+ if data is None:
+ self.data = {}
+ self.data['hour'] = 1
+ self.data['y'] = 8.3
+ else:
+ self.data = data
+ if theta is None:
+ self.theta = {}
+ self.theta['asymptote'] = 19.143
+ self.theta['rate constant'] = 0.5311
+ else:
+ self.theta = theta
+ self.model = None
+
+ def create_model(self):
+        # Create the Rooney-Biegler model, treating an individual
+        # data point as an experimental decision.
+ m = self.model = pyo.ConcreteModel()
+
+ # Specify the unknown parameters
+ m.asymptote = pyo.Var(initialize=self.theta['asymptote'])
+ m.rate_constant = pyo.Var(initialize=self.theta['rate constant'])
+
+ # Fix the unknown parameters
+ m.asymptote.fix()
+ m.rate_constant.fix()
+
+ # Add the experiment inputs
+ m.hour = pyo.Var(initialize=self.data['hour'], bounds=(0, 10))
+
+ # Fix the experimental design variable
+ m.hour.fix()
+
+ # Add the experiment outputs
+ m.y = pyo.Var(initialize=self.data['y'])
+
+ # Add governing equation
+ m.response_function = pyo.Constraint(
+ expr=m.y - m.asymptote * (1 - pyo.exp(-m.rate_constant * m.hour)) == 0
+ )
+
+ def finalize_model(self):
+        # Nothing additional to finalize for this single-measurement model
+        pass
+
+ def label_model(self):
+ m = self.model
+
+ # Add y value as experiment output
+ m.experiment_outputs = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+ m.experiment_outputs[m.y] = m.y()
+
+ # Add measurement error associated with y
+        # We assume a constant (flat) measurement error of 1 for y
+ m.measurement_error = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+ m.measurement_error[m.y] = 1
+
+ # Add hour as experiment input
+ # We are deciding when to sample
+ m.experiment_inputs = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+ m.experiment_inputs[m.hour] = m.hour()
+
+ # Adding the unknown parameters
+ m.unknown_parameters = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+ m.unknown_parameters.update(
+ (k, k.value) for k in [m.asymptote, m.rate_constant]
+ )
+
+ def get_labeled_model(self):
+ self.create_model()
+ self.finalize_model()
+ self.label_model()
+
+ return self.model
diff --git a/pyomo/contrib/doe/grey_box_utilities.py b/pyomo/contrib/doe/grey_box_utilities.py
new file mode 100644
index 00000000000..771f1ae230b
--- /dev/null
+++ b/pyomo/contrib/doe/grey_box_utilities.py
@@ -0,0 +1,494 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+#
+# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation
+# Initiative (CCSI), and is copyright (c) 2022 by the software owners:
+# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC.,
+# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory,
+# Battelle Memorial Institute, University of Notre Dame,
+# The University of Pittsburgh, The University of Texas at Austin,
+# University of Toledo, West Virginia University, et al. All rights reserved.
+#
+# NOTICE. This Software was developed under funding from the
+# U.S. Department of Energy and the U.S. Government consequently retains
+# certain rights. As such, the U.S. Government has been granted for itself
+# and others acting on its behalf a paid-up, nonexclusive, irrevocable,
+# worldwide license in the Software to reproduce, distribute copies to the
+# public, prepare derivative works, and perform publicly and display
+# publicly, and to permit other to do so.
+# ___________________________________________________________________________
+
+from pyomo.common.dependencies import (
+ numpy as np,
+ numpy_available,
+ scipy,
+ scipy_available,
+)
+
+from enum import Enum
+import itertools
+import logging
+
+if scipy_available and numpy_available:
+ from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel
+
+import pyomo.environ as pyo
+
+
+class FIMExternalGreyBox(
+ ExternalGreyBoxModel if (scipy_available and numpy_available) else object
+):
+ def __init__(self, doe_object, objective_option="determinant", logger_level=None):
+ """
+ Grey box model for metrics on the FIM. This methodology reduces
+ numerical complexity for the computation of FIM metrics related
+ to eigenvalue decomposition.
+
+ Parameters
+ ----------
+ doe_object:
+ Design of Experiments object that contains a built model
+            (with sensitivity matrix, Q, and Fisher information matrix, FIM).
+            The external grey box model will utilize elements of the
+            `doe_object` model to build the FIM metric with consistent naming.
+        objective_option:
+            String representation of the objective option. Currently available
+ options are: ``determinant`` (D-optimality), ``trace`` (A-optimality),
+ ``minimum_eigenvalue`` (E-optimality), ``condition_number``
+ (modified E-optimality).
+ default: ``determinant``
+ logger_level:
+ logging level to be specified if different from doe_object's logging level.
+ default: None, or equivalently, use the logging level of doe_object.
+ NOTE: Use logging.DEBUG for all messages.
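+
+        Example (mirrors ``DesignOfExperiments.create_grey_box_objective_function``;
+        ``doe_obj`` is assumed to be a ``DesignOfExperiments`` instance with a
+        built model)::
+
+            grey_box_FIM = FIMExternalGreyBox(
+                doe_object=doe_obj, objective_option="determinant"
+            )
+            egb_block = ExternalGreyBoxBlock(external_model=grey_box_FIM)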
+ """
+
+ if doe_object is None:
+ raise ValueError(
+ "DoE Object must be provided to build external grey box of the FIM."
+ )
+
+ self.doe_object = doe_object
+
+ # Grab parameter list from the doe_object model
+ self._param_names = [i for i in self.doe_object.model.parameter_names]
+ self._n_params = len(self._param_names)
+
+        # Validate and store the objective option. ObjectiveLib is imported
+        # here (rather than at module level) to avoid a circular import.
+ from pyomo.contrib.doe import ObjectiveLib
+
+ objective_option = ObjectiveLib(objective_option)
+ self.objective_option = objective_option
+
+ # Create logger for FIM egb object
+ self.logger = logging.getLogger(__name__)
+
+ # If logger level is None, use doe_object's logger level
+ if logger_level is None:
+ logger_level = doe_object.logger.level
+
+ self.logger.setLevel(level=logger_level)
+
+ # Set initial values for inputs
+        # Mask selecting the upper triangle (including the diagonal) of the FIM
+ self._masking_matrix = np.triu(np.ones_like(self.doe_object.fim_initial))
+ self._input_values = np.asarray(
+ self.doe_object.fim_initial[self._masking_matrix > 0], dtype=np.float64
+ )
+ self._n_inputs = len(self._input_values)
+
+ def _get_FIM(self):
+        # Grabs the current FIM subject to the input values.
+        # The inputs are assumed to hold the upper triangle of
+        # the FIM (see ``input_names``); the full symmetric FIM
+        # is reconstructed from them.
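+        # For example, with parameters [p1, p2] the inputs are ordered
+        # [(p1, p1), (p1, p2), (p2, p2)], i.e., the upper triangle in
+        # row-major order (matching ``input_names``).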
+ upt_FIM = self._input_values
+
+ # Create FIM in the correct way
+ current_FIM = np.zeros_like(self.doe_object.fim_initial)
+ # Utilize upper triangular portion of FIM
+ current_FIM[np.triu_indices_from(current_FIM)] = upt_FIM
+ # Construct lower triangular using the
+ # current upper triangle minus the diagonal.
+ current_FIM += current_FIM.transpose() - np.diag(np.diag(current_FIM))
+
+ return current_FIM
+
+ def input_names(self):
+        # combinations_with_replacement gives the upper-triangular matrix
+        # indices (including the diagonal) flattened in row-first order,
+        # which matches how ``_get_FIM`` unpacks the input values
+ input_names_list = list(
+ itertools.combinations_with_replacement(self._param_names, 2)
+ )
+ return input_names_list
+
+ def equality_constraint_names(self):
+ # TODO: Are there any objectives that will have constraints?
+ return []
+
+ def output_names(self):
+ # TODO: add output name for the variable. This may have to be
+ # an input from the user. Or it could depend on the usage of
+ # the ObjectiveLib Enum object, which should have an associated
+ # name for the objective function at all times.
+ from pyomo.contrib.doe import ObjectiveLib
+
+ if self.objective_option == ObjectiveLib.trace:
+ obj_name = "A-opt"
+ elif self.objective_option == ObjectiveLib.determinant:
+ obj_name = "log-D-opt"
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ obj_name = "E-opt"
+ elif self.objective_option == ObjectiveLib.condition_number:
+ obj_name = "ME-opt"
+ else:
+ # Re-coerce to ObjectiveLib so an unrecognized option raises
+ # a descriptive ValueError
+ ObjectiveLib(self.objective_option)
+ return [obj_name]
+
+ def set_input_values(self, input_values):
+ # Copy the provided values into the input array (ordered to align
+ # with input_names)
+ np.copyto(self._input_values, input_values)
+
+ def evaluate_equality_constraints(self):
+ # TODO: are there any objectives that will have constraints?
+ return None
+
+ def evaluate_outputs(self):
+ # Evaluates the objective value for the specified
+ # ObjectiveLib type.
+ current_FIM = self._get_FIM()
+
+ M = np.asarray(current_FIM, dtype=np.float64).reshape(
+ self._n_params, self._n_params
+ )
+
+ # Change objective value based on ObjectiveLib type.
+ from pyomo.contrib.doe import ObjectiveLib
+
+ if self.objective_option == ObjectiveLib.trace:
+ obj_value = np.trace(np.linalg.pinv(M))
+ elif self.objective_option == ObjectiveLib.determinant:
+ (sign, logdet) = np.linalg.slogdet(M)
+ obj_value = logdet
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ eig, _ = np.linalg.eig(M)
+ obj_value = np.min(eig)
+ elif self.objective_option == ObjectiveLib.condition_number:
+ eig, _ = np.linalg.eig(M)
+ obj_value = np.max(eig) / np.min(eig)
+ else:
+ # Re-coerce to ObjectiveLib so an unrecognized option raises
+ # a descriptive ValueError
+ ObjectiveLib(self.objective_option)
+
+ return np.asarray([obj_value], dtype=np.float64)
+
+ def finalize_block_construction(self, pyomo_block):
+ # Set bounds on the inputs/outputs
+ # Set initial values of the inputs/outputs
+ # This will depend on the objective used
+
+ # Initialize grey box FIM values
+ for ind, val in enumerate(self.input_names()):
+ pyomo_block.inputs[val] = self.doe_object.fim_initial[
+ self._masking_matrix > 0
+ ][ind]
+
+ # Initialize the output value for the chosen objective
+ from pyomo.contrib.doe import ObjectiveLib
+
+ # Calculate initial values for the output
+ output_value = self.evaluate_outputs()[0]
+
+ # Set the value of the output for the given
+ # objective function.
+ if self.objective_option == ObjectiveLib.trace:
+ pyomo_block.outputs["A-opt"] = output_value
+ elif self.objective_option == ObjectiveLib.determinant:
+ pyomo_block.outputs["log-D-opt"] = output_value
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ pyomo_block.outputs["E-opt"] = output_value
+ elif self.objective_option == ObjectiveLib.condition_number:
+ pyomo_block.outputs["ME-opt"] = output_value
+
+ def evaluate_jacobian_equality_constraints(self):
+ # TODO: Do any objectives require constraints?
+
+ # Returns coo_matrix of the correct shape
+ return None
+
+ def evaluate_jacobian_outputs(self):
+ # Compute the jacobian of the objective function with
+ # respect to the Fisher information matrix. Then return
+ # a coo_matrix that aligns with what IPOPT will expect.
+ current_FIM = self._get_FIM()
+
+ M = np.asarray(current_FIM, dtype=np.float64).reshape(
+ self._n_params, self._n_params
+ )
+
+ # TODO: Add inertia correction for
+ # negative/small eigenvalues
+ eig_vals, eig_vecs = np.linalg.eig(M)
+ if min(eig_vals) <= 1e-3:
+ pass
+
+ from pyomo.contrib.doe import ObjectiveLib
+
+ if self.objective_option == ObjectiveLib.trace:
+ Minv = np.linalg.pinv(M)
+ # Derivative formula of A-optimality
+ # is -inv(FIM) @ inv(FIM). Add reference to
+ # pyomo.DoE 2.0 manuscript S.I.
+ jac_M = -Minv @ Minv
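+ # In index form, this is the standard matrix calculus identity
+ # (for symmetric M):
+ #     d tr(M^-1) / d M[i, j] = -(M^-1 @ M^-1)[i, j]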
+ elif self.objective_option == ObjectiveLib.determinant:
+ Minv = np.linalg.pinv(M)
+ # Derivative formula derived using tensor
+ # calculus. Add reference to pyomo.DoE 2.0
+ # manuscript S.I.
+ jac_M = 0.5 * (Minv + Minv.transpose())
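+ # In index form, this is the standard matrix calculus identity
+ # (for symmetric M):
+ #     d log(det(M)) / d M[i, j] = (M^-1)[i, j]
+ # The 0.5 * (Minv + Minv^T) form enforces symmetry of the
+ # numerical inverse.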
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ # Obtain minimum eigenvalue location
+ min_eig_loc = np.argmin(eig_vals)
+
+ # Grab the eigenvector associated with the
+ # minimum eigenvalue and store it as a
+ # (1 x n) row matrix so matrix operations
+ # can be used below.
+ min_eig_vec = np.array([eig_vecs[:, min_eig_loc]])
+
+ # Calculate the derivative matrix: the
+ # outer product of the minimum eigenvector
+ # with itself.
+ jac_M = min_eig_vec * np.transpose(min_eig_vec)
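+ # In index form (first-order eigenvalue perturbation theory,
+ # assuming a simple, i.e., non-repeated, minimum eigenvalue with
+ # unit eigenvector v):
+ #     d lambda_min / d M[i, j] = v[i] * v[j]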
+ elif self.objective_option == ObjectiveLib.condition_number:
+ # Obtain minimum (and maximum) eigenvalue location(s)
+ min_eig_loc = np.argmin(eig_vals)
+ max_eig_loc = np.argmax(eig_vals)
+
+ min_eig = np.min(eig_vals)
+ max_eig = np.max(eig_vals)
+
+ # Grab the eigenvectors associated with the
+ # min and max eigenvalues and store them as
+ # (1 x n) row matrices so matrix operations
+ # can be used below.
+ min_eig_vec = np.array([eig_vecs[:, min_eig_loc]])
+ max_eig_vec = np.array([eig_vecs[:, max_eig_loc]])
+
+ # Calculate the derivative matrix. Similar
+ # to the minimum eigenvalue case, this
+ # computation involves two outer products.
+ min_eig_term = min_eig_vec * np.transpose(min_eig_vec)
+ max_eig_term = max_eig_vec * np.transpose(max_eig_vec)
+
+ # Small perturbation (on the order of machine epsilon) to avoid
+ # division by a (near) zero minimum eigenvalue
+ min_eig_epsilon = 2e-16
+
+ # Computing a (hopefully) nonsingular
+ # condition number for the jacobian
+ # expression.
+ safe_cond_number = max_eig / (min_eig + np.sign(min_eig) * min_eig_epsilon)
+
+ # Combining the expression
+ jac_M = (
+ 1
+ / (min_eig + np.sign(min_eig) * min_eig_epsilon)
+ * (max_eig_term - safe_cond_number * min_eig_term)
+ )
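+ # In matrix form, with kappa = lambda_max / lambda_min and unit
+ # eigenvectors v_max, v_min (assuming simple eigenvalues, and
+ # ignoring the epsilon safeguard):
+ #     d kappa / d M = (1 / lambda_min)
+ #                     * (v_max v_max^T - kappa * v_min v_min^T)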
+ else:
+ # Re-coerce to ObjectiveLib so an unrecognized option raises
+ # a descriptive ValueError
+ ObjectiveLib(self.objective_option)
+
+ # Keep only the upper triangular entries of jac_M
+ # (boolean indexing returns a flat 1-D array)
+ jac_M = jac_M[self._masking_matrix > 0]
+
+ # Single output row; one column per grey box input
+ M_rows = np.zeros(len(jac_M), dtype=int)
+ M_cols = np.arange(len(jac_M))
+
+ # Returns coo_matrix of the correct shape
+ return scipy.sparse.coo_matrix(
+ (jac_M, (M_rows, M_cols)), shape=(1, len(jac_M))
+ )
+
+ # Beyond here is for Hessian information
+ def set_equality_constraint_multipliers(self, eq_con_multiplier_values):
+ # TODO: Do any objectives require constraints?
+ # Assert lengths match
+ self._eq_con_mult_values = np.asarray(
+ eq_con_multiplier_values, dtype=np.float64
+ )
+
+ def set_output_constraint_multipliers(self, output_con_multiplier_values):
+ # TODO: Do any objectives require constraints?
+ # Assert length matches
+ self._output_con_mult_values = np.asarray(
+ output_con_multiplier_values, dtype=np.float64
+ )
+
+ def evaluate_hessian_equality_constraints(self):
+ # Returns coo_matrix of the correct shape
+ # No constraints so this returns `None`
+ return None
+
+ def evaluate_hessian_outputs(self, FIM=None):
+ # TODO: significant bookkeeping if the Hessians require vectorized
+ # operations. Just need a mapping that works well and we are good.
+ current_FIM = self._get_FIM()
+
+ M = np.asarray(current_FIM, dtype=np.float64).reshape(
+ self._n_params, self._n_params
+ )
+
+ # Hessian with correct size for using only the
+ # lower (upper) triangle of the FIM
+ hess_vals = []
+ hess_rows = []
+ hess_cols = []
+
+ # Need to iterate over the unique
+ # differentials
+ input_differentials_2D = itertools.combinations_with_replacement(
+ self.input_names(), 2
+ )
+
+ from pyomo.contrib.doe import ObjectiveLib
+
+ if self.objective_option == ObjectiveLib.trace:
+ # Grab Inverse
+ Minv = np.linalg.pinv(M)
+
+ # Also grab inverse squared
+ Minv_sq = Minv @ Minv
+
+ for current_differential in input_differentials_2D:
+ # Row is the location of the second ordered pair (d2) in
+ # input_names and Col is the location of the first ordered
+ # pair (d1). Since index(d1) <= index(d2), this populates
+ # the lower triangle of the (symmetric) Hessian.
+ d1, d2 = current_differential
+ row = self.input_names().index(d2)
+ col = self.input_names().index(d1)
+
+ # Grabbing the ordered quadruple (i, j, k, l)
+ # `location` here refers to the index in the
+ # self._param_names list
+ #
+ # i is the location of the first element of d1
+ # j is the location of the second element of d1
+ # k is the location of the first element of d2
+ # l is the location of the second element of d2
+ i = self._param_names.index(d1[0])
+ j = self._param_names.index(d1[1])
+ k = self._param_names.index(d2[0])
+ l = self._param_names.index(d2[1])
+
+ # New Formula (tested with finite differencing)
+ # Will be cited from the Pyomo.DoE 2.0 paper
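+ # In index form (symmetric M, with Minv = M^-1 and
+ # Minv_sq = M^-2):
+ #     d^2 tr(M^-1) / (d M[i, j] d M[k, l])
+ #         = Minv[i, l] * Minv_sq[k, j] + Minv_sq[i, l] * Minv[k, j]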
+ hess_vals.append(
+ (Minv[i, l] * Minv_sq[k, j]) + (Minv_sq[i, l] * Minv[k, j])
+ )
+ hess_rows.append(row)
+ hess_cols.append(col)
+
+ elif self.objective_option == ObjectiveLib.determinant:
+ # Grab inverse
+ Minv = np.linalg.pinv(M)
+
+ for current_differential in input_differentials_2D:
+ # Row, Col and i, j, k, l values are
+ # obtained identically as in the trace
+ # for loop above.
+ d1, d2 = current_differential
+ row = self.input_names().index(d2)
+ col = self.input_names().index(d1)
+
+ i = self._param_names.index(d1[0])
+ j = self._param_names.index(d1[1])
+ k = self._param_names.index(d2[0])
+ l = self._param_names.index(d2[1])
+
+ # New Formula (tested with finite differencing)
+ # Will be cited from the Pyomo.DoE 2.0 paper
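+ # In index form (symmetric M, with Minv = M^-1):
+ #     d^2 log(det(M)) / (d M[i, j] d M[k, l])
+ #         = -Minv[i, l] * Minv[k, j]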
+ hess_vals.append(-(Minv[i, l] * Minv[k, j]))
+ hess_rows.append(row)
+ hess_cols.append(col)
+ elif self.objective_option == ObjectiveLib.minimum_eigenvalue:
+ # Grab eigenvalues and eigenvectors
+ # Also need the min location
+ all_eig_vals, all_eig_vecs = np.linalg.eig(M)
+ min_eig_loc = np.argmin(all_eig_vals)
+
+ # Grabbing min eigenvalue and corresponding
+ # eigenvector
+ min_eig = all_eig_vals[min_eig_loc]
+ min_eig_vec = np.array([all_eig_vecs[:, min_eig_loc]])
+
+ for current_differential in input_differentials_2D:
+ # Row, Col and i, j, k, l values are
+ # obtained identically as in the trace
+ # for loop above.
+ d1, d2 = current_differential
+ row = self.input_names().index(d2)
+ col = self.input_names().index(d1)
+
+ i = self._param_names.index(d1[0])
+ j = self._param_names.index(d1[1])
+ k = self._param_names.index(d2[0])
+ l = self._param_names.index(d2[1])
+
+ # For loop to iterate over all
+ # eigenvalues/vectors
+ curr_hess_val = 0
+ for curr_eig in range(len(all_eig_vals)):
+ # Skip if we are at the minimum
+ # eigenvalue. Denominator is
+ # zero.
+ if curr_eig == min_eig_loc:
+ continue
+
+ # Formula derived in Pyomo.DoE Paper
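+ # In index form (second-order eigenvalue perturbation theory,
+ # assuming a simple minimum eigenvalue with unit eigenvector v
+ # and remaining eigenpairs (lambda_m, u_m)):
+ #     d^2 lambda_min / (d M[i, j] d M[k, l]) =
+ #         sum over m != min of
+ #         (v[i]*u_m[j]*v[l]*u_m[k] + v[k]*u_m[i]*v[j]*u_m[l])
+ #         / (lambda_min - lambda_m)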
+ curr_hess_val += (
+ 1
+ * (
+ min_eig_vec[0, i]
+ * all_eig_vecs[j, curr_eig]
+ * min_eig_vec[0, l]
+ * all_eig_vecs[k, curr_eig]
+ )
+ / (min_eig - all_eig_vals[curr_eig])
+ )
+ curr_hess_val += (
+ 1
+ * (
+ min_eig_vec[0, k]
+ * all_eig_vecs[i, curr_eig]
+ * min_eig_vec[0, j]
+ * all_eig_vecs[l, curr_eig]
+ )
+ / (min_eig - all_eig_vals[curr_eig])
+ )
+ hess_vals.append(curr_hess_val)
+ hess_rows.append(row)
+ hess_cols.append(col)
+ elif self.objective_option == ObjectiveLib.condition_number:
+ # Hessian for the condition number objective is not yet
+ # implemented; an empty (all-zero) Hessian is returned
+ pass
+ else:
+ # Re-coerce to ObjectiveLib so an unrecognized option raises
+ # a descriptive ValueError
+ ObjectiveLib(self.objective_option)
+
+ # Returns coo_matrix of the correct shape
+ return scipy.sparse.coo_matrix(
+ (np.asarray(hess_vals), (hess_rows, hess_cols)),
+ shape=(self._n_inputs, self._n_inputs),
+ )
diff --git a/pyomo/contrib/doe/tests/test_doe_build.py b/pyomo/contrib/doe/tests/test_doe_build.py
index 39615f47808..a3e3a71df37 100644
--- a/pyomo/contrib/doe/tests/test_doe_build.py
+++ b/pyomo/contrib/doe/tests/test_doe_build.py
@@ -16,14 +16,20 @@
numpy_available,
pandas as pd,
pandas_available,
+ scipy_available,
)
+
from pyomo.common.fileutils import this_file_dir
import pyomo.common.unittest as unittest
-from pyomo.contrib.doe import DesignOfExperiments
-from pyomo.contrib.doe.examples.reactor_example import (
- ReactorExperiment as FullReactorExperiment,
-)
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest("Pyomo.DoE needs scipy and numpy to run tests")
+
+if scipy_available:
+ from pyomo.contrib.doe import DesignOfExperiments
+ from pyomo.contrib.doe.examples.reactor_example import (
+ ReactorExperiment as FullReactorExperiment,
+ )
import pyomo.environ as pyo
@@ -109,7 +115,13 @@ def get_standard_args(experiment, fd_method, obj_used):
args['jac_initial'] = None
args['fim_initial'] = None
args['L_diagonal_lower_bound'] = 1e-7
- args['solver'] = None
+ # Make solver object with
+ # good linear subroutines
+ solver = SolverFactory("ipopt")
+ solver.options["linear_solver"] = "ma57"
+ solver.options["halt_on_ampl_error"] = "yes"
+ solver.options["max_iter"] = 3000
+ args['solver'] = solver
args['tee'] = False
args['get_labeled_model_args'] = None
args['_Cholesky_option'] = True
@@ -119,6 +131,7 @@ def get_standard_args(experiment, fd_method, obj_used):
@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
@unittest.skipIf(not numpy_available, "Numpy is not available")
+@unittest.skipIf(not scipy_available, "scipy is not available")
class TestReactorExampleBuild(unittest.TestCase):
def test_reactor_fd_central_check_fd_eqns(self):
fd_method = "central"
@@ -264,7 +277,8 @@ def test_reactor_fd_central_design_fixing(self):
# Ensure that each set of constraints has all blocks pairs with scenario 0
# i.e., (0, 1), (0, 2), ..., (0, N) --> N - 1 constraints
self.assertEqual(len(getattr(model, con_name)), (len(model.scenarios) - 1))
- # Should not have any constraints sets beyond the length of design_vars - 1 (started with index 0)
+ # Should not have any constraints sets beyond the
+ # length of design_vars - 1 (started with index 0)
self.assertFalse(hasattr(model, con_name_base + str(len(design_vars))))
def test_reactor_fd_backward_design_fixing(self):
@@ -296,7 +310,8 @@ def test_reactor_fd_backward_design_fixing(self):
# Ensure that each set of constraints has all blocks pairs with scenario 0
# i.e., (0, 1), (0, 2), ..., (0, N) --> N - 1 constraints
self.assertEqual(len(getattr(model, con_name)), (len(model.scenarios) - 1))
- # Should not have any constraints sets beyond the length of design_vars - 1 (started with index 0)
+ # Should not have any constraints sets beyond the
+ # length of design_vars - 1 (started with index 0)
self.assertFalse(hasattr(model, con_name_base + str(len(design_vars))))
def test_reactor_fd_forward_design_fixing(self):
@@ -328,7 +343,8 @@ def test_reactor_fd_forward_design_fixing(self):
# Ensure that each set of constraints has all blocks pairs with scenario 0
# i.e., (0, 1), (0, 2), ..., (0, N) --> N - 1 constraints
self.assertEqual(len(getattr(model, con_name)), (len(model.scenarios) - 1))
- # Should not have any constraints sets beyond the length of design_vars - 1 (started with index 0)
+ # Should not have any constraints sets beyond the
+ # length of design_vars - 1 (started with index 0)
self.assertFalse(hasattr(model, con_name_base + str(len(design_vars))))
def test_reactor_check_user_initialization(self):
diff --git a/pyomo/contrib/doe/tests/test_doe_errors.py b/pyomo/contrib/doe/tests/test_doe_errors.py
index ce77fb4f553..a55e4a7166b 100644
--- a/pyomo/contrib/doe/tests/test_doe_errors.py
+++ b/pyomo/contrib/doe/tests/test_doe_errors.py
@@ -16,15 +16,22 @@
numpy_available,
pandas as pd,
pandas_available,
+ scipy_available,
)
+
+from pyomo.common.errors import DeveloperError
from pyomo.common.fileutils import this_file_dir
import pyomo.common.unittest as unittest
-from pyomo.contrib.doe import DesignOfExperiments
-from pyomo.contrib.doe.tests.experiment_class_example_flags import (
- BadExperiment,
- FullReactorExperiment,
-)
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest("Pyomo.DoE needs scipy and numpy to run tests")
+
+if scipy_available:
+ from pyomo.contrib.doe import DesignOfExperiments
+ from pyomo.contrib.doe.tests.experiment_class_example_flags import (
+ BadExperiment,
+ FullReactorExperiment,
+ )
from pyomo.opt import SolverFactory
@@ -50,7 +57,13 @@ def get_standard_args(experiment, fd_method, obj_used, flag):
args['jac_initial'] = None
args['fim_initial'] = None
args['L_diagonal_lower_bound'] = 1e-7
- args['solver'] = None
+ # Make solver object with
+ # good linear subroutines
+ solver = SolverFactory("ipopt")
+ solver.options["linear_solver"] = "ma57"
+ solver.options["halt_on_ampl_error"] = "yes"
+ solver.options["max_iter"] = 3000
+ args['solver'] = solver
args['tee'] = False
args['get_labeled_model_args'] = {"flag": flag}
args['_Cholesky_option'] = True
@@ -59,7 +72,21 @@ def get_standard_args(experiment, fd_method, obj_used, flag):
@unittest.skipIf(not numpy_available, "Numpy is not available")
+@unittest.skipIf(not scipy_available, "scipy is not available")
class TestReactorExampleErrors(unittest.TestCase):
+ def test_experiment_none_error(self):
+ fd_method = "central"
+ obj_used = "trace"
+ flag_val = 1 # Value for faulty model build mode - 1: No exp outputs
+
+ with self.assertRaisesRegex(
+ ValueError, "Experiment object must be provided to perform DoE."
+ ):
+ # Experiment provided as None
+ DoE_args = get_standard_args(None, fd_method, obj_used, flag_val)
+
+ doe_obj = DesignOfExperiments(**DoE_args)
+
def test_reactor_check_no_get_labeled_model(self):
fd_method = "central"
obj_used = "trace"
@@ -159,7 +186,8 @@ def test_reactor_check_bad_prior_size(self):
with self.assertRaisesRegex(
ValueError,
- "Shape of FIM provided should be n parameters by n parameters, or {} by {}, FIM provided has shape {} by {}".format(
+ "Shape of FIM provided should be n parameters by n parameters, "
+ "or {} by {}, FIM provided has shape {} by {}".format(
4, 4, prior_FIM.shape[0], prior_FIM.shape[1]
),
):
@@ -183,7 +211,8 @@ def test_reactor_check_bad_prior_negative_eigenvalue(self):
with self.assertRaisesRegex(
ValueError,
- r"FIM provided is not positive definite. It has one or more negative eigenvalue\(s\) less than -{:.1e}".format(
+ r"FIM provided is not positive definite. It has one or more "
+ r"negative eigenvalue\(s\) less than -{:.1e}".format(
_SMALL_TOLERANCE_DEFINITENESS
),
):
@@ -230,9 +259,9 @@ def test_reactor_check_bad_jacobian_init_size(self):
with self.assertRaisesRegex(
ValueError,
- "Shape of Jacobian provided should be n experiment outputs by n parameters, or {} by {}, Jacobian provided has shape {} by {}".format(
- 27, 4, jac_init.shape[0], jac_init.shape[1]
- ),
+ "Shape of Jacobian provided should be n experiment outputs "
+ "by n parameters, or {} by {}, Jacobian provided has "
+ "shape {} by {}".format(27, 4, jac_init.shape[0], jac_init.shape[1]),
):
doe_obj.create_doe_model()
@@ -251,7 +280,8 @@ def test_reactor_check_unbuilt_update_FIM(self):
with self.assertRaisesRegex(
RuntimeError,
- "``fim`` is not defined on the model provided. Please build the model first.",
+ "``fim`` is not defined on the model provided. "
+ "Please build the model first.",
):
doe_obj.update_FIM_prior(FIM=FIM_update)
@@ -305,9 +335,8 @@ def test_reactor_check_measurement_and_output_length_match(self):
with self.assertRaisesRegex(
ValueError,
- "Number of experiment outputs, {}, and length of measurement error, {}, do not match. Please check model labeling.".format(
- 27, 1
- ),
+ "Number of experiment outputs, {}, and length of measurement error, "
+ "{}, do not match. Please check model labeling.".format(27, 1),
):
doe_obj.create_doe_model()
@@ -347,7 +376,8 @@ def test_reactor_premature_figure_drawing(self):
with self.assertRaisesRegex(
RuntimeError,
- "Results must be provided or the compute_FIM_full_factorial function must be run.",
+ "Results must be provided "
+ "or the compute_FIM_full_factorial function must be run.",
):
doe_obj.draw_factorial_figure()
@@ -371,7 +401,8 @@ def test_reactor_figure_drawing_no_des_var_names(self):
with self.assertRaisesRegex(
ValueError,
- "If results object is provided, you must include all the design variable names.",
+ "If results object is provided, you must "
+ "include all the design variable names.",
):
doe_obj.draw_factorial_figure(results=doe_obj.fim_factorial_results)
@@ -468,7 +499,8 @@ def test_reactor_figure_drawing_bad_sens_names(self):
with self.assertRaisesRegex(
ValueError,
- "Sensitivity design variables do not all appear in the results object keys.",
+ "Sensitivity design variables do not all appear "
+ "in the results object keys.",
):
doe_obj.draw_factorial_figure(
sensitivity_design_variables={"bad": "entry"},
@@ -490,7 +522,8 @@ def test_reactor_check_get_FIM_without_FIM(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have variable `fim`. Please make sure the model is built properly before calling `get_FIM`",
+ "Model provided does not have variable `fim`. Please make sure "
+ "the model is built properly before calling `get_FIM`",
):
doe_obj.get_FIM()
@@ -509,7 +542,9 @@ def test_reactor_check_get_sens_mat_without_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have variable `sensitivity_jacobian`. Please make sure the model is built properly before calling `get_sensitivity_matrix`",
+ "Model provided does not have variable `sensitivity_jacobian`. "
+ "Please make sure the model is built properly before calling "
+ "`get_sensitivity_matrix`",
):
doe_obj.get_sensitivity_matrix()
@@ -528,7 +563,9 @@ def test_reactor_check_get_exp_inputs_without_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_experiment_input_values`",
+ "Model provided does not have expected structure. "
+ "Please make sure model is built properly before "
+ "calling `get_experiment_input_values`",
):
doe_obj.get_experiment_input_values()
@@ -547,7 +584,9 @@ def test_reactor_check_get_exp_outputs_without_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_experiment_output_values`",
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_experiment_output_values`",
):
doe_obj.get_experiment_output_values()
@@ -566,7 +605,9 @@ def test_reactor_check_get_unknown_params_without_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_unknown_parameter_values`",
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_unknown_parameter_values`",
):
doe_obj.get_unknown_parameter_values()
@@ -585,7 +626,9 @@ def test_reactor_check_get_meas_error_without_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have expected structure. Please make sure model is built properly before calling `get_measurement_error_values`",
+ "Model provided does not have expected structure. Please make "
+ "sure model is built properly before calling "
+ "`get_measurement_error_values`",
):
doe_obj.get_measurement_error_values()
@@ -658,8 +701,14 @@ def test_bad_FD_generate_scens(self):
doe_obj = DesignOfExperiments(**DoE_args)
with self.assertRaisesRegex(
- AttributeError,
- "Finite difference option not recognized. Please contact the developers as you should not see this error.",
+ DeveloperError,
+ "Internal Pyomo implementation error:\n"
+ " "
+ "'Finite difference option not recognized. Please contact the\n"
+ " "
+ "developers as you should not see this error.'\n"
+ " "
+ "Please report this to the Pyomo Developers.",
):
doe_obj.fd_formula = "bad things"
doe_obj._generate_scenario_blocks()
@@ -679,8 +728,14 @@ def test_bad_FD_seq_compute_FIM(self):
doe_obj = DesignOfExperiments(**DoE_args)
with self.assertRaisesRegex(
- AttributeError,
- "Finite difference option not recognized. Please contact the developers as you should not see this error.",
+ DeveloperError,
+ "Internal Pyomo implementation error:\n"
+ " "
+ "'Finite difference option not recognized. Please contact the\n"
+ " "
+ "developers as you should not see this error.'\n"
+ " "
+ "Please report this to the Pyomo Developers.",
):
doe_obj.fd_formula = "bad things"
doe_obj.compute_FIM(method="sequential")
@@ -699,8 +754,14 @@ def test_bad_objective(self):
doe_obj = DesignOfExperiments(**DoE_args)
with self.assertRaisesRegex(
- AttributeError,
- "Objective option not recognized. Please contact the developers as you should not see this error.",
+ DeveloperError,
+ "Internal Pyomo implementation error:\n"
+ " "
+ "'Objective option not recognized. Please contact the developers as\n"
+ " "
+ "you should not see this error.'\n"
+ " "
+ "Please report this to the Pyomo Developers.",
):
doe_obj.objective_option = "bad things"
doe_obj.create_objective_function()
@@ -720,7 +781,8 @@ def test_no_model_for_objective(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model provided does not have variable `fim`. Please make sure the model is built properly before creating the objective.",
+ "Model provided does not have variable `fim`. Please make "
+ "sure the model is built properly before creating the objective.",
):
doe_obj.create_objective_function()
diff --git a/pyomo/contrib/doe/tests/test_doe_solve.py b/pyomo/contrib/doe/tests/test_doe_solve.py
index ea66093253b..e10d6741c38 100644
--- a/pyomo/contrib/doe/tests/test_doe_solve.py
+++ b/pyomo/contrib/doe/tests/test_doe_solve.py
@@ -19,18 +19,24 @@
pandas_available,
scipy_available,
)
+
+
from pyomo.common.fileutils import this_file_dir
import pyomo.common.unittest as unittest
-from pyomo.contrib.doe import DesignOfExperiments
-from pyomo.contrib.doe.examples.reactor_experiment import ReactorExperiment
-from pyomo.contrib.doe.examples.reactor_example import (
- ReactorExperiment as FullReactorExperiment,
- run_reactor_doe,
-)
-from pyomo.contrib.doe.tests.experiment_class_example_flags import (
- FullReactorExperimentBad,
-)
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest("Pyomo.DoE needs scipy and numpy to run tests")
+
+if scipy_available:
+ from pyomo.contrib.doe import DesignOfExperiments
+ from pyomo.contrib.doe.examples.reactor_experiment import ReactorExperiment
+ from pyomo.contrib.doe.examples.reactor_example import (
+ ReactorExperiment as FullReactorExperiment,
+ run_reactor_doe,
+ )
+ from pyomo.contrib.doe.tests.experiment_class_example_flags import (
+ FullReactorExperimentBad,
+ )
from pyomo.contrib.doe.utils import rescale_FIM
import pyomo.environ as pyo
@@ -102,26 +108,33 @@ def get_FIM_Q_L(doe_obj=None):
def get_standard_args(experiment, fd_method, obj_used):
args = {}
- args["experiment"] = experiment
- args["fd_formula"] = fd_method
- args["step"] = 1e-3
- args["objective_option"] = obj_used
- args["scale_constant_value"] = 1
- args["scale_nominal_param_value"] = True
- args["prior_FIM"] = None
- args["jac_initial"] = None
- args["fim_initial"] = None
- args["L_diagonal_lower_bound"] = 1e-7
- args["solver"] = None
- args["tee"] = False
- args["get_labeled_model_args"] = None
- args["_Cholesky_option"] = True
- args["_only_compute_fim_lower"] = True
+ args['experiment'] = experiment
+ args['fd_formula'] = fd_method
+ args['step'] = 1e-3
+ args['objective_option'] = obj_used
+ args['scale_constant_value'] = 1
+ args['scale_nominal_param_value'] = True
+ args['prior_FIM'] = None
+ args['jac_initial'] = None
+ args['fim_initial'] = None
+ args['L_diagonal_lower_bound'] = 1e-7
+ # Make solver object with
+ # good linear subroutines
+ solver = SolverFactory("ipopt")
+ solver.options["linear_solver"] = "ma57"
+ solver.options["halt_on_ampl_error"] = "yes"
+ solver.options["max_iter"] = 3000
+ args['solver'] = solver
+ args['tee'] = False
+ args['get_labeled_model_args'] = None
+ args['_Cholesky_option'] = True
+ args['_only_compute_fim_lower'] = True
return args
@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
@unittest.skipIf(not numpy_available, "Numpy is not available")
+@unittest.skipIf(not scipy_available, "scipy is not available")
class TestReactorExampleSolving(unittest.TestCase):
def test_reactor_fd_central_solve(self):
fd_method = "central"
@@ -205,6 +218,10 @@ def test_reactor_obj_det_solve(self):
doe_obj = DesignOfExperiments(**DoE_args)
+ # Improve numerical conditioning by adding a prior FIM
+ prior_FIM = doe_obj.compute_FIM()
+ doe_obj.prior_FIM = prior_FIM
+
doe_obj.run_doe()
self.assertEqual(doe_obj.results["Solver Status"], "ok")
@@ -233,7 +250,6 @@ def test_reactor_obj_cholesky_solve(self):
self.assertTrue(np.all(np.isclose(FIM, Q.T @ sigma_inv @ Q)))
def test_reactor_obj_cholesky_solve_bad_prior(self):
-
from pyomo.contrib.doe.doe import _SMALL_TOLERANCE_DEFINITENESS
fd_method = "central"
@@ -244,7 +260,8 @@ def test_reactor_obj_cholesky_solve_bad_prior(self):
DoE_args = get_standard_args(experiment, fd_method, obj_used)
# Specify a prior that is slightly negative definite
- # Because it is less than the tolerance, it should be adjusted to be positive definite
+ # Because it is less than the tolerance, it should be
+ # adjusted to be positive definite
# No error should be thrown
DoE_args["prior_FIM"] = -(_SMALL_TOLERANCE_DEFINITENESS / 100) * np.eye(4)
@@ -341,7 +358,8 @@ def test_reactor_grid_search(self):
design_ranges=design_ranges, method="sequential"
)
- # Check to make sure the lengths of the inputs in results object are indeed correct
+ # Check to make sure the lengths of the inputs
+ # in results object are indeed correct
CA_vals = doe_obj.fim_factorial_results["CA[0]"]
T_vals = doe_obj.fim_factorial_results["T[0]"]
@@ -408,7 +426,8 @@ def test_reactor_solve_bad_model(self):
with self.assertRaisesRegex(
RuntimeError,
- "Model from experiment did not solve appropriately. Make sure the model is well-posed.",
+ "Model from experiment did not solve appropriately. "
+ "Make sure the model is well-posed.",
):
doe_obj.run_doe()
diff --git a/pyomo/contrib/doe/tests/test_greybox.py b/pyomo/contrib/doe/tests/test_greybox.py
new file mode 100644
index 00000000000..f2dded77b33
--- /dev/null
+++ b/pyomo/contrib/doe/tests/test_greybox.py
@@ -0,0 +1,1154 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import copy
+import itertools
+import json
+import os.path
+
+from pyomo.common.dependencies import (
+ numpy as np,
+ numpy_available,
+ pandas as pd,
+ pandas_available,
+ scipy_available,
+)
+
+from pyomo.common.fileutils import this_file_dir
+import pyomo.common.unittest as unittest
+
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest("Pyomo.DoE needs scipy and numpy to run tests")
+
+if scipy_available:
+ from pyomo.contrib.doe import DesignOfExperiments, FIMExternalGreyBox
+ from pyomo.contrib.doe.examples.reactor_example import (
+ ReactorExperiment as FullReactorExperiment,
+ )
+ from pyomo.contrib.doe.examples.rooney_biegler_example import (
+ RooneyBieglerExperimentDoE,
+ )
+
+import pyomo.environ as pyo
+
+from pyomo.opt import SolverFactory
+
+ipopt_available = SolverFactory("ipopt").available()
+cyipopt_available = SolverFactory("cyipopt").available()
+
+currdir = this_file_dir()
+file_path = os.path.join(currdir, "..", "examples", "result.json")
+
+with open(file_path) as f:
+ data_ex = json.load(f)
+
+data_ex["control_points"] = {float(k): v for k, v in data_ex["control_points"].items()}
+
+_FD_EPSILON_FIRST = 1e-6  # Step size for first-derivative finite difference checks
+_FD_EPSILON_SECOND = 1e-4  # Step size for second-derivative finite difference checks
+
+if numpy_available:
+ # Randomly generated P.S.D. matrix
+ # Matrix is 4x4 to match example
+ # number of parameters.
+ testing_matrix = np.array(
+ [
+ [5.13730123, 1.08084953, 1.6466824, 1.09943223],
+ [1.08084953, 1.57183404, 1.50704403, 1.4969689],
+ [1.6466824, 1.50704403, 2.54754738, 1.39902838],
+ [1.09943223, 1.4969689, 1.39902838, 1.57406692],
+ ]
+ )
+
+ masking_matrix = np.triu(np.ones_like(testing_matrix))
+
+
+def get_numerical_derivative(grey_box_object=None):
+ # Internal import to avoid circular imports
+ from pyomo.contrib.doe import ObjectiveLib
+
+ # Grab current FIM value
+ current_FIM = grey_box_object._get_FIM()
+ dim = current_FIM.shape[0]
+ unperturbed_value = 0
+
+ # Find the initial value of the function
+ if grey_box_object.objective_option == ObjectiveLib.trace:
+ unperturbed_value = np.trace(np.linalg.inv(current_FIM))
+ elif grey_box_object.objective_option == ObjectiveLib.determinant:
+ unperturbed_value = np.log(np.linalg.det(current_FIM))
+ elif grey_box_object.objective_option == ObjectiveLib.minimum_eigenvalue:
+ vals_init, vecs_init = np.linalg.eig(current_FIM)
+ unperturbed_value = np.min(vals_init)
+ elif grey_box_object.objective_option == ObjectiveLib.condition_number:
+ unperturbed_value = np.linalg.cond(current_FIM)
+
+ # Calculate the numerical derivative, using forward difference
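+ # Forward difference: d f / d M[i, j] is approximated by
+ #     (f(M + eps * E[i, j]) - f(M)) / eps
+ # where E[i, j] has a one in entry (i, j) and zeros elsewhere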
+ numerical_derivative = np.zeros_like(current_FIM)
+
+ # perturb each direction
+ for i in range(dim):
+ for j in range(dim):
+ FIM_perturbed = copy.deepcopy(current_FIM)
+ FIM_perturbed[i, j] += _FD_EPSILON_FIRST
+
+ new_value_ij = 0
+ # Test which method is being used:
+ if grey_box_object.objective_option == ObjectiveLib.trace:
+ new_value_ij = np.trace(np.linalg.inv(FIM_perturbed))
+ elif grey_box_object.objective_option == ObjectiveLib.determinant:
+ new_value_ij = np.log(np.linalg.det(FIM_perturbed))
+ elif grey_box_object.objective_option == ObjectiveLib.minimum_eigenvalue:
+ vals, vecs = np.linalg.eig(FIM_perturbed)
+ new_value_ij = np.min(vals)
+ elif grey_box_object.objective_option == ObjectiveLib.condition_number:
+ new_value_ij = np.linalg.cond(FIM_perturbed)
+
+ # Calculate the derivative value from forward difference
+ diff = (new_value_ij - unperturbed_value) / _FD_EPSILON_FIRST
+
+ numerical_derivative[i, j] = diff
+
+ return numerical_derivative
+
+
+def get_numerical_second_derivative(grey_box_object=None, return_reduced=True):
+ # Internal import to avoid circular imports
+ from pyomo.contrib.doe import ObjectiveLib
+
+ # Grab current FIM value
+ current_FIM = grey_box_object._get_FIM()
+ dim = current_FIM.shape[0]
+
+ # Calculate the numerical derivative,
+ # using second order formula
+ numerical_derivative = np.zeros([dim, dim, dim, dim])
+
+ # perturb each direction
+ for i in range(dim):
+ for j in range(dim):
+ for k in range(dim):
+ for l in range(dim):
+ FIM_perturbed_1 = copy.deepcopy(current_FIM)
+ FIM_perturbed_2 = copy.deepcopy(current_FIM)
+ FIM_perturbed_3 = copy.deepcopy(current_FIM)
+ FIM_perturbed_4 = copy.deepcopy(current_FIM)
+
+ # Need 4 perturbations to cover the central difference formula
+ # H[(i, j), (k, l)] = [f(FIM + eps both)
+ # - f(FIM +/- eps one each)
+ # - f(FIM -/+ eps one each)
+ # + f(FIM - eps both)] / (4*eps**2)
+ FIM_perturbed_1[i, j] += _FD_EPSILON_SECOND
+ FIM_perturbed_1[k, l] += _FD_EPSILON_SECOND
+
+ FIM_perturbed_2[i, j] += _FD_EPSILON_SECOND
+ FIM_perturbed_2[k, l] += -_FD_EPSILON_SECOND
+
+ FIM_perturbed_3[i, j] += -_FD_EPSILON_SECOND
+ FIM_perturbed_3[k, l] += _FD_EPSILON_SECOND
+
+ FIM_perturbed_4[i, j] += -_FD_EPSILON_SECOND
+ FIM_perturbed_4[k, l] += -_FD_EPSILON_SECOND
+
+ new_values = np.array([0.0, 0.0, 0.0, 0.0])
+ # Test which method is being used:
+ if grey_box_object.objective_option == ObjectiveLib.trace:
+ new_values[0] = np.trace(np.linalg.inv(FIM_perturbed_1))
+ new_values[1] = np.trace(np.linalg.inv(FIM_perturbed_2))
+ new_values[2] = np.trace(np.linalg.inv(FIM_perturbed_3))
+ new_values[3] = np.trace(np.linalg.inv(FIM_perturbed_4))
+ elif grey_box_object.objective_option == ObjectiveLib.determinant:
+ new_values[0] = np.log(np.linalg.det(FIM_perturbed_1))
+ new_values[1] = np.log(np.linalg.det(FIM_perturbed_2))
+ new_values[2] = np.log(np.linalg.det(FIM_perturbed_3))
+ new_values[3] = np.log(np.linalg.det(FIM_perturbed_4))
+ elif (
+ grey_box_object.objective_option
+ == ObjectiveLib.minimum_eigenvalue
+ ):
+ vals, vecs = np.linalg.eig(FIM_perturbed_1)
+ new_values[0] = np.min(vals)
+ vals, vecs = np.linalg.eig(FIM_perturbed_2)
+ new_values[1] = np.min(vals)
+ vals, vecs = np.linalg.eig(FIM_perturbed_3)
+ new_values[2] = np.min(vals)
+ vals, vecs = np.linalg.eig(FIM_perturbed_4)
+ new_values[3] = np.min(vals)
+ elif (
+ grey_box_object.objective_option
+ == ObjectiveLib.condition_number
+ ):
+ new_values[0] = np.linalg.cond(FIM_perturbed_1)
+ new_values[1] = np.linalg.cond(FIM_perturbed_2)
+ new_values[2] = np.linalg.cond(FIM_perturbed_3)
+ new_values[3] = np.linalg.cond(FIM_perturbed_4)
+
+ # Calculate the derivative value from second order difference formula
+ diff = (
+ new_values[0] - new_values[1] - new_values[2] + new_values[3]
+ ) / (4 * _FD_EPSILON_SECOND**2)
+
+ numerical_derivative[i, j, k, l] = diff
+
+ if return_reduced:
+ # This considers a 4-parameter system
+ # which is what these tests are based
+ # upon. This can be generalized but
+ # requires checking the parameter length.
+ #
+ # Make ordered quads with no repeats
+ # of the ordered pairs
+ ordered_pairs = itertools.combinations_with_replacement(range(4), 2)
+ ordered_pairs_list = list(itertools.combinations_with_replacement(range(4), 2))
+ ordered_quads = itertools.combinations_with_replacement(ordered_pairs, 2)
+
+ numerical_derivative_reduced = np.zeros((10, 10))
+
+ for i in ordered_quads:
+ row = ordered_pairs_list.index(i[0])
+ col = ordered_pairs_list.index(i[1])
+ numerical_derivative_reduced[row, col] = numerical_derivative[
+ i[0][0], i[0][1], i[1][0], i[1][1]
+ ]
+
+ numerical_derivative_reduced += (
+ numerical_derivative_reduced.transpose()
+ - np.diag(np.diag(numerical_derivative_reduced))
+ )
+ return numerical_derivative_reduced
+
+ # Otherwise return numerical derivative as normal
+ return numerical_derivative
+
+
+def get_standard_args(experiment, fd_method, obj_used):
+ args = {}
+ args['experiment'] = experiment
+ args['fd_formula'] = fd_method
+ args['step'] = 1e-3
+ args['objective_option'] = obj_used
+ args['scale_constant_value'] = 1
+ args['scale_nominal_param_value'] = True
+ args['prior_FIM'] = None
+ args['jac_initial'] = None
+ args['fim_initial'] = None
+ args['L_diagonal_lower_bound'] = 1e-7
+ # Make solver object with
+ # good linear subroutines
+ solver = SolverFactory("ipopt")
+ solver.options["linear_solver"] = "ma57"
+ solver.options["halt_on_ampl_error"] = "yes"
+ solver.options["max_iter"] = 3000
+ args['solver'] = solver
+ # Make greybox solver object with
+ # good linear subroutines
+ grey_box_solver = SolverFactory("cyipopt")
+ grey_box_solver.config.options["linear_solver"] = "ma57"
+ grey_box_solver.config.options['tol'] = 1e-4
+ grey_box_solver.config.options['mu_strategy'] = "monotone"
+ args['grey_box_solver'] = grey_box_solver
+ args['tee'] = False
+ args['grey_box_tee'] = True
+ args['get_labeled_model_args'] = None
+ args['_Cholesky_option'] = True
+ args['_only_compute_fim_lower'] = True
+ return args
+
+
+def make_greybox_and_doe_objects(objective_option):
+ fd_method = "central"
+ obj_used = objective_option
+
+ experiment = FullReactorExperiment(data_ex, 10, 3)
+
+ DoE_args = get_standard_args(experiment, fd_method, obj_used)
+ DoE_args["use_grey_box_objective"] = True
+ DoE_args["prior_FIM"] = testing_matrix
+
+ doe_obj = DesignOfExperiments(**DoE_args)
+ doe_obj.create_doe_model()
+
+ grey_box_object = FIMExternalGreyBox(
+ doe_object=doe_obj, objective_option=doe_obj.objective_option
+ )
+
+ return doe_obj, grey_box_object
+
+
+def make_greybox_and_doe_objects_rooney_biegler(objective_option):
+ fd_method = "central"
+ obj_used = objective_option
+
+ experiment = RooneyBieglerExperimentDoE(data={'hour': 2, 'y': 10.3})
+
+ DoE_args = get_standard_args(experiment, fd_method, obj_used)
+ DoE_args["use_grey_box_objective"] = True
+
+ # Make a custom grey box solver
+ grey_box_solver = SolverFactory("cyipopt")
+ grey_box_solver.config.options["linear_solver"] = "ma57"
+ grey_box_solver.config.options['tol'] = 1e-4
+ grey_box_solver.config.options['mu_strategy'] = "monotone"
+
+ # Add the grey box solver to DoE_args
+ DoE_args["grey_box_solver"] = grey_box_solver
+
+ data = [[1, 8.3], [7, 19.8]]
+ FIM_prior = np.zeros((2, 2))
+ # Calculate prior using existing experiments
+ for i in range(len(data)):
+ prev_experiment = RooneyBieglerExperimentDoE(
+ data={'hour': data[i][0], 'y': data[i][1]}
+ )
+ doe_obj = DesignOfExperiments(
+ **get_standard_args(prev_experiment, fd_method, obj_used)
+ )
+
+ FIM_prior += doe_obj.compute_FIM(method='sequential')
+ DoE_args["prior_FIM"] = FIM_prior
+
+ doe_obj = DesignOfExperiments(**DoE_args)
+ doe_obj.create_doe_model()
+
+ grey_box_object = FIMExternalGreyBox(
+ doe_object=doe_obj, objective_option=doe_obj.objective_option
+ )
+
+ return doe_obj, grey_box_object
+
+
+# Test whether or not cyipopt
+# is appropriately calling the
+# linear solvers.
+bad_message = "Invalid option encountered."
+cyipopt_call_working = True
+if numpy_available and scipy_available and ipopt_available and cyipopt_available:
+ try:
+ objective_option = "determinant"
+ doe_object, _ = make_greybox_and_doe_objects_rooney_biegler(
+ objective_option=objective_option
+ )
+
+ # Use the grey box objective
+ doe_object.use_grey_box = True
+
+ # Run for 1 iteration to see if
+ # cyipopt was called.
+ doe_object.grey_box_solver.config.options["max_iter"] = 1
+
+ doe_object.run_doe()
+
+ cyipopt_call_working = not (
+ bad_message in doe_object.results["Termination Message"]
+ )
+ except:
+ cyipopt_call_working = False
+
+
+@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
+@unittest.skipIf(not numpy_available, "Numpy is not available")
+@unittest.skipIf(not scipy_available, "scipy is not available")
+@unittest.skipIf(not cyipopt_available, "'cyipopt' is not available")
+class TestFIMExternalGreyBox(unittest.TestCase):
+ # Test that we can properly
+ # set the inputs for the
+ # Grey Box object
+ def test_set_inputs(self):
+ objective_option = "trace"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the values from get_FIM
+ grey_box_FIM = grey_box_object._get_FIM()
+
+ self.assertTrue(np.all(np.isclose(grey_box_FIM, testing_matrix)))
+
+ # Testing that getting the
+ # input names works properly
+ def test_input_names(self):
+ objective_option = "trace"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Hard-coded names of the inputs, in the order we expect
+ input_names = [
+ ('A1', 'A1'),
+ ('A1', 'A2'),
+ ('A1', 'E1'),
+ ('A1', 'E2'),
+ ('A2', 'A2'),
+ ('A2', 'E1'),
+ ('A2', 'E2'),
+ ('E1', 'E1'),
+ ('E1', 'E2'),
+ ('E2', 'E2'),
+ ]
+
+ # Grabbing input names from grey box object
+ input_names_gb = grey_box_object.input_names()
+
+ self.assertListEqual(input_names, input_names_gb)
+
+ # Testing that getting the
+ # output names works properly
+ def test_output_names(self):
+ # Need to test for each objective type
+ # A-opt
+ objective_option = "trace"
+ doe_obj_A, grey_box_object_A = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # D-opt
+ objective_option = "determinant"
+ doe_obj_D, grey_box_object_D = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # E-opt
+ objective_option = "minimum_eigenvalue"
+ doe_obj_E, grey_box_object_E = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # ME-opt
+ objective_option = "condition_number"
+ doe_obj_ME, grey_box_object_ME = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Hard-coded names of the outputs
+ # There is one element per
+ # objective type
+ output_names = ['A-opt', 'log-D-opt', 'E-opt', 'ME-opt']
+
+ # Grabbing output names from grey box objects
+ output_names_gb = []
+ output_names_gb.extend(grey_box_object_A.output_names())
+ output_names_gb.extend(grey_box_object_D.output_names())
+ output_names_gb.extend(grey_box_object_E.output_names())
+ output_names_gb.extend(grey_box_object_ME.output_names())
+
+ self.assertListEqual(output_names, output_names_gb)
+
+ # Testing output computation
+ def test_outputs_A_opt(self):
+ objective_option = "trace"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ grey_box_A_opt = grey_box_object.evaluate_outputs()
+
+ A_opt = np.trace(np.linalg.inv(testing_matrix))
+
+ self.assertTrue(np.isclose(grey_box_A_opt, A_opt))
+
+ def test_outputs_D_opt(self):
+ objective_option = "determinant"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ grey_box_D_opt = grey_box_object.evaluate_outputs()
+
+ D_opt = np.log(np.linalg.det(testing_matrix))
+
+ self.assertTrue(np.isclose(grey_box_D_opt, D_opt))
+
+ def test_outputs_E_opt(self):
+ objective_option = "minimum_eigenvalue"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ grey_box_E_opt = grey_box_object.evaluate_outputs()
+
+ vals, vecs = np.linalg.eig(testing_matrix)
+ E_opt = np.min(vals)
+
+ self.assertTrue(np.isclose(grey_box_E_opt, E_opt))
+
+ def test_outputs_ME_opt(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ grey_box_ME_opt = grey_box_object.evaluate_outputs()
+
+ ME_opt = np.linalg.cond(testing_matrix)
+
+ self.assertTrue(np.isclose(grey_box_ME_opt, ME_opt))
+
+ # Testing Jacobian computation
+ def test_jacobian_A_opt(self):
+ objective_option = "trace"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Jacobian values
+ utri_vals_jac = grey_box_object.evaluate_jacobian_outputs().toarray()
+
+ # Recover the Jacobian in Matrix Form
+ jac = np.zeros_like(grey_box_object._get_FIM())
+ jac[np.triu_indices_from(jac)] = utri_vals_jac
+ jac += jac.transpose() - np.diag(np.diag(jac))
+
+ # Get numerical derivative matrix
+ jac_FD = get_numerical_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(jac, jac_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_jacobian_D_opt(self):
+ objective_option = "determinant"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Jacobian values
+ utri_vals_jac = grey_box_object.evaluate_jacobian_outputs().toarray()
+
+ # Recover the Jacobian in Matrix Form
+ jac = np.zeros_like(grey_box_object._get_FIM())
+ jac[np.triu_indices_from(jac)] = utri_vals_jac
+ jac += jac.transpose() - np.diag(np.diag(jac))
+
+ # Get numerical derivative matrix
+ jac_FD = get_numerical_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(jac, jac_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_jacobian_E_opt(self):
+ objective_option = "minimum_eigenvalue"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Jacobian values
+ utri_vals_jac = grey_box_object.evaluate_jacobian_outputs().toarray()
+
+ # Recover the Jacobian in Matrix Form
+ jac = np.zeros_like(grey_box_object._get_FIM())
+ jac[np.triu_indices_from(jac)] = utri_vals_jac
+ jac += jac.transpose() - np.diag(np.diag(jac))
+
+ # Get numerical derivative matrix
+ jac_FD = get_numerical_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(jac, jac_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_jacobian_ME_opt(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Jacobian values
+ utri_vals_jac = grey_box_object.evaluate_jacobian_outputs().toarray()
+
+ # Recover the Jacobian in Matrix Form
+ jac = np.zeros_like(grey_box_object._get_FIM())
+ jac[np.triu_indices_from(jac)] = utri_vals_jac
+ jac += jac.transpose() - np.diag(np.diag(jac))
+
+ # Get numerical derivative matrix
+ jac_FD = get_numerical_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(jac, jac_FD, rtol=1e-4, atol=1e-4)))
+
+ # Testing Hessian Computation
+ def test_hessian_A_opt(self):
+ objective_option = "trace"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Hessian values
+ hess_vals_from_gb = grey_box_object.evaluate_hessian_outputs().toarray()
+
+ # Recover the full (symmetric) Hessian in matrix form
+ hess_gb = hess_vals_from_gb
+ hess_gb += hess_gb.transpose() - np.diag(np.diag(hess_gb))
+
+ # Get numerical derivative matrix
+ hess_FD = get_numerical_second_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(hess_gb, hess_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_hessian_D_opt(self):
+ objective_option = "determinant"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Hessian values
+ hess_vals_from_gb = grey_box_object.evaluate_hessian_outputs().toarray()
+
+ # Recover the full (symmetric) Hessian in matrix form
+ hess_gb = hess_vals_from_gb
+ hess_gb += hess_gb.transpose() - np.diag(np.diag(hess_gb))
+
+ # Get numerical derivative matrix
+ hess_FD = get_numerical_second_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(hess_gb, hess_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_hessian_E_opt(self):
+ objective_option = "minimum_eigenvalue"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Set input values to the random testing matrix
+ grey_box_object.set_input_values(testing_matrix[masking_matrix > 0])
+
+ # Grab the Hessian values
+ hess_vals_from_gb = grey_box_object.evaluate_hessian_outputs().toarray()
+
+ # Recover the full (symmetric) Hessian in matrix form
+ hess_gb = hess_vals_from_gb
+ hess_gb += hess_gb.transpose() - np.diag(np.diag(hess_gb))
+
+ # Get numerical derivative matrix
+ hess_FD = get_numerical_second_derivative(grey_box_object)
+
+ # assert that each component is close
+ self.assertTrue(np.all(np.isclose(hess_gb, hess_FD, rtol=1e-4, atol=1e-4)))
+
+ def test_equality_constraint_names(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Grab equality constraint names
+ eq_con_names_gb = grey_box_object.equality_constraint_names()
+
+ # Equality constraint names should be an
+ # empty list.
+ self.assertListEqual(eq_con_names_gb, [])
+
+ def test_evaluate_equality_constraints(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Evaluate the equality constraints
+ eq_con_vals_gb = grey_box_object.evaluate_equality_constraints()
+
+ # Equality constraint values should be `None`
+ # There are no equality constraints.
+ self.assertIsNone(eq_con_vals_gb)
+
+ def test_evaluate_jacobian_equality_constraints(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Evaluate the Jacobian of the equality constraints
+ jac_eq_con_vals_gb = grey_box_object.evaluate_jacobian_equality_constraints()
+
+ # Jacobian of equality constraints
+ # should be `None` as there are no
+ # equality constraints
+ self.assertIsNone(jac_eq_con_vals_gb)
+
+ def test_evaluate_hessian_equality_constraints(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Evaluate the Hessian of the equality constraints
+ hess_eq_con_vals_gb = grey_box_object.evaluate_hessian_equality_constraints()
+
+ # Hessian of equality constraints
+ # should be `None` as there are no
+ # equality constraints
+ self.assertIsNone(hess_eq_con_vals_gb)
+
+ # The following few tests will test whether
+ # the DoE problem with grey box is built
+ # properly.
+ def test_A_opt_greybox_build(self):
+ objective_option = "trace"
+ doe_obj, _ = make_greybox_and_doe_objects(objective_option=objective_option)
+
+ # Build the greybox objective block
+ # on the DoE object
+ doe_obj.create_grey_box_objective_function()
+
+ # Check to see if each component exists
+ all_exist = True
+
+ # Check output and value
+ # The initial FIM will be the prior FIM
+ # plus the identity matrix.
+ A_opt_val = np.trace(np.linalg.inv(testing_matrix + np.eye(4)))
+
+ try:
+ A_opt_val_gb = doe_obj.model.obj_cons.egb_fim_block.outputs["A-opt"].value
+ except:
+ A_opt_val_gb = -10.0 # Trace should never be negative
+ all_exist = False
+
+ # Intermediate check for output existence
+ self.assertTrue(all_exist)
+ self.assertAlmostEqual(A_opt_val, A_opt_val_gb)
+
+ # Check inputs and values
+ try:
+ input_values = []
+ for i in _.input_names():
+ input_values.append(doe_obj.model.obj_cons.egb_fim_block.inputs[i]())
+ except:
+ input_values = np.zeros_like(testing_matrix)
+ all_exist = False
+
+ # Final check on existence of inputs
+ self.assertTrue(all_exist)
+ # Rebuild the current FIM from the input
+ # values taken from the egb_fim_block
+ current_FIM = np.zeros_like(testing_matrix)
+ current_FIM[np.triu_indices_from(current_FIM)] = input_values
+ current_FIM += current_FIM.transpose() - np.diag(np.diag(current_FIM))
+
+ self.assertTrue(np.all(np.isclose(current_FIM, testing_matrix + np.eye(4))))
+
+ def test_D_opt_greybox_build(self):
+ objective_option = "determinant"
+ doe_obj, _ = make_greybox_and_doe_objects(objective_option=objective_option)
+
+ # Build the greybox objective block
+ # on the DoE object
+ doe_obj.create_grey_box_objective_function()
+
+ # Check to see if each component exists
+ all_exist = True
+
+ # Check output and value
+ # The initial FIM will be the prior FIM
+ # plus the identity matrix.
+ D_opt_val = np.log(np.linalg.det(testing_matrix + np.eye(4)))
+
+ try:
+ D_opt_val_gb = doe_obj.model.obj_cons.egb_fim_block.outputs[
+ "log-D-opt"
+ ].value
+ except:
+ D_opt_val_gb = -100.0  # Sentinel; log-determinant of the test FIM is far above this
+ all_exist = False
+
+ # Intermediate check for output existence
+ self.assertTrue(all_exist)
+ self.assertAlmostEqual(D_opt_val, D_opt_val_gb)
+
+ # Check inputs and values
+ try:
+ input_values = []
+ for i in _.input_names():
+ input_values.append(doe_obj.model.obj_cons.egb_fim_block.inputs[i]())
+ except:
+ input_values = np.zeros_like(testing_matrix)
+ all_exist = False
+
+ # Final check on existence of inputs
+ self.assertTrue(all_exist)
+ # Rebuild the current FIM from the input
+ # values taken from the egb_fim_block
+ current_FIM = np.zeros_like(testing_matrix)
+ current_FIM[np.triu_indices_from(current_FIM)] = input_values
+ current_FIM += current_FIM.transpose() - np.diag(np.diag(current_FIM))
+
+ self.assertTrue(np.all(np.isclose(current_FIM, testing_matrix + np.eye(4))))
+
+ def test_E_opt_greybox_build(self):
+ objective_option = "minimum_eigenvalue"
+ doe_obj, _ = make_greybox_and_doe_objects(objective_option=objective_option)
+
+ # Build the greybox objective block
+ # on the DoE object
+ doe_obj.create_grey_box_objective_function()
+
+ # Check to see if each component exists
+ all_exist = True
+
+ # Check output and value
+ # The initial FIM will be the prior FIM
+ # plus the identity matrix.
+ vals, vecs = np.linalg.eig(testing_matrix + np.eye(4))
+ E_opt_val = np.min(vals)
+
+ try:
+ E_opt_val_gb = doe_obj.model.obj_cons.egb_fim_block.outputs["E-opt"].value
+ except:
+ E_opt_val_gb = -10.0  # Minimum eigenvalue of the test FIM should never be negative
+ all_exist = False
+
+ # Intermediate check for output existence
+ self.assertTrue(all_exist)
+ self.assertAlmostEqual(E_opt_val, E_opt_val_gb)
+
+ # Check inputs and values
+ try:
+ input_values = []
+ for name in grey_box_object.input_names():
+ input_values.append(doe_obj.model.obj_cons.egb_fim_block.inputs[name]())
+ except Exception:
+ input_values = np.zeros_like(testing_matrix)
+ all_exist = False
+
+ # Final check on existence of inputs
+ self.assertTrue(all_exist)
+ # Rebuild the current FIM from the input values taken from the egb_fim_block
+ current_FIM = np.zeros_like(testing_matrix)
+ current_FIM[np.triu_indices_from(current_FIM)] = input_values
+ current_FIM += current_FIM.transpose() - np.diag(np.diag(current_FIM))
+
+ self.assertTrue(np.all(np.isclose(current_FIM, testing_matrix + np.eye(4))))
+
+ def test_ME_opt_greybox_build(self):
+ objective_option = "condition_number"
+ doe_obj, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ # Build the grey-box objective block on the DoE object
+ doe_obj.create_grey_box_objective_function()
+
+ # Check to see if each component exists
+ all_exist = True
+
+ # Check the ME-optimality output value (condition number of the FIM).
+ # The initial FIM is the prior FIM plus the identity matrix.
+ ME_opt_val = np.linalg.cond(testing_matrix + np.eye(4))
+
+ try:
+ ME_opt_val_gb = doe_obj.model.obj_cons.egb_fim_block.outputs["ME-opt"].value
+ except Exception:
+ ME_opt_val_gb = -10.0  # Sentinel value; the condition number should never be negative
+ all_exist = False
+
+ # Intermediate check for output existence
+ self.assertTrue(all_exist)
+ self.assertAlmostEqual(ME_opt_val, ME_opt_val_gb)
+
+ # Check inputs and values
+ try:
+ input_values = []
+ for name in grey_box_object.input_names():
+ input_values.append(doe_obj.model.obj_cons.egb_fim_block.inputs[name]())
+ except Exception:
+ input_values = np.zeros_like(testing_matrix)
+ all_exist = False
+
+ # Final check on existence of inputs
+ self.assertTrue(all_exist)
+ # Rebuild the current FIM from the input values taken from the egb_fim_block
+ current_FIM = np.zeros_like(testing_matrix)
+ current_FIM[np.triu_indices_from(current_FIM)] = input_values
+ current_FIM += current_FIM.transpose() - np.diag(np.diag(current_FIM))
+
+ self.assertTrue(np.all(np.isclose(current_FIM, testing_matrix + np.eye(4))))
+
+ # Testing all the error messages
+ def test_constructor_doe_object_error(self):
+ with self.assertRaisesRegex(
+ ValueError,
+ "DoE Object must be provided to build external grey box of the FIM.",
+ ):
+ FIMExternalGreyBox(doe_object=None)
+
+ def test_constructor_objective_lib_error(self):
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+ with self.assertRaisesRegex(
+ ValueError, "'Bad Objective Option' is not a valid ObjectiveLib"
+ ):
+ FIMExternalGreyBox(
+ doe_object=doe_object, objective_option="Bad Objective Option"
+ )
+
+ def test_output_names_obj_lib_error(self):
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
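+ # Overwrite the validated option with an arbitrary string so the subsequent
+ # call hits the ObjectiveLib validation error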
+ grey_box_object.objective_option = "Bad Objective Option"
+
+ with self.assertRaisesRegex(
+ ValueError, "'Bad Objective Option' is not a valid ObjectiveLib"
+ ):
+ grey_box_object.output_names()
+
+ def test_evaluate_outputs_obj_lib_error(self):
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ grey_box_object.objective_option = "Bad Objective Option"
+
+ with self.assertRaisesRegex(
+ ValueError, "'Bad Objective Option' is not a valid ObjectiveLib"
+ ):
+ grey_box_object.evaluate_outputs()
+
+ def test_evaluate_jacobian_outputs_obj_lib_error(self):
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ grey_box_object.objective_option = "Bad Objective Option"
+
+ with self.assertRaisesRegex(
+ ValueError, "'Bad Objective Option' is not a valid ObjectiveLib"
+ ):
+ grey_box_object.evaluate_jacobian_outputs()
+
+ def test_evaluate_hessian_outputs_obj_lib_error(self):
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects(
+ objective_option=objective_option
+ )
+
+ grey_box_object.objective_option = "Bad Objective Option"
+
+ with self.assertRaisesRegex(
+ ValueError, "'Bad Objective Option' is not a valid ObjectiveLib"
+ ):
+ grey_box_object.evaluate_hessian_outputs()
+
+ # Test all versions of solving
+ # using grey box
+ @unittest.skipIf(
+ not cyipopt_call_working, "cyipopt is not properly accessing linear solvers"
+ )
+ def test_solve_D_optimality_log_determinant(self):
+ # Two locally optimal design points exist
+ # (time, optimal objective value)
+ # Here, the objective value is
+ # log-10(determinant) of the FIM
+ optimal_experimental_designs = [
+ np.array([2.24, 4.33]),
+ np.array([10.00, 4.35]),
+ ]
+ objective_option = "determinant"
+ doe_object, grey_box_object = make_greybox_and_doe_objects_rooney_biegler(
+ objective_option=objective_option
+ )
+
+ # Set to use the grey box objective
+ doe_object.use_grey_box = True
+
+ # Solve the model
+ doe_object.run_doe()
+
+ print("Termination Message")
+ print(doe_object.results["Termination Message"])
+ print(cyipopt_call_working)
+ print(bad_message in doe_object.results["Termination Message"])
+ print("End Message")
+
+ optimal_time_val = doe_object.results["Experiment Design"][0]
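+ # The model objective is the natural-log determinant; convert it to log10 to
+ # match the reference values above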
+ optimal_obj_val = np.log10(np.exp(pyo.value(doe_object.model.objective)))
+
+ optimal_design_np_array = np.array([optimal_time_val, optimal_obj_val])
+
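+ # Accept convergence to either of the two locally optimal designs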
+ self.assertTrue(
+ np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[0], 1e-1
+ )
+ )
+ or np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[1], 1e-1
+ )
+ )
+ )
+
+ @unittest.skipIf(
+ not cyipopt_call_working, "cyipopt is not properly accessing linear solvers"
+ )
+ def test_solve_A_optimality_trace_of_inverse(self):
+ # Two locally optimal design points exist
+ # (time, optimal objective value)
+ # Here, the objective value is
+ # trace(inverse(FIM))
+ optimal_experimental_designs = [
+ np.array([1.94, 0.0295]),
+ np.array([9.9, 0.0366]),
+ ]
+ objective_option = "trace"
+ doe_object, grey_box_object = make_greybox_and_doe_objects_rooney_biegler(
+ objective_option=objective_option
+ )
+
+ # Set to use the grey box objective
+ doe_object.use_grey_box = True
+
+ # Solve the model
+ doe_object.run_doe()
+
+ optimal_time_val = doe_object.results["Experiment Design"][0]
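+ # The trace objective value is compared directly; no transformation is needed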
+ optimal_obj_val = doe_object.model.objective()
+
+ optimal_design_np_array = np.array([optimal_time_val, optimal_obj_val])
+
+ self.assertTrue(
+ np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[0], 1e-1
+ )
+ )
+ or np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[1], 1e-1
+ )
+ )
+ )
+
+ @unittest.skipIf(
+ not cyipopt_call_working, "cyipopt is not properly accessing linear solvers"
+ )
+ def test_solve_E_optimality_minimum_eigenvalue(self):
+ # Two locally optimal design points exist
+ # (time, optimal objective value)
+ # Here, the objective value is
+ # minimum eigenvalue of the FIM
+ optimal_experimental_designs = [
+ np.array([1.92, 36.018]),
+ np.array([10.00, 28.349]),
+ ]
+ objective_option = "minimum_eigenvalue"
+ doe_object, grey_box_object = make_greybox_and_doe_objects_rooney_biegler(
+ objective_option=objective_option
+ )
+
+ # Set to use the grey box objective
+ doe_object.use_grey_box = True
+
+ # Solve the model
+ doe_object.run_doe()
+
+ optimal_time_val = doe_object.results["Experiment Design"][0]
+ optimal_obj_val = doe_object.model.objective()
+
+ optimal_design_np_array = np.array([optimal_time_val, optimal_obj_val])
+
+ self.assertTrue(
+ np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[0], 1e-1
+ )
+ )
+ or np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[1], 1e-1
+ )
+ )
+ )
+
+ @unittest.skipIf(
+ not cyipopt_call_working, "cyipopt is not properly accessing linear solvers"
+ )
+ def test_solve_ME_optimality_condition_number(self):
+ # Two locally optimal design points exist
+ # (time, optimal objective value)
+ # Here, the objective value is
+ # condition number of the FIM
+ optimal_experimental_designs = [
+ np.array([1.59, 15.22]),
+ np.array([10.00, 27.675]),
+ ]
+ objective_option = "condition_number"
+ doe_object, grey_box_object = make_greybox_and_doe_objects_rooney_biegler(
+ objective_option=objective_option
+ )
+
+ # Set to use the grey box objective
+ doe_object.use_grey_box = True
+
+ # Solve the model
+ doe_object.run_doe()
+
+ optimal_time_val = doe_object.results["Experiment Design"][0]
+ optimal_obj_val = doe_object.model.objective()
+
+ optimal_design_np_array = np.array([optimal_time_val, optimal_obj_val])
+
+ self.assertTrue(
+ np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[0], 1e-1
+ )
+ )
+ or np.all(
+ np.isclose(
+ optimal_design_np_array, optimal_experimental_designs[1], 1e-1
+ )
+ )
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/pyomo/contrib/doe/utils.py b/pyomo/contrib/doe/utils.py
index 26b991a753a..437bdcbdc43 100644
--- a/pyomo/contrib/doe/utils.py
+++ b/pyomo/contrib/doe/utils.py
@@ -92,39 +92,6 @@ def rescale_FIM(FIM, param_vals):
return scaled_FIM
-# TODO: Add swapping parameters for variables helper function
-# def get_parameters_from_suffix(suffix, fix_vars=False):
-# """
-# Finds the Params within the suffix provided. It will also check to see
-# if there are Vars in the suffix provided. ``fix_vars`` will indicate
-# if we should fix all the Vars in the set or not.
-#
-# Parameters
-# ----------
-# suffix: pyomo Suffix object, contains the components to be checked
-# as keys
-# fix_vars: boolean, whether or not to fix the Vars, default = False
-#
-# Returns
-# -------
-# param_list: list of Param
-# """
-# param_list = []
-#
-# # FIX THE MODEL TREE ISSUE WHERE I GET base_model. INSTEAD OF
-# # Check keys if they are Param or Var. Fix the vars if ``fix_vars`` is True
-# for k, v in suffix.items():
-# if isinstance(k, ParamData):
-# param_list.append(k.name)
-# elif isinstance(k, VarData):
-# if fix_vars:
-# k.fix()
-# else:
-# pass # ToDo: Write error for suffix keys that aren't ParamData or VarData
-#
-# return param_list
-
-
def check_FIM(FIM):
"""
Checks that the FIM is square, positive definite, and symmetric.