30 changes: 23 additions & 7 deletions ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py
@@ -13,12 +13,18 @@
     ObjectivePFeasibleFrontierPlot,
 )
 from ax.core.objective import MultiObjective, Objective
-from ax.core.optimization_config import MultiObjectiveOptimizationConfig
+from ax.core.optimization_config import (
+    MultiObjectiveOptimizationConfig,
+    OptimizationConfig,
+)
 from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
 from ax.core.types import ComparisonOp
 from ax.exceptions.core import UnsupportedError, UserInputError
 from ax.utils.common.testutils import TestCase
-from ax.utils.testing.core_stubs import get_branin_experiment, get_branin_metric
+from ax.utils.testing.core_stubs import (
+    get_branin_experiment_with_multi_objective,
+    get_branin_metric,
+)
 from ax.utils.testing.mock import mock_botorch_optimize
 from botorch.utils.testing import skip_if_import_error
 from pyre_extensions import none_throws
@@ -29,8 +35,12 @@ def setUp(self) -> None:
         super().setUp()

         torch.manual_seed(0)
-        self.experiment = get_branin_experiment(
-            with_completed_batch=True, with_absolute_constraint=True
+        self.experiment = get_branin_experiment_with_multi_objective(
+            with_completed_batch=True, with_absolute_constraint=True, num_objectives=3
         )
+        self.experiment.optimization_config = OptimizationConfig(
+            objective=Objective(metric=self.experiment.metrics["branin_a"]),
+            outcome_constraints=self.experiment.optimization_config.outcome_constraints,
+        )
         oc = none_throws(self.experiment.optimization_config).outcome_constraints[0]
         oc.bound = 10.0
@@ -50,13 +60,13 @@ def test_compute(self) -> None:
             experiment=self.experiment, adapter=adapter
         )
         self.assertEqual(
-            json.loads(card.blob)["layout"]["xaxis"]["title"]["text"], "branin"
+            json.loads(card.blob)["layout"]["xaxis"]["title"]["text"], "branin_a"
         )
         self.assertEqual(
             json.loads(card.blob)["layout"]["yaxis"]["title"]["text"],
             "% Chance of Satisfying the Constraints",
         )
-        self.assertFalse(card.df["branin_sem"].isna().any())
+        self.assertFalse(card.df["branin_a_sem"].isna().any())
         self.assertTrue(card.df["p_feasible_sem"].isna().all())

     def test_no_exceptions(self) -> None:
@@ -90,9 +100,15 @@ def test_no_exceptions(self) -> None:
         ):
             ObjectivePFeasibleFrontierPlot().compute(experiment=self.experiment)
         self.experiment.add_tracking_metric(get_branin_metric("branin2"))
+        # Get only tracking metrics, excluding the objective metric to avoid
+        # "Cannot constrain on objective metric" error
+        constraint_metrics = [
+            self.experiment.metrics["branin_b"],
+            self.experiment.metrics["branin_c"],
+        ]
         opt_config.outcome_constraints = [
             ScalarizedOutcomeConstraint(
-                metrics=list(self.experiment.metrics.values()),
+                metrics=constraint_metrics,
                 weights=[1.0, 1.0],
                 relative=False,
                 bound=10.0,
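Taken together, the updated test follows the pattern sketched below. This is an illustrative reconstruction assembled from the hunks above, not code from the PR; it assumes the `branin_a`/`branin_b`/`branin_c` metric names produced by `get_branin_experiment_with_multi_objective(num_objectives=3)`, and the constraint's `op` (cut off in the diff) is chosen arbitrarily.

```python
# Illustrative sketch only: a single-objective config on "branin_a" built from
# the multi-objective Branin stub, with the remaining metrics used in a
# scalarized constraint. The op argument is an assumption.
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.types import ComparisonOp
from ax.utils.testing.core_stubs import get_branin_experiment_with_multi_objective

experiment = get_branin_experiment_with_multi_objective(
    with_completed_batch=True, with_absolute_constraint=True, num_objectives=3
)
# Keep only "branin_a" as the objective; the former objective metrics
# "branin_b" and "branin_c" are then free to appear in outcome constraints.
experiment.optimization_config = OptimizationConfig(
    objective=Objective(metric=experiment.metrics["branin_a"]),
    outcome_constraints=experiment.optimization_config.outcome_constraints,
)
# Constraining on anything except the objective metric is allowed.
experiment.optimization_config.outcome_constraints = [
    ScalarizedOutcomeConstraint(
        metrics=[experiment.metrics["branin_b"], experiment.metrics["branin_c"]],
        weights=[1.0, 1.0],
        op=ComparisonOp.LEQ,  # assumed; the diff does not show the op used
        bound=10.0,
        relative=False,
    )
]
```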
30 changes: 15 additions & 15 deletions ax/core/optimization_config.py
@@ -180,12 +180,6 @@ def _validate_transformed_optimization_config(
                 "Use MultiObjectiveOptimizationConfig instead."
             )
         outcome_constraints = outcome_constraints or []
-        # Only vaidate `outcome_constraints`
-        outcome_constraints = [
-            constraint
-            for constraint in outcome_constraints
-            if isinstance(constraint, ScalarizedOutcomeConstraint) is False
-        ]
         unconstrainable_metrics = objective.get_unconstrainable_metrics()
         OptimizationConfig._validate_outcome_constraints(
             unconstrainable_metrics=unconstrainable_metrics,
@@ -197,20 +191,26 @@ def _validate_outcome_constraints(
         unconstrainable_metrics: list[Metric],
         outcome_constraints: list[OutcomeConstraint],
     ) -> None:
-        constraint_metrics = [
-            constraint.metric.name for constraint in outcome_constraints
-        ]
+        constraint_metrics = []
+        for oc in outcome_constraints:
+            if isinstance(oc, ScalarizedOutcomeConstraint):
+                constraint_metrics.extend([m.name for m in oc.metrics])
+            else:
+                constraint_metrics.append(oc.metric.name)

         for metric in unconstrainable_metrics:
             if metric.name in constraint_metrics:
                 raise ValueError("Cannot constrain on objective metric.")

-        def get_metric_name(oc: OutcomeConstraint) -> str:
-            return oc.metric.name
+        def constraint_key(oc: OutcomeConstraint) -> str:
+            return (
+                str(oc)
+                if isinstance(oc, ScalarizedOutcomeConstraint)
+                else oc.metric.name
+            )
+
-        sorted_constraints = sorted(outcome_constraints, key=get_metric_name)
-        for metric_name, constraints_itr in groupby(
-            sorted_constraints, get_metric_name
-        ):
+        sorted_constraints = sorted(outcome_constraints, key=constraint_key)
+        for metric_name, constraints_itr in groupby(sorted_constraints, constraint_key):
             constraints: list[OutcomeConstraint] = list(constraints_itr)
             constraints_len = len(constraints)
             if constraints_len == 2:
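The net effect of the two changes above: `_validate_outcome_constraints` now checks the metrics inside a `ScalarizedOutcomeConstraint` against the objective, and duplicate-constraint grouping keys scalarized constraints by their string representation rather than a single metric name. A minimal sketch of the resulting behavior, using hypothetical metrics `m1` (the objective), `m2`, and `m3`:

```python
# Sketch of the new validation behavior; the metric names are hypothetical.
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.types import ComparisonOp

m1, m2, m3 = Metric(name="m1"), Metric(name="m2"), Metric(name="m3")
objective = Objective(metric=m1, minimize=True)

# Accepted: the scalarized constraint touches only non-objective metrics.
OptimizationConfig(
    objective=objective,
    outcome_constraints=[
        ScalarizedOutcomeConstraint(
            metrics=[m2, m3], weights=[0.5, 0.5], op=ComparisonOp.GEQ, bound=0.0
        )
    ],
)

# Rejected: m1 is both the objective and part of the scalarized constraint.
try:
    OptimizationConfig(
        objective=objective,
        outcome_constraints=[
            ScalarizedOutcomeConstraint(
                metrics=[m1, m2], weights=[0.5, 0.5], op=ComparisonOp.GEQ, bound=0.0
            )
        ],
    )
except ValueError as err:
    print(err)  # Cannot constrain on objective metric.
```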
57 changes: 57 additions & 0 deletions ax/core/tests/test_optimization_config.py
@@ -229,6 +229,30 @@ def test_ConstraintValidation(self) -> None:
             config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
         )

+        # Test with ScalarizedOutcomeConstraint
+        # should work when not constraining obj
+        config_with_scalarized = OptimizationConfig(
+            objective=self.objective,
+            outcome_constraints=[self.scalarized_outcome_constraint],
+        )
+        self.assertEqual(len(config_with_scalarized.outcome_constraints), 1)
+
+        # Can't constrain on metric in ScalarizedOutcomeConstraint
+        # that overlaps with objective
+        scalarized_with_objective_metric = ScalarizedOutcomeConstraint(
+            metrics=[self.metrics["m1"], self.metrics["m4"]],  # m1 is objective metric
+            weights=[0.5, 0.5],
+            op=ComparisonOp.GEQ,
+            bound=0.0,
+        )
+        with self.assertRaisesRegex(
+            ValueError, "Cannot constrain on objective metric."
+        ):
+            OptimizationConfig(
+                objective=self.objective,
+                outcome_constraints=[scalarized_with_objective_metric],
+            )
+
     def test_Clone(self) -> None:
         config1 = OptimizationConfig(
             objective=self.objective,
@@ -518,6 +542,39 @@ def test_ConstraintValidation(self) -> None:
             config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
         )

+        # Test with ScalarizedOutcomeConstraint
+        # should work when not constraining objective
+        scalarized_constraint = ScalarizedOutcomeConstraint(
+            metrics=[self.metrics["m3"]],  # m3 is not in multi_objective (m1, m2)
+            weights=[1.0],
+            op=ComparisonOp.GEQ,
+            bound=0.0,
+        )
+        config_with_scalarized = MultiObjectiveOptimizationConfig(
+            objective=self.multi_objective,
+            outcome_constraints=[scalarized_constraint],
+        )
+        self.assertEqual(len(config_with_scalarized.outcome_constraints), 1)
+
+        # Can't constrain on metric in ScalarizedOutcomeConstraint
+        # that overlaps with objective
+        scalarized_with_objective_metric = ScalarizedOutcomeConstraint(
+            metrics=[
+                self.metrics["m1"],
+                self.metrics["m3"],
+            ],  # m1 is in multi_objective
+            weights=[0.5, 0.5],
+            op=ComparisonOp.GEQ,
+            bound=0.0,
+        )
+        with self.assertRaisesRegex(
+            ValueError, "Cannot constrain on objective metric."
+        ):
+            MultiObjectiveOptimizationConfig(
+                objective=self.multi_objective,
+                outcome_constraints=[scalarized_with_objective_metric],
+            )
+
     def test_Clone(self) -> None:
         config1 = MultiObjectiveOptimizationConfig(
             objective=self.multi_objective, outcome_constraints=self.outcome_constraints