Skip to content

Commit 886a4cc

Browse files
esantorella authored and facebook-github-bot committed
Stop allowing some arguments to be ignored in acqf input constructors (#2356)
Summary: Stop silently ignoring arguments that can't be safely ignored so that an exception will be naturally raised instead. Carve-outs had to be added for `AnalyticExpectedUtilityOfBestOption` because it is often used in a BOPE loop where the same arguments are passed in the preference learning and experiment candidate-generation stages. Differential Revision: D57909958
1 parent 5f58208 commit 886a4cc

File tree

2 files changed

+16
-14
lines changed

2 files changed

+16
-14
lines changed

botorch/acquisition/input_constructors.py

Lines changed: 16 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -80,6 +80,7 @@
8080
from botorch.acquisition.objective import (
8181
ConstrainedMCObjective,
8282
IdentityMCObjective,
83+
LearnedObjective,
8384
MCAcquisitionObjective,
8485
PosteriorTransform,
8586
)
@@ -97,7 +98,7 @@
9798
)
9899
from botorch.exceptions.errors import UnsupportedError
99100
from botorch.models.cost import AffineFidelityCostModel
100-
from botorch.models.deterministic import DeterministicModel, FixedSingleSampleModel
101+
from botorch.models.deterministic import FixedSingleSampleModel
101102
from botorch.models.gpytorch import GPyTorchModel
102103
from botorch.models.model import Model
103104
from botorch.optim.optimize import optimize_acqf
@@ -213,14 +214,15 @@ def allow_only_specific_variable_kwargs(f: Callable[..., T]) -> Callable[..., T]
213214
in the signature of `f`. Any other keyword arguments will raise an error.
214215
"""
215216
allowed = {
217+
# `training_data` and/or `X_baseline` are needed to compute baselines
218+
# for some EI-type acquisition functions.
216219
"training_data",
217-
"objective",
218-
"posterior_transform",
219220
"X_baseline",
220-
"X_pending",
221+
# Objective thresholds are needed for defining hypervolumes in
222+
# multi-objective optimization.
221223
"objective_thresholds",
222-
"constraints",
223-
"target_fidelities",
224+
# Used in input constructors for some lookahead acquisition functions
225+
# such as qKnowledgeGradient.
224226
"bounds",
225227
}
226228

@@ -859,7 +861,6 @@ def construct_inputs_EHVI(
859861
model: Model,
860862
training_data: MaybeDict[SupervisedDataset],
861863
objective_thresholds: Tensor,
862-
objective: Optional[MCMultiOutputObjective] = None,
863864
posterior_transform: Optional[PosteriorTransform] = None,
864865
constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
865866
alpha: Optional[float] = None,
@@ -1248,12 +1249,7 @@ def construct_inputs_qMFMES(
12481249
training_data: MaybeDict[SupervisedDataset],
12491250
bounds: List[Tuple[float, float]],
12501251
target_fidelities: Dict[int, Union[int, float]],
1251-
objective: Optional[MCAcquisitionObjective] = None,
1252-
posterior_transform: Optional[PosteriorTransform] = None,
12531252
num_fantasies: int = 64,
1254-
X_baseline: Optional[Tensor] = None,
1255-
X_pending: Optional[Tensor] = None,
1256-
objective_thresholds: Optional[Tensor] = None,
12571253
fidelity_weights: Optional[Dict[int, float]] = None,
12581254
cost_intercept: float = 1.0,
12591255
num_trace_observations: int = 0,
@@ -1285,6 +1281,8 @@ def construct_inputs_analytic_eubo(
12851281
pref_model: Optional[Model] = None,
12861282
previous_winner: Optional[Tensor] = None,
12871283
sample_multiplier: Optional[float] = 1.0,
1284+
objective: Optional[LearnedObjective] = None,
1285+
posterior_transform: Optional[PosteriorTransform] = None,
12881286
) -> Dict[str, Any]:
12891287
r"""Construct kwargs for the `AnalyticExpectedUtilityOfBestOption` constructor.
12901288
@@ -1305,6 +1303,11 @@ def construct_inputs_analytic_eubo(
13051303
BOPE; if None, we are doing PBO and model is the preference model.
13061304
previous_winner: The previous winner of the best option.
13071305
sample_multiplier: The scale factor for the single-sample model.
1306+
objective: Ignored. This argument is allowed to be passed then ignored
1307+
because of the way that EUBO is typically used in a BOPE loop.
1308+
posterior_transform: Ignored. This argument is allowed to be passed then
1309+
ignored because of the way that EUBO is typically used in a BOPE
1310+
loop.
13081311
13091312
Returns:
13101313
A dict mapping kwarg names of the constructor to values.
@@ -1335,7 +1338,6 @@ def construct_inputs_analytic_eubo(
13351338
def construct_inputs_qeubo(
13361339
model: Model,
13371340
pref_model: Optional[Model] = None,
1338-
outcome_model: Optional[DeterministicModel] = None,
13391341
sample_multiplier: Optional[float] = 1.0,
13401342
sampler: Optional[MCSampler] = None,
13411343
objective: Optional[MCAcquisitionObjective] = None,
@@ -1606,6 +1608,7 @@ def construct_inputs_qJES(
16061608
X_pending: Optional[Tensor] = None,
16071609
estimation_type: str = "LB",
16081610
num_samples: int = 64,
1611+
objective=None,
16091612
):
16101613
dtype = model.train_targets.dtype
16111614
optimal_inputs, optimal_outputs = get_optimal_samples(

test/acquisition/test_input_constructors.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1291,7 +1291,6 @@ def test_construct_inputs_mfmes(self) -> None:
12911291
constructor_args = {
12921292
"model": None,
12931293
"training_data": self.blockX_blockY,
1294-
"objective": None,
12951294
"bounds": self.bounds,
12961295
"candidate_size": 17,
12971296
"target_fidelities": target_fidelities,

0 commit comments

Comments (0)