
Commit d841f2b

ltiao authored and facebook-github-bot committed
Fix input constructor for qMultiFidelityKnowledgeGradient by fixing fidelity dimensions when computing current_value (#2519)
Summary:
Pull Request resolved: #2519

The input constructor for `qMultiFidelityKnowledgeGradient` was computing `current_value` in a way that was effectively not fidelity-aware: the fidelity dimension was not fixed in the optimization performed to obtain `current_value`, because the constructor simply re-used `construct_inputs_qKG`, the input constructor for `qKnowledgeGradient`. This diff addresses the issue by passing `target_fidelities` as the `fixed_features` argument to `optimize_objective` (D62380369), so that `current_value` is computed at the target fidelities.

Reviewed By: Balandat

Differential Revision: D62391106

fbshipit-source-id: acf4f4a9a3644bb467decac70ce38a39c1c9ffc8
1 parent 33e11f4 · commit d841f2b
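For context on the mechanism this commit relies on: in BoTorch, an acquisition function can be optimized with some input columns pinned by wrapping it in `FixedFeatureAcquisitionFunction`, which is what the updated test below asserts `optimize_acqf` receives. A minimal sketch of that pattern follows; the model, bounds, and the choice of a 3-dimensional input with the fidelity in column 2 are illustrative assumptions, not code from this commit.

import torch
from botorch.acquisition import PosteriorMean
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf

d = 3  # two design dimensions plus one fidelity dimension (column 2)
target_fidelities = {2: 1.0}  # evaluate the current value at the target fidelity
model = SingleTaskGP(train_X=torch.rand(8, d), train_Y=torch.rand(8, 1))

# Pin the fidelity column to its target value; the wrapped acquisition
# function is then optimized over the remaining (design) dimensions only.
acqf = FixedFeatureAcquisitionFunction(
    acq_function=PosteriorMean(model),
    d=d,
    columns=list(target_fidelities.keys()),
    values=list(target_fidelities.values()),
)
bounds = torch.stack([torch.zeros(d - 1), torch.ones(d - 1)])  # design dims only
_, current_value = optimize_acqf(
    acq_function=acqf, bounds=bounds, q=1, num_restarts=4, raw_samples=32
)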

File tree

2 files changed: +62 -17 lines changed


botorch/acquisition/input_constructors.py

Lines changed: 17 additions & 5 deletions
@@ -1255,26 +1255,38 @@ def construct_inputs_qMFKG(
     cost_intercept: float = 1.0,
     num_trace_observations: int = 0,
     num_fantasies: int = 64,
+    **optimize_objective_kwargs: TOptimizeObjectiveKwargs,
 ) -> dict[str, Any]:
     r"""Construct kwargs for `qMultiFidelityKnowledgeGradient` constructor."""
 
+    X = _get_dataset_field(training_data, "X", first_only=True)
+    _bounds = torch.as_tensor(bounds, dtype=X.dtype, device=X.device)
+
     inputs_mf = construct_inputs_mf_base(
         target_fidelities=target_fidelities,
         fidelity_weights=fidelity_weights,
         cost_intercept=cost_intercept,
         num_trace_observations=num_trace_observations,
     )
 
-    inputs_kg = construct_inputs_qKG(
+    _, current_value = optimize_objective(
         model=model,
-        training_data=training_data,
-        bounds=bounds,
+        bounds=_bounds.t(),
+        q=1,
         objective=objective,
         posterior_transform=posterior_transform,
-        num_fantasies=num_fantasies,
+        fixed_features=target_fidelities,
+        **optimize_objective_kwargs,
     )
 
-    return {**inputs_mf, **inputs_kg}
+    return {
+        "model": model,
+        "objective": objective,
+        "posterior_transform": posterior_transform,
+        "num_fantasies": num_fantasies,
+        "current_value": current_value.detach().cpu().max(),
+        **inputs_mf,
+    }
 
 
 @acqf_input_constructor(qMultiFidelityMaxValueEntropy)
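The new return value is consumed through the standard input-constructor workflow. A sketch of end-to-end usage is below; the dataset, model, bounds, and target fidelity are made up for illustration, and the `SupervisedDataset` keyword arguments may vary across BoTorch versions.

import torch
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient
from botorch.models import SingleTaskGP
from botorch.utils.datasets import SupervisedDataset

d = 3
train_X, train_Y = torch.rand(8, d), torch.rand(8, 1)
dataset = SupervisedDataset(
    X=train_X,
    Y=train_Y,
    feature_names=[f"x{i}" for i in range(d)],
    outcome_names=["y"],
)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)

# With this commit, the constructor computes `current_value` with the
# fidelity column fixed to its target value.
constructor = get_acqf_input_constructor(qMultiFidelityKnowledgeGradient)
kwargs = constructor(
    model=model,
    training_data=dataset,
    bounds=[(0.0, 1.0)] * d,  # one (lower, upper) pair per input dimension
    target_fidelities={2: 1.0},
    num_fantasies=8,
)
acqf = qMultiFidelityKnowledgeGradient(**kwargs)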

test/acquisition/test_input_constructors.py

Lines changed: 45 additions & 12 deletions
@@ -1304,28 +1304,49 @@ def test_construct_inputs_mf_base(self) -> None:
         )
 
     def test_construct_inputs_mfkg(self) -> None:
+        current_value = torch.tensor(1.23)
+
         constructor_args = {
-            "model": None,
+            "model": self.mock_model,
             "training_data": self.blockX_blockY,
-            "objective": None,
             "bounds": self.bounds,
-            "num_fantasies": 123,
             "target_fidelities": {0: 0.987},
+            "objective": None,
             "fidelity_weights": {0: 0.654},
             "cost_intercept": 0.321,
+            "num_fantasies": 123,
         }
 
         input_constructor = get_acqf_input_constructor(qMultiFidelityKnowledgeGradient)
         with mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_mf_base",
-            return_value={"foo": 0},
-        ), mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_qKG",
-            return_value={"bar": 1},
-        ):
+            target="botorch.acquisition.input_constructors.optimize_acqf",
+            return_value=(None, current_value),
+        ) as mock_optimize_acqf:
             inputs_mfkg = input_constructor(**constructor_args)
-            inputs_test = {"foo": 0, "bar": 1}
-            self.assertEqual(inputs_mfkg, inputs_test)
+
+        mock_optimize_acqf_kwargs = mock_optimize_acqf.call_args.kwargs
+
+        self.assertIsInstance(
+            mock_optimize_acqf_kwargs["acq_function"],
+            FixedFeatureAcquisitionFunction,
+        )
+        self.assertLessEqual(
+            {
+                "model",
+                "objective",
+                "current_value",
+                "project",
+                "expand",
+                "cost_aware_utility",
+                "posterior_transform",
+                "num_fantasies",
+            },
+            set(inputs_mfkg.keys()),
+        )
+        self.assertEqual(
+            inputs_mfkg["num_fantasies"], constructor_args["num_fantasies"]
+        )
+        self.assertEqual(inputs_mfkg["current_value"], current_value)
 
     def test_construct_inputs_mfmes(self) -> None:
         target_fidelities = {0: 0.987}
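One detail of the rewritten test worth noting: `assertLessEqual` on two sets performs a subset check (`set.__le__`), so the assertion above verifies that the expected keys are contained in the constructor's output without requiring an exact match. A standalone illustration of the idiom, with hypothetical names:

import unittest

class SubsetAssertionExample(unittest.TestCase):
    def test_expected_keys_are_subset(self) -> None:
        produced = {"model", "objective", "current_value", "num_fantasies", "extra"}
        # For sets, a <= b is the subset test, so this passes even though
        # `produced` contains additional keys.
        self.assertLessEqual({"model", "current_value"}, produced)

if __name__ == "__main__":
    unittest.main()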
@@ -1467,7 +1488,19 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
             },
         )
         self.cases["MF look-ahead"] = (
-            [qMultiFidelityKnowledgeGradient, qMultiFidelityMaxValueEntropy],
+            [qMultiFidelityMaxValueEntropy],
+            {
+                "model": kg_model,
+                "training_data": self.blockX_blockY,
+                "bounds": bounds,
+                "target_fidelities": {0: 0.987},
+                "num_fantasies": 30,
+            },
+        )
+        bounds = torch.ones((2, 2))
+        kg_model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 1)))
+        self.cases["MF look-ahead (KG)"] = (
+            [qMultiFidelityKnowledgeGradient],
             {
                 "model": kg_model,
                 "training_data": self.blockX_blockY,
