|
20 | 20 | from botorch.acquisition.active_learning import qNegIntegratedPosteriorVariance |
21 | 21 | from botorch.acquisition.analytic import ( |
22 | 22 | ExpectedImprovement, |
| 23 | + LogConstrainedExpectedImprovement, |
23 | 24 | LogExpectedImprovement, |
24 | 25 | LogNoisyExpectedImprovement, |
25 | 26 | LogProbabilityOfFeasibility, |
@@ -361,28 +362,63 @@ def construct_inputs_pof( |
361 | 362 | Returns: |
362 | 363 | A dict mapping kwarg names of the constructor to values. |
363 | 364 | """ |
364 | | - # Construct a dictionary of the form `{i: [lower, upper]}`, |
365 | | - # where `i` is the output index, and `lower` and `upper` are |
366 | | - # lower and upper bounds on that output (resp. interpreted |
367 | | - # as -Inf / Inf if None). |
368 | | - weights, bounds = constraints_tuple |
369 | | - constraints_dict = {} |
370 | | - for w, b in zip(weights, bounds): |
371 | | - nonzero_w = w.nonzero() |
372 | | - if nonzero_w.numel() != 1: |
373 | | - raise BotorchError( |
374 | | - "LogProbabilityOfFeasibility only support constraints on single" |
375 | | - " outcomes." |
376 | | - ) |
377 | | - i = nonzero_w.item() |
378 | | - w_i = w[i] |
379 | | - is_ub = torch.sign(w_i) == 1.0 |
380 | | - b = b.item() |
381 | | - bounds = (None, b / w_i) if is_ub else (b / w_i, None) |
382 | | - constraints_dict[i] = bounds |
| 365 | + # Construct a constraint dictionary from constraint_tuple |
| 366 | + constraints_dict = _construct_constraint_dict_from_tuple( |
| 367 | + constraints_tuple, LogProbabilityOfFeasibility |
| 368 | + ) |
| 369 | + |
383 | 370 | return {"model": model, "constraints": constraints_dict} |
384 | 371 |
|
385 | 372 |
|
@acqf_input_constructor(LogConstrainedExpectedImprovement)
def construct_inputs_logcei(
    model: Model,
    training_data: MaybeDict[SupervisedDataset],
    objective_index: int,
    constraints_tuple: tuple[Tensor, Tensor],
    best_f: float | Tensor | None = None,
    maximize: bool = True,
) -> dict[str, Any]:
    r"""Construct kwargs for the log constrained expected improvement
    acquisition function.

    Args:
        model: The model to be used in the acquisition function.
        training_data: Dataset(s) used to train the model. Used to determine
            the default value for `best_f` when one is not provided.
        objective_index: The index of the objective output.
        constraints_tuple: A tuple of `(A, b)`. For `k` outcome constraints
            and `m` outputs at `f(x)`, `A` is `k x m` and `b` is `k x 1` such
            that `A f(x) <= b`.
        best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
            the best feasible function value observed so far (assumed
            noiseless). If `None`, it is computed from `training_data`.
        maximize: If True, consider the problem a maximization problem.

    Returns:
        A dict mapping kwarg names of the constructor to values.
    """
    if best_f is None:
        # If no best_f is provided, compute it from the training data.
        # For LogCEI, posterior_transform is not used.
        best_f = get_best_f_analytic(
            training_data=training_data,
        )

    # Convert the `(A, b)` representation into the `{i: (lower, upper)}`
    # bounds dict expected by the acquisition function constructor.
    constraints_dict = _construct_constraint_dict_from_tuple(
        constraints_tuple, LogConstrainedExpectedImprovement
    )

    return {
        "model": model,
        "best_f": best_f,
        "objective_index": objective_index,
        "constraints": constraints_dict,
        "maximize": maximize,
    }
| 420 | + |
| 421 | + |
386 | 422 | @acqf_input_constructor(UpperConfidenceBound) |
387 | 423 | def construct_inputs_ucb( |
388 | 424 | model: Model, |
@@ -1984,3 +2020,30 @@ def _get_ref_point( |
1984 | 2020 | ref_point = objective(objective_thresholds) |
1985 | 2021 |
|
1986 | 2022 | return ref_point |
| 2023 | + |
| 2024 | + |
| 2025 | +def _construct_constraint_dict_from_tuple( |
| 2026 | + constraints_tuple: tuple, acqf_class: type[AcquisitionFunction] |
| 2027 | +) -> dict[str, Any]: |
| 2028 | + """ |
| 2029 | + Construct a dictionary of the form `{i: [lower, upper]}`, |
| 2030 | + where `i` is the output index, and `lower` and `upper` are |
| 2031 | + lower and upper bounds on that output (resp. interpreted |
| 2032 | + as -Inf / Inf if None). |
| 2033 | + """ |
| 2034 | + weights, bounds = constraints_tuple |
| 2035 | + constraints_dict = {} |
| 2036 | + for w, b in zip(weights, bounds): |
| 2037 | + nonzero_w = w.nonzero() |
| 2038 | + if nonzero_w.numel() != 1: |
| 2039 | + raise BotorchError( |
| 2040 | + f"{acqf_class.__name__} only support constraints on single outcomes." |
| 2041 | + ) |
| 2042 | + i = nonzero_w.item() |
| 2043 | + w_i = w[i] |
| 2044 | + is_ub = torch.sign(w_i) == 1.0 |
| 2045 | + b = b.item() |
| 2046 | + bounds = (None, b / w_i) if is_ub else (b / w_i, None) |
| 2047 | + constraints_dict[i] = bounds |
| 2048 | + |
| 2049 | + return constraints_dict |
0 commit comments