Commit eccfa9f

David Eriksson authored and facebook-github-bot committed
Use Standardize/Normalize by default for SingleTaskGP (#2458)
Summary:
X-link: facebook/Ax#2630
Pull Request resolved: #2458

D60080819 recently updated the default `SingleTaskGP` BoTorch priors. One significant change was to remove the use of an outputscale, which may not work well if the outputs aren't standardized. This diff changes `SingleTaskGP` to use `Standardize` and `Normalize` by default when no outcome/input transforms are specified; users can explicitly pass `None` if they don't want to use any transforms.

Differential Revision: D60492937
1 parent e44280e commit eccfa9f

15 files changed: +195 −73 lines
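
To illustrate the behavior change, a minimal sketch (the tensor shapes and values here are illustrative, not taken from the diff):

    import torch
    from botorch.models import SingleTaskGP
    from botorch.models.transforms.input import Normalize
    from botorch.models.transforms.outcome import Standardize

    train_X = torch.rand(10, 2)
    train_Y = 50.0 + 100.0 * torch.rand(10, 1)  # far from zero mean / unit variance

    # After this change, omitting the transforms is equivalent to passing them:
    model_default = SingleTaskGP(train_X, train_Y)
    model_explicit = SingleTaskGP(
        train_X,
        train_Y,
        outcome_transform=Standardize(m=1),
        input_transform=Normalize(d=2),
    )

    # Passing `None` explicitly opts out and restores the pre-change behavior:
    model_raw = SingleTaskGP(
        train_X, train_Y, outcome_transform=None, input_transform=None
    )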

botorch/acquisition/analytic.py

Lines changed: 5 additions & 3 deletions
@@ -1091,15 +1091,17 @@ def _get_noiseless_fantasy_model(
     # are used across all batches (by default, a GP with batched training data
     # uses independent hyperparameters for each batch).

-    # Don't apply `outcome_transform` and `input_transform` here,
-    # since the data being passed has already been transformed.
-    # So we will instead set them afterwards.
+    # We don't want to use the true `outcome_transform` and `input_transform` here
+    # since the data being passed has already been transformed. We thus pass `None`
+    # and will instead set them afterwards.
     fantasy_model = SingleTaskGP(
         train_X=model.train_inputs[0],
         train_Y=model.train_targets.unsqueeze(-1),
         train_Yvar=model.likelihood.noise_covar.noise.unsqueeze(-1),
         covar_module=deepcopy(model.covar_module),
         mean_module=deepcopy(model.mean_module),
+        outcome_transform=None,
+        input_transform=None,
     )

     Yvar = torch.full_like(Y_fantasized, 1e-7)
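
The "set them afterwards" step mentioned in the comment is not shown in this hunk; conceptually it amounts to something like the following sketch (the attribute handling is an assumption for illustration, not quoted from the diff):

    # Hypothetical sketch: reattach the original model's transforms after
    # construction, since the fantasy model was built from data that is
    # already in the transformed space.
    if hasattr(model, "outcome_transform"):
        fantasy_model.outcome_transform = deepcopy(model.outcome_transform)
    if hasattr(model, "input_transform"):
        fantasy_model.input_transform = deepcopy(model.input_transform)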

botorch/acquisition/multi_objective/max_value_entropy_search.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ class qMultiObjectiveMaxValueEntropy(
         _default_sample_shape: The `sample_shape` for the default sampler.

     Example:
-        >>> model = SingleTaskGP(train_X, train_Y)
+        >>> model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
         >>> MESMO = qMultiObjectiveMaxValueEntropy(model, sample_pfs)
         >>> mesmo = MESMO(test_X)
     """

botorch/models/contextual.py

Lines changed: 7 additions & 1 deletion
@@ -102,7 +102,13 @@ def __init__(
                 dimension is set to 1 for each categorical variable.
             context_weight_dict: Known population weights of each context.
         """
-        super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
+        super().__init__(
+            train_X=train_X,
+            train_Y=train_Y,
+            train_Yvar=train_Yvar,
+            input_transform=None,
+            outcome_transform=None,
+        )
         self.covar_module = LCEAKernel(
             decomposition=decomposition,
             batch_shape=self._aug_batch_shape,
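
Passing `None` explicitly here preserves the contextual model's pre-change behavior (no transforms) rather than silently picking up the new `Standardize`/`Normalize` defaults from the parent `SingleTaskGP`.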

botorch/models/converter.py

Lines changed: 7 additions & 0 deletions
@@ -17,6 +17,7 @@
 import torch
 from botorch.exceptions import UnsupportedError
 from botorch.exceptions.warnings import BotorchWarning
+from botorch.models import SingleTaskGP
 from botorch.models.gp_regression import HeteroskedasticSingleTaskGP
 from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
 from botorch.models.gp_regression_mixed import MixedSingleTaskGP
@@ -179,6 +180,8 @@ def model_list_to_batched(model_list: ModelListGP) -> BatchedMultiOutputGPyTorchModel:
     batch_length = len(models)
     covar_module = _batched_kernel(models[0].covar_module, batch_length)
     kwargs["covar_module"] = covar_module
+    if isinstance(models[0], SingleTaskGP):
+        kwargs["outcome_transform"] = None

     # construct the batched GP model
     input_transform = getattr(models[0], "input_transform", None)
@@ -331,6 +334,8 @@ def batched_to_model_list(batch_model: BatchedMultiOutputGPyTorchModel) -> ModelListGP:
     )
     if isinstance(batch_model, SingleTaskMultiFidelityGP):
         kwargs.update(batch_model._init_args)
+    if isinstance(batch_model, SingleTaskGP):
+        kwargs["outcome_transform"] = None
     # NOTE: Adding outcome transform to kwargs to avoid the multiple
     # values for same kwarg issue with SingleTaskMultiFidelityGP.
     if outcome_transform is not None:
@@ -418,6 +423,8 @@ def batched_multi_output_to_single_output(
     kwargs["train_Yvar"] = noise_covar.noise.clone().unsqueeze(-1)
     if isinstance(batch_mo_model, SingleTaskMultiFidelityGP):
         kwargs.update(batch_mo_model._init_args)
+    if isinstance(batch_mo_model, SingleTaskGP):
+        kwargs["outcome_transform"] = None
     single_outcome_model = batch_mo_model.__class__(
         input_transform=input_transform, **kwargs
     )
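
A hedged usage sketch of why the converters need this: they rebuild models from data extracted from existing models, so letting the new `Standardize` default kick in would transform that data a second time. The shapes and explicit `None` transforms below are illustrative assumptions:

    import torch
    from botorch.models import ModelListGP, SingleTaskGP
    from botorch.models.converter import model_list_to_batched

    train_X = torch.rand(8, 3)
    # Sub-models built without transforms; the converter itself now injects
    # `outcome_transform=None` when rebuilding a SingleTaskGP, so the new
    # default `Standardize` is not applied on top of the extracted data.
    gp1 = SingleTaskGP(
        train_X, torch.rand(8, 1), outcome_transform=None, input_transform=None
    )
    gp2 = SingleTaskGP(
        train_X, torch.rand(8, 1), outcome_transform=None, input_transform=None
    )
    batched = model_list_to_batched(ModelListGP(gp1, gp2))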

botorch/models/gp_regression.py

Lines changed: 20 additions & 7 deletions
@@ -36,8 +36,8 @@
 import torch
 from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
 from botorch.models.model import FantasizeMixin
-from botorch.models.transforms.input import InputTransform
-from botorch.models.transforms.outcome import Log, OutcomeTransform
+from botorch.models.transforms.input import InputTransform, Normalize
+from botorch.models.transforms.outcome import Log, OutcomeTransform, Standardize
 from botorch.models.utils import validate_input_scaling
 from botorch.models.utils.gpytorch_modules import (
     get_covar_module_with_dim_scaled_prior,
@@ -46,6 +46,7 @@
 )
 from botorch.utils.containers import BotorchContainer
 from botorch.utils.datasets import SupervisedDataset
+from botorch.utils.types import _DefaultType, DEFAULT
 from gpytorch.constraints.constraints import GreaterThan
 from gpytorch.distributions.multivariate_normal import MultivariateNormal
 from gpytorch.likelihoods.gaussian_likelihood import (
@@ -134,8 +135,8 @@ def __init__(
         likelihood: Optional[Likelihood] = None,
         covar_module: Optional[Module] = None,
         mean_module: Optional[Mean] = None,
-        outcome_transform: Optional[OutcomeTransform] = None,
-        input_transform: Optional[InputTransform] = None,
+        outcome_transform: Optional[Union[OutcomeTransform, _DefaultType]] = DEFAULT,
+        input_transform: Optional[Union[InputTransform, _DefaultType]] = DEFAULT,
     ) -> None:
         r"""
         Args:
@@ -154,16 +155,27 @@ def __init__(
             outcome_transform: An outcome transform that is applied to the
                 training data during instantiation and to the posterior during
                 inference (that is, the `Posterior` obtained by calling
-                `.posterior` on the model will be on the original scale).
-            input_transform: An input transform that is applied in the model's
-                forward pass.
+                `.posterior` on the model will be on the original scale). We use a
+                `Standardize` transform if no `outcome_transform` is specified.
+                Pass down `None` to use no outcome transform.
+            input_transform: An input transform that is applied in the model's forward
+                pass. We use a `Normalize` transform if no `input_transform` is
+                specified. Pass down `None` to use no input transform.
         """
+        self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
+        if outcome_transform == DEFAULT:
+            outcome_transform = Standardize(
+                m=train_Y.shape[-1], batch_shape=train_X.shape[:-2]
+            )
+        if input_transform == DEFAULT:
+            input_transform = Normalize(d=train_X.shape[-1], transform_on_train=True)
         with torch.no_grad():
             transformed_X = self.transform_inputs(
                 X=train_X, input_transform=input_transform
             )
         if outcome_transform is not None:
             train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+        # Validate again after applying the transforms
         self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar)
         ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None)
         validate_input_scaling(
@@ -352,6 +364,7 @@ def __init__(
             train_X=train_X,
             train_Y=train_Y,
             likelihood=likelihood,
+            outcome_transform=None,
             input_transform=input_transform,
         )
         self.register_added_loss_term("noise_added_loss")
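
The `DEFAULT` sentinel (imported from `botorch.utils.types`) is what lets the constructor distinguish "argument omitted" from an explicit `None`. A self-contained sketch of the pattern, not the BoTorch implementation itself:

    from typing import Optional, Union

    class _DefaultType:
        """Sentinel type marking an argument the caller did not supply."""

    DEFAULT = _DefaultType()

    def resolve(
        transform: Union[Optional[str], _DefaultType] = DEFAULT,
    ) -> Optional[str]:
        # Omitted -> substitute the default; explicit None -> no transform at all.
        if isinstance(transform, _DefaultType):
            return "Standardize"
        return transform

    assert resolve() == "Standardize"   # omitted: default applied
    assert resolve(None) is None        # explicit opt-out
    assert resolve("Log") == "Log"      # user-specified transform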

test/acquisition/multi_objective/test_max_value_entropy_search.py

Lines changed: 7 additions & 4 deletions
@@ -71,14 +71,14 @@ def test_multi_objective_max_value_entropy(self):
         # test batched model
         train_X = torch.rand(1, 1, 2, dtype=dtype, device=self.device)
         train_Y = torch.rand(1, 1, m, dtype=dtype, device=self.device)
-        model = SingleTaskGP(train_X, train_Y)
+        model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
         with self.assertRaises(NotImplementedError):
             qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers)
         # test initialization
         train_X = torch.rand(4, 2, dtype=dtype, device=self.device)
         train_Y = torch.rand(4, m, dtype=dtype, device=self.device)
         # test batched MO model
-        model = SingleTaskGP(train_X, train_Y)
+        model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
         mesmo = qMultiObjectiveMaxValueEntropy(model, dummy_sample_pareto_frontiers)
         self.assertEqual(mesmo.num_fantasies, 16)
         # Initialize the sampler.
@@ -98,7 +98,10 @@ def test_multi_objective_max_value_entropy(self):
         )
         # test ModelListGP
         model = ModelListGP(
-            *[SingleTaskGP(train_X, train_Y[:, i : i + 1]) for i in range(m)]
+            *[
+                SingleTaskGP(train_X, train_Y[:, i : i + 1], outcome_transform=None)
+                for i in range(m)
+            ]
         )
         mock_sample_pfs = mock.Mock()
         mock_sample_pfs.return_value = dummy_sample_pareto_frontiers(model=model)
@@ -156,7 +159,7 @@ def test_multi_objective_max_value_entropy(self):
             ],
             dim=1,
         )
-        fantasy_model = SingleTaskGP(fant_X, fant_Y)
+        fantasy_model = SingleTaskGP(fant_X, fant_Y, outcome_transform=None)

         # test with X_pending is not None
         with mock.patch.object(

test/acquisition/test_proximal.py

Lines changed: 3 additions & 1 deletion
@@ -245,7 +245,9 @@ def test_proximal_model_list(self):
         train_X = torch.rand(5, 3, device=self.device, dtype=dtype)
         train_Y = train_X.norm(dim=-1, keepdim=True)

-        gp = SingleTaskGP(train_X, train_Y).to(device=self.device)
+        gp = SingleTaskGP(train_X, train_Y, input_transform=None).to(
+            device=self.device
+        )
         model = ModelListGP(gp, gp)

         scalarized_posterior_transform = ScalarizedPosteriorTransform(
