botorch/acquisition/analytic.py (2 changes: 1 addition & 1 deletion)

@@ -1116,7 +1116,7 @@ def _get_noiseless_fantasy_model(
     # Not transforming Yvar because 1e-7 is already close to 0 and it is a
     # relative, not absolute, value.
     Y_fantasized, _ = outcome_transform(
-        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1)
+        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1), X=batch_X_observed
     )
     Y_fantasized = Y_fantasized.squeeze(-1)
     input_transform = getattr(model, "input_transform", None)
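The pattern repeated across every file in this diff is the same: each call site that applies an outcome transform now also passes the (input-transformed) inputs as `X`, so transforms whose behavior depends on input location can use them. A minimal call-side sketch, assuming the base `OutcomeTransform.__call__` accepts the same optional `X` that the subclass signatures below now declare (built-in transforms like `Standardize` can simply ignore it):

    import torch
    from botorch.models.transforms.outcome import Standardize

    train_X = torch.rand(20, 3, dtype=torch.double)
    train_Y = torch.randn(20, 1, dtype=torch.double)

    octf = Standardize(m=1)
    # Callers now thread X through; Standardize ignores it, but an
    # X-dependent transform could use the input locations here.
    train_Y_tf, _ = octf(train_Y, X=train_X)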
botorch/models/approximate_gp.py (4 changes: 2 additions & 2 deletions)

@@ -172,7 +172,7 @@ def posterior(

         posterior = GPyTorchPosterior(distribution=dist)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             posterior = posterior_transform(posterior)
         return posterior

@@ -397,7 +397,7 @@ def __init__(
                 UserInputWarning,
                 stacklevel=3,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         validate_input_scaling(train_X=transformed_X, train_Y=train_Y)
         if train_Y.shape[-1] != num_outputs:
botorch/models/ensemble.py (2 changes: 1 addition & 1 deletion)

@@ -79,7 +79,7 @@ def posterior(
         # `posterior` (as is done in GP models). This is more general since it works
         # even if the transform doesn't support `untransform_posterior`.
         if hasattr(self, "outcome_transform"):
-            values, _ = self.outcome_transform.untransform(values)
+            values, _ = self.outcome_transform.untransform(values, X=X)
         if output_indices is not None:
             values = values[..., output_indices]
         posterior = EnsemblePosterior(values=values)
botorch/models/fully_bayesian.py (4 changes: 3 additions & 1 deletion)

@@ -373,7 +373,9 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         validate_input_scaling(
             train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar
botorch/models/fully_bayesian_multitask.py (4 changes: 3 additions & 1 deletion)

@@ -242,7 +242,9 @@ def __init__(
         )
         if outcome_transform is not None:
             outcome_transform.train()  # Ensure we learn parameters here on init
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
             if train_Yvar is not None:  # Clamp after transforming
                 train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL)

botorch/models/gp_regression.py (4 changes: 3 additions & 1 deletion)

@@ -160,7 +160,9 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
         # Validate again after applying the transforms
         self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar)
         ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None)
botorch/models/gpytorch.py (10 changes: 5 additions & 5 deletions)

@@ -198,7 +198,7 @@ def posterior(
             mvn = self.likelihood(mvn, X)
         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior

@@ -244,7 +244,7 @@ def condition_on_observations(
         # (unless we've already transformed if BatchedMultiOutputGPyTorchModel)
         if not isinstance(self, BatchedMultiOutputGPyTorchModel):
             # `noise` is assumed to already be outcome-transformed.
-            Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar)
+            Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar, X=X)
         # Validate using strict=False, since we cannot tell if Y has an explicit
         # output dimension. Do not check shapes when fantasizing as they are
         # not expected to match.

@@ -467,7 +467,7 @@ def posterior(

         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior

@@ -511,7 +511,7 @@ def condition_on_observations(
         if hasattr(self, "outcome_transform"):
             # We need to apply transforms before shifting batch indices around.
             # `noise` is assumed to already be outcome-transformed.
-            Y, _ = self.outcome_transform(Y)
+            Y, _ = self.outcome_transform(Y, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)

@@ -924,7 +924,7 @@ def posterior(
             )
         posterior = GPyTorchPosterior(distribution=mtmvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
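The conditioning paths above now outcome-transform new observations together with their inputs before folding them into the model. A usage-level sketch (model construction and data here are illustrative, not part of this PR):

    import torch
    from botorch.models import SingleTaskGP
    from botorch.models.transforms.outcome import Standardize

    train_X = torch.rand(20, 3, dtype=torch.double)
    train_Y = torch.randn(20, 1, dtype=torch.double)
    model = SingleTaskGP(train_X, train_Y, outcome_transform=Standardize(m=1))

    # condition_on_observations now calls self.outcome_transform(Y=..., X=...)
    # internally, so an X-aware transform sees the new inputs as well.
    new_X = torch.rand(5, 3, dtype=torch.double)
    new_Y = torch.randn(5, 1, dtype=torch.double)
    fantasy_model = model.condition_on_observations(X=new_X, Y=new_Y)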
botorch/models/higher_order_gp.py (14 changes: 7 additions & 7 deletions)

@@ -91,7 +91,7 @@ def _return_to_output_shape(self, tsr: Tensor) -> Tensor:
         return out

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:

@@ -107,21 +107,21 @@ def forward(
         return Y_out, Yvar_out

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:
             Yvar = self._squeeze_to_single_output(Yvar)

-        Y, Yvar = super().untransform(Y, Yvar)
+        Y, Yvar = super().untransform(Y=Y, Yvar=Yvar, X=X)

         Y = self._return_to_output_shape(Y)
         if Yvar is not None:
             Yvar = self._return_to_output_shape(Yvar)
         return Y, Yvar

     def untransform_posterior(
-        self, posterior: HigherOrderGPPosterior
+        self, posterior: HigherOrderGPPosterior, X: Tensor | None = None
     ) -> TransformedPosterior:
         # TODO: return a HigherOrderGPPosterior once rescaling constant
         # muls * LinearOperators won't force a dense decomposition rather than a

@@ -227,7 +227,7 @@ def __init__(
                 output_shape=train_Y.shape[-num_output_dims:],
                 batch_shape=batch_shape,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=train_X)

         self._aug_batch_shape = batch_shape
         self._num_dimensions = num_output_dims + 1

@@ -416,7 +416,7 @@ def condition_on_observations(
         """
         if hasattr(self, "outcome_transform"):
             # we need to apply transforms before shifting batch indices around
-            Y, noise = self.outcome_transform(Y=Y, Yvar=noise)
+            Y, noise = self.outcome_transform(Y=Y, Yvar=noise, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)

@@ -540,7 +540,7 @@ def posterior(
             num_outputs=self._num_outputs,
         )
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior

     def make_posterior_variances(
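The signature changes above spell out the new transform-side contract: `forward`, `untransform`, and `untransform_posterior` all take an optional `X`. A minimal sketch of a hypothetical transform that actually uses it (`ShiftByFirstFeature` is invented for illustration; `TransformedPosterior` is the same wrapper that `FlattenedStandardize.untransform_posterior` returns):

    from torch import Tensor
    from botorch.models.transforms.outcome import OutcomeTransform
    from botorch.posteriors.posterior import Posterior
    from botorch.posteriors.transformed import TransformedPosterior

    class ShiftByFirstFeature(OutcomeTransform):
        """Hypothetical transform that subtracts the first input feature from Y."""

        def forward(
            self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
        ) -> tuple[Tensor, Tensor | None]:
            if X is None:
                raise ValueError("ShiftByFirstFeature requires X.")
            # A pure shift leaves observation variances unchanged.
            return Y - X[..., :1], Yvar

        def untransform(
            self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
        ) -> tuple[Tensor, Tensor | None]:
            if X is None:
                raise ValueError("ShiftByFirstFeature requires X.")
            return Y + X[..., :1], Yvar

        def untransform_posterior(
            self, posterior: Posterior, X: Tensor | None = None
        ) -> TransformedPosterior:
            if X is None:
                raise ValueError("ShiftByFirstFeature requires X.")
            shift = X[..., :1]
            return TransformedPosterior(
                posterior=posterior,
                sample_transform=lambda s: s + shift,
                mean_transform=lambda m, v: m + shift,
                variance_transform=lambda m, v: v,  # shift-invariant
            )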
botorch/models/latent_kronecker_gp.py (9 changes: 6 additions & 3 deletions)

@@ -81,7 +81,7 @@ def __init__(
         self._use_min = use_min

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Standardize outcomes.

@@ -93,6 +93,7 @@ def forward(
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).

         Returns:
             A two-tuple with the transformed outcomes:

@@ -240,7 +241,9 @@ def __init__(
             outcome_transform = MinMaxStandardize(batch_shape=batch_shape)
         if outcome_transform is not None:
             # transform outputs once and keep the results
-            train_Y = outcome_transform(train_Y.unsqueeze(-1))[0].squeeze(-1)
+            train_Y = outcome_transform(train_Y.unsqueeze(-1), X=transformed_X)[
+                0
+            ].squeeze(-1)

         ExactGP.__init__(
             self,

@@ -506,7 +509,7 @@ def _rsample_from_base_samples(
         )
         # samples.shape = (*sample_shape, *broadcast_shape, n_test_x, n_t)
         if hasattr(self, "outcome_transform") and self.outcome_transform is not None:
-            samples, _ = self.outcome_transform.untransform(samples)
+            samples, _ = self.outcome_transform.untransform(samples, X=X)
         return samples

     def condition_on_observations(
botorch/models/model_list_gp_regression.py (2 changes: 1 addition & 1 deletion)

@@ -117,7 +117,7 @@ def condition_on_observations(
             else:
                 noise_i = torch.cat([noise[..., k] for k in range(i, j)], dim=-1)
             if hasattr(model, "outcome_transform"):
-                y_i, noise_i = model.outcome_transform(y_i, noise_i)
+                y_i, noise_i = model.outcome_transform(y_i, noise_i, X=X_i)
             if noise_i is not None:
                 noise_i = noise_i.squeeze(0)
             targets.append(y_i)
botorch/models/multitask.py (43 changes: 6 additions & 37 deletions)

@@ -39,6 +39,7 @@
 from botorch.models.model import FantasizeMixin
 from botorch.models.transforms.input import InputTransform
 from botorch.models.transforms.outcome import OutcomeTransform, Standardize
+from botorch.models.utils.assorted import get_task_value_remapping
 from botorch.models.utils.gpytorch_modules import (
     get_covar_module_with_dim_scaled_prior,
     get_gaussian_likelihood_with_lognormal_prior,

@@ -82,40 +83,6 @@
 from torch import Tensor


-def get_task_value_remapping(task_values: Tensor, dtype: torch.dtype) -> Tensor | None:
-    """Construct an mapping of discrete task values to contiguous int-valued floats.
-
-    Args:
-        task_values: A sorted long-valued tensor of task values.
-        dtype: The dtype of the model inputs (e.g. `X`), which the new
-            task values should have mapped to (e.g. float, double).
-
-    Returns:
-        A tensor of shape `task_values.max() + 1` that maps task values
-        to new task values. The indexing operation `mapper[task_value]`
-        will produce a tensor of new task values, of the same shape as
-        the original. The elements of the `mapper` tensor that do not
-        appear in the original `task_values` are mapped to `nan`. The
-        return value will be `None`, when the task values are contiguous
-        integers starting from zero.
-    """
-    task_range = torch.arange(
-        len(task_values), dtype=task_values.dtype, device=task_values.device
-    )
-    mapper = None
-    if not torch.equal(task_values, task_range):
-        # Create a tensor that maps task values to new task values.
-        # The number of tasks should be small, so this should be quite efficient.
-        mapper = torch.full(
-            (int(task_values.max().item()) + 1,),
-            float("nan"),
-            dtype=dtype,
-            device=task_values.device,
-        )
-        mapper[task_values] = task_range.to(dtype=dtype)
-    return mapper
-
-
 class MultiTaskGP(ExactGP, MultiTaskGPyTorchModel, FantasizeMixin):
     r"""Multi-Task exact GP model using an ICM (intrinsic co-regionalization model)
     kernel. See [Bonilla2007MTGP]_ and [Swersky2013MTBO]_ for a reference on the

@@ -219,7 +186,9 @@ def __init__(
         if outcome_transform == DEFAULT:
             outcome_transform = Standardize(m=1, batch_shape=train_X.shape[:-2])
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(Y=train_Y, Yvar=train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )

         # squeeze output dim
         train_Y = train_Y.squeeze(-1)

@@ -464,7 +433,7 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)

         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         self._num_outputs = train_Y.shape[-1]

@@ -772,7 +741,7 @@ def posterior(
         )

         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior

     def train(self, val=True, *args, **kwargs):
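With the local copy removed, `get_task_value_remapping` must now be imported from `botorch.models.utils.assorted`, as the new import at the top of this file does. A quick usage sketch based on the docstring of the deleted copy:

    import torch
    from botorch.models.utils.assorted import get_task_value_remapping

    # Non-contiguous task labels {0, 3} are remapped to {0.0, 1.0};
    # unused slots in the mapper are filled with NaN.
    mapper = get_task_value_remapping(
        task_values=torch.tensor([0, 3]), dtype=torch.double
    )
    print(mapper)                           # tensor([0., nan, nan, 1.], dtype=torch.float64)
    print(mapper[torch.tensor([3, 0, 3])])  # tensor([1., 0., 1.], dtype=torch.float64)

    # Contiguous labels starting at zero need no remapping.
    assert get_task_value_remapping(torch.tensor([0, 1, 2]), torch.double) is None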