Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions botorch/models/contextual.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ def construct_inputs(
cls,
training_data: SupervisedDataset,
decomposition: Dict[str, List[int]],
**kwargs: Any,
) -> Dict[str, Any]:
r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.

Expand All @@ -56,7 +55,7 @@ def construct_inputs(
decomposition: Dictionary of context names and their indexes of the
corresponding active context parameters.
"""
base_inputs = super().construct_inputs(training_data=training_data, **kwargs)
base_inputs = super().construct_inputs(training_data=training_data)
return {
**base_inputs,
"decomposition": decomposition,
Expand Down Expand Up @@ -127,7 +126,6 @@ def construct_inputs(
embs_feature_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
context_weight_dict: Optional[Dict] = None,
**kwargs: Any,
) -> Dict[str, Any]:
r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.

Expand All @@ -147,7 +145,7 @@ def construct_inputs(
dimension is set to 1 for each categorical variable.
context_weight_dict: Known population weights of each context.
"""
base_inputs = super().construct_inputs(training_data=training_data, **kwargs)
base_inputs = super().construct_inputs(training_data=training_data)
index_decomp = {
c: [training_data.feature_names.index(i) for i in v]
for c, v in decomposition.items()
Expand Down
29 changes: 28 additions & 1 deletion botorch/models/gp_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
from __future__ import annotations

import warnings
from typing import NoReturn, Optional
from typing import Dict, NoReturn, Optional, Union

import torch
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
Expand All @@ -44,6 +44,8 @@
get_matern_kernel_with_gamma_prior,
MIN_INFERRED_NOISE_LEVEL,
)
from botorch.utils.containers import BotorchContainer
from botorch.utils.datasets import SupervisedDataset
from gpytorch.constraints.constraints import GreaterThan
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.likelihoods.gaussian_likelihood import (
Expand Down Expand Up @@ -207,6 +209,31 @@ def __init__(
self.input_transform = input_transform
self.to(train_X)

@classmethod
def construct_inputs(
    cls, training_data: SupervisedDataset, *, task_feature: Optional[int] = None
) -> Dict[str, Union[BotorchContainer, Tensor]]:
    r"""Construct `SingleTaskGP` keyword arguments from a `SupervisedDataset`.

    Args:
        training_data: A `SupervisedDataset`, with attributes `train_X`,
            `train_Y`, and, optionally, `train_Yvar`.
        task_feature: Deprecated; accepted only for backward compatibility
            and otherwise ignored.

    Returns:
        A dict of keyword arguments that can be used to initialize a
        `SingleTaskGP`, with keys `train_X`, `train_Y`, and, optionally,
        `train_Yvar`.
    """
    # Warn (but do not fail) when the legacy `task_feature` argument is
    # supplied; its value is never used.
    legacy_arg_supplied = task_feature is not None
    if legacy_arg_supplied:
        warnings.warn(
            "`task_feature` is deprecated and will be ignored. In the "
            "future, this will be an error.",
            DeprecationWarning,
            stacklevel=2,
        )
    # Delegate the actual dataset parsing to the base implementation.
    return super().construct_inputs(training_data=training_data)

def forward(self, x: Tensor) -> MultivariateNormal:
if self.training:
x = self.transform_inputs(x)
Expand Down
3 changes: 1 addition & 2 deletions botorch/models/gp_regression_fidelity.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,15 +167,14 @@ def construct_inputs(
cls,
training_data: SupervisedDataset,
fidelity_features: List[int],
**kwargs,
) -> Dict[str, Any]:
r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.

Args:
training_data: Dictionary of `SupervisedDataset`.
fidelity_features: Index of fidelity parameter as input columns.
"""
inputs = super().construct_inputs(training_data=training_data, **kwargs)
inputs = super().construct_inputs(training_data=training_data)
inputs["data_fidelities"] = fidelity_features
return inputs

Expand Down
3 changes: 1 addition & 2 deletions botorch/models/gp_regression_mixed.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,6 @@ def construct_inputs(
training_data: SupervisedDataset,
categorical_features: List[int],
likelihood: Optional[Likelihood] = None,
**kwargs: Any,
) -> Dict[str, Any]:
r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.

Expand All @@ -196,7 +195,7 @@ def construct_inputs(
categorical_features: Column indices of categorical features.
likelihood: Optional likelihood used to construct the model.
"""
base_inputs = super().construct_inputs(training_data=training_data, **kwargs)
base_inputs = super().construct_inputs(training_data=training_data)
return {
**base_inputs,
"cat_dims": categorical_features,
Expand Down
2 changes: 0 additions & 2 deletions botorch/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,15 +186,13 @@ def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Mode
def construct_inputs(
cls,
training_data: SupervisedDataset,
**kwargs: Any,
) -> Dict[str, Union[BotorchContainer, Tensor]]:
"""
Construct `Model` keyword arguments from a `SupervisedDataset`.

Args:
training_data: A `SupervisedDataset`, with attributes `train_X`,
`train_Y`, and, optionally, `train_Yvar`.
kwargs: Ignored.

Returns:
A dict of keyword arguments that can be used to initialize a `Model`,
Expand Down
6 changes: 2 additions & 4 deletions botorch/models/multitask.py
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,6 @@ def construct_inputs(
task_covar_prior: Optional[Prior] = None,
prior_config: Optional[dict] = None,
rank: Optional[int] = None,
**kwargs,
) -> Dict[str, Any]:
r"""Construct `Model` keyword arguments from a dataset and other args.

Expand Down Expand Up @@ -367,9 +366,8 @@ def construct_inputs(
raise ValueError(f"eta must be a real number, your eta was {eta}.")
task_covar_prior = LKJCovariancePrior(num_tasks, eta, sd_prior)

base_inputs = super().construct_inputs(
training_data=training_data, task_feature=task_feature, **kwargs
)
# Call Model.construct_inputs to parse training data
base_inputs = super().construct_inputs(training_data=training_data)
if isinstance(training_data, MultiTaskDataset):
all_tasks = list(range(len(training_data.datasets)))
base_inputs["all_tasks"] = all_tasks
Expand Down
2 changes: 0 additions & 2 deletions botorch/models/pairwise_gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -781,15 +781,13 @@ def batch_shape(self) -> torch.Size:
def construct_inputs(
cls,
training_data: SupervisedDataset,
**kwargs: Any,
) -> Dict[str, Tensor]:
r"""
Construct `Model` keyword arguments from a `RankingDataset`.

Args:
training_data: A `RankingDataset`, with attributes `train_X`,
`train_Y`, and, optionally, `train_Yvar`.
kwargs: Ignored.

Returns:
A dict of keyword arguments that can be used to initialize a
Expand Down
26 changes: 24 additions & 2 deletions test/models/test_gp_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
from gpytorch.priors import GammaPrior


class TestSingleTaskGP(BotorchTestCase):
class TestGPRegressionBase(BotorchTestCase):
def _get_model_and_data(
self,
batch_shape,
Expand Down Expand Up @@ -398,6 +398,28 @@ def test_set_transformed_inputs(self):
self.assertEqual(X.shape, tf_X.shape)


class TestSingleTaskGP(TestGPRegressionBase):
    model_class = SingleTaskGP

    def test_construct_inputs_task_feature_deprecated(self) -> None:
        """Passing `task_feature` to `construct_inputs` warns and is ignored."""
        model, model_kwargs = self._get_model_and_data(
            batch_shape=torch.Size([]),
            m=1,
            device=self.device,
            dtype=torch.double,
        )
        train_X = model_kwargs["train_X"]
        train_Y = model_kwargs["train_Y"]
        dataset = SupervisedDataset(
            train_X,
            train_Y,
            feature_names=[f"x{i}" for i in range(train_X.shape[-1])],
            outcome_names=["y"],
        )
        with self.assertWarnsRegex(DeprecationWarning, "`task_feature` is deprecated"):
            model.construct_inputs(dataset, task_feature=0)


class TestFixedNoiseGP(TestSingleTaskGP):
model_class = FixedNoiseGP

Expand Down Expand Up @@ -542,7 +564,7 @@ class TestFixedNoiseSingleTaskGP(TestFixedNoiseGP):
model_class = SingleTaskGP


class TestHeteroskedasticSingleTaskGP(TestSingleTaskGP):
class TestHeteroskedasticSingleTaskGP(TestGPRegressionBase):
def _get_model_and_data(
self, batch_shape, m, outcome_transform=None, input_transform=None, **tkwargs
):
Expand Down