from botorch.models.transforms import Normalize, Standardize
from botorch.posteriors.fully_bayesian import batched_bisect, GaussianMixturePosterior
from botorch.sampling.get_sampler import get_sampler
+from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    NondominatedPartitioning,
@@ -125,15 +126,28 @@ def _get_unnormalized_data(self, infer_noise: bool, **tkwargs):
        return train_X, train_Y, train_Yvar, test_X

    def _get_unnormalized_condition_data(
-        self, num_models: int, infer_noise: bool, **tkwargs
+        self, num_models: int, num_cond: int, infer_noise: bool, **tkwargs
    ):
        with torch.random.fork_rng():
            torch.manual_seed(0)
-            cond_X = 5 + 5 * torch.rand(num_models, 2, 4, **tkwargs)
+            cond_X = 5 + 5 * torch.rand(num_models, num_cond, 4, **tkwargs)
            cond_Y = 10 + torch.sin(cond_X[..., :1])
-            cond_Yvar = None if infer_noise else 0.1 * torch.ones(cond_Y.shape)
+            cond_Yvar = (
+                None if infer_noise else 0.1 * torch.ones(cond_Y.shape, **tkwargs)
+            )
        return cond_X, cond_Y, cond_Yvar

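+    # note: unlike the condition data above, the fantasy candidates are
+    # unbatched (num_cond x d); fantasize is expected to broadcast them
+    # across the MCMC batch dimension (see test_fantasize below)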
+    def _get_unnormalized_fantasy_data(
+        self, num_cond: int, infer_noise: bool, **tkwargs
+    ):
+        with torch.random.fork_rng():
+            torch.manual_seed(0)
+            fantasy_X = 5 + 5 * torch.rand(num_cond, 4, **tkwargs)
+            fantasy_Yvar = (
+                None if infer_noise else 0.1 * torch.ones((num_cond, 1), **tkwargs)
+            )
+        return fantasy_X, fantasy_Yvar
+
    def _get_mcmc_samples(
        self, num_samples: int, dim: int, infer_noise: bool, **tkwargs
    ):
@@ -671,7 +685,7 @@ def test_condition_on_observation(self):
        num_models = 3
        num_cond = 2
        for infer_noise, dtype in itertools.product(
-            (True,), (torch.float, torch.double)
+            (True, False), (torch.float, torch.double)
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y, train_Yvar, test_X = self._get_unnormalized_data(
@@ -681,7 +695,10 @@ def test_condition_on_observation(self):
            # condition on different observations per model to obtain num_models sets
            # of training data
            cond_X, cond_Y, cond_Yvar = self._get_unnormalized_condition_data(
-                num_models=num_models, infer_noise=infer_noise, **tkwargs
+                num_models=num_models,
+                num_cond=num_cond,
+                infer_noise=infer_noise,
+                **tkwargs
            )
            model = SaasFullyBayesianSingleTaskGP(
                train_X=train_X,
@@ -712,8 +729,12 @@ def test_condition_on_observation(self):
                cond_model.train_inputs[0].shape,
                torch.Size([num_models, num_train + num_cond, num_dims]),
            )
+
+            # the batch shape of the conditioned model is added during conditioning
+            self.assertEqual(cond_model.batch_shape, torch.Size([num_models]))
+
            # condition on identical sets of data (i.e. one set) for all models
-            # i.e, with no batch shape. This should not work.
+            # i.e., with no batch shape. This infers the batch shape.
            cond_X_nobatch, cond_Y_nobatch = cond_X[0], cond_Y[0]
            model = SaasFullyBayesianSingleTaskGP(
                train_X=train_X,
@@ -728,14 +749,74 @@ def test_condition_on_observation(self):
            )
            model.load_mcmc_samples(mcmc_samples)

-            # This should __NOT__ work - conditioning must have a batch size for the
-            # conditioned point and is not supported (the training data by default
-            # does not have a batch size)
+            # conditioning without a batch size - the resulting conditioned model
+            # will still have a batch size
            model.posterior(train_X)
+            cond_model = model.condition_on_observations(
+                cond_X_nobatch, cond_Y_nobatch, noise=cond_Yvar
+            )
+            self.assertEqual(
+                cond_model.train_inputs[0].shape,
+                torch.Size([num_models, num_train + num_cond, num_dims]),
+            )
+
+            # test repeated conditioning
+            repeat_cond_X = cond_X + 5
+            repeat_cond_model = cond_model.condition_on_observations(
+                repeat_cond_X, cond_Y, noise=cond_Yvar
+            )
+            self.assertEqual(
+                repeat_cond_model.train_inputs[0].shape,
+                torch.Size([num_models, num_train + 2 * num_cond, num_dims]),
+            )
+
+            # test repeated conditioning without a batch size
+            repeat_cond_X_nobatch = cond_X_nobatch + 10
+            repeat_cond_model2 = repeat_cond_model.condition_on_observations(
+                repeat_cond_X_nobatch, cond_Y_nobatch, noise=cond_Yvar
+            )
+            self.assertEqual(
+                repeat_cond_model2.train_inputs[0].shape,
+                torch.Size([num_models, num_train + 3 * num_cond, num_dims]),
+            )
+
+    def test_fantasize(self):
+        num_models = 3
+        fantasy_size = 19
+        num_cond = 2
+        for infer_noise, dtype in itertools.product(
+            (True, False), (torch.float, torch.double)
+        ):
+            tkwargs = {"device": self.device, "dtype": dtype}
+            train_X, train_Y, train_Yvar, _ = self._get_unnormalized_data(
+                infer_noise=infer_noise, **tkwargs
+            )
+            num_train, num_dims = train_X.shape
+
+            # fantasized X should not have a batch dim
+            fantasy_X, fantasy_Yvar = self._get_unnormalized_fantasy_data(
+                infer_noise=infer_noise, num_cond=num_cond, **tkwargs
+            )
+            model = SaasFullyBayesianSingleTaskGP(
+                train_X=train_X,
+                train_Y=train_Y,
+                train_Yvar=train_Yvar,
+            )
+            mcmc_samples = self._get_mcmc_samples(
+                num_samples=num_models,
+                dim=train_X.shape[-1],
+                infer_noise=infer_noise,
+                **tkwargs
+            )
+            model.load_mcmc_samples(mcmc_samples)
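+            # the sampler draws fantasy_size joint-posterior samples, one set
+            # of fantasized outcomes per MCMC model, so the fantasy model gains
+            # a leading fantasy_size batch dimension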
+            sampler = SobolQMCNormalSampler(torch.Size([fantasy_size]))
+            fantasy_model = model.fantasize(
+                fantasy_X, sampler, observation_noise=fantasy_Yvar
+            )
+            self.assertEqual(
+                fantasy_model.train_inputs[0].shape,
+                torch.Size([fantasy_size, num_models, num_train + num_cond, num_dims]),
+            )

    def test_bisect(self):
        def f(x):
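
Taken together, the new tests pin down the batch semantics of conditioning and fantasizing on a fully Bayesian model. A minimal usage sketch of that behavior follows; the NUTS fit settings and data sizes are illustrative, not taken from this diff:

```python
import torch
from botorch.fit import fit_fully_bayesian_model_nuts
from botorch.models import SaasFullyBayesianSingleTaskGP
from botorch.sampling.normal import SobolQMCNormalSampler

# toy training data: 10 points in 4 dimensions (sizes are illustrative)
train_X = torch.rand(10, 4, dtype=torch.double)
train_Y = torch.sin(train_X[..., :1])

model = SaasFullyBayesianSingleTaskGP(train_X=train_X, train_Y=train_Y)
# small MCMC budget for illustration; 64 / 16 = 4 retained models
fit_fully_bayesian_model_nuts(model, warmup_steps=32, num_samples=64, thinning=16)
num_models = model.batch_shape[0]

# unbatched observations broadcast across the MCMC batch when conditioning
new_X = torch.rand(2, 4, dtype=torch.double)
new_Y = torch.sin(new_X[..., :1])
cond_model = model.condition_on_observations(new_X, new_Y)
print(cond_model.train_inputs[0].shape)  # num_models x 12 x 4

# fantasize stacks the sampler batch on top of the MCMC batch
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([19]))
fantasy_model = model.fantasize(new_X, sampler)
print(fantasy_model.train_inputs[0].shape)  # 19 x num_models x 12 x 4
```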