Commit 40b49fe

Updating search space (#156)
* Updating search space
* fix typo
* Bug fix
* Fixing buggy implementation of predict when using gpu
1 parent f4c1fe2 commit 40b49fe

11 files changed: +38 additions, -24 deletions


autoPyTorch/datasets/resampling_strategy.py

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ def holdout_validation(val_share: float, indices: np.ndarray, **kwargs: Any) ->
 
 def stratified_holdout_validation(val_share: float, indices: np.ndarray, **kwargs: Any) \
         -> Tuple[np.ndarray, np.ndarray]:
-    train, val = train_test_split(indices, test_size=val_share, shuffle=False, stratify=kwargs["stratify"])
+    train, val = train_test_split(indices, test_size=val_share, shuffle=True, stratify=kwargs["stratify"])
     return train, val
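Note on resampling_strategy.py: scikit-learn's train_test_split does not accept stratify together with shuffle=False (it raises a ValueError), so the old call could never produce a stratified holdout split. A minimal, self-contained sketch of the corrected behaviour, using made-up indices and labels rather than autoPyTorch's dataset objects:

    import numpy as np
    from sklearn.model_selection import train_test_split

    indices = np.arange(10)
    labels = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])  # stand-in for kwargs["stratify"]

    # stratify requires shuffle=True; shuffle=False raises a ValueError in scikit-learn
    train, val = train_test_split(indices, test_size=0.3, shuffle=True, stratify=labels)
    print(train, val)  # both splits keep roughly the 50/50 class ratio of `labels`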

autoPyTorch/pipeline/components/setup/network/base_network.py

Lines changed: 3 additions & 2 deletions
@@ -125,6 +125,7 @@ def predict(self, loader: torch.utils.data.DataLoader) -> torch.Tensor:
         return Y_snapshot_preds_tensor.mean(dim=0).cpu().numpy()
 
     def _predict(self, network: torch.nn.Module, loader: torch.utils.data.DataLoader) -> torch.Tensor:
+        network.to(self.device)
         network.float()
         network.eval()
         # Batch prediction
@@ -133,10 +134,10 @@ def _predict(self, network: torch.nn.Module, loader: torch.utils.data.DataLoader
         for i, (X_batch, Y_batch) in enumerate(loader):
             # Predict on batch
             X_batch = X_batch.float().to(self.device)
-            Y_batch_pred = network(X_batch).detach().cpu()
+            Y_batch_pred = network(X_batch)
             if self.final_activation is not None:
                 Y_batch_pred = self.final_activation(Y_batch_pred)
-            Y_batch_preds.append(Y_batch_pred)
+            Y_batch_preds.append(Y_batch_pred.detach().cpu())
 
         return torch.cat(Y_batch_preds, 0)
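Note on base_network.py: the GPU fix is twofold. The network is now moved to self.device before inference (so its parameters live on the same device as the input batches), and each batch output stays on that device until after final_activation has been applied; only then is it detached and copied to the CPU for accumulation. A minimal sketch of the same pattern with a generic model and loader (not autoPyTorch's Network class):

    import torch

    def predict_on_device(network: torch.nn.Module,
                          loader: torch.utils.data.DataLoader,
                          device: torch.device,
                          final_activation=None) -> torch.Tensor:
        network.to(device)   # parameters must live on the same device as the inputs
        network.eval()
        preds = []
        with torch.no_grad():
            for X_batch, _ in loader:
                X_batch = X_batch.float().to(device)
                out = network(X_batch)               # output stays on `device`
                if final_activation is not None:
                    out = final_activation(out)      # applied on the same device as the output
                preds.append(out.detach().cpu())     # move to CPU only when accumulating
        return torch.cat(preds, dim=0)

    # toy usage
    data = torch.utils.data.TensorDataset(torch.randn(32, 4), torch.zeros(32, dtype=torch.long))
    loader = torch.utils.data.DataLoader(data, batch_size=8)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    probs = predict_on_device(torch.nn.Linear(4, 3), loader, device,
                              final_activation=torch.nn.Softmax(dim=1))
    print(probs.shape)  # torch.Size([32, 3])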

autoPyTorch/pipeline/components/setup/network_backbone/MLPBackbone.py

Lines changed: 3 additions & 2 deletions
@@ -73,7 +73,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         activation: Tuple[Tuple, str] = (tuple(_activations.keys()),
                                                                          list(_activations.keys())[0]),
                                         use_dropout: Tuple[Tuple, bool] = ((True, False), False),
-                                        num_units: Tuple[Tuple, int] = ((10, 1024), 200),
+                                        num_units: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
                                         dropout: Tuple[Tuple, float] = ((0, 0.8), 0.5)
                                         ) -> ConfigurationSpace:
 
@@ -102,7 +102,8 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
             n_units_hp = UniformIntegerHyperparameter("num_units_%d" % i,
                                                       lower=num_units[0][0],
                                                       upper=num_units[0][1],
-                                                      default_value=num_units[1])
+                                                      default_value=num_units[1],
+                                                      log=num_units[2],)
             cs.add_hyperparameter(n_units_hp)
 
             if i > min_mlp_layers:
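Note on the backbone search spaces (MLP, ResNet, ShapedMLP, ShapedResNet): the num_units / max_units / output_dim tuples gain a third element that is forwarded to ConfigSpace's log argument, so unit counts over [10, 1024] are drawn uniformly in log space and small layer widths are explored about as often as large ones. A standalone sketch of the pattern, assuming only the ConfigSpace package (not autoPyTorch's pipeline classes; "num_units_1" is an illustrative name):

    from ConfigSpace.configuration_space import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformIntegerHyperparameter

    num_units = ((10, 1024), 200, True)  # (value_range, default, log) convention used in this commit

    cs = ConfigurationSpace()
    cs.add_hyperparameter(
        UniformIntegerHyperparameter(
            "num_units_1",
            lower=num_units[0][0],
            upper=num_units[0][1],
            default_value=num_units[1],
            log=num_units[2],  # sample uniformly in log space across the range
        )
    )
    print(cs.sample_configuration())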

autoPyTorch/pipeline/components/setup/network_backbone/ResNetBackbone.py

Lines changed: 3 additions & 2 deletions
@@ -97,7 +97,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         use_batch_norm: Tuple[Tuple, bool] = ((True, False), True),
                                         use_dropout: Tuple[Tuple, bool] = ((True, False), False),
                                         use_skip_connection: Tuple[Tuple, bool] = ((True, False), True),
-                                        num_units: Tuple[Tuple, int] = ((10, 1024), 200),
+                                        num_units: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
                                         activation: Tuple[Tuple, str] = (tuple(_activations.keys()),
                                                                          list(_activations.keys())[0]),
                                         blocks_per_group: Tuple[Tuple, int] = ((1, 4), 2),
@@ -162,7 +162,8 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                 "num_units_%d" % i,
                 lower=num_units[0][0],
                 upper=num_units[0][1],
-                default_value=num_units[1]
+                default_value=num_units[1],
+                log=num_units[2],
             )
             blocks_per_group = UniformIntegerHyperparameter(
                 "blocks_per_group_%d" % i,

autoPyTorch/pipeline/components/setup/network_backbone/ShapedMLPBackbone.py

Lines changed: 6 additions & 3 deletions
@@ -79,8 +79,8 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         num_groups: Tuple[Tuple, int] = ((1, 15), 5),
                                         max_dropout: Tuple[Tuple, float] = ((0, 1), 0.5),
                                         use_dropout: Tuple[Tuple, bool] = ((True, False), False),
-                                        max_units: Tuple[Tuple, int] = ((10, 1024), 200),
-                                        output_dim: Tuple[Tuple, int] = ((10, 1024), 200),
+                                        max_units: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
+                                        output_dim: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
                                         mlp_shape: Tuple[Tuple, str] = (('funnel', 'long_funnel',
                                                                          'diamond', 'hexagon',
                                                                          'brick', 'triangle', 'stairs'), 'funnel'),
@@ -104,18 +104,21 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
             default_value=activation[1]
         )
         (min_num_units, max_num_units), default_units = max_units[:2]
+        log_nr_max_units = max_units[2]
         max_units = UniformIntegerHyperparameter(
             "max_units",
             lower=min_num_units,
             upper=max_num_units,
             default_value=default_units,
+            log=log_nr_max_units,
         )
 
         output_dim = UniformIntegerHyperparameter(
             "output_dim",
             lower=output_dim[0][0],
             upper=output_dim[0][1],
-            default_value=output_dim[1]
+            default_value=output_dim[1],
+            log=output_dim[2],
         )
 
         cs.add_hyperparameters([num_groups, activation, mlp_shape, max_units, output_dim])

autoPyTorch/pipeline/components/setup/network_backbone/ShapedResNetBackbone.py

Lines changed: 7 additions & 4 deletions
@@ -85,7 +85,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         use_batch_norm: Tuple[Tuple, bool] = ((True, False), True),
                                         use_dropout: Tuple[Tuple, bool] = ((True, False), False),
                                         use_skip_connection: Tuple[Tuple, bool] = ((True, False), True),
-                                        max_units: Tuple[Tuple, int] = ((10, 1024), 200),
+                                        max_units: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
                                         blocks_per_group: Tuple[Tuple, int] = ((1, 4), 2),
                                         max_dropout: Tuple[Tuple, float] = ((0, 0.8), 0.5),
                                         multi_branch_choice: Tuple[Tuple, str] = (('shake-drop', 'shake-shake',
@@ -96,7 +96,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                                                          'brick', 'triangle', 'stairs'), 'funnel'),
                                         activation: Tuple[Tuple, str] = (
                                             tuple(_activations.keys()), list(_activations.keys())[0]),
-                                        output_dim: Tuple[Tuple, int] = ((10, 1024), 200),
+                                        output_dim: Tuple[Tuple, int, bool] = ((10, 1024), 200, True),
                                         ) -> ConfigurationSpace:
         cs = ConfigurationSpace()
 
@@ -124,11 +124,13 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
             default_value=activation[1]
         )
         (min_num_units, max_num_units), default_units = max_units[:2]
+        log_num_units = max_units[2]
         output_dim = UniformIntegerHyperparameter(
             "output_dim",
             lower=output_dim[0][0],
             upper=output_dim[0][1],
-            default_value=output_dim[1]
+            default_value=output_dim[1],
+            log=output_dim[2],
         )
 
         cs.add_hyperparameters([num_groups, blocks_per_group, activation, output_dim])
@@ -161,7 +163,8 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
             "max_units",
             lower=min_num_units,
             upper=max_num_units,
-            default_value=default_units
+            default_value=default_units,
+            log=log_num_units,
         )
         cs.add_hyperparameters([max_units])

autoPyTorch/pipeline/components/setup/optimizer/AdamOptimizer.py

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         beta1: Tuple[Tuple, float] = ((0.85, 0.999), 0.9),
                                         beta2: Tuple[Tuple, float] = ((0.9, 0.9999), 0.9),
                                         use_weight_decay: Tuple[Tuple, bool] = ((True, False), True),
-                                        weight_decay: Tuple[Tuple, float] = ((0.0, 0.1), 0.0)
+                                        weight_decay: Tuple[Tuple, float, bool] = ((1E-7, 0.1), 1E-4, True)
                                         ) -> ConfigurationSpace:
 
         cs = ConfigurationSpace()
@@ -105,7 +105,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
         )
 
         weight_decay = UniformFloatHyperparameter('weight_decay', lower=weight_decay[0][0], upper=weight_decay[0][1],
-                                                  default_value=weight_decay[1])
+                                                  default_value=weight_decay[1], log=weight_decay[2])
 
         cs.add_hyperparameters([lr, beta1, beta2, use_wd, weight_decay])
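Note on the optimizer search spaces (Adam, AdamW, RMSprop, SGD): weight_decay changes in two ways. The range moves from (0.0, 0.1) with default 0.0 to (1E-7, 0.1) with default 1E-4, and the value is now sampled on a log scale. The strictly positive lower bound is what makes the log scale valid, since ConfigSpace rejects log-scaled hyperparameters whose lower bound is not positive. A standalone sketch using only the ConfigSpace API:

    from ConfigSpace.configuration_space import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformFloatHyperparameter

    weight_decay = ((1e-7, 0.1), 1e-4, True)  # (value_range, default, log), as in this commit

    cs = ConfigurationSpace()
    cs.add_hyperparameter(
        UniformFloatHyperparameter(
            "weight_decay",
            lower=weight_decay[0][0],
            upper=weight_decay[0][1],
            default_value=weight_decay[1],
            log=weight_decay[2],
        )
    )

    # A lower bound of 0.0 combined with log=True raises a ValueError inside ConfigSpace,
    # which is presumably why the old (0.0, 0.1) range was tightened to (1e-7, 0.1).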

autoPyTorch/pipeline/components/setup/optimizer/AdamWOptimizer.py

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         beta1: Tuple[Tuple, float] = ((0.85, 0.999), 0.9),
                                         beta2: Tuple[Tuple, float] = ((0.9, 0.9999), 0.9),
                                         use_weight_decay: Tuple[Tuple, bool] = ((True, False), True),
-                                        weight_decay: Tuple[Tuple, float] = ((0.0, 0.1), 0.0)
+                                        weight_decay: Tuple[Tuple, float, bool] = ((1E-7, 0.1), 1E-4, True)
                                         ) -> ConfigurationSpace:
 
         cs = ConfigurationSpace()
@@ -105,7 +105,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
         )
 
         weight_decay = UniformFloatHyperparameter('weight_decay', lower=weight_decay[0][0], upper=weight_decay[0][1],
-                                                  default_value=weight_decay[1])
+                                                  default_value=weight_decay[1], log=weight_decay[2])
 
         cs.add_hyperparameters([lr, beta1, beta2, weight_decay, use_wd])

autoPyTorch/pipeline/components/setup/optimizer/RMSpropOptimizer.py

Lines changed: 2 additions & 2 deletions
@@ -85,7 +85,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         lr: Tuple[Tuple, float, bool] = ((1e-5, 1e-1), 1e-2, True),
                                         alpha: Tuple[Tuple, float] = ((0.1, 0.99), 0.99),
                                         use_weight_decay: Tuple[Tuple, bool] = ((True, False), True),
-                                        weight_decay: Tuple[Tuple, float] = ((0.0, 0.1), 0.0),
+                                        weight_decay: Tuple[Tuple, float, bool] = ((1E-7, 0.1), 1E-4, True),
                                         momentum: Tuple[Tuple, float] = ((0.0, 0.99), 0.0),
                                         ) -> ConfigurationSpace:
 
@@ -105,7 +105,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
         )
 
         weight_decay = UniformFloatHyperparameter('weight_decay', lower=weight_decay[0][0], upper=weight_decay[0][1],
-                                                  default_value=weight_decay[1])
+                                                  default_value=weight_decay[1], log=weight_decay[2])
 
         momentum = UniformFloatHyperparameter('momentum', lower=momentum[0][0], upper=momentum[0][1],
                                               default_value=momentum[1])

autoPyTorch/pipeline/components/setup/optimizer/SGDOptimizer.py

Lines changed: 2 additions & 2 deletions
@@ -77,7 +77,7 @@ def get_properties(dataset_properties: Optional[Dict[str, Any]] = None) -> Dict[
     def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                         lr: Tuple[Tuple, float, bool] = ((1e-5, 1e-1), 1e-2, True),
                                         use_weight_decay: Tuple[Tuple, bool] = ((True, False), True),
-                                        weight_decay: Tuple[Tuple, float] = ((0.0, 0.1), 0.0),
+                                        weight_decay: Tuple[Tuple, float, bool] = ((1E-7, 0.1), 1E-4, True),
                                         momentum: Tuple[Tuple, float] = ((0.0, 0.99), 0.0),
                                         ) -> ConfigurationSpace:
 
@@ -94,7 +94,7 @@ def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
         )
 
         weight_decay = UniformFloatHyperparameter('weight_decay', lower=weight_decay[0][0], upper=weight_decay[0][1],
-                                                  default_value=weight_decay[1])
+                                                  default_value=weight_decay[1], log=weight_decay[2])
 
         momentum = UniformFloatHyperparameter('momentum', lower=momentum[0][0], upper=momentum[0][1],
                                               default_value=momentum[1])

autoPyTorch/pipeline/components/training/data_loader/base_data_loader.py

Lines changed: 7 additions & 2 deletions
@@ -249,10 +249,15 @@ def get_torchvision_datasets(self) -> Dict[str, torchvision.datasets.VisionDatas
 
     @staticmethod
     def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
-                                        batch_size: Tuple[Tuple, int] = ((32, 320), 64)
+                                        batch_size: Tuple[Tuple, int, bool] = ((32, 320), 64, True)
                                         ) -> ConfigurationSpace:
         batch_size = UniformIntegerHyperparameter(
-            "batch_size", batch_size[0][0], batch_size[0][1], default_value=batch_size[1])
+            "batch_size",
+            batch_size[0][0],
+            batch_size[0][1],
+            default_value=batch_size[1],
+            log=batch_size[2],
+        )
         cs = ConfigurationSpace()
         cs.add_hyperparameters([batch_size])
         return cs
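Note on the data loader: with log=True the batch size is drawn uniformly in log space over [32, 320], so smaller batch sizes are tried about as often as larger ones (the log-space midpoint of the range is roughly 100). An illustrative check using only the public ConfigSpace API:

    from ConfigSpace.configuration_space import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformIntegerHyperparameter

    cs = ConfigurationSpace(seed=1)
    cs.add_hyperparameter(
        UniformIntegerHyperparameter("batch_size", 32, 320, default_value=64, log=True)
    )

    samples = [cfg["batch_size"] for cfg in cs.sample_configuration(1000)]
    print(min(samples), max(samples))
    print(sum(s <= 100 for s in samples))  # roughly half the draws fall at or below ~100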
