@@ -1,6 +1,6 @@
 # bo
 # author: Jungtaek Kim (jtkim@postech.ac.kr)
-# last updated: April 21, 2020
+# last updated: April 29, 2020

 import numpy as np
 import time
@@ -15,11 +15,15 @@
 cma = None
 import sobol_seq

-from bayeso import gp
 from bayeso import acquisition
+from bayeso import constants
+from bayeso.gp import gp
+from bayeso.gp import gp_common
 from bayeso.utils import utils_common
 from bayeso.utils import utils_covariance
-from bayeso import constants
+from bayeso.utils import utils_logger
+
+logger = utils_logger.get_logger('bo')


 def get_grids(arr_ranges, int_grids):
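Note on the imports above: `bayeso.utils.utils_logger` is introduced by this commit, but its contents are outside this diff. As a rough sketch only, assuming it wraps the standard `logging` module, a `get_logger`/`get_str_array` pair compatible with the calls in this file might look like the following; the real module may differ.

```python
# Hypothetical sketch of bayeso/utils/utils_logger.py; not the module's actual code.
import logging

import numpy as np

def get_logger(str_name):
    # One named logger per module, e.g. get_logger('bo'), emitting at DEBUG level.
    logger = logging.getLogger(str_name)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('[%(levelname)s-%(name)s] %(message)s'))
        logger.addHandler(handler)
    return logger

def get_str_array(arr):
    # Compact single-string rendering of a numpy array for log messages.
    return np.array2string(arr, separator=', ')
```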
@@ -110,17 +114,14 @@ def _check_optimizer_method_bo(str_optimizer_method_bo, num_dim, debug):
     assert str_optimizer_method_bo in constants.ALLOWED_OPTIMIZER_METHOD_BO

     if str_optimizer_method_bo == 'DIRECT' and directminimize is None: # pragma: no cover
-        if debug:
-            print('[DEBUG] _check_optimizer_method_bo in bo.py: DIRECT is selected, but it is not installed.')
+        logger.warning('DIRECT is selected, but it is not installed.')
         str_optimizer_method_bo = 'L-BFGS-B'
     elif str_optimizer_method_bo == 'CMA-ES' and cma is None: # pragma: no cover
-        if debug:
-            print('[DEBUG] _check_optimizer_method_bo in bo.py: CMA-ES is selected, but it is not installed.')
+        logger.warning('CMA-ES is selected, but it is not installed.')
         str_optimizer_method_bo = 'L-BFGS-B'
     # TODO: It should be checked.
     elif str_optimizer_method_bo == 'CMA-ES' and num_dim == 1: # pragma: no cover
-        if debug:
-            print('[DEBUG] _check_optimizer_method_bo in bo.py: CMA-ES is selected, but a dimension of bounds is 1.')
+        logger.warning('CMA-ES is selected, but a dimension of bounds is 1.')
         str_optimizer_method_bo = 'L-BFGS-B'
     return str_optimizer_method_bo

@@ -349,8 +350,7 @@ def _get_initial_sobol(self, int_samples, int_seed=None):

         if int_seed is None:
             int_seed = np.random.randint(0, 10000)
-        if self.debug:
-            print('[DEBUG] _get_initial_sobol in bo.py: int_seed', int_seed)
+        if self.debug: logger.debug('seed: {}'.format(int_seed))
         arr_samples = sobol_seq.i4_sobol_generate(self.num_dim, int_samples, int_seed)
         arr_samples = arr_samples * (self.arr_range[:, 1].flatten() - self.arr_range[:, 0].flatten()) + self.arr_range[:, 0].flatten()
         return arr_samples
@@ -404,8 +404,7 @@ def get_initial(self, str_initial_method,

         if str_initial_method == 'grid':
             assert fun_objective is not None
-            if self.debug:
-                print('[DEBUG] get_initial in bo.py: int_samples is ignored, because grid is chosen.')
+            if self.debug: logger.debug('int_samples is ignored, because grid is chosen.')
             arr_initials = self._get_initial_grid()
             arr_initials = get_best_acquisition(arr_initials, fun_objective)
         elif str_initial_method == 'uniform':
@@ -416,9 +415,9 @@ def get_initial(self, str_initial_method,
             raise NotImplementedError('get_initial: latin')
         else:
             raise NotImplementedError('get_initial: allowed str_initial_method, but it is not implemented.')
-        if self.debug:
-            print('[DEBUG] get_initial in bo.py: arr_initials')
-            print(arr_initials)
+
+        if self.debug: logger.debug('arr_initials:\n{}'.format(utils_logger.get_str_array(arr_initials)))
+
         return arr_initials

     def _optimize_objective(self, fun_acquisition, X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps):
@@ -496,8 +495,7 @@ def _optimize(self, fun_negative_acquisition, str_initial_method, int_samples):
                 )
                 next_point_x = next_point.x
                 list_next_point.append(next_point_x)
-                if self.debug:
-                    print('[DEBUG] _optimize in bo.py: optimized point for acq', next_point_x)
+                if self.debug: logger.debug('acquired sample: {}'.format(utils_logger.get_str_array(next_point_x)))
         elif self.str_optimizer_method_bo == 'DIRECT': # pragma: no cover
             list_bounds = self._get_bounds()
             next_point = directminimize(
@@ -542,8 +540,8 @@ def optimize(self, X_train, Y_train,
         :param str_mlm_method: the name of marginal likelihood maximization method for Gaussian process regression.
         :type str_mlm_method: str., optional

-        :returns: acquired example, candidates of acquired examples, acquisition function values over the candidates, covariance matrix by `hyps`, inverse matrix of the covariance matrix, hyperparameters optimized, and execution times. Shape: ((d, ), (`int_samples`, d), (`int_samples`, ), (n, n), (n, n), dict., dict.).
-        :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, dict., dict.)
+        :returns: acquired example and dictionary of information. Shape: ((d, ), dict.).
+        :rtype: (numpy.ndarray, dict.)

         :raises: AssertionError

@@ -565,7 +563,7 @@ def optimize(self, X_train, Y_train,

         time_start = time.time()

-        if self.is_normalized:
+        if self.is_normalized and not np.max(Y_train) == np.min(Y_train):
             Y_train = (Y_train - np.min(Y_train)) / (np.max(Y_train) - np.min(Y_train)) * constants.MULTIPLIER_RESPONSE

         time_start_gp = time.time()
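The strengthened condition above guards min-max normalization against the degenerate case where every observed response is identical, which previously divided by zero. A small standalone illustration of the case the guard now skips; the value of `MULTIPLIER_RESPONSE` here is a stand-in for `constants.MULTIPLIER_RESPONSE`.

```python
import numpy as np

MULTIPLIER_RESPONSE = 10.0  # stand-in for constants.MULTIPLIER_RESPONSE

Y_train = np.array([[2.0], [2.0], [2.0]])  # all responses equal

# Old behavior: np.max(Y_train) - np.min(Y_train) == 0, so the normalization
# below divides by zero and fills Y_train with NaNs:
# Y_train = (Y_train - np.min(Y_train)) / (np.max(Y_train) - np.min(Y_train)) * MULTIPLIER_RESPONSE

# New behavior: normalization is skipped entirely when max == min.
if not np.max(Y_train) == np.min(Y_train):
    Y_train = (Y_train - np.min(Y_train)) / (np.max(Y_train) - np.min(Y_train)) * MULTIPLIER_RESPONSE
print(Y_train)  # unchanged, still all 2.0
```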
@@ -578,11 +576,9 @@ def optimize(self, X_train, Y_train,
                 cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_train, Y_train, self.prior_mu, self.str_cov, str_optimizer_method=self.str_optimizer_method_gp, str_modelselection_method=self.str_modelselection_method, debug=self.debug)
                 self.is_optimize_hyps = not _check_hyps_convergence(self.historical_hyps, hyps, self.str_cov, is_fixed_noise)
             else: # pragma: no cover
-                print('[DEBUG] optimize in bo.py: hyps are converged.')
+                if self.debug: logger.debug('hyps converged.')
                 hyps = self.historical_hyps[-1]
-                cov_X_X, inv_cov_X_X, _ = gp.get_kernel_inverse(X_train, hyps, self.str_cov, is_fixed_noise=is_fixed_noise, debug=self.debug)
-        elif str_mlm_method == 'probabilistic': # pragma: no cover
-            raise NotImplementedError('optimize: it will be added.')
+                cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, hyps, self.str_cov, is_fixed_noise=is_fixed_noise, debug=self.debug)
         else: # pragma: no cover
             raise ValueError('optimize: missing condition for str_mlm_method.')
         time_end_gp = time.time()
@@ -600,13 +596,17 @@ def optimize(self, X_train, Y_train,

         time_end = time.time()

-        times = {
-            'overall': time_end - time_start,
-            'gp': time_end_gp - time_start_gp,
-            'acq': time_end_acq - time_start_acq,
+        dict_info = {
+            'next_points': next_points,
+            'acquisitions': acquisitions,
+            'cov_X_X': cov_X_X,
+            'inv_cov_X_X': inv_cov_X_X,
+            'hyps': hyps,
+            'time_overall': time_end - time_start,
+            'time_gp': time_end_gp - time_start_gp,
+            'time_acq': time_end_acq - time_start_acq,
         }

-        if self.debug:
-            print('[DEBUG] optimize in bo.py: time consumed', time_end - time_start, 'sec.')
+        if self.debug: logger.debug('overall time consumed to acquire: {:.4f} sec.'.format(time_end - time_start))

-        return next_point, next_points, acquisitions, cov_X_X, inv_cov_X_X, hyps, times
+        return next_point, dict_info
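Taken together with the docstring update, `optimize` now returns a pair `(next_point, dict_info)` instead of a 7-tuple. A migration sketch for callers, assuming a `BO` instance named `model_bo` (hypothetical name) and training data `X_train`, `Y_train` prepared as before:

```python
# Before this commit (7-tuple return):
# next_point, next_points, acquisitions, cov_X_X, inv_cov_X_X, hyps, times = model_bo.optimize(X_train, Y_train)

# After this commit: everything except next_point moves into one dictionary.
next_point, dict_info = model_bo.optimize(X_train, Y_train)

next_points = dict_info['next_points']    # candidates of acquired examples
acquisitions = dict_info['acquisitions']  # acquisition values over the candidates
hyps = dict_info['hyps']                  # optimized GP hyperparameters
print('GP fit took {:.4f} sec., acquisition took {:.4f} sec.'.format(
    dict_info['time_gp'], dict_info['time_acq']))
```

Packing the auxiliary outputs into one dictionary lets future fields be added without breaking existing call sites.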