From 1a4d4d799dd69491f30e13428cd556d8e2623993 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 15:57:05 +0100
Subject: [PATCH 1/7] Enable CI for other branches

---
 .github/workflows/deploy-docs.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
index ad207fd..0165a18 100644
--- a/.github/workflows/deploy-docs.yml
+++ b/.github/workflows/deploy-docs.yml
@@ -6,8 +6,8 @@ name: Deploy docs
 # build the documentation whenever there are new commits on main
 on:
   push:
-    branches:
-      - main
+    # branches:
+    #   - main
   # Alternative: only build for tags.
   # tags:
   #   - '*'

From 4e866b624dc219b8ae1500b160fd8cc59008dd44 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 15:55:33 +0100
Subject: [PATCH 2/7] Format

---
 setup.py | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/setup.py b/setup.py
index 9775744..66aa2cb 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,25 @@
     author='',
     author_email='krfo@norceresearch.no',
     description='Python Ensemble Toolbox',
-    install_requires=['numpy', 'scipy', 'matplotlib', 'h5py', 'mako', 'tqdm', 'PyWavelets',
-                      'psutil', 'pdoc', 'pytest', 'pandas', 'p_tqdm', 'mat73', 'opencv-python', 'rips', 'tomli',
-                      'tomli-w', 'pyyaml', 'libecalc'],
+    install_requires=[
+        'numpy',
+        'scipy',
+        'matplotlib',
+        'h5py',
+        'mako',
+        'tqdm',
+        'PyWavelets',
+        'psutil',
+        'pdoc',
+        'pytest',
+        'pandas',
+        'p_tqdm',
+        'mat73',
+        'opencv-python',
+        'rips',
+        'tomli',
+        'tomli-w',
+        'pyyaml',
+        'libecalc',
+    ],
 )

From 06fe81254731c80aa94822ef7422e2b7af02e833 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 17:43:30 +0100
Subject: [PATCH 3/7] Use fork that pinpoints malformed docstring

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 66aa2cb..c3c9f30 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
         'tqdm',
         'PyWavelets',
         'psutil',
-        'pdoc',
+        'pdoc @ git+https://github.com/patnr/pdoc@main',
         'pytest',
         'pandas',
         'p_tqdm',

From 965676880d0c787a502e88fd0bc7e58a15a68a61 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 18:03:49 +0100
Subject: [PATCH 4/7] Fix indentation causing pdoc/numpy crash

---
 popt/loop/dist.py                 | 117 +++++++++++-----------
 popt/loop/ensemble.py             |  56 +++++------
 popt/loop/optimize.py             |  46 ++++-----
 popt/misc_tools/optim_tools.py    |  90 ++++++++---------
 popt/update_schemes/cma.py        |  60 ++++++------
 popt/update_schemes/enopt.py      |  76 +++++++-------
 popt/update_schemes/genopt.py     |  32 +++---
 popt/update_schemes/optimizers.py | 158 +++++++++++++++---------------
 popt/update_schemes/smcopt.py     |  54 +++++-----
 9 files changed, 344 insertions(+), 345 deletions(-)

diff --git a/popt/loop/dist.py b/popt/loop/dist.py
index 69cd38d..0ae26a9 100644
--- a/popt/loop/dist.py
+++ b/popt/loop/dist.py
@@ -15,22 +15,21 @@ def __init__(self, x, cov, theta0=[20.0, 20.0], func=None, ne=None):
         '''
         Parameters
         ----------
-            x : array_like, shape (d,)
-                Initial control vector. Used initally to get the dimensionality of the problem.
-
-            cov : array_like, shape (d,d)
-                Initial covaraince matrix. Used to construct the correlation matrix and
-                epsilon parameter of GenOpt
-
-            theta0 : list, of length 2 ([alpha, beta])
-                Initial alpha and beta parameter of the marginal Beta distributions.
-
-            func : callable (optional)
-                An objective function that can be used later for the gradeint.
-                Can also be passed directly to the gradeint fucntion.
-
-            ne : int
-
+        x : array_like, shape (d,)
+            Initial control vector. Used initially to get the dimensionality of the problem.
+
+        cov : array_like, shape (d,d)
+            Initial covariance matrix. Used to construct the correlation matrix and
+            epsilon parameter of GenOpt
+
+        theta0 : list, of length 2 ([alpha, beta])
+            Initial alpha and beta parameters of the marginal Beta distributions.
+
+        func : callable (optional)
+            An objective function that can be used later for the gradient.
+            Can also be passed directly to the gradient function.
+
+        ne : int
         '''
         self.dim  = x.size            # dimension of state
         self.corr = ot.cov2corr(cov)  # initial correlation
@@ -46,12 +45,12 @@ def update_distribution(self, theta, corr):
 
         Parameters
         ----------
-            theta : array_like, shape (d,2)
-                Contains the alpha (first column) and beta (second column)
-                of the marginal distirbutions.
+        theta : array_like, shape (d,2)
+            Contains the alpha (first column) and beta (second column)
+            of the marginal distributions.
 
-            corr : array_like, shape (d,d)
-                Correlation matrix
+        corr : array_like, shape (d,d)
+            Correlation matrix
         '''
         self.theta = theta
         self.corr  = corr
@@ -77,8 +76,8 @@ def sample(self, size):
 
         Parameters
         ----------
-            size: int
-                Ensemble size (ne). Size of the sample to be drawn.
+        size: int
+            Ensemble size (ne). Size of the sample to be drawn.
 
         Returns
         -------
@@ -115,16 +114,16 @@ def epsilon_transformation(self, x, enX):
 
         Parameters
         ----------
-            x : array_like, shape (d,)
-                Current state vector.
-
-            enX : array_like, shape (ne,d)
-                Ensemble matrix X sampled from GenOpt distribution
+        x : array_like, shape (d,)
+            Current state vector.
+
+        enX : array_like, shape (ne,d)
+            Ensemble matrix X sampled from GenOpt distribution
 
         Returns
         -------
-            out : array_like, shape (ne,d)
-                Epsilon transfromed ensemble matrix, Y
+        out : array_like, shape (ne,d)
+            Epsilon-transformed ensemble matrix, Y
         '''
         enY = np.zeros_like(enX)    # tranfomred ensemble
@@ -151,21 +150,21 @@ def gradient(self, x, *args, **kwargs):
 
         Parameters
         ----------
-            x : array_like, shape (d,)
-                Current state vector.
-
-            args : (theta, corr)
-                theta (parameters of distribution), shape (d,2)
-                corr (correlation matrix), shape (d,d)
-
-            kwargs :
-                func : callable objectvie function
-                ne : ensemble size
+        x : array_like, shape (d,)
+            Current state vector.
+        args : (theta, corr)
+            theta (parameters of distribution), shape (d,2)
+            corr (correlation matrix), shape (d,d)
+
+        kwargs :
+            func : callable objective function
+            ne : ensemble size
+
         Returns
         -------
-            out : array_like, shape (d,)
-                The average gradient.
+        out : array_like, shape (d,)
+            The average gradient.
         '''
         # check for objective fucntion
         if 'func' in kwargs:
@@ -236,16 +235,16 @@ def mutation_gradient(self, x=None, *args, **kwargs):
 
         Parameters
         ----------
-            kwargs:
-                return_ensemble : bool
-                    If True, all the ensemble matrices are also returned in a dictionary.
+        kwargs:
+            return_ensembles : bool
+                If True, all the ensemble matrices are also returned in a dictionary.
 
         Returns
         -------
-            out : array_like, shape (d,2)
-                Mutation gradeint of theta
-
-                NB! If return_ensembles=True, the ensmebles are also returned!
+        out : array_like, shape (d,2)
+            Mutation gradient of theta
+
+            NB! If return_ensembles=True, the ensembles are also returned!
         '''
         if 'return_ensembles' in kwargs:
             ensembles = {'gaussian' : self.enZ,
@@ -279,16 +278,16 @@ def fisher_matrix(self, alpha, beta):
 
         Parameters
         ----------------------------------------------
-            alpha : float
-                alpha parameter in Beta distribution
+        alpha : float
+            alpha parameter in Beta distribution
 
-            beta : float
-                beta parameter in Beta distribution
+        beta : float
+            beta parameter in Beta distribution
 
         Returns
         ----------------------------------------------
-            out : array_like, of shape (2, 2)
-                Fisher matrix
+        out : array_like, of shape (2, 2)
+            Fisher matrix
         '''
         a = alpha
         b = beta
@@ -315,13 +314,13 @@ def delA(theta):
 
     Parameters
     --------------------------------------------
-        a : float
-        b : float
+    a : float
+    b : float
 
     Returns
     --------------------------------------------
-        out : float
+    out : float
     '''
     a = theta[0]
     b = theta[1]
-    return digamma(a)-digamma(a+b)
\ No newline at end of file
+    return digamma(a)-digamma(a+b)

diff --git a/popt/loop/ensemble.py b/popt/loop/ensemble.py
index 4544b55..c0fdf35 100644
--- a/popt/loop/ensemble.py
+++ b/popt/loop/ensemble.py
@@ -19,20 +19,20 @@ class Ensemble(PETEnsemble):
 
     Methods
     -------
-        get_state()
-            Returns control vector as ndarray
+    get_state()
+        Returns control vector as ndarray
 
-        get_cov()
-            Returns the ensemble covariance matrix
+    get_cov()
+        Returns the ensemble covariance matrix
 
-        function(x,*args)
-            Objective function called during optimization
+    function(x,*args)
+        Objective function called during optimization
 
-        gradient(x,*args)
-            Ensemble gradient
+    gradient(x,*args)
+        Ensemble gradient
 
-        hessian(x,*args)
-            Ensemble hessian
+    hessian(x,*args)
+        Ensemble hessian
 
     """
@@ -176,13 +176,13 @@ def function(self, x, *args):
 
         Parameters
         ----------
-            x : ndarray
-                Control vector, shape (number of controls, number of perturbations)
+        x : ndarray
+            Control vector, shape (number of controls, number of perturbations)
 
         Returns
         -------
-            obj_func_values : numpy.ndarray
-                Objective function values, shape (number of perturbations, )
+        obj_func_values : numpy.ndarray
+            Objective function values, shape (number of perturbations, )
         """
         self._aux_input()
@@ -229,15 +229,15 @@ def gradient(self, x, *args):
 
         Parameters
         ----------
-            x : ndarray
-                Control vector, shape (number of controls, )
+        x : ndarray
+            Control vector, shape (number of controls, )
 
-            args : tuple
-                Covarice (:math:`C_x`), shape (number of controls, number of controls)
+        args : tuple
+            Covariance (:math:`C_x`), shape (number of controls, number of controls)
 
         Returns
         -------
-            gradient : numpy.ndarray
+        gradient : numpy.ndarray
             The gradient evaluated at x, shape (number of controls, )
         """
@@ -297,13 +297,13 @@ def hessian(self, x=None, *args):
 
         Parameters
         ----------
-            x : ndarray
-                Control vector, shape (number of controls, number of perturbations)
+        x : ndarray
+            Control vector, shape (number of controls, number of perturbations)
 
         Returns
         -------
-            hessian: numpy.ndarray
-                The hessian evaluated at x, shape (number of controls, number of controls)
+        hessian: numpy.ndarray
+            The hessian evaluated at x, shape (number of controls, number of controls)
 
         References
         ----------
@@ -344,15 +344,15 @@ def calc_ensemble_weights(self, x, *args):
 
         Parameters
         ----------
-            x : ndarray
-                Control vector, shape (number of controls, )
+        x : ndarray
+            Control vector, shape (number of controls, )
 
-            args : tuple
-                Inflation factor and covarice (:math:`C_x`), shape (number of controls, number of controls)
+        args : tuple
+            Inflation factor and covariance (:math:`C_x`), shape (number of controls, number of controls)
 
         Returns
         -------
-            sens_matrix, best_ens, best_func : tuple
+        sens_matrix, best_ens, best_func : tuple
             The weighted ensemble, the best ensemble member, and the best objective function value
         """

diff --git a/popt/loop/optimize.py b/popt/loop/optimize.py
index 2f3992b..00c9dc5 100644
--- a/popt/loop/optimize.py
+++ b/popt/loop/optimize.py
@@ -25,40 +25,40 @@ class Optimize:
 
     Attributes
     ----------
-        logger : Logger
-            Print output to screen and log-file
+    logger : Logger
+        Print output to screen and log-file
 
-        pickle_restart_file : str
-            Save name for pickle dump/load
+    pickle_restart_file : str
+        Save name for pickle dump/load
 
-        optimize_result : OptimizeResult
-            Dictionary with results for the current iteration
+    optimize_result : OptimizeResult
+        Dictionary with results for the current iteration
 
-        iteration : int
-            Iteration index
+    iteration : int
+        Iteration index
 
-        max_iter : int
-            Max number of iterations
+    max_iter : int
+        Max number of iterations
 
-        restart : bool
-            Restart flag
+    restart : bool
+        Restart flag
 
-        restartsave : bool
-            Save restart information flag
+    restartsave : bool
+        Save restart information flag
 
     Methods
     -------
-        run_loop()
-            The main optimization loop
+    run_loop()
+        The main optimization loop
 
-        save()
-            Save restart file
+    save()
+        Save restart file
 
-        load()
-            Load restart file
+    load()
+        Load restart file
 
-        calc_update()
-            Empty dummy function, actual functionality must be defined by the subclasses
+    calc_update()
+        Empty dummy function, actual functionality must be defined by the subclasses
 
     """
@@ -66,7 +66,7 @@ def __init__(self, **options):
         """
         Parameters
         ----------
-            options: dict
+        options: dict
             Optimization options
 
         """

diff --git a/popt/misc_tools/optim_tools.py b/popt/misc_tools/optim_tools.py
index 72fc80a..1e07f43 100644
--- a/popt/misc_tools/optim_tools.py
+++ b/popt/misc_tools/optim_tools.py
@@ -72,16 +72,16 @@ def corr2BlockDiagonal(state, corr):
 
    Parameters
    ----------
-        state: dict
-            Current control state, including state names
+    state: dict
+        Current control state, including state names
 
-        corr : array_like
-            Correlation matrix, of shape (d, d)
+    corr : array_like
+        Correlation matrix, of shape (d, d)
 
    Returns
    -------
-        corr_blocks : list
-            block matrices, one for each variable type
+    corr_blocks : list
+        block matrices, one for each variable type
    """
@@ -108,22 +108,22 @@ def time_correlation(a, state, n_timesteps, dt=1.0):
 
    Parameters
    -------------------------------------------------------------
-        a : float
-            Correlation coef, in range (0, 1).
-
-        state : dict
-            Control state (represented in a dict).
-
-        n_timesteps : int
-            Number of time-steps to correlate for each component.
-
-        dt : float or int
-            Duration between each time-step. Default is 1.
+    a : float
+        Correlation coef, in range (0, 1).
+
+    state : dict
+        Control state (represented in a dict).
+
+    n_timesteps : int
+        Number of time-steps to correlate for each component.
+
+    dt : float or int
+        Duration between each time-step. Default is 1.
+
    Returns
    -------------------------------------------------------------
-        out : numpy.ndarray
-            Correlation matrix with time correlation
+    out : numpy.ndarray
+        Correlation matrix with time correlation
    """
    dim_states = [int(state[name].size/n_timesteps) for name in list(state.keys())]
    blocks = []
@@ -152,13 +152,13 @@ def cov2corr(cov):
 
    Parameters
    -------------
-        cov : array_like
-            The covaraince matrix, of shape (d,d).
+    cov : array_like
+        The covariance matrix, of shape (d,d).
    Returns
    -------------
-        out : numpy.ndarray
-            The correlation matrix, of shape (d,d)
+    out : numpy.ndarray
+        The correlation matrix, of shape (d,d)
    """
    std  = np.sqrt(np.diag(cov))
    corr = np.divide(cov, np.outer(std, std))
@@ -171,16 +171,16 @@ def corr2cov(corr, std):
 
    Parameters
    ----------
-        corr : array_like
-            The correlation matrix, of shape (d,d).
+    corr : array_like
+        The correlation matrix, of shape (d,d).
 
-        std : array_like
-            Array of the standard deviations, of shape (d, ).
+    std : array_like
+        Array of the standard deviations, of shape (d, ).
 
    Returns
    -------
-        out : numpy.ndarray
-            The covaraince matrix, of shape (d,d)
+    out : numpy.ndarray
+        The covariance matrix, of shape (d,d)
    """
    cov = np.multiply(corr, np.outer(std, std))
    return cov
@@ -192,13 +192,13 @@ def get_sym_pos_semidef(a):
 
    Parameters
    ----------
-        a : array_like
-            The input matrix, of shape (d,d)
+    a : array_like
+        The input matrix, of shape (d,d)
 
    Returns
    -------
-        a : numpy.ndarray
-            The positive semidefinite matrix, of shape (d,d)
+    a : numpy.ndarray
+        The positive semidefinite matrix, of shape (d,d)
    """
    rtol = 1e-05
@@ -218,16 +218,16 @@ def clip_state(x, bounds):
 
    Parameters
    ----------
-        x : array_like
-            The input state
-
-        bounds : array_like
-            (min, max) pairs for each element in x. None is used to specify no bound.
+    x : array_like
+        The input state
+
+    bounds : array_like
+        (min, max) pairs for each element in x. None is used to specify no bound.
 
    Returns
    -------
-        x : numpy.ndarray
-            The state after truncation
+    x : numpy.ndarray
+        The state after truncation
    """
    if bounds is not None:
@@ -245,13 +245,13 @@ def get_optimize_result(obj):
 
    Parameters
    ----------
-        obj : popt.loop.optimize.Optimize
-            An instance of an optimization class
+    obj : popt.loop.optimize.Optimize
+        An instance of an optimization class
 
    Returns
    -------
-        save_dict : scipy.optimize.OptimizeResult
-            The requested optimization results
+    save_dict : scipy.optimize.OptimizeResult
+        The requested optimization results
    """
    # Initialize dictionary of variables to save
@@ -288,8 +288,8 @@ def save_optimize_results(intermediate_result):
 
    Parameters
    ----------
-        intermediate_result : scipy.optimize.OptimizeResult
-            An instance of an OptimizeResult class
+    intermediate_result : scipy.optimize.OptimizeResult
+        An instance of an OptimizeResult class
    """
    # Cast to OptimizeResult if a ndarray is passed as argument

diff --git a/popt/update_schemes/cma.py b/popt/update_schemes/cma.py
index e634208..6e77845 100644
--- a/popt/update_schemes/cma.py
+++ b/popt/update_schemes/cma.py
@@ -9,39 +9,39 @@ def __init__(self, ne, dim, alpha_mu=None, n_mu=None, alpha_1=None, alpha_c=None
        '''
        Parameters
        ----------------------------------------------------------------------------------------------------------
-            ne : int
-                Ensemble size
-
-            dim : int
-                Dimensions of control vector
-
-            alpha_mu : float
-                Learning rate for rank-mu update. If None, value proposed in [1] is used.
-
-            n_mu : int, `n_mu < ne`
-                Number of best samples of ne, to be used for rank-mu update.
-                Default is int(ne/2).
-
-            alpha_1 : float
-                Learning rate fro rank-one update. If None, value proposed in [1] is used.
-
-            alpha_c : float
-                Parameter (inverse if backwards time horizen)for evolution path update
-                in the rank-one update. See [1] for more info. If None, value proposed in [1] is used.
+        ne : int
+            Ensemble size
+
+        dim : int
+            Dimensions of control vector
+
+        alpha_mu : float
+            Learning rate for rank-mu update. If None, value proposed in [1] is used.
+
+        n_mu : int, `n_mu < ne`
+            Number of best samples of ne, to be used for rank-mu update.
+            Default is int(ne/2).
+
+        alpha_1 : float
+            Learning rate for rank-one update. If None, value proposed in [1] is used.
+
+        alpha_c : float
+            Parameter (inverse of backwards time horizon) for evolution path update
+            in the rank-one update. See [1] for more info. If None, value proposed in [1] is used.
 
-            corr_update : bool
-                If True, CMA is used to update a correlation matrix. Default is False.
-
-            equal_weights : bool
-                If True, all n_mu members are assign equal weighting, `w_i = 1/n_mu`.
-                If False, the weighting scheme proposed in [1], where `w_i = log(n_mu + 1)-log(i)`,
-                and normalized such that they sum to one. Defualt is True.
+        corr_update : bool
+            If True, CMA is used to update a correlation matrix. Default is False.
+
+        equal_weights : bool
+            If True, all n_mu members are assigned equal weighting, `w_i = 1/n_mu`.
+            If False, the weighting scheme proposed in [1] is used, where `w_i = log(n_mu + 1)-log(i)`,
+            normalized such that the weights sum to one. Default is True.
 
        References
        ----------------------------------------------------------------------------------------------------------
-            [1] Hansen, N. (2006). The CMA evolution strategy: a comparing review.
-                In J. Lozano, P. Larranaga, I. Inza & E. Bengoetxea (ed.), Towards a new evolutionary computation.
-                Advances on estimation of distribution algorithms (pp. 75--102) . Springer .
+        [1] Hansen, N. (2006). The CMA evolution strategy: a comparing review.
+            In J. Lozano, P. Larranaga, I. Inza & E. Bengoetxea (ed.), Towards a new evolutionary computation.
+            Advances on estimation of distribution algorithms (pp. 75--102). Springer.
        '''
        self.alpha_mu = alpha_mu
        self.n_mu     = n_mu
@@ -130,4 +130,4 @@ def __call__(self, cov, step, X, J):
        C_one = self._rank_one(step)
        cov   = (1 - a_one - a_mu)*cov + a_one*C_one + a_mu*C_mu
 
-        return cov
\ No newline at end of file
+        return cov

diff --git a/popt/update_schemes/enopt.py b/popt/update_schemes/enopt.py
index 5e3ce41..4350103 100644
--- a/popt/update_schemes/enopt.py
+++ b/popt/update_schemes/enopt.py
@@ -23,8 +23,8 @@ class EnOpt(Optimize):
 
    Methods
    -------
-        calc_update()
-            Update using steepest ascent method with ensemble gradient
+    calc_update()
+        Update using steepest ascent method with ensemble gradient
 
    References
    ----------
@@ -36,42 +36,42 @@ def __init__(self, fun, x, args, jac, hess, bounds=None, **options):
        """
        Parameters
        ----------
-            fun: callable
-                objective function
-
-            x: ndarray
-                Initial state
-
-            args: tuple
-                Initial covariance
-
-            jac: callable
-                Gradient function
-
-            hess: callable
-                Hessian function
-
-            bounds: list, optional
-                (min, max) pairs for each element in x. None is used to specify no bound.
-
-            options: dict
-                Optimization options
-
-                - maxiter: maximum number of iterations (default 10)
-                - restart: restart optimization from a restart file (default false)
-                - restartsave: save a restart file after each successful iteration (defalut false)
-                - tol: convergence tolerance for the objective function (default 1e-6)
-                - alpha: step size for the steepest decent method (default 0.1)
-                - beta: momentum coefficient for running accelerated optimization (default 0.0)
-                - alpha_maxiter: maximum number of backtracing trials (default 5)
-                - resample: number indicating how many times resampling is tried if no improvement is found
-                - optimizer: 'GA' (gradient accent) or Adam (default 'GA')
-                - nesterov: use Nesterov acceleration if true (default false)
-                - hessian: use Hessian approximation (if the algorithm permits use of Hessian) (default false)
-                - normalize: normalize the gradient if true (default true)
-                - cov_factor: factor used to shrink the covariance for each resampling trial (defalut 0.5)
-                - savedata: specify which class variables to save to the result files (state, objective
-                  function value, iteration number, number of function evaluations, and number
-                  of gradient evaluations, are always saved)
+        fun: callable
+            objective function
+
+        x: ndarray
+            Initial state
+
+        args: tuple
+            Initial covariance
+
+        jac: callable
+            Gradient function
+
+        hess: callable
+            Hessian function
+
+        bounds: list, optional
+            (min, max) pairs for each element in x. None is used to specify no bound.
+
+        options: dict
+            Optimization options
+
+            - maxiter: maximum number of iterations (default 10)
+            - restart: restart optimization from a restart file (default false)
+            - restartsave: save a restart file after each successful iteration (default false)
+            - tol: convergence tolerance for the objective function (default 1e-6)
+            - alpha: step size for the steepest descent method (default 0.1)
+            - beta: momentum coefficient for running accelerated optimization (default 0.0)
+            - alpha_maxiter: maximum number of backtracking trials (default 5)
+            - resample: number indicating how many times resampling is tried if no improvement is found
+            - optimizer: 'GA' (gradient ascent) or Adam (default 'GA')
+            - nesterov: use Nesterov acceleration if true (default false)
+            - hessian: use Hessian approximation (if the algorithm permits use of Hessian) (default false)
+            - normalize: normalize the gradient if true (default true)
+            - cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
+            - savedata: specify which class variables to save to the result files (state, objective
+              function value, iteration number, number of function evaluations, and number
+              of gradient evaluations, are always saved)
        """

diff --git a/popt/update_schemes/genopt.py b/popt/update_schemes/genopt.py
index 966b45b..c746b21 100644
--- a/popt/update_schemes/genopt.py
+++ b/popt/update_schemes/genopt.py
@@ -17,28 +17,28 @@ def __init__(self, fun, x, args, jac, jac_mut, corr_adapt=None, bounds=None, **o
        """
        Parameters
        ----------
-            fun: callable
-                objective function
+        fun: callable
+            objective function
 
-            x: ndarray
-                Initial state
+        x: ndarray
+            Initial state
 
-            args: tuple
-                Initial covariance
+        args: tuple
+            Initial covariance
 
-            jac: callable
-                Gradient function
+        jac: callable
+            Gradient function
 
-            jac_mut: callable
-                Mutation gradient function
-
-            corr_adapt : callable
-                Function for correalation matrix adaption
+        jac_mut: callable
+            Mutation gradient function
+
+        corr_adapt : callable
+            Function for correlation matrix adaptation
 
-            bounds: list, optional
-                (min, max) pairs for each element in x. None is used to specify no bound.
+        bounds: list, optional
+            (min, max) pairs for each element in x. None is used to specify no bound.
 
-            options: dict
+        options: dict
            Optimization options
        """

diff --git a/popt/update_schemes/optimizers.py b/popt/update_schemes/optimizers.py
index d50a487..b52a4c0 100644
--- a/popt/update_schemes/optimizers.py
+++ b/popt/update_schemes/optimizers.py
@@ -13,45 +13,45 @@ class GradientAscent:
 
    Attributes
    -----------------------------------------------------------------------------------
-        step_size : float
-            The initial step size provided during initialization.
+    step_size : float
+        The initial step size provided during initialization.
 
-        momentum : float
-            The initial momentum factor provided during initialization.
+    momentum : float
+        The initial momentum factor provided during initialization.
 
-        velocity : array_like
-            Current velocity of the optimization process.
+    velocity : array_like
+        Current velocity of the optimization process.
 
-        temp_velocity : array_like
-            Temporary velocity
+    temp_velocity : array_like
+        Temporary velocity
 
-        _step_size : float
-            Private attribute for temporarily modifying step size.
+    _step_size : float
+        Private attribute for temporarily modifying step size.
 
-        _momentum : float
-            Private attribute for temporarily modifying momentum.
+    _momentum : float
+        Private attribute for temporarily modifying momentum.
 
    Methods
    -----------------------------------------------------------------------------------
-        apply_update(control, gradient, **kwargs):
-            Apply a gradient update to the control parameter.
+    apply_update(control, gradient, **kwargs):
+        Apply a gradient update to the control parameter.
 
-        apply_backtracking():
-            Apply backtracking by reducing step size and momentum temporarily.
+    apply_backtracking():
+        Apply backtracking by reducing step size and momentum temporarily.
 
-        restore_parameters():
-            Restore the original step size and momentum values.
+    restore_parameters():
+        Restore the original step size and momentum values.
    """

    def __init__(self, step_size, momentum):
        r"""
        Arguments
        -----------------------------------------------------------------------------------
-            step_size : float
-                The step size (learning rate) for the gradient ascent.
+        step_size : float
+            The step size (learning rate) for the gradient ascent.
 
-            momentum : float
-                The momentum factor to apply during updates.
+        momentum : float
+            The momentum factor to apply during updates.
        """
        self.step_size = step_size
@@ -69,19 +69,19 @@ def apply_update(self, control, gradient, **kwargs):
 
        Arguments
        -------------------------------------------------------------------------------------
-            control : array_like
-                The current value of the parameter being optimized.
-
-            gradient : array_like
-                The gradient of the objective function with respect to the control parameter.
-
-            **kwargs : dict
-                Additional keyword arguments.
+        control : array_like
+            The current value of the parameter being optimized.
+
+        gradient : array_like
+            The gradient of the objective function with respect to the control parameter.
+
+        **kwargs : dict
+            Additional keyword arguments.
 
        Returns
        -------------------------------------------------------------------------------------
-            new_control, temp_velocity: tuple
-                The new value of the control parameter after the update, and the current state step.
+        new_control, temp_velocity: tuple
+            The new value of the control parameter after the update, and the current state step.
""" alpha = self._step_size beta = self._momentum @@ -97,19 +97,19 @@ def apply_smc_update(self, control, gradient, **kwargs): Arguments ------------------------------------------------------------------------------------- - control : array_like - The current value of the parameter being optimized. + control : array_like + The current value of the parameter being optimized. - gradient : array_like - The gradient of the objective function with respect to the control parameter. + gradient : array_like + The gradient of the objective function with respect to the control parameter. - **kwargs : dict - Additional keyword arguments. + **kwargs : dict + Additional keyword arguments. Returns ------------------------------------------------------------------------------------- - new_control: numpy.ndarray - The new value of the control parameter after the update. + new_control: numpy.ndarray + The new value of the control parameter after the update. """ alpha = self._step_size @@ -153,49 +153,49 @@ class Adam: Attributes ------------------------------------------------------------------------------------- - step_size : float - The initial step size provided during initialization. + step_size : float + The initial step size provided during initialization. - beta1 : float - The exponential decay rate for the first moment estimates. + beta1 : float + The exponential decay rate for the first moment estimates. - beta2 : float - The exponential decay rate for the second moment estimates. + beta2 : float + The exponential decay rate for the second moment estimates. - vel1 : 1-D array_like - First moment estimate. + vel1 : 1-D array_like + First moment estimate. - vel2 : 1-D array_like - Second moment estimate. + vel2 : 1-D array_like + Second moment estimate. - eps : float - Small constant to prevent division by zero. + eps : float + Small constant to prevent division by zero. - _step_size : float - Private attribute for temporarily modifying step size. + _step_size : float + Private attribute for temporarily modifying step size. - temp_vel1 : 1-D array_like - Temporary first moment estimate. + temp_vel1 : 1-D array_like + Temporary first moment estimate. - temp_vel2 : 1-D array_like - Temporary Second moment estimate. + temp_vel2 : 1-D array_like + Temporary Second moment estimate. Methods ------------------------------------------------------------------------------------- - apply_update(control, gradient, **kwargs): - Apply an Adam update to the control parameter. + apply_update(control, gradient, **kwargs): + Apply an Adam update to the control parameter. - apply_backtracking(): - Apply backtracking by reducing step size temporarily. + apply_backtracking(): + Apply backtracking by reducing step size temporarily. - restore_parameters(): - Restore the original step size. + restore_parameters(): + Restore the original step size. References ------------------------------------------------------------------------------------- - [1] Kingma, D. P., & Ba, J. (2014). - Adam: A Method for Stochastic Optimization. - arXiv preprint arXiv:1412.6980. + [1] Kingma, D. P., & Ba, J. (2014). + Adam: A Method for Stochastic Optimization. + arXiv preprint arXiv:1412.6980. """ def __init__(self, step_size, beta1=0.9, beta2=0.999): @@ -212,14 +212,14 @@ def __init__(self, step_size, beta1=0.9, beta2=0.999): Arguments ------------------------------------------------------------------------------------- - step_size : float - The step size (learning rate) for the optimization. 
+        step_size : float
+            The step size (learning rate) for the optimization.
 
-            beta1 : float, optional
-                The exponential decay rate for the first moment estimates (default is 0.9).
+        beta1 : float, optional
+            The exponential decay rate for the first moment estimates (default is 0.9).
 
-            beta2 : float, optional
-                The exponential decay rate for the second moment estimates (default is 0.999).
+        beta2 : float, optional
+            The exponential decay rate for the second moment estimates (default is 0.999).
        """
        self.step_size = step_size
@@ -240,19 +240,19 @@ def apply_update(self, control, gradient, **kwargs):
 
        Arguments
        -------------------------------------------------------------------------------------
-            control : array_like
-                The current value of the parameter being optimized.
-
-            gradient : array_like
-                The gradient of the objective function with respect to the control parameter.
-
-            **kwargs : dict
-                Additional keyword arguments, including 'iter' for the current iteration.
+        control : array_like
+            The current value of the parameter being optimized.
+
+        gradient : array_like
+            The gradient of the objective function with respect to the control parameter.
+
+        **kwargs : dict
+            Additional keyword arguments, including 'iter' for the current iteration.
 
        Returns
        -------------------------------------------------------------------------------------
-            new_control, temp_velocity: tuple
-                The new value of the control parameter after the update, and the current state step.
+        new_control, temp_velocity: tuple
+            The new value of the control parameter after the update, and the current state step.
        """
        iter  = kwargs['iter']
        alpha = self._step_size

diff --git a/popt/update_schemes/smcopt.py b/popt/update_schemes/smcopt.py
index 540a3e8..e575de2 100644
--- a/popt/update_schemes/smcopt.py
+++ b/popt/update_schemes/smcopt.py
@@ -20,33 +20,33 @@ def __init__(self, fun, x, args, sens, bounds=None, **options):
        """
        Parameters
        ----------
-            fun: callable
-                objective function
-
-            x: ndarray
-                Initial state
-
-            sens: callable
-                Ensemble sensitivity
-
-            bounds: list, optional
-                (min, max) pairs for each element in x. None is used to specify no bound.
-
-            options: dict
-                Optimization options
-
-                - maxiter: maximum number of iterations (default 10)
-                - restart: restart optimization from a restart file (default false)
-                - restartsave: save a restart file after each successful iteration (defalut false)
-                - tol: convergence tolerance for the objective function (default 1e-6)
-                - alpha: weight between previous and new step (default 0.1)
-                - alpha_maxiter: maximum number of backtracing trials (default 5)
-                - resample: number indicating how many times resampling is tried if no improvement is found
-                - cov_factor: factor used to shrink the covariance for each resampling trial (defalut 0.5)
-                - inflation_factor: term used to weight down prior influence (defalult 1)
-                - savedata: specify which class variables to save to the result files (state, objective function
-                  value, iteration number, number of function evaluations, and number of gradient
-                  evaluations, are always saved)
+        fun: callable
+            objective function
+
+        x: ndarray
+            Initial state
+
+        sens: callable
+            Ensemble sensitivity
+
+        bounds: list, optional
+            (min, max) pairs for each element in x. None is used to specify no bound.
+
+        options: dict
+            Optimization options
+
+            - maxiter: maximum number of iterations (default 10)
+            - restart: restart optimization from a restart file (default false)
+            - restartsave: save a restart file after each successful iteration (default false)
+            - tol: convergence tolerance for the objective function (default 1e-6)
+            - alpha: weight between previous and new step (default 0.1)
+            - alpha_maxiter: maximum number of backtracking trials (default 5)
+            - resample: number indicating how many times resampling is tried if no improvement is found
+            - cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
+            - inflation_factor: term used to weight down prior influence (default 1)
+            - savedata: specify which class variables to save to the result files (state, objective function
+              value, iteration number, number of function evaluations, and number of gradient
+              evaluations, are always saved)
        """
 
        # init PETEnsemble

From e3c6eed94661d6b91a864b82e913fa36d3158e28 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 18:14:38 +0100
Subject: [PATCH 5/7] Improve guide for doc generation/preview/debug

amend
---
 docs/dev_guide.md | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/docs/dev_guide.md b/docs/dev_guide.md
index 6aea7e0..fb69bbb 100644
--- a/docs/dev_guide.md
+++ b/docs/dev_guide.md
@@ -4,12 +4,14 @@ Many python libraries use `sphinx` to generate docs via `readthedocs` (also host
 This setup is too powerful (and therefore complicated) for our purposes.
 Instead, we use [pdoc](https://github.com/mitmproxy/pdoc),
 run via **GitHub Actions**, as configured [here](./.github/workflows/deploy-docs.yml).
-The resulting `html` is hosted with **Github Pages**.
+
+.. warning:: dirs must contain an `__init__.py` file to be recognized by `pdoc`.
 
 `pdoc` grabs the docstrings of modules/classes/functions,
 and renders them into pretty html.
-The docstrings should be written using markdown syntax.
+The resulting `html` is hosted with **Github Pages**.
+The docstrings should be written using **markdown** syntax.
 In general, you should also try to
 [reference other objects](https://pdoc.dev/docs/pdoc.html#link-to-other-identifiers)
 (if appropriate) by using backticks.
 And if you want to do it really well, you should follow
@@ -23,6 +25,20 @@ To *live preview* your changes, do
 pdoc -t docs/templates --docformat=numpy --math pipt popt misc ensemble simulator input_output docs/dev_guide.py
 ```
 
+This will probably open a browser window with the rendered html.
+You can also ctrl/cmd-click the printed localhost link, or simply copy-paste it into your browser.
+
+If you want to reproduce errors that occur in **CI**, you'll want to include the option `-o docs-generated`.
+Since this actually generates html *files*, it will process **all** of the files by default
+(without which you might not pick up on the error).
+
+.. note:: PS: it seems that the upstream `pdoc` does not report where parsing errors occur
+    (it simply quits with a traceback).
+    We therefore use my (`patnr`) fork which
+
+    - skips the markdown conversion,
+    - prints the specific docstring that causes issues.
+
 ## Tests
 The test suite is orchestrated using `pytest`. Both in **CI** and locally.
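For orientation, here is the kind of test that `pytest` collects, exercising two of the helpers documented in patch 4. This is only a sketch: the file and function names are invented, and the import alias mirrors how `popt/loop/dist.py` imports `optim_tools` in the patches above.

```python
# test_optim_tools.py -- a hypothetical pytest test; pytest discovers it
# because both the file name and the function name start with "test".
import numpy as np

from popt.misc_tools import optim_tools as ot


def test_cov2corr_roundtrip():
    # corr2cov should invert cov2corr when given the original standard deviations
    cov = np.array([[4.0, 1.2],
                    [1.2, 9.0]])
    corr = ot.cov2corr(cov)
    std = np.sqrt(np.diag(cov))
    assert np.allclose(ot.corr2cov(corr, std), cov)
```

Running plain `pytest` from the repository root picks this up, both locally and in CI.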
From e91e48aa354ebc6551fead12c76c7b708032eb41 Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 18:50:21 +0100
Subject: [PATCH 6/7] Document cost_functions/

amend
---
 popt/cost_functions/__init__.py  |  0
 popt/cost_functions/ecalc_npv.py | 12 ++++++------
 2 files changed, 6 insertions(+), 6 deletions(-)
 create mode 100644 popt/cost_functions/__init__.py

diff --git a/popt/cost_functions/__init__.py b/popt/cost_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/popt/cost_functions/ecalc_npv.py b/popt/cost_functions/ecalc_npv.py
index 6e4f8d4..81c1adb 100644
--- a/popt/cost_functions/ecalc_npv.py
+++ b/popt/cost_functions/ecalc_npv.py
@@ -4,11 +4,6 @@
 from pathlib import Path
 import pandas as pd
 
-from libecalc.core.ecalc import EnergyCalculator
-from libecalc.common.time_utils import Frequency
-from libecalc.input.model import YamlModel
-
-
 HERE = Path().cwd()  # fallback for ipynb's
 HERE = HERE.resolve()
@@ -34,6 +29,11 @@ def ecalc_npv(pred_data, keys_opt, report):
         Objective function values (NPV) for all ensemble members.
     """
 
+    from libecalc.core.ecalc import EnergyCalculator
+    from libecalc.common.time_utils import Frequency
+    from libecalc.input.model import YamlModel
+
+
     # Economic values
     npv_const = {}
     for name, value in keys_opt['npv_const']:
@@ -124,4 +124,4 @@ def results_as_df(yaml_model, results, getter) -> pd.DataFrame:
     df = pd.DataFrame(df, index=res.timesteps)
     df.index.name = "dates"
     df.attrs = attrs
-    return df
\ No newline at end of file
+    return df

From 051944bfb9067fc0ed3e9eeb71441014fce6ee9d Mon Sep 17 00:00:00 2001
From: patnr
Date: Thu, 14 Dec 2023 18:51:52 +0100
Subject: [PATCH 7/7] Improve docstrings

---
 popt/update_schemes/optimizers.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/popt/update_schemes/optimizers.py b/popt/update_schemes/optimizers.py
index b52a4c0..d9c7879 100644
--- a/popt/update_schemes/optimizers.py
+++ b/popt/update_schemes/optimizers.py
@@ -7,9 +7,11 @@ class GradientAscent:
     The gradient descent update equation with momentum is given by:
 
     .. math::
-        v_t = \beta * v_{t-1} + \alpha * gradient
+        \begin{align}
+        v_t &= \beta * v_{t-1} + \alpha * gradient \\\
+        x_t &= x_{t-1} - v_t
+        \end{align}
 
-        x_t = x_{t-1} - v_t
 
     Attributes
     -----------------------------------------------------------------------------------
@@ -45,8 +47,8 @@ def __init__(self, step_size, momentum):
         r"""
-        Arguments
-        -----------------------------------------------------------------------------------
+        Parameters
+        ----------
         step_size : float
             The step size (learning rate) for the gradient ascent.
 
@@ -62,12 +64,13 @@ def __init__(self, step_size, momentum):
         self._step_size = step_size
         self._momentum = momentum
 
+
     def apply_update(self, control, gradient, **kwargs):
         """
         Apply a gradient update to the control parameter.
         NOTE: This is the steepest decent update: x_new = x_old - x_step.
 
-        Arguments
+        Parameters
         -------------------------------------------------------------------------------------
         control : array_like
             The current value of the parameter being optimized.
@@ -210,7 +213,7 @@ def __init__(self, step_size, beta1=0.9, beta2=0.999):
             v_t_hat = v_t / (1 - β2^t)                           \n
             x_{t+1} = x_t - α * m_t_hat / (sqrt(v_t_hat) + ε)
 
-        Arguments
+        Parameters
         -------------------------------------------------------------------------------------
         step_size : float
             The step size (learning rate) for the optimization.
@@ -238,7 +241,7 @@ def apply_update(self, control, gradient, **kwargs):
         Apply a gradient update to the control parameter.
         NOTE: This is the steepest decent update: x_new = x_old - x_step.
 
-        Arguments
+        Parameters
         -------------------------------------------------------------------------------------
         control : array_like
             The current value of the parameter being optimized.
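To make the documented update rules concrete, here is a minimal usage sketch of the two optimizers. The constructors, the `apply_update` signatures and return tuples, and the backtracking helpers are all taken from the docstrings above; the numeric values and the gradient are made up for illustration.

```python
import numpy as np

from popt.update_schemes.optimizers import GradientAscent, Adam

x = np.zeros(2)             # control vector
g = np.array([0.5, -1.0])   # gradient of the objective at x

# Momentum: v_t = beta*v_{t-1} + alpha*gradient, then x_t = x_{t-1} - v_t
ga = GradientAscent(step_size=0.1, momentum=0.9)
x_new, step = ga.apply_update(x, g, iter=1)

# Adam reads kwargs['iter'] for its bias-corrected moment estimates
adam = Adam(step_size=0.1)  # beta1=0.9, beta2=0.999 by default
x_new, step = adam.apply_update(x, g, iter=1)

# If a trial step is rejected, shrink the step temporarily, retry,
# and restore the original parameters afterwards.
adam.apply_backtracking()
x_new, step = adam.apply_update(x, g, iter=1)
adam.restore_parameters()
```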