Do not scale gradient when using Hessian
rolfjl committed Sep 15, 2023
1 parent ac07ca4 commit e4db972
Showing 1 changed file with 8 additions and 5 deletions.
popt/update_schemes/enopt.py (13 changes: 8 additions & 5 deletions)
@@ -100,13 +100,14 @@ def calc_update(self, iteration, logger=None):
         # Augment state
         aug_state = ot.aug_optim_state(current_state, list_states)
 
-        # Compute the steepest ascent step. Scale the gradient with 2-norm (or inf-norm: np.inf)
-        normalize = np.maximum(la.norm(self.sens_matrix, np.inf), 1e-12)
-        search_direction = self.sens_matrix / normalize
-        H = 1
+        # Compute the steepest ascent step
         if self.hessian:
+            search_direction = self.sens_matrix  # choose to not normalize when using Hessian
             H = 1 / np.diag(self.cov_sens_matrix)
-        search_direction *= H
+            search_direction *= H
+        else:
+            normalize = np.maximum(la.norm(self.sens_matrix, np.inf), 1e-12)
+            search_direction = self.sens_matrix / normalize  # scale the gradient with 2-norm (or inf-norm: np.inf)
         aug_state_upd = self.optimizer.apply_update(aug_state, search_direction, iter=iteration)
 
         # Make sure update is within bounds
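The hunk above splits the two cases into an explicit if/else: with self.hessian set, the gradient is preconditioned by the inverse diagonal of the covariance sensitivity matrix and deliberately left unnormalized, since that scaling already fixes the step magnitude; otherwise the gradient is scaled to unit inf-norm as before. Below is a minimal standalone sketch of the two branches; sens_matrix and cov_sens_matrix mirror the attributes in enopt.py, but the values and the use_hessian flag are illustrative, not part of popt.

# Minimal sketch of the two search-direction branches; the array names
# mirror the enopt.py attributes, the values are illustrative.
import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
sens_matrix = rng.normal(size=5)                           # ensemble gradient estimate
cov_sens_matrix = np.diag(rng.uniform(0.5, 2.0, size=5))   # approximate Hessian
use_hessian = True                                         # plays the role of self.hessian

if use_hessian:
    # Precondition with the inverse Hessian diagonal; no normalization,
    # because the 1/diag scaling itself determines the step magnitude.
    H = 1 / np.diag(cov_sens_matrix)
    search_direction = sens_matrix * H
else:
    # Plain steepest ascent: scale to unit inf-norm so the optimizer's
    # step-size rule alone controls how far the update moves.
    normalize = np.maximum(la.norm(sens_matrix, np.inf), 1e-12)
    search_direction = sens_matrix / normalize

print(search_direction)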
@@ -427,3 +428,5 @@ def save_analysis_debug(self, iteration):
         if not os.path.exists(folder):
             os.mkdir(folder)
 
+        # Save the variables
+        np.savez('debug_analysis_step_{0}'.format(str(iteration)), **save_dict)
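The second hunk adds a debug dump via np.savez, which writes every entry of save_dict into one .npz archive keyed by name. A brief usage sketch follows; the contents of save_dict here are hypothetical stand-ins for the analysis-step variables collected in enopt.py.

# Sketch of the debug dump added above; the save_dict contents are
# hypothetical stand-ins for the analysis-step variables.
import numpy as np

iteration = 4
save_dict = {'sens_matrix': np.zeros(3), 'step_size': np.array(0.1)}
np.savez('debug_analysis_step_{0}'.format(str(iteration)), **save_dict)

# Arrays are recovered by the same names from the .npz archive:
data = np.load('debug_analysis_step_4.npz')
print(sorted(data.files))  # ['sens_matrix', 'step_size']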
