Moved the method that retracts all variables with a given delta to Objective (#214)

* Moved the method that retracts all variables with a given delta to Objective.

* Renamed step_optim_vars to retract_optim_vars.
luisenp authored Jun 14, 2022
1 parent 1be3ead commit 649d3fb
Showing 4 changed files with 27 additions and 33 deletions.
17 changes: 17 additions & 0 deletions theseus/core/objective.py
@@ -503,3 +503,20 @@ def to(self, *args, **kwargs):
         self.dtype = dtype or self.dtype
         if self._vectorization_to is not None:
             self._vectorization_to(*args, **kwargs)
+
+    def retract_optim_vars(
+        self,
+        delta: torch.Tensor,
+        ordering: Iterable[Manifold],
+        ignore_mask: Optional[torch.Tensor] = None,
+        force_update: bool = False,
+    ):
+        var_idx = 0
+        for var in ordering:
+            new_var = var.retract(delta[:, var_idx : var_idx + var.dof()])
+            if ignore_mask is None or force_update:
+                var.update(new_var.data)
+            else:
+                var.update(new_var.data, batch_ignore_mask=ignore_mask)
+            var_idx += var.dof()
+        self.update_vectorization()
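
For context, a minimal usage sketch of the new method (hypothetical setup, not part of this diff: `objective` is an already-built Objective whose cost functions register two optimization variables with 3 and 2 degrees of freedom, and `ordering` is an iterable over those variables in that order):

import torch

batch_size = 2
# One tangent-space step for the whole problem: columns are laid out
# variable-by-variable following `ordering` (3 + 2 = 5 columns here).
delta = torch.zeros(batch_size, 5)

# Retract every optimization variable by its slice of `delta` and write the
# result back (the method also calls update_vectorization() at the end).
objective.retract_optim_vars(delta, ordering)

# Batch entries marked True in ignore_mask keep their current values,
# unless force_update=True is passed.
converged = torch.tensor([True, False])
objective.retract_optim_vars(delta, ordering, ignore_mask=converged)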
13 changes: 3 additions & 10 deletions theseus/optimizer/linear/linear_optimizer.py
@@ -69,17 +69,10 @@ def _optimize_impl(
                 warnings.warn(msg, RuntimeWarning)
                 info.status[:] = LinearOptimizerStatus.FAIL
                 return info
-        self.retract_and_update_variables(delta)
+        self.objective.retract_optim_vars(
+            delta, self.linear_solver.linearization.ordering
+        )
         info.status[:] = LinearOptimizerStatus.CONVERGED
         for var in self.linear_solver.linearization.ordering:
             info.best_solution[var.name] = var.data.clone().cpu()
         return info
-
-    # retracts all variables in the given order and updates their values
-    # with the result
-    def retract_and_update_variables(self, delta: torch.Tensor):
-        var_idx = 0
-        for var in self.linear_solver.linearization.ordering:
-            new_var = var.retract(delta[:, var_idx : var_idx + var.dof()])
-            var.update(new_var.data)
-            var_idx += var.dof()
26 changes: 5 additions & 21 deletions theseus/optimizer/nonlinear/nonlinear_optimizer.py
@@ -262,8 +262,11 @@ def _optimize_loop(
                 step_size = self.params.step_size
                 force_update = False

-            self.retract_and_update_variables(
-                delta, converged_indices, step_size, force_update=force_update
+            self.objective.retract_optim_vars(
+                delta * step_size,
+                self.linear_solver.linearization.ordering,
+                ignore_mask=converged_indices,
+                force_update=force_update,
             )

             # check for convergence
@@ -376,22 +379,3 @@ def _optimize_impl(
     @abc.abstractmethod
     def compute_delta(self, **kwargs) -> torch.Tensor:
         pass
-
-    # retracts all variables in the given order and updates their values
-    # with the result
-    def retract_and_update_variables(
-        self,
-        delta: torch.Tensor,
-        converged_indices: torch.Tensor,
-        step_size: float,
-        force_update: bool = False,
-    ):
-        var_idx = 0
-        delta = step_size * delta
-        for var in self.linear_solver.linearization.ordering:
-            new_var = var.retract(delta[:, var_idx : var_idx + var.dof()])
-            if force_update:
-                var.update(new_var.data)
-            else:
-                var.update(new_var.data, batch_ignore_mask=converged_indices)
-            var_idx += var.dof()
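
A hedged sketch of the call pattern the nonlinear loop now uses, showing how the scaled step and the convergence mask interact (assumes `objective`, `ordering`, `delta`, and a boolean `converged_indices` of shape (batch_size,) exist as in _optimize_loop above; the step size value is illustrative only):

step_size = 0.5  # illustrative damping factor, not a value from the diff
objective.retract_optim_vars(
    delta * step_size,               # scale the tangent-space step before retracting
    ordering,
    ignore_mask=converged_indices,   # True entries keep their current values
    force_update=False,              # pass True to update even masked entries
)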
4 changes: 2 additions & 2 deletions theseus/theseus_layer.py
@@ -218,8 +218,8 @@ def backward(ctx, *grad_outputs):
         with torch.no_grad():
             bwd_optimizer.linear_solver.linearization.linearize()
             delta = bwd_optimizer.linear_solver.solve()
-            bwd_optimizer.retract_and_update_variables(
-                delta, None, 1.0, force_update=True
+            bwd_optimizer.objective.retract_optim_vars(
+                delta, bwd_optimizer.linear_solver.linearization.ordering
             )

         # Compute gradients.
