Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rename BackwardMode.FULL --> UNROLL and simplify backward mode config #305

Merged
merged 2 commits into from
Sep 27, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ outer_optimizer = torch.optim.RMSprop([phi], lr=0.001)
for epoch in range(10):
solution, info = layer.forward(
input_tensors={"x": phi.clone(), "v": torch.ones(1, 1)},
optimizer_kwargs={"backward_mode": th.BackwardMode.IMPLICIT})
optimizer_kwargs={"backward_mode": "implicit"})
outer_loss = torch.nn.functional.mse_loss(solution["v"], v_true)
outer_loss.backward()
outer_optimizer.step()
Expand Down
16 changes: 8 additions & 8 deletions examples/backward_modes.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def quad_error_fn(optim_vars, aux_vars):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.FULL,
"backward_mode": "unroll",
},
)

Expand All @@ -103,7 +103,7 @@ def quad_error_fn(optim_vars, aux_vars):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.IMPLICIT,
"backward_mode": "implicit",
},
)

Expand All @@ -117,7 +117,7 @@ def quad_error_fn(optim_vars, aux_vars):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.TRUNCATED,
"backward_mode": "truncated",
"backward_num_iterations": 5,
},
)
Expand All @@ -134,7 +134,7 @@ def quad_error_fn(optim_vars, aux_vars):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.DLM,
"backward_mode": "dlm",
"dlm_epsilon": 1e-3,
},
)
Expand Down Expand Up @@ -175,7 +175,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.FULL,
"backward_mode": "unroll",
},
)
times["fwd"].append(time.time() - start)
Expand All @@ -191,7 +191,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.IMPLICIT,
"backward_mode": "implicit",
},
)
start = time.time()
Expand All @@ -205,7 +205,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.TRUNCATED,
"backward_mode": "truncated",
"backward_num_iterations": 5,
},
)
Expand All @@ -220,7 +220,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.DLM,
"backward_mode": "dlm",
"dlm_epsilon": 1e-3,
},
)
Expand Down
9 changes: 1 addition & 8 deletions examples/bundle_adjustment.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,6 @@
import theseus as th
import theseus.utils.examples as theg

BACKWARD_MODE = {
"implicit": th.BackwardMode.IMPLICIT,
"full": th.BackwardMode.FULL,
"truncated": th.BackwardMode.TRUNCATED,
}


# Logger
log = logging.getLogger(__name__)

Expand Down Expand Up @@ -211,7 +204,7 @@ def run(cfg: omegaconf.OmegaConf, results_path: pathlib.Path):
optimizer_kwargs={
"verbose": cfg.inner_optim.verbose,
"track_err_history": cfg.inner_optim.track_err_history,
"backward_mode": BACKWARD_MODE[cfg.inner_optim.backward_mode],
"backward_mode": cfg.inner_optim.backward_mode,
"__keep_final_step_size__": cfg.inner_optim.keep_step_size,
},
)
Expand Down
7 changes: 1 addition & 6 deletions examples/homography_estimation.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,6 @@
FONT_SZ = 0.5
FONT_PT = (5, 15)

BACKWARD_MODE = {
"implicit": th.BackwardMode.IMPLICIT,
"full": th.BackwardMode.FULL,
"truncated": th.BackwardMode.TRUNCATED,
}

# Logger
logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -406,7 +401,7 @@ def run(
"verbose": verbose,
"track_err_history": True,
"track_state_history": True,
"backward_mode": BACKWARD_MODE["implicit"],
"backward_mode": "implicit",
},
)
end_event.record()
Expand Down
14 changes: 1 addition & 13 deletions examples/pose_graph/pose_graph_synthetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,6 @@
from theseus.optimizer.linear import LinearSolver
from theseus.optimizer.linearization import Linearization

BACKWARD_MODE = {
"implicit": th.BackwardMode.IMPLICIT,
"full": th.BackwardMode.FULL,
"truncated": th.BackwardMode.TRUNCATED,
}

LINEARIZATION_MODE: Dict[str, Type[Linearization]] = {
"sparse": th.SparseLinearization,
"dense": th.DenseLinearization,
Expand Down Expand Up @@ -98,12 +92,6 @@ def run(
dtype = torch.float64
pr = cProfile.Profile()

BACKWARD_MODE = {
"implicit": th.BackwardMode.IMPLICIT,
"full": th.BackwardMode.FULL,
"truncated": th.BackwardMode.TRUNCATED,
}

LINEARIZATION_MODE: Dict[str, Type[Linearization]] = {
"sparse": th.SparseLinearization,
"dense": th.DenseLinearization,
Expand Down Expand Up @@ -232,7 +220,7 @@ def run_batch(batch_idx: int):
optimizer_kwargs={
"verbose": cfg.inner_optim.verbose,
"track_err_history": cfg.inner_optim.track_err_history,
"backward_mode": BACKWARD_MODE[cfg.inner_optim.backward_mode],
"backward_mode": cfg.inner_optim.backward_mode,
"__keep_final_step_size__": cfg.inner_optim.keep_step_size,
},
)
Expand Down
2 changes: 1 addition & 1 deletion examples/simple_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def error_fn(optim_vars, aux_vars): # returns y - v * exp(x)
for epoch in range(20):
solution, info = layer.forward(
input_tensors={"x": phi.clone(), "v": torch.ones(1, 1)},
optimizer_kwargs={"backward_mode": th.BackwardMode.IMPLICIT},
optimizer_kwargs={"backward_mode": "implicit"},
)
outer_loss = torch.nn.functional.mse_loss(solution["v"], v_true)
outer_loss.backward()
Expand Down
34 changes: 30 additions & 4 deletions theseus/optimizer/nonlinear/nonlinear_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import warnings
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, NoReturn, Optional, Type
from typing import Any, Callable, Dict, NoReturn, Optional, Type, Union

import numpy as np
import torch
Expand Down Expand Up @@ -53,10 +53,35 @@ class NonlinearOptimizerInfo(OptimizerInfo):


class BackwardMode(Enum):
    """Strategies for differentiating through a nonlinear optimizer's solve.

    Members:
        UNROLL: backpropagate through every unrolled optimizer iteration.
        IMPLICIT: use the implicit function theorem at the solution.
        TRUNCATED: backpropagate through only the last few iterations.
        DLM: use the DLM perturbation-based gradient.
        FULL: deprecated alias for UNROLL, kept for backward compatibility;
            it is normalized to UNROLL by :meth:`resolve`.
    """

    UNROLL = 0
    IMPLICIT = 1
    TRUNCATED = 2
    DLM = 3
    # Distinct value so the member exists for legacy callers; resolve() maps
    # it (and the "full" string) onto UNROLL with a DeprecationWarning.
    FULL = -1

    @staticmethod
    def resolve(key: Union[str, "BackwardMode"]) -> "BackwardMode":
        """Normalize ``key`` to a canonical :class:`BackwardMode` member.

        Args:
            key: either a :class:`BackwardMode` member or a case-insensitive
                string naming one ("unroll", "implicit", "truncated", "dlm",
                or the deprecated "full").

        Returns:
            The corresponding member, with the deprecated FULL mode (whether
            given as a member or as the string "full") mapped to UNROLL.

        Raises:
            ValueError: if ``key`` is neither a string nor a BackwardMode, or
                the string does not name a valid mode.
        """
        if isinstance(key, str):
            try:
                key = BackwardMode[key.upper()]
            except KeyError:
                # Note the space between the concatenated sentences and the
                # absence of the stray literal "f" the old message carried.
                raise ValueError(
                    f"Unrecognized backward mode {key}. "
                    f"Valid choices are unroll, implicit, truncated, dlm."
                )
        elif not isinstance(key, BackwardMode):
            raise ValueError("Backward mode must be th.BackwardMode or string.")

        # Normalize the deprecated alias regardless of whether it arrived as
        # a member or as the string "full" (the old code missed the latter).
        if key == BackwardMode.FULL:
            warnings.warn(
                "BackwardMode.FULL is deprecated and will be "
                "replaced by BackwardMode.UNROLL in future versions.",
                DeprecationWarning,
            )
            return BackwardMode.UNROLL
        return key


EndIterCallbackType = Callable[
Expand Down Expand Up @@ -351,10 +376,11 @@ def _optimize_impl(
track_err_history: bool = False,
track_state_history: bool = False,
verbose: bool = False,
backward_mode: BackwardMode = BackwardMode.FULL,
backward_mode: Union[str, BackwardMode] = BackwardMode.UNROLL,
end_iter_callback: Optional[EndIterCallbackType] = None,
**kwargs,
) -> OptimizerInfo:
backward_mode = BackwardMode.resolve(backward_mode)
with torch.no_grad():
info = self._init_info(
track_best_solution, track_err_history, track_state_history
Expand All @@ -366,7 +392,7 @@ def _optimize_impl(
f"Error: {info.last_err.mean().item()}"
)

if backward_mode in [BackwardMode.FULL, BackwardMode.DLM]:
if backward_mode in [BackwardMode.UNROLL, BackwardMode.DLM]:
self._optimize_loop(
start_iter=0,
num_iter=self.params.max_iterations,
Expand Down
4 changes: 2 additions & 2 deletions theseus/optimizer/nonlinear/tests/test_backwards.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.FULL,
"backward_mode": "unroll",
},
)
da_dx_full = torch.autograd.grad(updated_inputs["a"], data_x, retain_graph=True)[
Expand All @@ -111,7 +111,7 @@ def fit_x(data_x_np):
optimizer_kwargs={
"track_best_solution": True,
"verbose": False,
"backward_mode": th.BackwardMode.TRUNCATED,
"backward_mode": "TRUNCATED",
mhmukadam marked this conversation as resolved.
Show resolved Hide resolved
"backward_num_iterations": 5,
},
)
Expand Down
2 changes: 1 addition & 1 deletion theseus/tests/test_dlm_perturbation.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ def test_backward_pass_se3_runs():
out, _ = layer.forward(
{"target": target_data},
optimizer_kwargs={
"backward_mode": th.BackwardMode.DLM,
"backward_mode": "dlm",
"verbose": False,
},
)
Expand Down
5 changes: 4 additions & 1 deletion theseus/theseus_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,10 @@ def forward(
"currently not supported."
)
optimizer_kwargs = optimizer_kwargs or {}
backward_mode = optimizer_kwargs.get("backward_mode", None)
# Defaults to "unroll" to avoid error, we only care to see if it's not dlm.
backward_mode = BackwardMode.resolve(
optimizer_kwargs.get("backward_mode", "unroll")
)
if backward_mode == BackwardMode.DLM:
dlm_epsilon = optimizer_kwargs.get(
TheseusLayerDLMForward._DLM_EPSILON_STR, 1e-2
Expand Down
2 changes: 1 addition & 1 deletion theseus/utils/examples/tactile_pose_estimation/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ def _resolve_backward_mode(self, epoch: int) -> th.BackwardMode:
logger.info("Forcing IMPLICIT backward mode.")
return th.BackwardMode.IMPLICIT
else:
return getattr(th.BackwardMode, self.cfg.inner_optim.backward_mode)
return self.cfg.inner_optim.backward_mode

def compute_loss(
self, epoch: int, update: bool = True
Expand Down