Some more cleanup before 0.1.3 (#353)
* Fixed some test paths.

* Removed dependency on numdifftools.

* Fixed code causing warnings in unit tests.

* Replaced deprecated torch.triangular_solve with torch.linalg.solve_triangular.
luisenp authored Nov 7, 2022
1 parent 65dc682 commit 3bbbb15
Showing 18 changed files with 38 additions and 52 deletions.
5 changes: 0 additions & 5 deletions examples/README.md
@@ -37,11 +37,6 @@ The motion planning and tactile estimation examples require `hydra` installation
 ```bash
 pip install hydra-core
 ```
-The backward modes example requires `numdifftools`, which you can install with
-
-```bash
-pip install numdifftools
-```

 The homography example requires `hydra`, `kornia` and `OpenCV`, which you can install with
28 changes: 14 additions & 14 deletions examples/backward_modes.py
@@ -11,7 +11,6 @@
 import time
 from collections import defaultdict

-import numdifftools as nd
 import numpy as np
 import torch

@@ -146,20 +145,21 @@ def quad_error_fn(optim_vars, aux_vars):


 # Next we numerically check the derivative
-def fit_x(data_x_np):
-    theseus_inputs["x"] = (
-        torch.from_numpy(data_x_np).float().clone().requires_grad_().unsqueeze(0)
-    )
-    updated_inputs, _ = theseus_optim.forward(
-        theseus_inputs, optimizer_kwargs={"track_best_solution": True, "verbose": False}
-    )
-    return updated_inputs["a"].item()
+with torch.no_grad():
+
+    def fn(data_x_torch):
+        theseus_inputs["x"] = data_x_torch
+        updated_inputs, _ = theseus_optim.forward(
+            theseus_inputs,
+            optimizer_kwargs={"track_best_solution": True, "verbose": False},
+        )
+        return updated_inputs["a"]
+
+    g = (
+        torch.autograd.functional.jacobian(fn, data_x.detach())[0, 0, 0]
+        .double()
+        .numpy()
+    )

-data_x_np = data_x.detach().clone().numpy()
-dfit_x = nd.Gradient(fit_x)
-g = dfit_x(data_x_np)

 print("\n--- Numeric derivative")
 print(g)
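As an aside, the new pattern generalizes beyond this example. Below is a minimal, self-contained sketch of the same idea; the quadratic `fn` is a hypothetical stand-in for the Theseus optimizer call, and only the `torch.autograd.functional.jacobian` usage mirrors the diff:

```python
import torch


# Hypothetical stand-in for the fit: maps input data to a fitted scalar.
# In the example above, this role is played by theseus_optim.forward.
def fn(x: torch.Tensor) -> torch.Tensor:
    return (x**2).sum().unsqueeze(0)


x = torch.linspace(0.0, 1.0, 5)

# jacobian() differentiates fn with autograd, so the extra numdifftools
# dependency and the numpy round-trip are no longer needed.
g = torch.autograd.functional.jacobian(fn, x)

# Sanity check against the analytic gradient: d/dx sum(x^2) = 2x.
assert torch.allclose(g.squeeze(0), 2 * x)
```

Unlike `numdifftools`, which estimates the gradient by finite differences, `jacobian` differentiates through the computation itself, which is also why the updated example detaches `data_x` before passing it in.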
1 change: 0 additions & 1 deletion requirements/dev.txt
@@ -6,7 +6,6 @@ pre-commit>=2.9.2
 isort>=5.6.4
 differentiable-robot-model>=0.2.3
 types-PyYAML==5.4.3
-numdifftools>=0.9.40
 mock>=4.0.3
 types-mock>=4.0.8
 Sphinx==5.0.2
1 change: 0 additions & 1 deletion requirements/docs.txt
@@ -4,7 +4,6 @@ scipy>=1.5.3
 scikit-sparse>=0.4.5
 torch>=1.11
 pytest>=6.2.1
-numdifftools>=0.9.40
 pybind11>=2.7.1
 mock>=4.0.3
 Sphinx==5.0.2
4 changes: 2 additions & 2 deletions tests/core/test_cost_weight.py
@@ -26,7 +26,7 @@ def test_copy_scale_cost_weight():


 def test_copy_diagonal_cost_weight():
-    diagonal = th.Variable(torch.ones(3))
+    diagonal = th.Variable(torch.ones(1, 3))
     p1 = th.DiagonalCostWeight(diagonal, name="diagonal_cost_weight")
     for the_copy in [p1.copy(), copy.deepcopy(p1)]:
         check_another_theseus_function_is_copy(p1, the_copy, new_name=f"{p1.name}_copy")
@@ -89,9 +89,9 @@ def _check(cw):
         assert error.allclose(expected_err)
         assert jacobians[0].allclose(expected_jac)

+    diagonal = diagonal.unsqueeze(0)  # add batch dimension
     _check(th.DiagonalCostWeight(diagonal.tolist()))
     _check(th.DiagonalCostWeight(diagonal))
-    _check(th.DiagonalCostWeight(diagonal.view(1, dim)))
     _check(th.DiagonalCostWeight(th.Variable(diagonal)))

     batched_diagonal = torch.randn(batch_size, dim)
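The common thread in this and the following test changes is that `DiagonalCostWeight` now expects its diagonal to carry an explicit batch dimension. A short sketch of the convention as inferred from these tests (the shapes are the point; the names are illustrative):

```python
import torch
import theseus as th

dim = 3

# Batched diagonal: shape (batch_size, dim). A batch size of 1 gives a
# weight shared across the whole batch.
w_shared = th.DiagonalCostWeight(torch.ones(1, dim))

# The nested-list form follows the same convention, hence [[1.0]]
# rather than [1.0] for a one-dimensional diagonal.
w_scalar = th.DiagonalCostWeight([[1.0]])
```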
4 changes: 2 additions & 2 deletions tests/core/test_vectorizer.py
@@ -112,7 +112,7 @@ def test_correct_schemas_and_shared_vars():
     objective.add(cf6)

     # Not grouped with anything cf1 and cf2 because weight type is different
-    w7 = th.DiagonalCostWeight([1.0])
+    w7 = th.DiagonalCostWeight([[1.0]])
     cf7 = th.Difference(v1, tv, w7)
     objective.add(cf7)

@@ -178,7 +178,7 @@ def test_vectorized_error():
         for i in range(rng.choice([1, 10]))
     ]
     s_target = th.SE3.rand(1, generator=generator)
-    ws = th.DiagonalCostWeight(torch.randn(6, generator=generator))
+    ws = th.DiagonalCostWeight(torch.randn(1, 6, generator=generator))
     for s in se3s:
         objective.add(th.Difference(s, s_target, ws))

File renamed without changes.
2 changes: 1 addition & 1 deletion tests/embodied/collision/test_eff_obj_contact.py
@@ -86,7 +86,7 @@ def _create_sdf_data(sdf_idx=0):
     cell_size = 0.01
     origin_x, origin_y = 0.0, 0.0
     sdf_data = np.loadtxt(
-        open("theseus/embodied/collision/tests/sdf_data.csv", "rb"),
+        open("tests/embodied/collision/sdf_data.csv", "rb"),
         delimiter=",",
         skiprows=0,
     )
8 changes: 2 additions & 6 deletions tests/embodied/measurements/test_between.py
@@ -86,9 +86,7 @@ def test_error_between_so2():
     ).unsqueeze(1)

     num_val = len(so2_data)
-    between_errors = np.load(
-        "theseus/embodied/measurements/tests/between_errors_so2.npy"
-    )
+    between_errors = np.load("tests/embodied/measurements/between_errors_so2.npy")
     z = 0
     # between_errors was generated using all 3-combinations of the 5 SO2 above
     for i in range(num_val):
@@ -113,9 +111,7 @@ def test_error_between_se2():
         ]
     )
     num_val = len(se2_data)
-    between_errors = np.load(
-        "theseus/embodied/measurements/tests/between_errors_se2.npy"
-    )
+    between_errors = np.load("tests/embodied/measurements/between_errors_se2.npy")
     z = 0
     # between_errors was generated using all 3-combinations of the 5 SE2 above
     for i in range(num_val):
File renamed without changes.
File renamed without changes.
4 changes: 2 additions & 2 deletions tests/embodied/misc/test_variable_difference.py
@@ -79,7 +79,7 @@ def test_error_local_cost_fn_so2():
     ).unsqueeze(1)

     num_val = len(so2_data)
-    sq_dist_errors = np.load("theseus/embodied/misc/tests/sq_dist_errors_so2.npy")
+    sq_dist_errors = np.load("tests/embodied/misc/sq_dist_errors_so2.npy")
     k = 0
     # sq_dist_errors was generated using all 2-combinations of the 5 SO2 above
     for i in range(num_val):
@@ -102,7 +102,7 @@ def test_error_local_cost_fn_se2():
         ]
     )
     num_val = len(se2_data)
-    sq_dist_errors = np.load("theseus/embodied/misc/tests/sq_dist_errors_se2.npy")
+    sq_dist_errors = np.load("tests/embodied/misc/sq_dist_errors_se2.npy")
     k = 0
     # sq_dist_errors was generated using all 2-combinations of the 5 SE2 above
     for i in range(num_val):
27 changes: 12 additions & 15 deletions tests/optimizer/nonlinear/test_backwards.py
@@ -3,7 +3,6 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.

-import numdifftools as nd
 import pytest  # noqa: F401
 import torch

@@ -63,21 +62,19 @@


 def test_backwards():
-    # First we use numdifftools to numerically compute the gradient
+    # First we use torch.autograd.functional to numerically compute the gradient
     # the optimal a w.r.t. the x part of the data
-    def fit_x(data_x_np):
-        theseus_inputs["x"] = (
-            torch.from_numpy(data_x_np).float().clone().requires_grad_().unsqueeze(0)
-        )
-        updated_inputs, _ = theseus_optim.forward(
-            theseus_inputs,
-            optimizer_kwargs={"track_best_solution": True, "verbose": False},
-        )
-        return updated_inputs["a"].item()
-
-    data_x_np = data_x.detach().clone().numpy()
-    dfit_x = nd.Gradient(fit_x)
-    da_dx_numeric = torch.from_numpy(dfit_x(data_x_np)).float()
+    with torch.no_grad():
+
+        def fn(data_x_torch):
+            theseus_inputs["x"] = data_x_torch
+            updated_inputs, _ = theseus_optim.forward(
+                theseus_inputs,
+                optimizer_kwargs={"track_best_solution": True, "verbose": False},
+            )
+            return updated_inputs["a"]
+
+        da_dx_numeric = torch.autograd.functional.jacobian(fn, data_x.detach())[0, 0, 0]

     theseus_inputs["x"] = data_x
     updated_inputs, _ = theseus_optim.forward(
2 changes: 1 addition & 1 deletion tests/test_dlm_perturbation.py
@@ -93,7 +93,7 @@ def test_backward_pass_se3_runs():
     loss0 = None
     for _ in range(5):
         adam.zero_grad()
-        with th.enable_lie_tangent():
+        with th.enable_lie_tangent(), th.no_lie_group_check(silent=True):
             out, _ = layer.forward(
                 {"target": target_data},
                 optimizer_kwargs={
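This change presumably ties to the "fixed code causing warnings" item in the commit message: `th.no_lie_group_check(silent=True)` appears to disable Theseus's lie-group data checks inside the block and, with `silent=True`, to suppress the associated warning during the test (an inference from the names; the commit does not spell this out).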
2 changes: 1 addition & 1 deletion tests/test_theseus_layer.py
@@ -248,7 +248,7 @@ def _run_optimizer_test(
     # need to pass updated values through "objective.update". I'm doing it this way
     # to check that update works properly
     cost_weight = th.DiagonalCostWeight(
-        th.Variable(torch.empty(num_points), name="cost_weight_values")
+        th.Variable(torch.empty(1, num_points), name="cost_weight_values")
     )

     # Here we create the outer loop models and optimizers for the cost weight
2 changes: 1 addition & 1 deletion theseus/theseus_layer.py
@@ -117,7 +117,7 @@ def compute_samples(
             mean=torch.zeros((n_vars, n_samples), device=delta.device),
             std=torch.ones((n_vars, n_samples), device=delta.device),
         )
-        delta_samples = (torch.triangular_solve(y, sqrt_AtA).solution) + (
+        delta_samples = (torch.linalg.solve_triangular(sqrt_AtA, y, upper=True)) + (
             delta.unsqueeze(-1)
         ).repeat(1, 1, n_samples)

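For reference, the two triangular-solve APIs relate as follows. This is a standalone sketch of the PyTorch migration, not code from this repository: the argument order swaps (triangular matrix first, right-hand side second), `upper` must be passed explicitly, and the result is a plain tensor rather than a named tuple with a `.solution` field.

```python
import torch

# A well-conditioned upper-triangular system A @ X = B.
A = torch.eye(3) + torch.triu(torch.rand(3, 3))
B = torch.rand(3, 2)

# Deprecated:  X = torch.triangular_solve(B, A, upper=True).solution
# Replacement:
X = torch.linalg.solve_triangular(A, B, upper=True)

assert torch.allclose(A @ X, B, atol=1e-6)
```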
