From 63626f68fe1e3b622bee7c9a9a748bec9d009e41 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Fri, 14 May 2021 14:03:26 -0700 Subject: [PATCH 01/39] [refactor] Changed Factor to receive additional args and kwargs --- theseus/core/factor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/theseus/core/factor.py b/theseus/core/factor.py index 9f1b46578..4a2447322 100644 --- a/theseus/core/factor.py +++ b/theseus/core/factor.py @@ -1,6 +1,6 @@ import abc import copy -from typing import Dict, Iterator, List, Optional, Tuple +from typing import Any, Dict, Iterator, List, Optional, Tuple import torch @@ -19,8 +19,12 @@ def __init__( self, variables: List[Variable], precision: Precision, + *args: Any, name: Optional[str] = None, + **kwargs: Any, ): + # TODO: might as well change these so that vars can be passed as separate args + # but it still maintains an internal variables array self.variables = variables self.precision = precision if name: From 1b9554a80744d75cb1e263a5ab926c5c925e1f1a Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 17 May 2021 13:06:12 -0700 Subject: [PATCH 02/39] Added additional manifold methods to Variable.py --- theseus/core/tests/common.py | 16 ++++ theseus/core/tests/test_variable.py | 16 ++++ theseus/core/variable.py | 74 ++++++++++++++++++- theseus/geometry/vector.py | 29 ++++++-- .../tests/test_dense_linearization.py | 16 ++++ 5 files changed, 143 insertions(+), 8 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 272188ce5..e643acea3 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -21,6 +21,22 @@ def _local(variable1, variable2): def _retract(variable, delta): pass + @staticmethod + def _inverse(variable1): + pass + + @staticmethod + def _compose(variable1, variable2): + pass + + @staticmethod + def _inverse_jacobian(variable1): + pass + + @staticmethod + def _compose_jacobians(variable1, variable2): + pass + class MockPrecision(theseus.core.Precision): def __init__(self, the_data): diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index dc153e571..dbbcc6754 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -62,6 +62,22 @@ def _local(variable1, variable2): def _retract(variable, delta): pass + @staticmethod + def _inverse(variable1): + pass + + @staticmethod + def _compose(variable1, variable2): + pass + + @staticmethod + def _inverse_jacobian(variable1): + pass + + @staticmethod + def _compose_jacobians(variable1, variable2): + pass + def test_variable_no_args_init(): var = MockVarNoArgs(name="mock") diff --git a/theseus/core/variable.py b/theseus/core/variable.py index 4ee815ea2..ce0f47e34 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -1,10 +1,12 @@ import abc import copy -from typing import Any, Optional +from typing import Any, Optional, Tuple import torch +# TODO(mhmukadam) add docs/comments for +# _compose, _inverse, _inverse_jacobian, _compose_jacobians # Abstract class to represent Variables in the factor graph. # Concrete classes must implement the following methods: # - `_init_data`: initializes the underlying data tensor given constructor arguments @@ -42,6 +44,10 @@ def dim(self) -> int: def shape(self) -> torch.Size: return self.data.shape + # TODO(lep): is there a reason why we are keeping the static versions of + # these? 
The aliases below the class fulfill the same role but they would be
+    # easier to use
+
     def local(self, variable2: "Variable") -> torch.Tensor:
         return self.__class__._local(self, variable2)
 
@@ -58,6 +64,42 @@ def retract(self, delta: torch.Tensor) -> "Variable":
     def _retract(variable: "Variable", delta: torch.Tensor) -> "Variable":
         pass
 
+    def compose(self, variable2: "Variable") -> "Variable":
+        return self.__class__._compose(self, variable2)
+
+    @staticmethod
+    @abc.abstractmethod
+    def _compose(variable1: "Variable", variable2: "Variable") -> "Variable":
+        pass
+
+    def inverse(self) -> "Variable":
+        return self.__class__._inverse(self)
+
+    @staticmethod
+    @abc.abstractmethod
+    def _inverse(variable1: "Variable") -> "Variable":
+        pass
+
+    def inverse_jacobian(self) -> torch.Tensor:
+        return self.__class__._inverse_jacobian(self)
+
+    @staticmethod
+    @abc.abstractmethod
+    def _inverse_jacobian(variable1: "Variable") -> torch.Tensor:
+        pass
+
+    def compose_jacobians(
+        self, variable2: "Variable"
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        return self.__class__.compose_jacobians(self, variable2)
+
+    @staticmethod
+    @abc.abstractmethod
+    def _compose_jacobians(
+        variable1: "Variable", variable2: "Variable"
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        pass
+
     def copy(self, new_name: Optional[str] = None) -> "Variable":
         if not new_name:
             new_name = f"{self.name}_copy"
@@ -89,12 +131,38 @@ def update(
     def to(self, *args, **kwargs):
         self.data.to(*args, **kwargs)
 
+    @property
+    def device(self) -> torch.device:
+        return self.data.device
+
 
-# Alias for Variable._local()
+# Alias for Variable.local()
 def local(variable1: Variable, variable2: Variable) -> torch.Tensor:
     return variable1.local(variable2)
 
 
-# Alias for Variable._retract()
+# Alias for Variable.retract()
 def retract(variable: Variable, delta: torch.Tensor) -> Variable:
     return variable.retract(delta)
+
+
+# Alias for Variable.compose()
+def compose(variable1: Variable, variable2: Variable) -> Variable:
+    return variable1.compose(variable2)
+
+
+# Alias for Variable.inverse()
+def inverse(variable1: Variable) -> Variable:
+    return variable1.inverse()
+
+
+# Alias for Variable.inverse_jacobian()
+def inverse_jacobian(variable1: Variable) -> torch.Tensor:
+    return variable1.inverse_jacobian()
+
+
+# Alias for Variable.compose_jacobian()
+def compose_jacobian(
+    variable1, variable2: Variable
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    return variable1.compose_jacobian(variable2)
diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py
index 0fa67a030..830bd50df 100644
--- a/theseus/geometry/vector.py
+++ b/theseus/geometry/vector.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union, cast
 
 import torch
 
@@ -66,15 +66,13 @@ def __ge__(self, other: "Vector") -> bool:
         return bool(torch.all(self.data >= other.data))
 
     def __add__(self, other: "Vector") -> "Vector":
-        return Vector(self.data.shape[1], data=torch.add(self.data, other.data))
-
-    compose = __add__
+        return cast(Vector, Vector._compose(self, other))
 
     def __sub__(self, other: "Vector") -> "Vector":
         return Vector(self.data.shape[1], data=torch.sub(self.data, other.data))
 
     def __neg__(self) -> "Vector":
-        return Vector(self.data.shape[1], data=-self.data)
+        return cast(Vector, Vector._inverse(self))
 
     def __mul__(self, other: Union["Vector", torch.Tensor]) -> "Vector":
         if isinstance(other, Vector):
@@ -151,5 +149,26 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector":
         else:
             return Vector(vec.data.shape[1], 
data=torch.add(vec.data, delta)) + @staticmethod + def _compose(vec1: "Variable", vec2: "Variable") -> "Variable": + return Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) + + @staticmethod + def _inverse(vec1: "Variable") -> "Variable": + return Vector(vec1.data.shape[1], data=-vec1.data) + + @staticmethod + def _inverse_jacobian(vec1: "Variable") -> torch.Tensor: + return -torch.eye(vec1.dim()).to(vec1.device) + + @staticmethod + def _compose_jacobians( + vec1: "Variable", vec2: "Variable" + ) -> Tuple[torch.Tensor, torch.Tensor]: + return ( + torch.eye(vec1.dim()).to(vec1.device), + torch.eye(vec2.dim()).to(vec2.device), + ) + def __hash__(self): return id(self) diff --git a/theseus/optimizer/tests/test_dense_linearization.py b/theseus/optimizer/tests/test_dense_linearization.py index dd61b287f..7ba837ac2 100644 --- a/theseus/optimizer/tests/test_dense_linearization.py +++ b/theseus/optimizer/tests/test_dense_linearization.py @@ -21,6 +21,22 @@ def _local(variable1, variable2): def _retract(variable, delta): return variable + @staticmethod + def _inverse(variable1): + pass + + @staticmethod + def _compose(variable1, variable2): + pass + + @staticmethod + def _inverse_jacobian(variable1): + pass + + @staticmethod + def _compose_jacobians(variable1, variable2): + pass + class MockFactor(thcore.Factor): def __init__(self, variables, precision, dim, name=None): From aa4bd37a1ded3c0453c4d6ea5b8535dcca32e103 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 17 May 2021 13:10:37 -0700 Subject: [PATCH 03/39] Added a GaussianVariable (prior) factor and a Between factor. Not tested yet. --- theseus/embodied/measurements/between.py | 35 ++++++++++++++++++++++ theseus/embodied/misc/gaussian_variable.py | 32 ++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 theseus/embodied/measurements/between.py create mode 100644 theseus/embodied/misc/gaussian_variable.py diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py new file mode 100644 index 000000000..e60fe1cb7 --- /dev/null +++ b/theseus/embodied/measurements/between.py @@ -0,0 +1,35 @@ +from typing import List, Optional + +import torch + +from theseus.core import Factor, Precision, Variable + + +class Between(Factor): + def __init__( + self, + variables: List[Variable], + precision: Precision, + measurement: Variable, + name: Optional[str], + ): + if len(variables) != 2: + raise ValueError("Between factor accepts only two variables.") + if not isinstance(variables[0], variables[1].__class__) or not isinstance( + variables[0], measurement.__class__ + ): + raise ValueError("Inconsistent types between variables and measurement.") + super().__init__(variables, precision, name=name) + self._measurement = measurement + + def error(self) -> torch.Tensor: + var_diff = self.variables[0].inverse().compose(self.variables[1]) + return self._measurement.local(var_diff) + + def jacobians(self) -> List[torch.Tensor]: + Hinv = self.variables[0].inverse_jacobian() + Hcmp1, Hcmp2 = self.variables[0].inverse().compose_jacobians(self.variables[1]) + return [torch.matmul(Hinv, Hinv), Hcmp2] + + def dim(self) -> int: + return self.variables[0].dim() diff --git a/theseus/embodied/misc/gaussian_variable.py b/theseus/embodied/misc/gaussian_variable.py new file mode 100644 index 000000000..1145646d2 --- /dev/null +++ b/theseus/embodied/misc/gaussian_variable.py @@ -0,0 +1,32 @@ +from typing import List, Optional + +import torch + +from theseus.core import Factor, Precision, Variable + + 
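+# A GaussianVariable is a unary prior factor: its error is prior.local(v),
+# the tangent-space difference between the prior and the variable (for
+# Vector variables this is just v - prior), and its jacobian is identity.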
+class GaussianVariable(Factor): + def __init__( + self, + variables: List[Variable], + precision: Precision, + prior: Variable, + name: Optional[str], + ): + if len(variables) != 1: + raise ValueError("GaussianVariable factor accepts only a single variable.") + if type(variables[0]) != type(prior): + raise ValueError( + "Prior variable type inconsistent with variables for the graph." + ) + super().__init__(variables, precision, name=name) + self._prior = prior + + def error(self) -> torch.Tensor: + return self._prior.local(self.variables[0]) + + def jacobians(self) -> List[torch.Tensor]: + return [torch.eye(self.dim()).to(self.variables[0].device)] + + def dim(self) -> int: + return self.variables[0].dim() From 0f6ed5a84e0da8818c20a8199eed2266ab57a1c0 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Tue, 25 May 2021 09:02:37 -0700 Subject: [PATCH 04/39] Refactored compose() and inverse() in class Variable so that they can optionally return the jacobians. Removed the jacobians versions of these methods. --- theseus/core/tests/common.py | 12 +--- theseus/core/tests/test_variable.py | 12 +--- theseus/core/variable.py | 66 +++++++------------ theseus/embodied/measurements/between.py | 8 +-- theseus/geometry/tests/test_vector.py | 2 +- theseus/geometry/vector.py | 41 ++++++------ .../tests/test_dense_linearization.py | 12 +--- 7 files changed, 56 insertions(+), 97 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index e643acea3..002289f76 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -22,19 +22,11 @@ def _retract(variable, delta): pass @staticmethod - def _inverse(variable1): + def _inverse(variable1, get_jacobian=False): pass @staticmethod - def _compose(variable1, variable2): - pass - - @staticmethod - def _inverse_jacobian(variable1): - pass - - @staticmethod - def _compose_jacobians(variable1, variable2): + def _compose(variable1, variable2, get_jacobians=False): pass diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index dbbcc6754..a7e1dc5ba 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -63,19 +63,11 @@ def _retract(variable, delta): pass @staticmethod - def _inverse(variable1): + def _inverse(variable1, get_jacobian=False): pass @staticmethod - def _compose(variable1, variable2): - pass - - @staticmethod - def _inverse_jacobian(variable1): - pass - - @staticmethod - def _compose_jacobians(variable1, variable2): + def _compose(variable1, variable2, get_jacobians=False): pass diff --git a/theseus/core/variable.py b/theseus/core/variable.py index ce0f47e34..ed6701e7d 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -6,7 +6,7 @@ # TODO(mhmukadam) add docs/comments for -# _compose, _inverse, _inverse_jacobian, _compose_jacobians +# _compose, _inverse # Abstract class to represent Variables in the factor graph. 
# Concrete classes must implement the following methods: # - `_init_data`: initializes the underlying data tensor given constructor arguments @@ -64,40 +64,28 @@ def retract(self, delta: torch.Tensor) -> "Variable": def _retract(variable: "Variable", delta: torch.Tensor) -> "Variable": pass - def compose(self, variable2: "Variable") -> "Variable": - return self.__class__._compose(self, variable2) + def compose( + self, variable2: "Variable", get_jacobians: bool = False + ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + return self.__class__._compose(self, variable2, get_jacobians=get_jacobians) @staticmethod @abc.abstractmethod - def _compose(variable1: "Variable", variable2: "Variable") -> "Variable": + def _compose( + variable1: "Variable", variable2: "Variable", get_jacobians: bool = False + ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: pass - def inverse(self) -> "Variable": - return self.__class__._inverse(self) + def inverse( + self, get_jacobian: bool = False + ) -> Tuple["Variable", Optional[torch.Tensor]]: + return self.__class__._inverse(self, get_jacobian=get_jacobian) @staticmethod @abc.abstractmethod - def _inverse(variable1: "Variable") -> "Variable": - pass - - def inverse_jacobian(self) -> torch.Tensor: - return self.__class__._inverse_jacobian(self) - - @staticmethod - @abc.abstractmethod - def _inverse_jacobian(variable1: "Variable") -> torch.Tensor: - pass - - def compose_jacobians( - self, variable2: "Variable" - ) -> Tuple[torch.Tensor, torch.Tensor]: - return self.__class__.compose_jacobians(self, variable2) - - @staticmethod - @abc.abstractmethod - def _compose_jacobians( - variable1: "Variable", variable2: "Variable" - ) -> Tuple[torch.Tensor, torch.Tensor]: + def _inverse( + variable1: "Variable", get_jacobian: bool = False + ) -> Tuple["Variable", Optional[torch.Tensor]]: pass def copy(self, new_name: Optional[str] = None) -> "Variable": @@ -147,22 +135,14 @@ def retract(variable: Variable, delta: torch.Tensor) -> Variable: # Alias for Variable.compose() -def compose(variable1: Variable, variable2: Variable) -> Variable: - return variable1.compose(variable2) +def compose( + variable1: Variable, variable2: Variable, get_jacobians: bool = False +) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + return variable1.compose(variable2, get_jacobians=get_jacobians) # Alias for Variable.inverse() -def inverse(variable1: Variable) -> Variable: - return variable1.inverse() - - -# Alias for Variable.inverse_jacobian() -def inverse_jacobian(variable1: Variable) -> torch.Tensor: - return variable1.inverse_jacobian() - - -# Alias for Variable.compose_jacobian() -def compose_jacobian( - variable1, variable2: Variable -) -> Tuple[torch.Tensor, torch.Tensor]: - return variable1.compose_jacobian(variable2) +def inverse( + variable1: Variable, get_jacobian: bool = False +) -> Tuple[Variable, Optional[torch.Tensor]]: + return variable1.inverse(get_jacobian=get_jacobian) diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py index e60fe1cb7..ade99027c 100644 --- a/theseus/embodied/measurements/between.py +++ b/theseus/embodied/measurements/between.py @@ -23,13 +23,13 @@ def __init__( self._measurement = measurement def error(self) -> torch.Tensor: - var_diff = self.variables[0].inverse().compose(self.variables[1]) + var_diff, _ = self.variables[0].inverse()[0].compose(self.variables[1]) return self._measurement.local(var_diff) def jacobians(self) -> List[torch.Tensor]: - Hinv = 
self.variables[0].inverse_jacobian() - Hcmp1, Hcmp2 = self.variables[0].inverse().compose_jacobians(self.variables[1]) - return [torch.matmul(Hinv, Hinv), Hcmp2] + v0_inverse, Hinv = self.variables[0].inverse(get_jacobian=True) + Hcmp1, Hcmp2 = v0_inverse.compose(self.variables[1], get_jacobians=True)[1] + return [torch.matmul(Hcmp1, Hinv), Hcmp2] def dim(self) -> int: return self.variables[0].dim() diff --git a/theseus/geometry/tests/test_vector.py b/theseus/geometry/tests/test_vector.py index 0a78f6790..34cf6bff5 100644 --- a/theseus/geometry/tests/test_vector.py +++ b/theseus/geometry/tests/test_vector.py @@ -50,7 +50,7 @@ def test_add(): v2 = Vector(j, data=t2.clone(), name="v2") vsum = Vector(j, data=t1 + t2) assert v1 + v2 == vsum - assert v1.compose(v2) == vsum + assert v1.compose(v2)[0] == vsum def test_sub(): diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 830bd50df..8ec79ef66 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -66,13 +66,13 @@ def __ge__(self, other: "Vector") -> bool: return bool(torch.all(self.data >= other.data)) def __add__(self, other: "Vector") -> "Vector": - return cast(Vector, Vector._compose(self, other)) + return cast(Vector, Vector._compose(self, other)[0]) def __sub__(self, other: "Vector") -> "Vector": return Vector(self.data.shape[1], data=torch.sub(self.data, other.data)) def __neg__(self) -> "Vector": - return cast(Vector, Vector._inverse(self)) + return cast(Vector, Vector._inverse(self)[0]) def __mul__(self, other: Union["Vector", torch.Tensor]) -> "Vector": if isinstance(other, Vector): @@ -150,25 +150,28 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": return Vector(vec.data.shape[1], data=torch.add(vec.data, delta)) @staticmethod - def _compose(vec1: "Variable", vec2: "Variable") -> "Variable": - return Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) - - @staticmethod - def _inverse(vec1: "Variable") -> "Variable": - return Vector(vec1.data.shape[1], data=-vec1.data) - - @staticmethod - def _inverse_jacobian(vec1: "Variable") -> torch.Tensor: - return -torch.eye(vec1.dim()).to(vec1.device) + def _compose( + vec1: "Variable", vec2: "Variable", get_jacobians: bool = False + ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + result = Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) + if get_jacobians: + jacobians = ( + torch.eye(vec1.dim()).to(vec1.device), + torch.eye(vec2.dim()).to(vec2.device), + ) + return result, jacobians + else: + return result, None @staticmethod - def _compose_jacobians( - vec1: "Variable", vec2: "Variable" - ) -> Tuple[torch.Tensor, torch.Tensor]: - return ( - torch.eye(vec1.dim()).to(vec1.device), - torch.eye(vec2.dim()).to(vec2.device), - ) + def _inverse( + vec1: "Variable", get_jacobian: bool = False + ) -> Tuple["Variable", Optional[torch.Tensor]]: + inverse = Vector(vec1.data.shape[1], data=-vec1.data) + if get_jacobian: + return inverse, -torch.eye(vec1.dim()).to(vec1.device) + else: + return inverse, None def __hash__(self): return id(self) diff --git a/theseus/optimizer/tests/test_dense_linearization.py b/theseus/optimizer/tests/test_dense_linearization.py index 7ba837ac2..83bd3ebb8 100644 --- a/theseus/optimizer/tests/test_dense_linearization.py +++ b/theseus/optimizer/tests/test_dense_linearization.py @@ -22,19 +22,11 @@ def _retract(variable, delta): return variable @staticmethod - def _inverse(variable1): + def _inverse(variable1, get_jacobian=False): pass @staticmethod - def 
_compose(variable1, variable2): - pass - - @staticmethod - def _inverse_jacobian(variable1): - pass - - @staticmethod - def _compose_jacobians(variable1, variable2): + def _compose(variable1, variable2, get_jacobians=False): pass From 5f095e1c1c92a1edeb2f28aef6b3d2976519f51d Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Tue, 25 May 2021 21:48:01 -0700 Subject: [PATCH 05/39] Added abstract log_map and exp_map methods to Variable --- theseus/core/tests/common.py | 8 ++++++ theseus/core/tests/test_variable.py | 8 ++++++ theseus/core/variable.py | 24 ++++++++++++++++- theseus/geometry/vector.py | 27 ++++++++++++++----- .../tests/test_dense_linearization.py | 8 ++++++ 5 files changed, 67 insertions(+), 8 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 002289f76..1257574ae 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -29,6 +29,14 @@ def _inverse(variable1, get_jacobian=False): def _compose(variable1, variable2, get_jacobians=False): pass + @staticmethod + def exp_map(tangent_vector): + pass + + @staticmethod + def _log_map(variable1): + pass + class MockPrecision(theseus.core.Precision): def __init__(self, the_data): diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index a7e1dc5ba..2378c092e 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -70,6 +70,14 @@ def _inverse(variable1, get_jacobian=False): def _compose(variable1, variable2, get_jacobians=False): pass + @staticmethod + def exp_map(tangent_vector): + pass + + @staticmethod + def _log_map(variable1): + pass + def test_variable_no_args_init(): var = MockVarNoArgs(name="mock") diff --git a/theseus/core/variable.py b/theseus/core/variable.py index ed6701e7d..47bbea57f 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -51,6 +51,19 @@ def shape(self) -> torch.Size: def local(self, variable2: "Variable") -> torch.Tensor: return self.__class__._local(self, variable2) + @staticmethod + @abc.abstractmethod + def _log_map(variable: "Variable") -> torch.Tensor: + pass + + def log_map(self) -> torch.Tensor: + return self.__class__._log_map(self) + + @staticmethod + @abc.abstractmethod + def exp_map(tangent_vector: torch.Tensor) -> "Variable": + pass + @staticmethod @abc.abstractmethod def _local(variable1: "Variable", variable2: "Variable") -> torch.Tensor: @@ -103,7 +116,6 @@ def __deepcopy__(self, memo): memo[id(self)] = the_copy return the_copy - # data shape must be batch_size x var.dim() def update( self, data: torch.Tensor, ignore_batch_indices: Optional[torch.Tensor] = None ): @@ -146,3 +158,13 @@ def inverse( variable1: Variable, get_jacobian: bool = False ) -> Tuple[Variable, Optional[torch.Tensor]]: return variable1.inverse(get_jacobian=get_jacobian) + + +# Alias for Variable.log_map() +def log_map(variable: Variable) -> torch.Tensor: + return variable.log_map() + + +# Alias for Variable.exp_map() +def exp_map(variable: Variable, tangent_vector: torch.Tensor) -> Variable: + return variable.__class__.exp_map(tangent_vector) diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 8ec79ef66..a12616d6a 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -151,13 +151,14 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": @staticmethod def _compose( - vec1: "Variable", vec2: "Variable", get_jacobians: bool = False - ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + vec1: Variable, vec2: 
Variable, get_jacobians: bool = False + ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: result = Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) if get_jacobians: + batch_size = vec1.data.shape[0] jacobians = ( - torch.eye(vec1.dim()).to(vec1.device), - torch.eye(vec2.dim()).to(vec2.device), + torch.eye(vec1.dim()).repeat(batch_size, 1, 1).to(vec1.device), + torch.eye(vec2.dim()).repeat(batch_size, 1, 1).to(vec2.device), ) return result, jacobians else: @@ -165,13 +166,25 @@ def _compose( @staticmethod def _inverse( - vec1: "Variable", get_jacobian: bool = False - ) -> Tuple["Variable", Optional[torch.Tensor]]: + vec1: Variable, get_jacobian: bool = False + ) -> Tuple[Variable, Optional[torch.Tensor]]: inverse = Vector(vec1.data.shape[1], data=-vec1.data) if get_jacobian: - return inverse, -torch.eye(vec1.dim()).to(vec1.device) + batch_size = vec1.data.shape[0] + return inverse, -torch.eye(vec1.dim()).repeat(batch_size, 1, 1).to( + vec1.device + ) else: return inverse, None + # TODO: Define these for Vector (or remove from Variable if they are not general enough) + @staticmethod + def exp_map(tangent_vector): + pass + + @staticmethod + def _log_map(variable1): + pass + def __hash__(self): return id(self) diff --git a/theseus/optimizer/tests/test_dense_linearization.py b/theseus/optimizer/tests/test_dense_linearization.py index 83bd3ebb8..9be510a9d 100644 --- a/theseus/optimizer/tests/test_dense_linearization.py +++ b/theseus/optimizer/tests/test_dense_linearization.py @@ -29,6 +29,14 @@ def _inverse(variable1, get_jacobian=False): def _compose(variable1, variable2, get_jacobians=False): pass + @staticmethod + def exp_map(tangent_vector): + pass + + @staticmethod + def _log_map(variable1): + pass + class MockFactor(thcore.Factor): def __init__(self, variables, precision, dim, name=None): From 8bc81df9e5284cc0ae38d25afd7b5cb77c2b93a7 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Tue, 25 May 2021 21:50:49 -0700 Subject: [PATCH 06/39] Created a class for 2D rotations. Not yet tested. 
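
A usage sketch of the intended API (illustrative only: the class is untested
at this point, and Rot2D is not yet re-exported from theseus.geometry, hence
the direct module import):

    import math

    import torch

    from theseus.geometry.rot2 import Rot2D

    rot = Rot2D.exp_map(torch.tensor([[math.pi / 4]]))  # stores (cos, sin)
    theta = rot.log_map()  # recovers the angle (as a 1-D tensor here)
    double_rot, _ = rot.compose(rot)  # compose() returns (result, jacobians)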
--- theseus/geometry/rot2.py | 80 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 theseus/geometry/rot2.py diff --git a/theseus/geometry/rot2.py b/theseus/geometry/rot2.py new file mode 100644 index 000000000..c38efd990 --- /dev/null +++ b/theseus/geometry/rot2.py @@ -0,0 +1,80 @@ +from typing import Optional, Tuple, cast + +import torch + +from theseus.core import Variable + + +class Rot2D(Variable): + def __init__( + self, + data: Optional[torch.Tensor] = None, + name: Optional[str] = None, + ): + super().__init__(data=data, name=name) + + def _init_data(self): + return torch.empty(1, 2) # cos and sin + + def update_from_angle(self, theta: torch.Tensor): + self.update(torch.cat([theta.cos(), theta.sin()], dim=1)) + + def dim(self) -> int: + return 1 + + @staticmethod + def _log_map(variable: Variable) -> torch.Tensor: + rot = cast(Rot2D, variable) + return torch.atan2(rot.data[:, 1], rot.data[:, 0]) + + @staticmethod + def exp_map(tangent_vector: torch.Tensor) -> Variable: + rot = Rot2D() + rot.update_from_angle(tangent_vector) + return rot + + @staticmethod + def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: + return variable1.inverse()[0].compose(variable2)[0].log_map() + + @staticmethod + def _retract(variable: Variable, delta: torch.Tensor) -> Variable: + return variable.compose(Rot2D.exp_map(delta))[0] + + @staticmethod + def _compose( + rot_1: Variable, rot_2: Variable, get_jacobians: bool = False + ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + cos_1, sin_1 = rot_1.data[:, :1], rot_1.data[:, 1:] + cos_2, sin_2 = rot_2.data[:, :1], rot_2.data[:, 1:] + new_cos = cos_1 * cos_2 - sin_1 * sin_2 + new_sin = sin_1 * cos_2 + cos_1 * sin_2 + composition = Rot2D(data=torch.cat([new_cos, new_sin], dim=1)) + if get_jacobians: + batch_size = rot_1.data.shape[0] + + def _build_jac(other_cos, other_sin): + jac = torch.empty(batch_size, 2, 2) + jac[:, 0, :] = torch.cat([other_cos, -other_sin], dim=1) + jac[:, 1, :] = torch.cat([other_sin, other_cos], dim=1) + return jac.to(rot_1.device) + + return composition, (_build_jac(cos_2, sin_2), _build_jac(cos_1, sin_1)) + else: + return composition, None + + @staticmethod + def _inverse( + rot_1: Variable, get_jacobian: bool = False + ) -> Tuple[Variable, Optional[torch.Tensor]]: + inverse = Rot2D(data=torch.cat([rot_1.data[:, 0], -rot_1.data[:, 1]], dim=1)) + if get_jacobian: + batch_size = rot_1.data.shape[0] + jac = torch.eye(2) + jac[1, 1] = -1 + return ( + inverse, + jac.repeat(batch_size, 1, 1).to(rot_1.device), + ) + else: + return inverse, None From 6e26e802b9dc2ea3d40aaebd807cbeb714734081 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 26 May 2021 08:08:19 -0700 Subject: [PATCH 07/39] Added a LieGroup subclass of Variable that implements local and retract and jacobians for all lie group types --- theseus/core/variable.py | 28 +++++++++++-- theseus/geometry/__init__.py | 3 +- theseus/geometry/lie_group.py | 76 +++++++++++++++++++++++++++++++++++ theseus/geometry/rot2.py | 50 +++++++++++------------ theseus/geometry/vector.py | 4 ++ 5 files changed, 130 insertions(+), 31 deletions(-) create mode 100644 theseus/geometry/lie_group.py diff --git a/theseus/core/variable.py b/theseus/core/variable.py index 47bbea57f..ed6808516 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -5,13 +5,19 @@ import torch +# TODO maybe a lot of these methods should only appear in LieGroup subclass # TODO(mhmukadam) add docs/comments for -# _compose, _inverse 
+# _compose, _inverse, _adjoint, _log_map, _exp_map # Abstract class to represent Variables in the factor graph. # Concrete classes must implement the following methods: # - `_init_data`: initializes the underlying data tensor given constructor arguments # -`_local`: given two close Variable gives distance in tangent space # - `_retract`: returns Variable close by delta to given Variable +# - `exp_map` +# - `_log_map` +# - `_adjoint` +# _ `_compose` +# _ `_inverse` # - `update`: replaces the data tensor with the provided one (and checks that # the provided has the right format). # - `dim`: # of degrees of freedom of the variable @@ -41,6 +47,7 @@ def dim(self) -> int: # Note that shape returns the shape of the underlying data tensor, # which also includes a batch dimension as the first dimension. + @property def shape(self) -> torch.Size: return self.data.shape @@ -48,9 +55,6 @@ def shape(self) -> torch.Size: # these? The alias below the class fulfill the same role but they would be # easier to use - def local(self, variable2: "Variable") -> torch.Tensor: - return self.__class__._local(self, variable2) - @staticmethod @abc.abstractmethod def _log_map(variable: "Variable") -> torch.Tensor: @@ -69,6 +73,17 @@ def exp_map(tangent_vector: torch.Tensor) -> "Variable": def _local(variable1: "Variable", variable2: "Variable") -> torch.Tensor: pass + def local(self, variable2: "Variable") -> torch.Tensor: + return self.__class__._local(self, variable2) + + @staticmethod + @abc.abstractmethod + def _adjoint(variable: "Variable") -> torch.Tensor: + pass + + def adjoint(self) -> torch.Tensor: + return self.__class__._adjoint(self) + def retract(self, delta: torch.Tensor) -> "Variable": return self.__class__._retract(self, delta) @@ -141,6 +156,11 @@ def local(variable1: Variable, variable2: Variable) -> torch.Tensor: return variable1.local(variable2) +# Alias for Variable.adjoint() +def adjoint(variable: Variable) -> torch.Tensor: + return variable.adjoint() + + # Alias for Variable.retract() def retract(variable: Variable, delta: torch.Tensor) -> Variable: return variable.retract(delta) diff --git a/theseus/geometry/__init__.py b/theseus/geometry/__init__.py index b0d6eb9ee..71c0f0ea7 100644 --- a/theseus/geometry/__init__.py +++ b/theseus/geometry/__init__.py @@ -1,3 +1,4 @@ +from .lie_group import LieGroup from .vector import Vector -__all__ = ["Vector"] +__all__ = ["LieGroup", "Vector"] diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py new file mode 100644 index 000000000..9e09fa148 --- /dev/null +++ b/theseus/geometry/lie_group.py @@ -0,0 +1,76 @@ +import abc +from typing import Any, Optional, Tuple + +import torch + +from theseus.core.variable import Variable + + +class LieGroup(Variable): + def __init__( + self, + *args: Any, + data: Optional[torch.Tensor] = None, + name: Optional[str] = None, + ): + super().__init__(*args, data=data, name=name) + + @abc.abstractmethod + def _init_data(self, *args: Any): + pass + + @abc.abstractmethod + def dim(self) -> int: + pass + + @staticmethod + @abc.abstractmethod + def _log_map(variable: "Variable") -> torch.Tensor: + pass + + @staticmethod + @abc.abstractmethod + def exp_map(tangent_vector: torch.Tensor) -> "Variable": + pass + + @staticmethod + @abc.abstractmethod + def _adjoint(variable: "Variable") -> torch.Tensor: + pass + + @staticmethod + @abc.abstractmethod + def _compose( + variable1: "Variable", variable2: "Variable", get_jacobians: bool = False + ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + 
pass + + @staticmethod + @abc.abstractmethod + def _inverse( + variable1: "Variable", get_jacobian: bool = False + ) -> Tuple["Variable", Optional[torch.Tensor]]: + pass + + @staticmethod + def _compose_jacobians( + group1: "LieGroup", group2: "LieGroup" + ) -> Tuple[torch.Tensor, torch.Tensor]: + if not type(group1) is type(group2): + raise ValueError("Lie groups for compose must be of the same type.") + jac1 = group1.inverse()[0].adjoint() + jac2 = torch.eye(group2.dim()).repeat(group2.shape[0], 1, 1).to(group2.device) + return jac1, jac2 + + @staticmethod + def _inverse_jacobian(group1: "LieGroup") -> torch.Tensor: + # TODO as it's done in minisam. Is (-) supposed to be a negation or an inverse() operator? + return -group1.adjoint() + + @staticmethod + def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: + return variable1.inverse()[0].compose(variable2)[0].log_map() + + @staticmethod + def _retract(variable: Variable, delta: torch.Tensor) -> Variable: + return variable.compose(variable.exp_map(delta))[0] diff --git a/theseus/geometry/rot2.py b/theseus/geometry/rot2.py index c38efd990..12dfc83cf 100644 --- a/theseus/geometry/rot2.py +++ b/theseus/geometry/rot2.py @@ -2,10 +2,13 @@ import torch -from theseus.core import Variable +from theseus.core.variable import Variable +from .lie_group import LieGroup +from .vector import Vector -class Rot2D(Variable): + +class Rot2D(LieGroup): def __init__( self, data: Optional[torch.Tensor] = None, @@ -27,20 +30,18 @@ def _log_map(variable: Variable) -> torch.Tensor: rot = cast(Rot2D, variable) return torch.atan2(rot.data[:, 1], rot.data[:, 0]) + @staticmethod + def _adjoint(variable: Variable) -> torch.Tensor: + return torch.ones(variable.shape[0], 1).to(variable.device) + @staticmethod def exp_map(tangent_vector: torch.Tensor) -> Variable: rot = Rot2D() rot.update_from_angle(tangent_vector) return rot - @staticmethod - def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: - return variable1.inverse()[0].compose(variable2)[0].log_map() - - @staticmethod - def _retract(variable: Variable, delta: torch.Tensor) -> Variable: - return variable.compose(Rot2D.exp_map(delta))[0] - + # TODO start here, there derivatives are with respect to theta, not sin/cos + # need to use adjoint map @staticmethod def _compose( rot_1: Variable, rot_2: Variable, get_jacobians: bool = False @@ -51,15 +52,10 @@ def _compose( new_sin = sin_1 * cos_2 + cos_1 * sin_2 composition = Rot2D(data=torch.cat([new_cos, new_sin], dim=1)) if get_jacobians: - batch_size = rot_1.data.shape[0] - - def _build_jac(other_cos, other_sin): - jac = torch.empty(batch_size, 2, 2) - jac[:, 0, :] = torch.cat([other_cos, -other_sin], dim=1) - jac[:, 1, :] = torch.cat([other_sin, other_cos], dim=1) - return jac.to(rot_1.device) - - return composition, (_build_jac(cos_2, sin_2), _build_jac(cos_1, sin_1)) + jac1, jac2 = Rot2D._compose_jacobians( + cast(LieGroup, rot_1), cast(LieGroup, rot_2) + ) + return composition, (jac1, jac2) else: return composition, None @@ -69,12 +65,14 @@ def _inverse( ) -> Tuple[Variable, Optional[torch.Tensor]]: inverse = Rot2D(data=torch.cat([rot_1.data[:, 0], -rot_1.data[:, 1]], dim=1)) if get_jacobian: - batch_size = rot_1.data.shape[0] - jac = torch.eye(2) - jac[1, 1] = -1 - return ( - inverse, - jac.repeat(batch_size, 1, 1).to(rot_1.device), - ) + return inverse, Rot2D._inverse_jacobian(cast(Rot2D, rot_1)) else: return inverse, None + + def rotate(self, point: Vector) -> Vector: + px, py = point.data[:, :1], point.data[:, 1:] + cos, sin = 
self.data[:, :1], self.data[:, 1:] + new_point_data = torch.empty_like(point.data) + new_point_data[:, 0] = cos * px - sin * py + new_point_data[:, 1] = sin * px + cos * py + return Vector(2, data=new_point_data) diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index a12616d6a..e4b1af821 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -149,6 +149,10 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": else: return Vector(vec.data.shape[1], data=torch.add(vec.data, delta)) + @staticmethod + def _adjoint(variable: Variable) -> torch.Tensor: + return NotImplemented + @staticmethod def _compose( vec1: Variable, vec2: Variable, get_jacobians: bool = False From 4ec7b82e94cc056feb1cf3aabbff69d514e2db7b Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 26 May 2021 09:49:10 -0700 Subject: [PATCH 08/39] Implemented initial version of SE(2) group. Not yet tested. --- theseus/constants.py | 1 + theseus/geometry/pose2.py | 127 ++++++++++++++++++++++++++++++++++++++ theseus/geometry/rot2.py | 41 ++++++++---- 3 files changed, 157 insertions(+), 12 deletions(-) create mode 100644 theseus/constants.py create mode 100644 theseus/geometry/pose2.py diff --git a/theseus/constants.py b/theseus/constants.py new file mode 100644 index 000000000..dc9b0eec2 --- /dev/null +++ b/theseus/constants.py @@ -0,0 +1 @@ +EPS = 1e-12 diff --git a/theseus/geometry/pose2.py b/theseus/geometry/pose2.py new file mode 100644 index 000000000..bf0110786 --- /dev/null +++ b/theseus/geometry/pose2.py @@ -0,0 +1,127 @@ +from typing import Optional, Tuple, cast + +import torch + +import theseus.constants +from theseus.core import Variable + +from .lie_group import LieGroup +from .rot2 import Rot2D +from .vector import Vector + + +class Pose2D(LieGroup): + def __init__( + self, + data: Optional[torch.Tensor] = None, + name: Optional[str] = None, + ): + super().__init__(data=data, name=name) + self.translation = Vector(2, data=self.data[:, :2]) + self.rotation = Rot2D(data=self.data[:, 2:]) + + def _init_data(self): + return torch.empty(1, 4) # x, y, cos and sin + + def dim(self) -> int: + return 3 + + def update_from_rot_and_trans(self, rotation: Rot2D, translation: Vector): + batch_size = rotation.shape[0] + self.data = torch.empty(batch_size, 4, device=rotation.device) + self.data[:, :2] = translation.data + cos, sin = rotation.to_cos_sin() + self.data[:, 2] = cos + self.data[:, 3] = sin + + # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L160 + @staticmethod + def _log_map(variable: Variable) -> torch.Tensor: + pose = cast(Pose2D, variable) + theta = pose.rotation.log_map() + half_theta = 0.5 * theta + + cos, sin = pose.rotation.to_cos_sin() + cos_minus_one = cos - 1 + halftheta_by_tan_of_halftheta = -(half_theta * sin) / cos_minus_one + + # Same as above three lines but for small values + idx_small_vals = cos_minus_one.abs() < theseus.constants.EPS + theta_sq_at_idx = theta[idx_small_vals] ** 2 + halftheta_by_tan_of_halftheta[idx_small_vals] = -theta_sq_at_idx / 12 + 1 + + v_inv = torch.empty(pose.shape[0], 2, 2) + v_inv[:, 0, 0] = halftheta_by_tan_of_halftheta + v_inv[:, 0, 1] = half_theta + v_inv[:, 1, 0] = -half_theta + v_inv[:, 1, 1] = halftheta_by_tan_of_halftheta + tangent_translation = torch.matmul(v_inv, pose.translation.data.unsqueeze(-1)) + + return torch.cat([tangent_translation.squeeze(), theta], dim=1) + + # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L558 + @staticmethod + def exp_map(tangent_vector: 
torch.Tensor) -> Variable:
+        u = tangent_vector[:, :2]
+        theta = tangent_vector[:, 2:]
+        rotation = Rot2D()
+        rotation.update_from_angle(theta)
+
+        cos, sin = rotation.to_cos_sin()
+        sin_theta_by_theta = sin / theta
+        one_minus_cos_theta_by_theta = (-cos + 1) / theta
+
+        # Same as above three lines but for small angles
+        idx_small_thetas = theta.abs() < theseus.constants.EPS
+        small_theta = theta[idx_small_thetas]
+        small_theta_sq = small_theta ** 2
+        sin_theta_by_theta[idx_small_thetas] = -small_theta_sq / 6 + 1
+        one_minus_cos_theta_by_theta[idx_small_thetas] = (
+            0.5 * small_theta - small_theta / 24 * small_theta_sq
+        )
+
+        new_x = sin_theta_by_theta * u[:, 0] - one_minus_cos_theta_by_theta * u[:, 1]
+        new_y = one_minus_cos_theta_by_theta * u[:, 0] + sin_theta_by_theta * u[:, 1]
+        translation = Vector(size=2, data=torch.stack([new_x, new_y], dim=1))
+
+        pose = Pose2D()
+        pose.update_from_rot_and_trans(rotation, translation)
+        return pose
+
+    @staticmethod
+    def _adjoint(pose: "Variable") -> torch.Tensor:
+        pose = cast(Pose2D, pose)
+        ret = torch.empty(pose.shape[0], 3, 3).to(pose.device)
+        ret[:, :2, :2] = pose.rotation.to_matrix()
+        ret[:, 0, 2] = pose.translation.data[:, 1]
+        ret[:, 1, 2] = -pose.translation.data[:, 0]
+        ret[:, 2, :2] = 0
+        ret[:, 2, 2] = 1
+        return ret
+
+    @staticmethod
+    def _compose(
+        pose_1: Variable, pose_2: Variable, get_jacobians: bool = False
+    ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+        pose_1 = cast(Pose2D, pose_1)
+        pose_2 = cast(Pose2D, pose_2)
+        new_rotation = pose_1.rotation.compose(
+            pose_2.rotation, get_jacobians=get_jacobians
+        )[0]
+        new_translation = pose_1.translation.compose(
+            pose_1.rotation.rotate(pose_2.translation)
+        )[0]
+        composition = Pose2D(
+            data=torch.cat([new_translation.data, new_rotation.data], dim=1)
+        )
+        if get_jacobians:
+            jac1, jac2 = Pose2D._compose_jacobians(pose_1, pose_2)
+            return composition, (jac1, jac2)
+        else:
+            return composition, None
+
+    @staticmethod
+    def _inverse(
+        rot_1: Variable, get_jacobian: bool = False
+    ) -> Tuple[Variable, Optional[torch.Tensor]]:
+        pass
diff --git a/theseus/geometry/rot2.py b/theseus/geometry/rot2.py
index 12dfc83cf..3936b0a0a 100644
--- a/theseus/geometry/rot2.py
+++ b/theseus/geometry/rot2.py
@@ -25,11 +24,6 @@ def update_from_angle(self, theta: torch.Tensor):
     def dim(self) -> int:
         return 1
 
-    @staticmethod
-    def _log_map(variable: Variable) -> torch.Tensor:
-        rot = cast(Rot2D, variable)
-        return torch.atan2(rot.data[:, 1], rot.data[:, 0])
-
     @staticmethod
     def _adjoint(variable: Variable) -> torch.Tensor:
         return torch.ones(variable.shape[0], 1).to(variable.device)
@@ -40,17 +35,25 @@ def exp_map(tangent_vector: torch.Tensor) -> Variable:
         rot.update_from_angle(tangent_vector)
         return rot
 
+    @staticmethod
+    def _log_map(variable: Variable) -> torch.Tensor:
+        rot = cast(Rot2D, variable)
+        cos, sin = rot.to_cos_sin()
+        return torch.atan2(sin, cos).unsqueeze(1)
+
     # TODO start here, there derivatives are with respect to theta, not sin/cos
     # need to use adjoint map
     @staticmethod
     def _compose(
         rot_1: Variable, rot_2: Variable, get_jacobians: bool = False
     ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+        rot_1 = cast(Rot2D, rot_1)
+        rot_2 = cast(Rot2D, rot_2)
-        cos_1, sin_1 = rot_1.data[:, :1], rot_1.data[:, 1:]
-        cos_2, sin_2 = rot_2.data[:, :1], rot_2.data[:, 1:]
+        cos_1, sin_1 = rot_1.to_cos_sin()
+        cos_2, sin_2 = rot_2.to_cos_sin()
         new_cos = cos_1 * cos_2 - sin_1 * sin_2
         new_sin = sin_1 * cos_2 + cos_1 * sin_2
-        composition = 
Rot2D(data=torch.cat([new_cos, new_sin], dim=1)) + composition = Rot2D(data=torch.stack([new_cos, new_sin], dim=1)) if get_jacobians: jac1, jac2 = Rot2D._compose_jacobians( cast(LieGroup, rot_1), cast(LieGroup, rot_2) @@ -61,18 +64,32 @@ def _compose( @staticmethod def _inverse( - rot_1: Variable, get_jacobian: bool = False + rot: Variable, get_jacobian: bool = False ) -> Tuple[Variable, Optional[torch.Tensor]]: - inverse = Rot2D(data=torch.cat([rot_1.data[:, 0], -rot_1.data[:, 1]], dim=1)) + rot = cast(Rot2D, rot) + cos, sin = rot.to_cos_sin() + inverse = Rot2D(data=torch.stack([cos, -sin], dim=1)) if get_jacobian: - return inverse, Rot2D._inverse_jacobian(cast(Rot2D, rot_1)) + return inverse, Rot2D._inverse_jacobian(rot) else: return inverse, None def rotate(self, point: Vector) -> Vector: px, py = point.data[:, :1], point.data[:, 1:] - cos, sin = self.data[:, :1], self.data[:, 1:] + cos, sin = self.to_cos_sin() new_point_data = torch.empty_like(point.data) new_point_data[:, 0] = cos * px - sin * py new_point_data[:, 1] = sin * px + cos * py return Vector(2, data=new_point_data) + + def to_cos_sin(self) -> Tuple[torch.Tensor, torch.Tensor]: + return self.data[:, 0], self.data[:, 1] + + def to_matrix(self) -> torch.Tensor: + matrix = torch.Tensor(self.shape[0], 2, 2) + cos, sin = self.to_cos_sin() + matrix[:, 0, 0] = cos + matrix[:, 0, 1] = -sin + matrix[:, 1, 0] = sin + matrix[:, 1, 1] = cos + return matrix From 005ca47cfd386b580aa3a01e7fb57507a4786f36 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 26 May 2021 10:05:31 -0700 Subject: [PATCH 09/39] Fixed broken tests --- theseus/core/tests/common.py | 4 ++++ theseus/core/tests/test_variable.py | 8 ++++++-- theseus/optimizer/tests/test_dense_linearization.py | 4 ++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 1257574ae..04cebf78c 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -37,6 +37,10 @@ def exp_map(tangent_vector): def _log_map(variable1): pass + @staticmethod + def _adjoint(variable1): + pass + class MockPrecision(theseus.core.Precision): def __init__(self, the_data): diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index 2378c092e..da65272bd 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -15,7 +15,7 @@ def _check_copy(var, new_var, new_name): assert var is not new_var assert var.data is not new_var.data assert new_var.name == new_name - assert new_var.shape() == var.shape() + assert new_var.shape == var.shape for i in range(100): size = torch.randint(low=1, high=21, size=(1,)).item() @@ -29,7 +29,7 @@ def test_var_shape(): for sz in range(100): data = torch.ones(sz) var = MockVar(sz, data=data) - assert (1,) + data.shape == var.shape() + assert (1,) + data.shape == var.shape def test_update_shape_check(): @@ -78,6 +78,10 @@ def exp_map(tangent_vector): def _log_map(variable1): pass + @staticmethod + def _adjoint(variable): + pass + def test_variable_no_args_init(): var = MockVarNoArgs(name="mock") diff --git a/theseus/optimizer/tests/test_dense_linearization.py b/theseus/optimizer/tests/test_dense_linearization.py index 9be510a9d..f97a2fa5f 100644 --- a/theseus/optimizer/tests/test_dense_linearization.py +++ b/theseus/optimizer/tests/test_dense_linearization.py @@ -37,6 +37,10 @@ def exp_map(tangent_vector): def _log_map(variable1): pass + @staticmethod + def _adjoint(variable1): + pass + class MockFactor(thcore.Factor): def 
__init__(self, variables, precision, dim, name=None): From e474373f0199a6c2e561b408151a06b7c7227e35 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 06:51:14 -0700 Subject: [PATCH 10/39] Moved compose, inverse, jacobian and adjoint methods from Variable to LieGroup --- theseus/core/tests/common.py | 20 ----- theseus/core/tests/test_variable.py | 20 ----- theseus/core/variable.py | 88 +------------------ theseus/geometry/lie_group.py | 76 ++++++++++++++-- theseus/geometry/vector.py | 26 +++--- .../tests/test_dense_linearization.py | 20 ----- 6 files changed, 84 insertions(+), 166 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 04cebf78c..272188ce5 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -21,26 +21,6 @@ def _local(variable1, variable2): def _retract(variable, delta): pass - @staticmethod - def _inverse(variable1, get_jacobian=False): - pass - - @staticmethod - def _compose(variable1, variable2, get_jacobians=False): - pass - - @staticmethod - def exp_map(tangent_vector): - pass - - @staticmethod - def _log_map(variable1): - pass - - @staticmethod - def _adjoint(variable1): - pass - class MockPrecision(theseus.core.Precision): def __init__(self, the_data): diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index da65272bd..62273e34a 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -62,26 +62,6 @@ def _local(variable1, variable2): def _retract(variable, delta): pass - @staticmethod - def _inverse(variable1, get_jacobian=False): - pass - - @staticmethod - def _compose(variable1, variable2, get_jacobians=False): - pass - - @staticmethod - def exp_map(tangent_vector): - pass - - @staticmethod - def _log_map(variable1): - pass - - @staticmethod - def _adjoint(variable): - pass - def test_variable_no_args_init(): var = MockVarNoArgs(name="mock") diff --git a/theseus/core/variable.py b/theseus/core/variable.py index ed6808516..3ee1eff71 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -1,23 +1,15 @@ import abc import copy -from typing import Any, Optional, Tuple +from typing import Any, Optional import torch -# TODO maybe a lot of these methods should only appear in LieGroup subclass -# TODO(mhmukadam) add docs/comments for -# _compose, _inverse, _adjoint, _log_map, _exp_map # Abstract class to represent Variables in the factor graph. # Concrete classes must implement the following methods: # - `_init_data`: initializes the underlying data tensor given constructor arguments # -`_local`: given two close Variable gives distance in tangent space # - `_retract`: returns Variable close by delta to given Variable -# - `exp_map` -# - `_log_map` -# - `_adjoint` -# _ `_compose` -# _ `_inverse` # - `update`: replaces the data tensor with the provided one (and checks that # the provided has the right format). 
# - `dim`: # of degrees of freedom of the variable @@ -57,65 +49,20 @@ def shape(self) -> torch.Size: @staticmethod @abc.abstractmethod - def _log_map(variable: "Variable") -> torch.Tensor: - pass - - def log_map(self) -> torch.Tensor: - return self.__class__._log_map(self) - - @staticmethod - @abc.abstractmethod - def exp_map(tangent_vector: torch.Tensor) -> "Variable": + def _local(variable1: "Variable", variable2: "Variable") -> torch.Tensor: pass @staticmethod @abc.abstractmethod - def _local(variable1: "Variable", variable2: "Variable") -> torch.Tensor: + def _retract(variable: "Variable", delta: torch.Tensor) -> "Variable": pass def local(self, variable2: "Variable") -> torch.Tensor: return self.__class__._local(self, variable2) - @staticmethod - @abc.abstractmethod - def _adjoint(variable: "Variable") -> torch.Tensor: - pass - - def adjoint(self) -> torch.Tensor: - return self.__class__._adjoint(self) - def retract(self, delta: torch.Tensor) -> "Variable": return self.__class__._retract(self, delta) - @staticmethod - @abc.abstractmethod - def _retract(variable: "Variable", delta: torch.Tensor) -> "Variable": - pass - - def compose( - self, variable2: "Variable", get_jacobians: bool = False - ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: - return self.__class__._compose(self, variable2, get_jacobians=get_jacobians) - - @staticmethod - @abc.abstractmethod - def _compose( - variable1: "Variable", variable2: "Variable", get_jacobians: bool = False - ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: - pass - - def inverse( - self, get_jacobian: bool = False - ) -> Tuple["Variable", Optional[torch.Tensor]]: - return self.__class__._inverse(self, get_jacobian=get_jacobian) - - @staticmethod - @abc.abstractmethod - def _inverse( - variable1: "Variable", get_jacobian: bool = False - ) -> Tuple["Variable", Optional[torch.Tensor]]: - pass - def copy(self, new_name: Optional[str] = None) -> "Variable": if not new_name: new_name = f"{self.name}_copy" @@ -156,35 +103,6 @@ def local(variable1: Variable, variable2: Variable) -> torch.Tensor: return variable1.local(variable2) -# Alias for Variable.adjoint() -def adjoint(variable: Variable) -> torch.Tensor: - return variable.adjoint() - - # Alias for Variable.retract() def retract(variable: Variable, delta: torch.Tensor) -> Variable: return variable.retract(delta) - - -# Alias for Variable.compose() -def compose( - variable1: Variable, variable2: Variable, get_jacobians: bool = False -) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: - return variable1.compose(variable2, get_jacobians=get_jacobians) - - -# Alias for Variable.inverse() -def inverse( - variable1: Variable, get_jacobian: bool = False -) -> Tuple[Variable, Optional[torch.Tensor]]: - return variable1.inverse(get_jacobian=get_jacobian) - - -# Alias for Variable.log_map() -def log_map(variable: Variable) -> torch.Tensor: - return variable.log_map() - - -# Alias for Variable.exp_map() -def exp_map(variable: Variable, tangent_vector: torch.Tensor) -> Variable: - return variable.__class__.exp_map(tangent_vector) diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py index 9e09fa148..5cc32e1d7 100644 --- a/theseus/geometry/lie_group.py +++ b/theseus/geometry/lie_group.py @@ -1,11 +1,22 @@ import abc -from typing import Any, Optional, Tuple +from typing import Any, Optional, Tuple, cast import torch from theseus.core.variable import Variable +# TODO(mhmukadam) add docs/comments for +# _compose, _inverse, _adjoint, 
_log_map, exp_map +# Abstract class to represent Lie groups. +# Concrete classes must implement the following methods: +# - `exp_map` +# - `_log_map` +# - `_adjoint` +# _ `_compose` +# _ `_inverse` +# +# Constructor can optionally provide an initial data value as a keyword argument. class LieGroup(Variable): def __init__( self, @@ -25,33 +36,49 @@ def dim(self) -> int: @staticmethod @abc.abstractmethod - def _log_map(variable: "Variable") -> torch.Tensor: + def exp_map(tangent_vector: torch.Tensor) -> "LieGroup": pass @staticmethod @abc.abstractmethod - def exp_map(tangent_vector: torch.Tensor) -> "Variable": + def _log_map(variable: "LieGroup") -> torch.Tensor: pass + def log_map(self) -> torch.Tensor: + return self.__class__._log_map(self) + @staticmethod @abc.abstractmethod - def _adjoint(variable: "Variable") -> torch.Tensor: + def _adjoint(variable: "LieGroup") -> torch.Tensor: pass + def adjoint(self) -> torch.Tensor: + return self.__class__._adjoint(self) + @staticmethod @abc.abstractmethod def _compose( - variable1: "Variable", variable2: "Variable", get_jacobians: bool = False - ) -> Tuple["Variable", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + variable1: "LieGroup", variable2: "LieGroup", get_jacobians: bool = False + ) -> Tuple["LieGroup", Optional[Tuple[torch.Tensor, torch.Tensor]]]: pass + def compose( + self, variable2: "LieGroup", get_jacobians: bool = False + ) -> Tuple["LieGroup", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + return self.__class__._compose(self, variable2, get_jacobians=get_jacobians) + @staticmethod @abc.abstractmethod def _inverse( - variable1: "Variable", get_jacobian: bool = False - ) -> Tuple["Variable", Optional[torch.Tensor]]: + variable1: "LieGroup", get_jacobian: bool = False + ) -> Tuple["LieGroup", Optional[torch.Tensor]]: pass + def inverse( + self, get_jacobian: bool = False + ) -> Tuple["LieGroup", Optional[torch.Tensor]]: + return self.__class__._inverse(self, get_jacobian=get_jacobian) + @staticmethod def _compose_jacobians( group1: "LieGroup", group2: "LieGroup" @@ -64,13 +91,44 @@ def _compose_jacobians( @staticmethod def _inverse_jacobian(group1: "LieGroup") -> torch.Tensor: - # TODO as it's done in minisam. Is (-) supposed to be a negation or an inverse() operator? 
return -group1.adjoint() @staticmethod def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: + variable1 = cast(LieGroup, variable1) + variable2 = cast(LieGroup, variable2) return variable1.inverse()[0].compose(variable2)[0].log_map() @staticmethod def _retract(variable: Variable, delta: torch.Tensor) -> Variable: + variable = cast(LieGroup, variable) return variable.compose(variable.exp_map(delta))[0] + + +# Alias for LieGroup.adjoint() +def adjoint(variable: LieGroup) -> torch.Tensor: + return variable.adjoint() + + +# Alias for LieGroup.compose() +def compose( + variable1: LieGroup, variable2: LieGroup, get_jacobians: bool = False +) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + return variable1.compose(variable2, get_jacobians=get_jacobians) + + +# Alias for LieGroup.inverse() +def inverse( + variable1: LieGroup, get_jacobian: bool = False +) -> Tuple[LieGroup, Optional[torch.Tensor]]: + return variable1.inverse(get_jacobian=get_jacobian) + + +# Alias for LieGroup.log_map() +def log_map(variable: LieGroup) -> torch.Tensor: + return variable.log_map() + + +# Alias for LieGroup.exp_map() +def exp_map(variable: LieGroup, tangent_vector: torch.Tensor) -> LieGroup: + return variable.__class__.exp_map(tangent_vector) diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index e4b1af821..77835059f 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -4,8 +4,10 @@ from theseus.core.variable import Variable +from .lie_group import LieGroup -class Vector(Variable): + +class Vector(LieGroup): def __init__( self, size: int, @@ -149,14 +151,10 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": else: return Vector(vec.data.shape[1], data=torch.add(vec.data, delta)) - @staticmethod - def _adjoint(variable: Variable) -> torch.Tensor: - return NotImplemented - @staticmethod def _compose( - vec1: Variable, vec2: Variable, get_jacobians: bool = False - ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + vec1: LieGroup, vec2: LieGroup, get_jacobians: bool = False + ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: result = Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) if get_jacobians: batch_size = vec1.data.shape[0] @@ -170,8 +168,8 @@ def _compose( @staticmethod def _inverse( - vec1: Variable, get_jacobian: bool = False - ) -> Tuple[Variable, Optional[torch.Tensor]]: + vec1: LieGroup, get_jacobian: bool = False + ) -> Tuple[LieGroup, Optional[torch.Tensor]]: inverse = Vector(vec1.data.shape[1], data=-vec1.data) if get_jacobian: batch_size = vec1.data.shape[0] @@ -181,13 +179,17 @@ def _inverse( else: return inverse, None - # TODO: Define these for Vector (or remove from Variable if they are not general enough) + # TODO: Define these three methods for Vector (are these identity operations?) 
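+    # (Presumably yes for R^n under addition: adjoint would be the identity
+    # matrix, and exp_map / log_map would be identity maps between the group
+    # element and its tangent vector, i.e. the data tensor itself.)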
+ @staticmethod + def _adjoint(variable: LieGroup) -> torch.Tensor: + return NotImplemented + @staticmethod - def exp_map(tangent_vector): + def exp_map(tangent_vector: torch.Tensor) -> LieGroup: pass @staticmethod - def _log_map(variable1): + def _log_map(variable1: LieGroup) -> torch.Tensor: pass def __hash__(self): diff --git a/theseus/optimizer/tests/test_dense_linearization.py b/theseus/optimizer/tests/test_dense_linearization.py index f97a2fa5f..dd61b287f 100644 --- a/theseus/optimizer/tests/test_dense_linearization.py +++ b/theseus/optimizer/tests/test_dense_linearization.py @@ -21,26 +21,6 @@ def _local(variable1, variable2): def _retract(variable, delta): return variable - @staticmethod - def _inverse(variable1, get_jacobian=False): - pass - - @staticmethod - def _compose(variable1, variable2, get_jacobians=False): - pass - - @staticmethod - def exp_map(tangent_vector): - pass - - @staticmethod - def _log_map(variable1): - pass - - @staticmethod - def _adjoint(variable1): - pass - class MockFactor(thcore.Factor): def __init__(self, variables, precision, dim, name=None): From 1f3005f44a7c254c40115f2bbe283b45711aa977 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 07:54:17 -0700 Subject: [PATCH 11/39] Replaced cos/sin by cosine/sine --- theseus/geometry/pose2.py | 33 +++++++++++++++--------------- theseus/geometry/rot2.py | 42 ++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 40 deletions(-) diff --git a/theseus/geometry/pose2.py b/theseus/geometry/pose2.py index bf0110786..e8f4120ac 100644 --- a/theseus/geometry/pose2.py +++ b/theseus/geometry/pose2.py @@ -3,7 +3,6 @@ import torch import theseus.constants -from theseus.core import Variable from .lie_group import LieGroup from .rot2 import Rot2D @@ -30,20 +29,20 @@ def update_from_rot_and_trans(self, rotation: Rot2D, translation: Vector): batch_size = rotation.shape[0] self.data = torch.empty(batch_size, 4, device=rotation.device) self.data[:, :2] = translation.data - cos, sin = rotation.to_cos_sin() - self.data[:, 2] = cos - self.data[:, 3] = sin + cosine, sine = rotation.to_cos_sin() + self.data[:, 2] = cosine + self.data[:, 3] = sine # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L160 @staticmethod - def _log_map(variable: Variable) -> torch.Tensor: + def _log_map(variable: LieGroup) -> torch.Tensor: pose = cast(Pose2D, variable) theta = pose.rotation.log_map() half_theta = 0.5 * theta - cos, sin = pose.rotation.to_cos_sin() - cos_minus_one = cos - 1 - halftheta_by_tan_of_halftheta = -(half_theta * sin) / cos_minus_one + cosine, sine = pose.rotation.to_cos_sin() + cos_minus_one = cosine - 1 + halftheta_by_tan_of_halftheta = -(half_theta * sine) / cos_minus_one # Same as above three lines but for small values idx_small_vals = cos_minus_one.abs() < theseus.constants.EPS @@ -61,15 +60,15 @@ def _log_map(variable: Variable) -> torch.Tensor: # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L558 @staticmethod - def exp_map(tangent_vector: torch.Tensor) -> Variable: + def exp_map(tangent_vector: torch.Tensor) -> LieGroup: u = tangent_vector[:, :2] theta = tangent_vector[:, 2:] rotation = Rot2D() rotation.update_from_angle(tangent_vector) - cos, sin = rotation.to_cos_sin() - sin_theta_by_theta = sin / theta - one_minus_cos_theta_by_theta = (-cos + 1) / theta + cosine, sine = rotation.to_cos_sin() + sin_theta_by_theta = sine / theta + one_minus_cos_theta_by_theta = (-cosine + 1) / theta # Same as above three lines but for small angles idx_small_thetas = 
theta.abs() < theseus.constants.EPS @@ -89,7 +88,7 @@ def exp_map(tangent_vector: torch.Tensor) -> Variable: return pose @staticmethod - def _adjoint(pose: "Variable") -> torch.Tensor: + def _adjoint(pose: LieGroup) -> torch.Tensor: pose = cast(Pose2D, pose) ret = torch.empty(pose.shape[0], 3, 3).to(pose.device) ret[:, :2, :2] = pose.rotation.to_matrix() @@ -101,8 +100,8 @@ def _adjoint(pose: "Variable") -> torch.Tensor: @staticmethod def _compose( - pose_1: Variable, pose_2: Variable, get_jacobians: bool = False - ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + pose_1: LieGroup, pose_2: LieGroup, get_jacobians: bool = False + ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: pose_1 = cast(Pose2D, pose_1) pose_2 = cast(Pose2D, pose_2) new_rotation = pose_1.rotation.compose( @@ -122,6 +121,6 @@ def _compose( @staticmethod def _inverse( - rot_1: Variable, get_jacobian: bool = False - ) -> Tuple[Variable, Optional[torch.Tensor]]: + rot_1: LieGroup, get_jacobian: bool = False + ) -> Tuple[LieGroup, Optional[torch.Tensor]]: pass diff --git a/theseus/geometry/rot2.py b/theseus/geometry/rot2.py index 3936b0a0a..a227dee0f 100644 --- a/theseus/geometry/rot2.py +++ b/theseus/geometry/rot2.py @@ -2,8 +2,6 @@ import torch -from theseus.core.variable import Variable - from .lie_group import LieGroup from .vector import Vector @@ -26,27 +24,25 @@ def dim(self) -> int: return 1 @staticmethod - def _adjoint(variable: Variable) -> torch.Tensor: + def _adjoint(variable: LieGroup) -> torch.Tensor: return torch.ones(variable.shape[0], 1).to(variable.device) @staticmethod - def exp_map(tangent_vector: torch.Tensor) -> Variable: + def exp_map(tangent_vector: torch.Tensor) -> LieGroup: rot = Rot2D() rot.update_from_angle(tangent_vector) return rot @staticmethod - def _log_map(variable: Variable) -> torch.Tensor: + def _log_map(variable: LieGroup) -> torch.Tensor: rot = cast(Rot2D, variable) - cos, sin = rot.to_cos_sin() - return torch.atan2(sin, cos).unsqueeze(1) + cosine, sine = rot.to_cos_sin() + return torch.atan2(sine, cosine).unsqueeze(1) - # TODO start here, there derivatives are with respect to theta, not sin/cos - # need to use adjoint map @staticmethod def _compose( - rot_1: Variable, rot_2: Variable, get_jacobians: bool = False - ) -> Tuple[Variable, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + rot_1: LieGroup, rot_2: LieGroup, get_jacobians: bool = False + ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: rot_1 = cast(Rot2D, rot_1) rot_2 = cast(Rot2D, rot_2) cos_1, sin_1 = rot_1.to_cos_sin() @@ -64,11 +60,11 @@ def _compose( @staticmethod def _inverse( - rot: Variable, get_jacobian: bool = False - ) -> Tuple[Variable, Optional[torch.Tensor]]: + rot: LieGroup, get_jacobian: bool = False + ) -> Tuple[LieGroup, Optional[torch.Tensor]]: rot = cast(Rot2D, rot) - cos, sin = rot.to_cos_sin() - inverse = Rot2D(data=torch.stack([cos, -sin], dim=1)) + cosine, sine = rot.to_cos_sin() + inverse = Rot2D(data=torch.stack([cosine, -sine], dim=1)) if get_jacobian: return inverse, Rot2D._inverse_jacobian(rot) else: @@ -76,10 +72,10 @@ def _inverse( def rotate(self, point: Vector) -> Vector: px, py = point.data[:, :1], point.data[:, 1:] - cos, sin = self.to_cos_sin() + cosine, sine = self.to_cos_sin() new_point_data = torch.empty_like(point.data) - new_point_data[:, 0] = cos * px - sin * py - new_point_data[:, 1] = sin * px + cos * py + new_point_data[:, 0] = cosine * px - sine * py + new_point_data[:, 1] = sine * px + cosine * py return Vector(2, 
data=new_point_data) def to_cos_sin(self) -> Tuple[torch.Tensor, torch.Tensor]: @@ -87,9 +83,9 @@ def to_cos_sin(self) -> Tuple[torch.Tensor, torch.Tensor]: def to_matrix(self) -> torch.Tensor: matrix = torch.Tensor(self.shape[0], 2, 2) - cos, sin = self.to_cos_sin() - matrix[:, 0, 0] = cos - matrix[:, 0, 1] = -sin - matrix[:, 1, 0] = sin - matrix[:, 1, 1] = cos + cosine, sine = self.to_cos_sin() + matrix[:, 0, 0] = cosine + matrix[:, 0, 1] = -sine + matrix[:, 1, 0] = sine + matrix[:, 1, 1] = cosine return matrix From 21ff68592d996bdef4e7a9834059ca74cbb6a7eb Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 08:07:05 -0700 Subject: [PATCH 12/39] Changed Between factor to accept only LieGroup variables. --- theseus/embodied/measurements/between.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py index ade99027c..3f9198922 100644 --- a/theseus/embodied/measurements/between.py +++ b/theseus/embodied/measurements/between.py @@ -1,8 +1,9 @@ -from typing import List, Optional +from typing import List, Optional, cast import torch from theseus.core import Factor, Precision, Variable +from theseus.geometry import LieGroup class Between(Factor): @@ -19,16 +20,22 @@ def __init__( variables[0], measurement.__class__ ): raise ValueError("Inconsistent types between variables and measurement.") + if not isinstance(variables[0], LieGroup): + raise ValueError("Between factor only accepts LieGroup variables.") super().__init__(variables, precision, name=name) self._measurement = measurement def error(self) -> torch.Tensor: - var_diff, _ = self.variables[0].inverse()[0].compose(self.variables[1]) + v0 = cast(LieGroup, self.variables[0]) + v1 = cast(LieGroup, self.variables[1]) + var_diff, _ = v0.inverse()[0].compose(v1) return self._measurement.local(var_diff) def jacobians(self) -> List[torch.Tensor]: - v0_inverse, Hinv = self.variables[0].inverse(get_jacobian=True) - Hcmp1, Hcmp2 = v0_inverse.compose(self.variables[1], get_jacobians=True)[1] + v0 = cast(LieGroup, self.variables[0]) + v1 = cast(LieGroup, self.variables[1]) + v0_inverse, Hinv = v0.inverse(get_jacobian=True) + Hcmp1, Hcmp2 = v0_inverse.compose(v1, get_jacobians=True)[1] return [torch.matmul(Hcmp1, Hinv), Hcmp2] def dim(self) -> int: From 68ecdd1a17bbb1b92d4973062305155c87dd9812 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 08:14:55 -0700 Subject: [PATCH 13/39] [refactor] Renamed Rot2D and Pose2D as SO2 and SE2, respectively. 
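
This is a mechanical rename to the standard Lie group names; behavior is
unchanged. A small usage sketch with the new names (illustrative only; shapes
follow the (batch, dim) tensor convention these classes already use):

    import torch
    from theseus.geometry import SO2

    so2 = SO2.exp_map(torch.full((2, 1), 0.5))  # previously Rot2D
    so2_inv, _ = so2.inverse()
    identity, _ = so2.compose(so2_inv)
    print(identity.log_map())  # approximately zero
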
--- theseus/geometry/__init__.py | 4 +++- theseus/geometry/{pose2.py => se2.py} | 24 ++++++++++++------------ theseus/geometry/{rot2.py => so2.py} | 20 ++++++++++---------- 3 files changed, 25 insertions(+), 23 deletions(-) rename theseus/geometry/{pose2.py => se2.py} (89%) rename theseus/geometry/{rot2.py => so2.py} (85%) diff --git a/theseus/geometry/__init__.py b/theseus/geometry/__init__.py index 71c0f0ea7..ea400ca70 100644 --- a/theseus/geometry/__init__.py +++ b/theseus/geometry/__init__.py @@ -1,4 +1,6 @@ from .lie_group import LieGroup +from .se2 import SE2 +from .so2 import SO2 from .vector import Vector -__all__ = ["LieGroup", "Vector"] +__all__ = ["LieGroup", "SE2", "SO2", "Vector"] diff --git a/theseus/geometry/pose2.py b/theseus/geometry/se2.py similarity index 89% rename from theseus/geometry/pose2.py rename to theseus/geometry/se2.py index e8f4120ac..d9eb28899 100644 --- a/theseus/geometry/pose2.py +++ b/theseus/geometry/se2.py @@ -5,11 +5,11 @@ import theseus.constants from .lie_group import LieGroup -from .rot2 import Rot2D +from .so2 import SO2 from .vector import Vector -class Pose2D(LieGroup): +class SE2(LieGroup): def __init__( self, data: Optional[torch.Tensor] = None, @@ -17,7 +17,7 @@ def __init__( ): super().__init__(data=data, name=name) self.translation = Vector(2, data=self.data[:, :2]) - self.rotation = Rot2D(data=self.data[:, 2:]) + self.rotation = SO2(data=self.data[:, 2:]) def _init_data(self): return torch.empty(1, 4) # x, y, cos and sin @@ -25,7 +25,7 @@ def _init_data(self): def dim(self) -> int: return 3 - def update_from_rot_and_trans(self, rotation: Rot2D, translation: Vector): + def update_from_rot_and_trans(self, rotation: SO2, translation: Vector): batch_size = rotation.shape[0] self.data = torch.empty(batch_size, 4, device=rotation.device) self.data[:, :2] = translation.data @@ -36,7 +36,7 @@ def update_from_rot_and_trans(self, rotation: Rot2D, translation: Vector): # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L160 @staticmethod def _log_map(variable: LieGroup) -> torch.Tensor: - pose = cast(Pose2D, variable) + pose = cast(SE2, variable) theta = pose.rotation.log_map() half_theta = 0.5 * theta @@ -63,7 +63,7 @@ def _log_map(variable: LieGroup) -> torch.Tensor: def exp_map(tangent_vector: torch.Tensor) -> LieGroup: u = tangent_vector[:, :2] theta = tangent_vector[:, 2:] - rotation = Rot2D() + rotation = SO2() rotation.update_from_angle(tangent_vector) cosine, sine = rotation.to_cos_sin() @@ -83,13 +83,13 @@ def exp_map(tangent_vector: torch.Tensor) -> LieGroup: new_y = one_minus_cos_theta_by_theta * u[:, 0] + sin_theta_by_theta * u[:, 1] translation = Vector(size=2, data=torch.stack([new_x, new_y], dim=1)) - pose = Pose2D() + pose = SE2() pose.update_from_rot_and_trans(rotation, translation) return pose @staticmethod def _adjoint(pose: LieGroup) -> torch.Tensor: - pose = cast(Pose2D, pose) + pose = cast(SE2, pose) ret = torch.empty(pose.shape[0], 3, 3).to(pose.device) ret[:, :2, :2] = pose.rotation.to_matrix() ret[:, 0, 2] = pose.translation.data[:, 1] @@ -102,19 +102,19 @@ def _adjoint(pose: LieGroup) -> torch.Tensor: def _compose( pose_1: LieGroup, pose_2: LieGroup, get_jacobians: bool = False ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: - pose_1 = cast(Pose2D, pose_1) - pose_2 = cast(Pose2D, pose_2) + pose_1 = cast(SE2, pose_1) + pose_2 = cast(SE2, pose_2) new_rotation = pose_1.rotation.compose( pose_2.rotation, get_jacobians=get_jacobians )[0] new_translation = pose_1.translation.compose( 
pose_1.rotation.rotate(pose_2.translation) )[0] - composition = Pose2D( + composition = SE2( data=torch.cat([new_translation.data, new_rotation.data], dim=1) ) if get_jacobians: - jac1, jac2 = Pose2D._compose_jacobians(pose_1, pose_2) + jac1, jac2 = SE2._compose_jacobians(pose_1, pose_2) return composition, (jac1, jac2) else: return composition, None diff --git a/theseus/geometry/rot2.py b/theseus/geometry/so2.py similarity index 85% rename from theseus/geometry/rot2.py rename to theseus/geometry/so2.py index a227dee0f..95fd69e50 100644 --- a/theseus/geometry/rot2.py +++ b/theseus/geometry/so2.py @@ -6,7 +6,7 @@ from .vector import Vector -class Rot2D(LieGroup): +class SO2(LieGroup): def __init__( self, data: Optional[torch.Tensor] = None, @@ -29,13 +29,13 @@ def _adjoint(variable: LieGroup) -> torch.Tensor: @staticmethod def exp_map(tangent_vector: torch.Tensor) -> LieGroup: - rot = Rot2D() + rot = SO2() rot.update_from_angle(tangent_vector) return rot @staticmethod def _log_map(variable: LieGroup) -> torch.Tensor: - rot = cast(Rot2D, variable) + rot = cast(SO2, variable) cosine, sine = rot.to_cos_sin() return torch.atan2(sine, cosine).unsqueeze(1) @@ -43,15 +43,15 @@ def _log_map(variable: LieGroup) -> torch.Tensor: def _compose( rot_1: LieGroup, rot_2: LieGroup, get_jacobians: bool = False ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: - rot_1 = cast(Rot2D, rot_1) - rot_2 = cast(Rot2D, rot_2) + rot_1 = cast(SO2, rot_1) + rot_2 = cast(SO2, rot_2) cos_1, sin_1 = rot_1.to_cos_sin() cos_2, sin_2 = rot_2.to_cos_sin() new_cos = cos_1 * cos_2 - sin_1 * sin_2 new_sin = sin_1 * cos_2 + cos_1 * sin_2 - composition = Rot2D(data=torch.stack([new_cos, new_sin], dim=1)) + composition = SO2(data=torch.stack([new_cos, new_sin], dim=1)) if get_jacobians: - jac1, jac2 = Rot2D._compose_jacobians( + jac1, jac2 = SO2._compose_jacobians( cast(LieGroup, rot_1), cast(LieGroup, rot_2) ) return composition, (jac1, jac2) @@ -62,11 +62,11 @@ def _compose( def _inverse( rot: LieGroup, get_jacobian: bool = False ) -> Tuple[LieGroup, Optional[torch.Tensor]]: - rot = cast(Rot2D, rot) + rot = cast(SO2, rot) cosine, sine = rot.to_cos_sin() - inverse = Rot2D(data=torch.stack([cosine, -sine], dim=1)) + inverse = SO2(data=torch.stack([cosine, -sine], dim=1)) if get_jacobian: - return inverse, Rot2D._inverse_jacobian(rot) + return inverse, SO2._inverse_jacobian(rot) else: return inverse, None From a9e410561cb2665237de3e6add6ed061674102a9 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 08:19:31 -0700 Subject: [PATCH 14/39] Fixed dynamic type checking for Vector operations. 
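
These operators used to return NotImplemented for non-Vector operands, which
silently deferred to Python's default comparison behavior; they now fail fast.
A sketch of the new behavior:

    import torch
    from theseus.geometry import Vector

    v1 = Vector(2, data=torch.ones(1, 2))
    v2 = Vector(2, data=torch.ones(1, 2))
    assert v1 == v2
    v1 == 1.0  # now raises ValueError instead of evaluating to False

One trade-off worth noting: raising from __eq__ means comparing a Vector
against arbitrary objects (e.g., in membership tests over mixed lists) raises
instead of returning False.
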
--- theseus/geometry/vector.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 77835059f..0c0d50df4 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -45,13 +45,13 @@ def allclose(self, other: "Vector", *args, **kwargs) -> bool: def __eq__(self, other: object) -> bool: if not isinstance(other, Vector): - return NotImplemented + raise ValueError("Non-vector input type for equality operator.") else: return bool(torch.all(self.data == other.data)) def __ne__(self, other: object) -> bool: if not isinstance(other, Vector): - return NotImplemented + raise ValueError("Non-vector input type for non-equality operator.") else: return not self.__eq__(other) @@ -139,15 +139,15 @@ def cat(self, vecs: Union["Vector", Tuple["Vector"], List["Vector"]]) -> "Vector @staticmethod def _local(vec1: Variable, vec2: Variable) -> torch.Tensor: - if not isinstance(vec1, Vector) and not isinstance(vec2, Vector): - return NotImplemented + if not isinstance(vec1, Vector) or not isinstance(vec2, Vector): + raise ValueError("Non-vector inputs for Vector.local()") else: return torch.sub(vec2.data, vec1.data) @staticmethod def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": if not isinstance(vec, Vector): - return NotImplemented + raise ValueError("Non-vector input for Vector.retract()") else: return Vector(vec.data.shape[1], data=torch.add(vec.data, delta)) From e38e3895981ad74d9dff1d4d2a4b86a31068a5e9 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 09:24:49 -0700 Subject: [PATCH 15/39] [refactor] Changed compose/inverse to not return a tuple if get_jacobians=False. --- theseus/embodied/measurements/between.py | 15 +++++++++++---- theseus/geometry/lie_group.py | 23 +++++++++++++---------- theseus/geometry/se2.py | 19 +++++++++---------- theseus/geometry/so2.py | 10 +++++----- theseus/geometry/tests/test_vector.py | 2 +- theseus/geometry/vector.py | 12 ++++++------ 6 files changed, 45 insertions(+), 36 deletions(-) diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py index 3f9198922..5a1656b47 100644 --- a/theseus/embodied/measurements/between.py +++ b/theseus/embodied/measurements/between.py @@ -1,4 +1,4 @@ -from typing import List, Optional, cast +from typing import List, Optional, Tuple, cast import torch @@ -28,14 +28,21 @@ def __init__( def error(self) -> torch.Tensor: v0 = cast(LieGroup, self.variables[0]) v1 = cast(LieGroup, self.variables[1]) - var_diff, _ = v0.inverse()[0].compose(v1) + v0_inverse = cast(LieGroup, v0.inverse()) + var_diff = cast(LieGroup, v0_inverse.compose(v1)) return self._measurement.local(var_diff) def jacobians(self) -> List[torch.Tensor]: v0 = cast(LieGroup, self.variables[0]) v1 = cast(LieGroup, self.variables[1]) - v0_inverse, Hinv = v0.inverse(get_jacobian=True) - Hcmp1, Hcmp2 = v0_inverse.compose(v1, get_jacobians=True)[1] + v0_inverse, Hinv = cast( + Tuple[LieGroup, torch.Tensor], + v0.inverse(get_jacobian=True), + ) + _, (Hcmp1, Hcmp2) = cast( + Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]], + v0_inverse.compose(v1, get_jacobians=True), + ) return [torch.matmul(Hcmp1, Hinv), Hcmp2] def dim(self) -> int: diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py index 5cc32e1d7..4e12638b9 100644 --- a/theseus/geometry/lie_group.py +++ b/theseus/geometry/lie_group.py @@ -1,5 +1,5 @@ import abc -from typing import Any, Optional, Tuple, cast +from typing import Any, Optional, Tuple, 
Union, cast import torch @@ -59,24 +59,24 @@ def adjoint(self) -> torch.Tensor: @abc.abstractmethod def _compose( variable1: "LieGroup", variable2: "LieGroup", get_jacobians: bool = False - ) -> Tuple["LieGroup", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + ) -> Union["LieGroup", Tuple["LieGroup", Tuple[torch.Tensor, torch.Tensor]]]: pass def compose( self, variable2: "LieGroup", get_jacobians: bool = False - ) -> Tuple["LieGroup", Optional[Tuple[torch.Tensor, torch.Tensor]]]: + ) -> Union["LieGroup", Tuple["LieGroup", Tuple[torch.Tensor, torch.Tensor]]]: return self.__class__._compose(self, variable2, get_jacobians=get_jacobians) @staticmethod @abc.abstractmethod def _inverse( variable1: "LieGroup", get_jacobian: bool = False - ) -> Tuple["LieGroup", Optional[torch.Tensor]]: + ) -> Union["LieGroup", Tuple["LieGroup", torch.Tensor]]: pass def inverse( self, get_jacobian: bool = False - ) -> Tuple["LieGroup", Optional[torch.Tensor]]: + ) -> Union["LieGroup", Tuple["LieGroup", torch.Tensor]]: return self.__class__._inverse(self, get_jacobian=get_jacobian) @staticmethod @@ -85,7 +85,8 @@ def _compose_jacobians( ) -> Tuple[torch.Tensor, torch.Tensor]: if not type(group1) is type(group2): raise ValueError("Lie groups for compose must be of the same type.") - jac1 = group1.inverse()[0].adjoint() + g1_inverse = cast(LieGroup, group1.inverse()) + jac1 = g1_inverse.adjoint() jac2 = torch.eye(group2.dim()).repeat(group2.shape[0], 1, 1).to(group2.device) return jac1, jac2 @@ -97,12 +98,14 @@ def _inverse_jacobian(group1: "LieGroup") -> torch.Tensor: def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: variable1 = cast(LieGroup, variable1) variable2 = cast(LieGroup, variable2) - return variable1.inverse()[0].compose(variable2)[0].log_map() + v1_inverse = cast(LieGroup, variable1.inverse()) + composition = cast(LieGroup, v1_inverse.compose(variable2)) + return composition.log_map() @staticmethod def _retract(variable: Variable, delta: torch.Tensor) -> Variable: variable = cast(LieGroup, variable) - return variable.compose(variable.exp_map(delta))[0] + return cast(LieGroup, variable.compose(variable.exp_map(delta))) # Alias for LieGroup.adjoint() @@ -113,14 +116,14 @@ def adjoint(variable: LieGroup) -> torch.Tensor: # Alias for LieGroup.compose() def compose( variable1: LieGroup, variable2: LieGroup, get_jacobians: bool = False -) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: +) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: return variable1.compose(variable2, get_jacobians=get_jacobians) # Alias for LieGroup.inverse() def inverse( variable1: LieGroup, get_jacobian: bool = False -) -> Tuple[LieGroup, Optional[torch.Tensor]]: +) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: return variable1.inverse(get_jacobian=get_jacobian) diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index d9eb28899..491a4f74f 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, cast +from typing import Optional, Tuple, Union, cast import torch @@ -101,15 +101,14 @@ def _adjoint(pose: LieGroup) -> torch.Tensor: @staticmethod def _compose( pose_1: LieGroup, pose_2: LieGroup, get_jacobians: bool = False - ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: pose_1 = cast(SE2, pose_1) pose_2 = cast(SE2, pose_2) - new_rotation = pose_1.rotation.compose( - pose_2.rotation, get_jacobians=get_jacobians 
- )[0] - new_translation = pose_1.translation.compose( - pose_1.rotation.rotate(pose_2.translation) - )[0] + new_rotation = cast(SO2, pose_1.rotation.compose(pose_2.rotation)) + new_translation = cast( + Vector, + pose_1.translation.compose(pose_1.rotation.rotate(pose_2.translation)), + ) composition = SE2( data=torch.cat([new_translation.data, new_rotation.data], dim=1) ) @@ -117,10 +116,10 @@ def _compose( jac1, jac2 = SE2._compose_jacobians(pose_1, pose_2) return composition, (jac1, jac2) else: - return composition, None + return composition @staticmethod def _inverse( rot_1: LieGroup, get_jacobian: bool = False - ) -> Tuple[LieGroup, Optional[torch.Tensor]]: + ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: pass diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 95fd69e50..982867fe1 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, cast +from typing import Optional, Tuple, Union, cast import torch @@ -42,7 +42,7 @@ def _log_map(variable: LieGroup) -> torch.Tensor: @staticmethod def _compose( rot_1: LieGroup, rot_2: LieGroup, get_jacobians: bool = False - ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: rot_1 = cast(SO2, rot_1) rot_2 = cast(SO2, rot_2) cos_1, sin_1 = rot_1.to_cos_sin() @@ -56,19 +56,19 @@ def _compose( ) return composition, (jac1, jac2) else: - return composition, None + return composition @staticmethod def _inverse( rot: LieGroup, get_jacobian: bool = False - ) -> Tuple[LieGroup, Optional[torch.Tensor]]: + ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: rot = cast(SO2, rot) cosine, sine = rot.to_cos_sin() inverse = SO2(data=torch.stack([cosine, -sine], dim=1)) if get_jacobian: return inverse, SO2._inverse_jacobian(rot) else: - return inverse, None + return inverse def rotate(self, point: Vector) -> Vector: px, py = point.data[:, :1], point.data[:, 1:] diff --git a/theseus/geometry/tests/test_vector.py b/theseus/geometry/tests/test_vector.py index 34cf6bff5..0a78f6790 100644 --- a/theseus/geometry/tests/test_vector.py +++ b/theseus/geometry/tests/test_vector.py @@ -50,7 +50,7 @@ def test_add(): v2 = Vector(j, data=t2.clone(), name="v2") vsum = Vector(j, data=t1 + t2) assert v1 + v2 == vsum - assert v1.compose(v2)[0] == vsum + assert v1.compose(v2) == vsum def test_sub(): diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 0c0d50df4..a484f22c3 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -68,13 +68,13 @@ def __ge__(self, other: "Vector") -> bool: return bool(torch.all(self.data >= other.data)) def __add__(self, other: "Vector") -> "Vector": - return cast(Vector, Vector._compose(self, other)[0]) + return cast(Vector, cast(Vector, Vector._compose(self, other))) def __sub__(self, other: "Vector") -> "Vector": return Vector(self.data.shape[1], data=torch.sub(self.data, other.data)) def __neg__(self) -> "Vector": - return cast(Vector, Vector._inverse(self)[0]) + return cast(Vector, cast(Vector, Vector._inverse(self))) def __mul__(self, other: Union["Vector", torch.Tensor]) -> "Vector": if isinstance(other, Vector): @@ -154,7 +154,7 @@ def _retract(vec: Variable, delta: torch.Tensor) -> "Vector": @staticmethod def _compose( vec1: LieGroup, vec2: LieGroup, get_jacobians: bool = False - ) -> Tuple[LieGroup, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: result = 
Vector(vec1.data.shape[1], data=torch.add(vec1.data, vec2.data)) if get_jacobians: batch_size = vec1.data.shape[0] @@ -164,12 +164,12 @@ def _compose( ) return result, jacobians else: - return result, None + return result @staticmethod def _inverse( vec1: LieGroup, get_jacobian: bool = False - ) -> Tuple[LieGroup, Optional[torch.Tensor]]: + ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: inverse = Vector(vec1.data.shape[1], data=-vec1.data) if get_jacobian: batch_size = vec1.data.shape[0] @@ -177,7 +177,7 @@ def _inverse( vec1.device ) else: - return inverse, None + return inverse # TODO: Define these three methods for Vector (are these identity operations?) @staticmethod From b1e4585a35c31e6d262d40870712d6a27aa63267 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 09:30:30 -0700 Subject: [PATCH 16/39] Renamed some left over pose/rot variables as se2/so2. --- theseus/geometry/se2.py | 42 ++++++++++++++++++++--------------------- theseus/geometry/so2.py | 30 ++++++++++++++--------------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 491a4f74f..55cc9f9b1 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -36,11 +36,11 @@ def update_from_rot_and_trans(self, rotation: SO2, translation: Vector): # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L160 @staticmethod def _log_map(variable: LieGroup) -> torch.Tensor: - pose = cast(SE2, variable) - theta = pose.rotation.log_map() + se2 = cast(SE2, variable) + theta = se2.rotation.log_map() half_theta = 0.5 * theta - cosine, sine = pose.rotation.to_cos_sin() + cosine, sine = se2.rotation.to_cos_sin() cos_minus_one = cosine - 1 halftheta_by_tan_of_halftheta = -(half_theta * sine) / cos_minus_one @@ -49,12 +49,12 @@ def _log_map(variable: LieGroup) -> torch.Tensor: theta_sq_at_idx = theta[idx_small_vals] ** 2 halftheta_by_tan_of_halftheta[idx_small_vals] = -theta_sq_at_idx / 12 + 1 - v_inv = torch.empty(pose.shape[0], 2, 2) + v_inv = torch.empty(se2.shape[0], 2, 2) v_inv[:, 0, 0] = halftheta_by_tan_of_halftheta v_inv[:, 0, 1] = half_theta v_inv[:, 1, 0] = -half_theta v_inv[:, 1, 1] = halftheta_by_tan_of_halftheta - tangent_translation = torch.matmul(v_inv, pose.translation.data.unsqueeze(-1)) + tangent_translation = torch.matmul(v_inv, se2.translation.data.unsqueeze(-1)) return torch.cat([tangent_translation.squeeze(), theta], dim=1) @@ -83,43 +83,43 @@ def exp_map(tangent_vector: torch.Tensor) -> LieGroup: new_y = one_minus_cos_theta_by_theta * u[:, 0] + sin_theta_by_theta * u[:, 1] translation = Vector(size=2, data=torch.stack([new_x, new_y], dim=1)) - pose = SE2() - pose.update_from_rot_and_trans(rotation, translation) - return pose + se2 = SE2() + se2.update_from_rot_and_trans(rotation, translation) + return se2 @staticmethod - def _adjoint(pose: LieGroup) -> torch.Tensor: - pose = cast(SE2, pose) - ret = torch.empty(pose.shape[0], 3, 3).to(pose.device) - ret[:, :2, :2] = pose.rotation.to_matrix() - ret[:, 0, 2] = pose.translation.data[:, 1] - ret[:, 1, 2] = -pose.translation.data[:, 0] + def _adjoint(se2: LieGroup) -> torch.Tensor: + se2 = cast(SE2, se2) + ret = torch.empty(se2.shape[0], 3, 3).to(se2.device) + ret[:, :2, :2] = se2.rotation.to_matrix() + ret[:, 0, 2] = se2.translation.data[:, 1] + ret[:, 1, 2] = -se2.translation.data[:, 0] ret[:, 2, 0] = 0 ret[:, 2, 2] = 1 return ret @staticmethod def _compose( - pose_1: LieGroup, pose_2: LieGroup, get_jacobians: bool = False + se2_1: LieGroup, se2_2: LieGroup, 
get_jacobians: bool = False ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: - pose_1 = cast(SE2, pose_1) - pose_2 = cast(SE2, pose_2) - new_rotation = cast(SO2, pose_1.rotation.compose(pose_2.rotation)) + se2_1 = cast(SE2, se2_1) + se2_2 = cast(SE2, se2_2) + new_rotation = cast(SO2, se2_1.rotation.compose(se2_2.rotation)) new_translation = cast( Vector, - pose_1.translation.compose(pose_1.rotation.rotate(pose_2.translation)), + se2_1.translation.compose(se2_1.rotation.rotate(se2_2.translation)), ) composition = SE2( data=torch.cat([new_translation.data, new_rotation.data], dim=1) ) if get_jacobians: - jac1, jac2 = SE2._compose_jacobians(pose_1, pose_2) + jac1, jac2 = SE2._compose_jacobians(se2_1, se2_2) return composition, (jac1, jac2) else: return composition @staticmethod def _inverse( - rot_1: LieGroup, get_jacobian: bool = False + so2_1: LieGroup, get_jacobian: bool = False ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: pass diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 982867fe1..670107ce9 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -29,30 +29,30 @@ def _adjoint(variable: LieGroup) -> torch.Tensor: @staticmethod def exp_map(tangent_vector: torch.Tensor) -> LieGroup: - rot = SO2() - rot.update_from_angle(tangent_vector) - return rot + so2 = SO2() + so2.update_from_angle(tangent_vector) + return so2 @staticmethod def _log_map(variable: LieGroup) -> torch.Tensor: - rot = cast(SO2, variable) - cosine, sine = rot.to_cos_sin() + so2 = cast(SO2, variable) + cosine, sine = so2.to_cos_sin() return torch.atan2(sine, cosine).unsqueeze(1) @staticmethod def _compose( - rot_1: LieGroup, rot_2: LieGroup, get_jacobians: bool = False + so2_1: LieGroup, so2_2: LieGroup, get_jacobians: bool = False ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]: - rot_1 = cast(SO2, rot_1) - rot_2 = cast(SO2, rot_2) - cos_1, sin_1 = rot_1.to_cos_sin() - cos_2, sin_2 = rot_2.to_cos_sin() + so2_1 = cast(SO2, so2_1) + so2_2 = cast(SO2, so2_2) + cos_1, sin_1 = so2_1.to_cos_sin() + cos_2, sin_2 = so2_2.to_cos_sin() new_cos = cos_1 * cos_2 - sin_1 * sin_2 new_sin = sin_1 * cos_2 + cos_1 * sin_2 composition = SO2(data=torch.stack([new_cos, new_sin], dim=1)) if get_jacobians: jac1, jac2 = SO2._compose_jacobians( - cast(LieGroup, rot_1), cast(LieGroup, rot_2) + cast(LieGroup, so2_1), cast(LieGroup, so2_2) ) return composition, (jac1, jac2) else: @@ -60,13 +60,13 @@ def _compose( @staticmethod def _inverse( - rot: LieGroup, get_jacobian: bool = False + so2: LieGroup, get_jacobian: bool = False ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: - rot = cast(SO2, rot) - cosine, sine = rot.to_cos_sin() + so2 = cast(SO2, so2) + cosine, sine = so2.to_cos_sin() inverse = SO2(data=torch.stack([cosine, -sine], dim=1)) if get_jacobian: - return inverse, SO2._inverse_jacobian(rot) + return inverse, SO2._inverse_jacobian(so2) else: return inverse From 322640afaca6af42d287deda53e5dba65e5b91a1 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 2 Jun 2021 13:48:44 -0700 Subject: [PATCH 17/39] Added unit tests for SO2. 
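
Besides adding the tests, this patch fixes a few issues they uncovered: EPS
is loosened from 1e-12 to 1e-10, SO2._init_data and SE2._init_data now assign
self.data instead of returning the tensor, SO2._adjoint returns a
(batch_size, 1, 1) tensor, and SO2.rotate indexes point columns as 1-D
tensors to avoid a broadcasting error. The new tests can be run with:

    pytest theseus/geometry/tests/test_so2.py
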
--- theseus/constants.py | 2 +- theseus/geometry/se2.py | 2 +- theseus/geometry/so2.py | 6 +-- theseus/geometry/tests/test_so2.py | 69 ++++++++++++++++++++++++++++++ 4 files changed, 74 insertions(+), 5 deletions(-) create mode 100644 theseus/geometry/tests/test_so2.py diff --git a/theseus/constants.py b/theseus/constants.py index dc9b0eec2..0f4dbd94c 100644 --- a/theseus/constants.py +++ b/theseus/constants.py @@ -1 +1 @@ -EPS = 1e-12 +EPS = 1e-10 diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 55cc9f9b1..7333053e6 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -20,7 +20,7 @@ def __init__( self.rotation = SO2(data=self.data[:, 2:]) def _init_data(self): - return torch.empty(1, 4) # x, y, cos and sin + self.data = torch.empty(1, 4) # x, y, cos and sin def dim(self) -> int: return 3 diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 670107ce9..d0ab4c01b 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -15,7 +15,7 @@ def __init__( super().__init__(data=data, name=name) def _init_data(self): - return torch.empty(1, 2) # cos and sin + self.data = torch.empty(1, 2) # cos and sin def update_from_angle(self, theta: torch.Tensor): self.update(torch.cat([theta.cos(), theta.sin()], dim=1)) @@ -25,7 +25,7 @@ def dim(self) -> int: @staticmethod def _adjoint(variable: LieGroup) -> torch.Tensor: - return torch.ones(variable.shape[0], 1).to(variable.device) + return torch.ones(variable.shape[0], 1, 1).to(variable.device) @staticmethod def exp_map(tangent_vector: torch.Tensor) -> LieGroup: @@ -71,7 +71,7 @@ def _inverse( return inverse def rotate(self, point: Vector) -> Vector: - px, py = point.data[:, :1], point.data[:, 1:] + px, py = point.data[:, 0], point.data[:, 1] cosine, sine = self.to_cos_sin() new_point_data = torch.empty_like(point.data) new_point_data[:, 0] = cosine * px - sine * py diff --git a/theseus/geometry/tests/test_so2.py b/theseus/geometry/tests/test_so2.py new file mode 100644 index 000000000..6385bbd82 --- /dev/null +++ b/theseus/geometry/tests/test_so2.py @@ -0,0 +1,69 @@ +import numpy as np +import pytest # noqa: F401 +import torch + +from theseus.constants import EPS +from theseus.geometry.so2 import SO2, Vector + + +# TODO: looking at this test, we could add a kwarg to SO2's constructor to receive theta +def test_exp_map(): + batch_size = 20 + thetas = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + so2 = SO2.exp_map(thetas.unsqueeze(1)) + hat_matrices = torch.zeros(batch_size, 2, 2).double() + hat_matrices[:, 0, 1] = -thetas + hat_matrices[:, 1, 0] = thetas + assert torch.allclose(hat_matrices.matrix_exp().float(), so2.to_matrix(), atol=EPS) + + +def test_log_map(): + batch_size = 20 + thetas = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + so2 = SO2.exp_map(thetas) + new_thetas = so2.log_map() + assert torch.allclose(thetas, new_thetas, atol=EPS) + + +def test_compose(): + batch_size = 20 + rng = torch.Generator() + rng.manual_seed(0) + thetas_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + thetas_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2_1 = SO2() + so2_2 = SO2() + so2_1.update_from_angle(thetas_1) + so2_2.update_from_angle(thetas_2) + composition, (jac1, jac2) = so2_1.compose(so2_2, get_jacobians=True) + expected_matrix = so2_1.to_matrix() @ so2_2.to_matrix() + assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) + assert torch.allclose(jac1, torch.ones(batch_size, 1, 1)) + assert 
torch.allclose(jac2, torch.ones(batch_size, 1, 1))
+
+
+def test_inverse():
+    batch_size = 20
+    rng = torch.Generator()
+    rng.manual_seed(0)
+    thetas = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
+    so2 = SO2()
+    so2.update_from_angle(thetas)
+    so2_minus_theta = SO2()
+    so2_minus_theta.update_from_angle(-thetas)
+    so2_inv, jac = so2.inverse(get_jacobian=True)
+    assert torch.allclose(so2_minus_theta.to_matrix(), so2_inv.to_matrix(), atol=EPS)
+    assert torch.allclose(jac, -torch.ones(batch_size, 1, 1))
+
+
+def test_rotate():
+    batch_size = 20
+    rng = torch.Generator()
+    rng.manual_seed(0)
+    thetas = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
+    so2 = SO2()
+    so2.update_from_angle(thetas)
+    vector = Vector(2, data=torch.randn(batch_size, 2))
+    rotated_vector = so2.rotate(vector)
+    expected_data = so2.to_matrix() @ vector.data.unsqueeze(2)
+    assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS)

From 75686773a37915ebd74720cd96e7c9f9ae75dd5a Mon Sep 17 00:00:00 2001
From: Luis Pineda
Date: Wed, 2 Jun 2021 13:57:06 -0700
Subject: [PATCH 18/39] Added option to pass theta directly to constructor of SO2.

---
 theseus/geometry/so2.py            | 11 +++++++++
 theseus/geometry/tests/test_so2.py | 39 ++++++++++++++----------------
 2 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py
index d0ab4c01b..1c9c6cf23 100644
--- a/theseus/geometry/so2.py
+++ b/theseus/geometry/so2.py
@@ -9,10 +9,21 @@ class SO2(LieGroup):
     def __init__(
         self,
+        theta: Optional[torch.Tensor] = None,
         data: Optional[torch.Tensor] = None,
         name: Optional[str] = None,
     ):
+        if theta is not None and data is not None:
+            raise ValueError("Please provide only one of theta or data.")
         super().__init__(data=data, name=name)
+        if theta is not None:
+            if theta.ndim == 1:
+                theta = theta.unsqueeze(1)
+            if theta.ndim != 2 or theta.shape[1] != 1:
+                raise ValueError(
+                    "Argument theta must have ndim = 1, or ndim = 2 and shape[1] = 1."
+ ) + self.update_from_angle(theta) def _init_data(self): self.data = torch.empty(1, 2) # cos and sin diff --git a/theseus/geometry/tests/test_so2.py b/theseus/geometry/tests/test_so2.py index 6385bbd82..88e58156d 100644 --- a/theseus/geometry/tests/test_so2.py +++ b/theseus/geometry/tests/test_so2.py @@ -9,32 +9,30 @@ # TODO: looking at this test, we could add a kwarg to SO2's constructor to receive theta def test_exp_map(): batch_size = 20 - thetas = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) - so2 = SO2.exp_map(thetas.unsqueeze(1)) + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + so2 = SO2.exp_map(theta.unsqueeze(1)) hat_matrices = torch.zeros(batch_size, 2, 2).double() - hat_matrices[:, 0, 1] = -thetas - hat_matrices[:, 1, 0] = thetas + hat_matrices[:, 0, 1] = -theta + hat_matrices[:, 1, 0] = theta assert torch.allclose(hat_matrices.matrix_exp().float(), so2.to_matrix(), atol=EPS) def test_log_map(): batch_size = 20 - thetas = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) - so2 = SO2.exp_map(thetas) - new_thetas = so2.log_map() - assert torch.allclose(thetas, new_thetas, atol=EPS) + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + so2 = SO2.exp_map(theta) + new_theta = so2.log_map() + assert torch.allclose(theta, new_theta, atol=EPS) def test_compose(): batch_size = 20 rng = torch.Generator() rng.manual_seed(0) - thetas_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - thetas_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2_1 = SO2() - so2_2 = SO2() - so2_1.update_from_angle(thetas_1) - so2_2.update_from_angle(thetas_2) + theta_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + theta_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2_1 = SO2(theta=theta_1) + so2_2 = SO2(theta=theta_2) composition, (jac1, jac2) = so2_1.compose(so2_2, get_jacobians=True) expected_matrix = so2_1.to_matrix() @ so2_2.to_matrix() assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) @@ -46,11 +44,10 @@ def test_inverse(): batch_size = 20 rng = torch.Generator() rng.manual_seed(0) - thetas = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2 = SO2() - so2.update_from_angle(thetas) + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2 = SO2(theta=theta) so2_minus_theta = SO2() - so2_minus_theta.update_from_angle(-thetas) + so2_minus_theta.update_from_angle(-theta) so2_inv, jac = so2.inverse(get_jacobian=True) assert torch.allclose(so2_minus_theta.to_matrix(), so2_inv.to_matrix(), atol=EPS) assert torch.allclose(jac, -torch.ones(batch_size, 1, 1)) @@ -60,9 +57,9 @@ def test_rotate(): batch_size = 20 rng = torch.Generator() rng.manual_seed(0) - thetas = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2 = SO2() - so2.update_from_angle(thetas) + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2 = SO2(theta=theta) + so2.update_from_angle(theta) vector = Vector(2, data=torch.randn(batch_size, 2)) rotated_vector = so2.rotate(vector) expected_data = so2.to_matrix() @ vector.data.unsqueeze(2) From 1a2f31a1fdea49e7e9b550a473d14201aca8c76a Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Thu, 3 Jun 2021 14:48:58 -0700 Subject: [PATCH 19/39] [bug-fix] Fixed bug in Variable.copy() which would mishandle any member container objects, as it would have done a shallow copy of them. 
Added a _copy_impl() method to address this --- theseus/core/tests/common.py | 3 +++ theseus/core/tests/test_variable.py | 3 +++ theseus/core/variable.py | 8 ++++++-- theseus/geometry/se2.py | 4 ++++ theseus/geometry/so2.py | 5 +++++ theseus/geometry/vector.py | 3 +++ theseus/optimizer/tests/linearization_test_utils.py | 3 +++ 7 files changed, 27 insertions(+), 2 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 272188ce5..e9f9439fb 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -21,6 +21,9 @@ def _local(variable1, variable2): def _retract(variable, delta): pass + def _copy_impl(self): + return MockVar(self.data.shape[1]) + class MockPrecision(theseus.core.Precision): def __init__(self, the_data): diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index 62273e34a..6901f28a2 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -62,6 +62,9 @@ def _local(variable1, variable2): def _retract(variable, delta): pass + def _copy_impl(self): + return MockVarNoArgs() + def test_variable_no_args_init(): var = MockVarNoArgs(name="mock") diff --git a/theseus/core/variable.py b/theseus/core/variable.py index 3ee1eff71..7722346ac 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -1,5 +1,4 @@ import abc -import copy from typing import Any, Optional import torch @@ -63,10 +62,15 @@ def local(self, variable2: "Variable") -> torch.Tensor: def retract(self, delta: torch.Tensor) -> "Variable": return self.__class__._retract(self, delta) + # Must copy everything that is not name and data + @abc.abstractmethod + def _copy_impl(self) -> "Variable": + pass + def copy(self, new_name: Optional[str] = None) -> "Variable": if not new_name: new_name = f"{self.name}_copy" - var_copy = copy.copy(self) + var_copy = self._copy_impl() var_copy.name = new_name var_copy.data = self.data.clone() return var_copy diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 7333053e6..0f8e29706 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -3,6 +3,7 @@ import torch import theseus.constants +from theseus.core import Variable from .lie_group import LieGroup from .so2 import SO2 @@ -123,3 +124,6 @@ def _inverse( so2_1: LieGroup, get_jacobian: bool = False ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: pass + + def _copy_impl(self) -> Variable: + return SE2() diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 1c9c6cf23..a68cca036 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -2,6 +2,8 @@ import torch +from theseus.core import Variable + from .lie_group import LieGroup from .vector import Vector @@ -100,3 +102,6 @@ def to_matrix(self) -> torch.Tensor: matrix[:, 1, 0] = sine matrix[:, 1, 1] = cosine return matrix + + def _copy_impl(self) -> Variable: + return SO2() diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index a484f22c3..437317a21 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -194,3 +194,6 @@ def _log_map(variable1: LieGroup) -> torch.Tensor: def __hash__(self): return id(self) + + def _copy_impl(self) -> Variable: + return Vector(self.dim()) diff --git a/theseus/optimizer/tests/linearization_test_utils.py b/theseus/optimizer/tests/linearization_test_utils.py index 808ca00ab..739ded58b 100644 --- a/theseus/optimizer/tests/linearization_test_utils.py +++ b/theseus/optimizer/tests/linearization_test_utils.py @@ -20,6 +20,9 @@ def 
_local(variable1, variable2): def _retract(variable, delta): return variable + def _copy_impl(self): + raise NotImplementedError + class MockFactor(thcore.Factor): def __init__(self, variables, precision, dim, name=None): From ca9a533dc7255857d718ccacdf0e06869533b0db Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Thu, 3 Jun 2021 14:50:50 -0700 Subject: [PATCH 20/39] [bug-fix] Fixed bug in Precision.copy() which would mishandle any member container objects, as it would have done a shallow copy of them. Added a _copy_impl() method to address this --- theseus/core/precision.py | 17 +++++++++++++++-- theseus/core/tests/common.py | 6 ++++++ theseus/tests/test_theseus_layer.py | 6 ++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/theseus/core/precision.py b/theseus/core/precision.py index f934ffa53..adfc8ff0b 100644 --- a/theseus/core/precision.py +++ b/theseus/core/precision.py @@ -1,5 +1,4 @@ import abc -import copy from itertools import count from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple @@ -55,8 +54,13 @@ def weight_jacobians_and_error( ) -> Tuple[List[torch.Tensor], torch.Tensor]: pass + # Must copy everything that is not name and data + @abc.abstractmethod + def _copy_impl(self) -> "Precision": + pass + def copy(self) -> "Precision": - new_cov = copy.copy(self) + new_cov = self._copy_impl() new_cov.data = self.data.clone() return new_cov @@ -91,6 +95,7 @@ def __init__(self, scale: float, learnable: bool = False): # TODO if we keep this _id mechanism, add test for this _id = next(self._ids) param_name = f"scale_cov_{_id}" + self.scale = scale super().__init__(scale, learnable=learnable, param_name=param_name) def _init_data(self, scale: float): # type: ignore @@ -110,6 +115,9 @@ def weight_jacobians_and_error( jac.data.mul_(self.data) return jacobians, error + def _copy_impl(self) -> Precision: + return ScalePrecision(self.scale, learnable=False) + # Note that these operations are in-place # (consider renaming weight functions as in minisam - or adding underscore suffix) @@ -125,6 +133,7 @@ def __init__( # TODO if we keep this _id mechanism, add test for this _id = next(self._ids) param_name = f"diagonal_cov_{_id}" + self.diagonal = diagonal super().__init__(diagonal, learnable=learnable, param_name=param_name) def _init_data(self, diagonal: float): # type: ignore @@ -136,6 +145,7 @@ def _init_data(self, diagonal: float): # type: ignore raise ValueError("DiagonalPrecision only accepts arrays of dim. 1.") def weight_error(self, error: torch.Tensor) -> torch.Tensor: + # TODO stop making these inplace (safer, and this is probably not faster anyway) error.mul_(self.data) return error @@ -150,3 +160,6 @@ def weight_jacobians_and_error( # This left multiplies the weights (inv cov.) 
into the jacobian
        jac.data.mul_(self.data.view(1, -1, 1))
        return jacobians, error
+
+    def _copy_impl(self) -> Precision:
+        return DiagonalPrecision(self.diagonal, data=None, learnable=False)
diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py
index e9f9439fb..4c622ad80 100644
--- a/theseus/core/tests/common.py
+++ b/theseus/core/tests/common.py
@@ -38,6 +38,9 @@ def weight_error(self, error):
     def weight_jacobians_and_error(self, jacobians, error):
         pass
 
+    def _copy_impl(self):
+        return MockPrecision(self.data)
+
 
 class NullPrecision(theseus.core.Precision):
     def __init__(self):
@@ -52,6 +55,9 @@ def weight_error(self, error):
     def weight_jacobians_and_error(self, jacobians, error):
         return jacobians, error
 
+    def _copy_impl(self):
+        return NullPrecision()
+
 
 class MockFactor(theseus.core.Factor):
     def __init__(self, variables, precision, name=None):
diff --git a/theseus/tests/test_theseus_layer.py b/theseus/tests/test_theseus_layer.py
index da8295719..5a3b987e8 100644
--- a/theseus/tests/test_theseus_layer.py
+++ b/theseus/tests/test_theseus_layer.py
@@ -56,6 +56,9 @@ def parameters(self) -> Iterator[Tuple[str, torch.nn.Parameter]]:
     def to(self, *args, **kwargs):
         self.nn = self.nn.to(*args, **kwargs)
 
+    def _copy_impl(self):
+        raise NotImplementedError
+
 
 # This is a diagonal precision except that data is passed through a softmax so that
 # the weights are positive and sum to 1.
@@ -87,6 +90,9 @@ def weight_jacobians_and_error(self, jacobians, error):
         jac.data.mul_(weights.view(1, -1, 1))
         return jacobians, error
 
+    def _copy_impl(self):
+        raise NotImplementedError
+
 
 def model(x, b):
     return (b[..., :1] * x) ** 2 + b[..., 1:]

From 492fc305a385f21467049325c05651917099cfc3 Mon Sep 17 00:00:00 2001
From: Luis Pineda
Date: Thu, 3 Jun 2021 15:04:01 -0700
Subject: [PATCH 21/39] [bug-fix] Fixed bug in Factor.copy() which would
 mishandle any member container objects, as it would have done a shallow copy
 of them.

Added a _copy_impl() method to address this
---
 theseus/core/factor.py                        | 22 ++++++++++++++++---
 theseus/core/factor_graph.py                  |  6 ++---
 theseus/core/tests/common.py                  |  4 ++++
 .../nonlinear/tests/test_gauss_newton.py      |  6 +++++
 .../tests/linearization_test_utils.py         |  6 +++++
 theseus/tests/test_theseus_layer.py           |  3 +++
 6 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/theseus/core/factor.py b/theseus/core/factor.py
index 4a2447322..ec3fafd26 100644
--- a/theseus/core/factor.py
+++ b/theseus/core/factor.py
@@ -65,13 +65,29 @@ def weighted_jacobians_error(
     def __len__(self):
         return len(self.variables)
 
-    def copy(self, new_name: Optional[str] = None) -> "Factor":
+    # Must create copies of any member objects except the name and
+    # the variables (the original ones can be kept, as they will be
+    # replaced by factor.copy()).
+    # However, if any internal variables are stored in a container,
+    # another container of the same size must be created and populated
+    # with variables (it's OK if it's the original ones).
+ @abc.abstractmethod + def _copy_impl(self) -> "Factor": + pass + + def copy( + self, new_name: Optional[str] = None, keep_variable_names: bool = False + ) -> "Factor": if not new_name: new_name = f"{self.name}_copy" - new_factor = copy.copy(self) + new_factor = self._copy_impl() new_factor.name = new_name new_factor.precision = self.precision.copy() - new_factor.variables = copy.deepcopy(self.variables) + if keep_variable_names: + new_variables = [v.copy(new_name=v.name) for v in self.variables] + else: + new_variables = copy.deepcopy(self.variables) + new_factor.variables = new_variables return new_factor def __deepcopy__(self, memo): diff --git a/theseus/core/factor_graph.py b/theseus/core/factor_graph.py index 407f59cc4..09fe55e86 100644 --- a/theseus/core/factor_graph.py +++ b/theseus/core/factor_graph.py @@ -154,11 +154,11 @@ def copy(self) -> "FactorGraph": new_graph = FactorGraph() new_factors = [] for factor in self.factors.values(): - new_variables = [v.copy(new_name=v.name) for v in factor.variables] - new_factor = factor.copy(new_name=factor.name) - new_factor.variables = new_variables + new_factor = factor.copy(new_name=factor.name, keep_variable_names=True) new_factors.append(new_factor) + # Handle case where a variable is copied in 2+ factors, but only a single + # copy should be maintained by graph for factor in new_factors: for i, var in enumerate(factor.variables): if new_graph.has_variable(var.name): diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 4c622ad80..5a7cf1876 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -75,6 +75,10 @@ def dim(self) -> int: return self._dim + def _copy_impl(self): + return MockFactor([v for v in self._variables], self.precision.copy()) + + def create_mock_factors(data=None, precision=NullPrecision()): len_data = 1 if data is None else data.shape[1] var1 = MockVar(len_data, data=data, name="var1") diff --git a/theseus/optimizer/nonlinear/tests/test_gauss_newton.py b/theseus/optimizer/nonlinear/tests/test_gauss_newton.py index 8214271f6..12dccd119 100644 --- a/theseus/optimizer/nonlinear/tests/test_gauss_newton.py +++ b/theseus/optimizer/nonlinear/tests/test_gauss_newton.py @@ -51,6 +51,9 @@ def jacobians(self): def dim(self): return 1 + def _copy_impl(self): + raise NotImplementedError + # This test uses least-squares regression to find the coefficients b_j of # @@ -209,6 +212,9 @@ def jacobians(self): def dim(self): return 1 + def _copy_impl(self): + raise NotImplementedError + graph = theseus.FactorGraph() variables = [thgeom.Vector(1, name="dummy")] graph.add(MockFactor(variables, theseus.ScalePrecision(torch.ones(1)))) diff --git a/theseus/optimizer/tests/linearization_test_utils.py b/theseus/optimizer/tests/linearization_test_utils.py index 739ded58b..f77264ad1 100644 --- a/theseus/optimizer/tests/linearization_test_utils.py +++ b/theseus/optimizer/tests/linearization_test_utils.py @@ -62,6 +62,9 @@ def jacobians(self): def dim(self): return self.the_dim + def _copy_impl(self): + raise NotImplementedError + # This is just an identity matrix times some multiplier class MockPrecision(thcore.Precision): @@ -85,6 +88,9 @@ def weight_jacobians_and_error(self, jacobians, error): werr = torch.matmul(sqrt, error.unsqueeze(2)).squeeze(2) return wjs, werr + def _copy_impl(self): + raise NotImplementedError + def build_test_graph_and_linear_system(): # This functionn creates the a graph that results in the diff --git a/theseus/tests/test_theseus_layer.py 
b/theseus/tests/test_theseus_layer.py index 5a3b987e8..0a404878a 100644 --- a/theseus/tests/test_theseus_layer.py +++ b/theseus/tests/test_theseus_layer.py @@ -134,6 +134,9 @@ def to(self, *args, **kwargs): self.xs = self.xs.to(*args, **kwargs) self.ys = self.ys.to(*args, **kwargs) + def _copy_impl(self): + raise NotImplementedError + def create_qf_theseus_layer( xs, From bae769b57692c2f94555b6837a647f86b591aff1 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Thu, 3 Jun 2021 15:08:10 -0700 Subject: [PATCH 22/39] [refactor] Changed Factor class so that variables can be passed as individual arguments, rather than a list of variables. --- theseus/core/factor.py | 24 +++++-- theseus/core/factor_graph.py | 2 +- theseus/core/tests/common.py | 8 ++- theseus/embodied/measurements/between.py | 62 ++++++++++++------- theseus/embodied/misc/gaussian_variable.py | 32 +++++++--- .../nonlinear/tests/test_gauss_newton.py | 18 +++++- .../tests/linearization_test_utils.py | 9 ++- theseus/tests/test_theseus_layer.py | 9 ++- 8 files changed, 125 insertions(+), 39 deletions(-) diff --git a/theseus/core/factor.py b/theseus/core/factor.py index ec3fafd26..b6895b275 100644 --- a/theseus/core/factor.py +++ b/theseus/core/factor.py @@ -1,6 +1,6 @@ import abc import copy -from typing import Any, Dict, Iterator, List, Optional, Tuple +from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple import torch @@ -17,15 +17,11 @@ class Factor(abc.ABC): def __init__( self, - variables: List[Variable], precision: Precision, *args: Any, name: Optional[str] = None, **kwargs: Any, ): - # TODO: might as well change these so that vars can be passed as separate args - # but it still maintains an internal variables array - self.variables = variables self.precision = precision if name: self.name = name @@ -107,3 +103,21 @@ def to(self, *args, **kwargs): self.precision.to(*args, **kwargs) for var in self.variables: var.to(*args, **kwargs) + + @abc.abstractmethod + def _get_variables_impl(self) -> List[Variable]: + pass + + @property + def variables(self) -> List[Variable]: + return self._get_variables_impl() + + @variables.setter + def variables(self, variables: Sequence[Variable]): + for i, v in enumerate(variables): + self.set_variable_at(v, i) + + # Sets the variable at the given index in the order returned by factor.variables + @abc.abstractmethod + def set_variable_at(self, variable: Variable, idx: int): + pass diff --git a/theseus/core/factor_graph.py b/theseus/core/factor_graph.py index 09fe55e86..ffda39152 100644 --- a/theseus/core/factor_graph.py +++ b/theseus/core/factor_graph.py @@ -162,7 +162,7 @@ def copy(self) -> "FactorGraph": for factor in new_factors: for i, var in enumerate(factor.variables): if new_graph.has_variable(var.name): - factor.variables[i] = new_graph.variables[var.name] + factor.set_variable_at(new_graph.variables[var.name], i) new_graph.add(factor) return new_graph diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 5a7cf1876..2d330ed04 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -61,8 +61,9 @@ def _copy_impl(self): class MockFactor(theseus.core.Factor): def __init__(self, variables, precision, name=None): - super().__init__(variables, precision, name=name) + self._variables = variables self._dim = 2 + super().__init__(precision, name=name) def error(self): mu = torch.stack([v.data for v in self.variables]).sum() @@ -74,6 +75,11 @@ def jacobians(self): def dim(self) -> int: return self._dim + def _get_variables_impl(self): + 
return self._variables + + def set_variable_at(self, variable, idx): + self._variables[idx] = variable def _copy_impl(self): return MockFactor([v for v in self._variables], self.precision.copy()) diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py index 5a1656b47..a88114f4f 100644 --- a/theseus/embodied/measurements/between.py +++ b/theseus/embodied/measurements/between.py @@ -9,41 +9,61 @@ class Between(Factor): def __init__( self, - variables: List[Variable], + v0: LieGroup, + v1: LieGroup, precision: Precision, - measurement: Variable, - name: Optional[str], + measurement: LieGroup, + name: Optional[str] = None, ): - if len(variables) != 2: - raise ValueError("Between factor accepts only two variables.") - if not isinstance(variables[0], variables[1].__class__) or not isinstance( - variables[0], measurement.__class__ + self.v0 = v0 + self.v1 = v1 + self.measurement = measurement + if not isinstance(v0, v1.__class__) or not isinstance( + v0, measurement.__class__ ): raise ValueError("Inconsistent types between variables and measurement.") - if not isinstance(variables[0], LieGroup): - raise ValueError("Between factor only accepts LieGroup variables.") - super().__init__(variables, precision, name=name) - self._measurement = measurement + super().__init__(precision, name=name) def error(self) -> torch.Tensor: - v0 = cast(LieGroup, self.variables[0]) - v1 = cast(LieGroup, self.variables[1]) - v0_inverse = cast(LieGroup, v0.inverse()) - var_diff = cast(LieGroup, v0_inverse.compose(v1)) - return self._measurement.local(var_diff) + v0_inverse = cast(LieGroup, self.v0.inverse()) + var_diff = cast(LieGroup, v0_inverse.compose(self.v1)) + return self.measurement.local(var_diff) def jacobians(self) -> List[torch.Tensor]: - v0 = cast(LieGroup, self.variables[0]) - v1 = cast(LieGroup, self.variables[1]) v0_inverse, Hinv = cast( Tuple[LieGroup, torch.Tensor], - v0.inverse(get_jacobian=True), + self.v0.inverse(get_jacobian=True), ) _, (Hcmp1, Hcmp2) = cast( Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]], - v0_inverse.compose(v1, get_jacobians=True), + v0_inverse.compose(self.v1, get_jacobians=True), ) return [torch.matmul(Hcmp1, Hinv), Hcmp2] def dim(self) -> int: - return self.variables[0].dim() + return self.v0.dim() + + def _get_variables_impl(self) -> List[Variable]: + return [self.v0, self.v1] + + def set_variable_at(self, variable: Variable, idx: int): + if not isinstance(variable, self.v0.__class__): + raise ValueError( + f"Attempted to assign a variable with inconsistent " + f"type {variable.__class__.__name__}. " + f"Expected {self.v0.__class__.__name__}." 
+ ) + if idx == 0: + self.v0 = variable + elif idx == 1: + self.v1 = variable + else: + raise ValueError("Index out of range.") + + def _copy_impl(self) -> Factor: + return Between( + self.v0, + self.v1, + self.precision.copy(), + self.measurement.copy(), # type: ignore + ) diff --git a/theseus/embodied/misc/gaussian_variable.py b/theseus/embodied/misc/gaussian_variable.py index 1145646d2..5aba34502 100644 --- a/theseus/embodied/misc/gaussian_variable.py +++ b/theseus/embodied/misc/gaussian_variable.py @@ -3,24 +3,24 @@ import torch from theseus.core import Factor, Precision, Variable +from theseus.geometry import LieGroup class GaussianVariable(Factor): def __init__( self, - variables: List[Variable], + var: LieGroup, precision: Precision, - prior: Variable, - name: Optional[str], + prior: LieGroup, + name: Optional[str] = None, ): - if len(variables) != 1: - raise ValueError("GaussianVariable factor accepts only a single variable.") - if type(variables[0]) != type(prior): + if not isinstance(var, prior.__class__): raise ValueError( "Prior variable type inconsistent with variables for the graph." ) - super().__init__(variables, precision, name=name) + self.var = var self._prior = prior + super().__init__(precision, name=name) def error(self) -> torch.Tensor: return self._prior.local(self.variables[0]) @@ -30,3 +30,21 @@ def jacobians(self) -> List[torch.Tensor]: def dim(self) -> int: return self.variables[0].dim() + + def _get_variables_impl(self) -> List[Variable]: + return [self.var] + + def set_variable_at(self, variable: Variable, idx: int): + if not isinstance(variable, self.var.__class__): + raise ValueError( + f"Attempted to assign a variable with inconsistent " + f"type {variable.__class__.__name__}. " + f"Expected {self.var.__class__.__name__}." 
+ ) + if idx == 0: + self.var = variable + else: + raise ValueError("Index out of range.") + + def _copy_impl(self) -> Factor: + return GaussianVariable(self.var, self.precision.copy(), self.prior.copy()) # type: ignore diff --git a/theseus/optimizer/nonlinear/tests/test_gauss_newton.py b/theseus/optimizer/nonlinear/tests/test_gauss_newton.py index 12dccd119..e04d2655b 100644 --- a/theseus/optimizer/nonlinear/tests/test_gauss_newton.py +++ b/theseus/optimizer/nonlinear/tests/test_gauss_newton.py @@ -25,7 +25,8 @@ def __init__( point = point.unsqueeze(0) assert point.ndim == 2 and point.shape[1] == len_vars - 1 batch_size = point.shape[0] - super().__init__(variables, precision, name=name) + self._variables = variables + super().__init__(precision, name=name) self.point = torch.cat([point, torch.ones(batch_size, 1)], dim=1) self.target = (self.point * true_coeffs.unsqueeze(0)).sum(1, keepdim=True) ** 2 if noise_mag: @@ -51,6 +52,12 @@ def jacobians(self): def dim(self): return 1 + def _get_variables_impl(self): + return self._variables + + def set_variable_at(self, variable, idx): + self._variables[idx] = variable + def _copy_impl(self): raise NotImplementedError @@ -201,7 +208,8 @@ def _linearize_hessian_impl(self): class MockFactor(theseus.Factor): def __init__(self, variables, precision): - super().__init__(variables, precision) + self._variables = variables + super().__init__(precision) def error(self): return torch.ones(1) @@ -212,6 +220,12 @@ def jacobians(self): def dim(self): return 1 + def _get_variables_impl(self): + return self._variables + + def set_variable_at(self, variable, idx): + self._variables[idx] = variable + def _copy_impl(self): raise NotImplementedError diff --git a/theseus/optimizer/tests/linearization_test_utils.py b/theseus/optimizer/tests/linearization_test_utils.py index f77264ad1..8d46fe3c0 100644 --- a/theseus/optimizer/tests/linearization_test_utils.py +++ b/theseus/optimizer/tests/linearization_test_utils.py @@ -26,7 +26,8 @@ def _copy_impl(self): class MockFactor(thcore.Factor): def __init__(self, variables, precision, dim, name=None): - super().__init__(variables, precision, name=name) + self._variables = variables + super().__init__(precision, name=name) self.the_dim = dim # Error function for this factor is (for each batch element) @@ -62,6 +63,12 @@ def jacobians(self): def dim(self): return self.the_dim + def _get_variables_impl(self): + return self._variables + + def set_variable_at(self, variable, idx): + self._variables[idx] = variable + def _copy_impl(self): raise NotImplementedError diff --git a/theseus/tests/test_theseus_layer.py b/theseus/tests/test_theseus_layer.py index 0a404878a..c39774cef 100644 --- a/theseus/tests/test_theseus_layer.py +++ b/theseus/tests/test_theseus_layer.py @@ -112,7 +112,8 @@ def model_grad(x, b): # variable object of dimension 2. 
class QuadraticFitFactor(thcore.Factor): def __init__(self, variables, precision, xs=None, ys=None): - super().__init__(variables, precision, name="qf_factor") + self._variables = variables + super().__init__(precision, name="qf_factor") assert len(variables) == 1 and variables[0].dim() == 2 self.xs = xs self.ys = ys @@ -134,6 +135,12 @@ def to(self, *args, **kwargs): self.xs = self.xs.to(*args, **kwargs) self.ys = self.ys.to(*args, **kwargs) + def _get_variables_impl(self): + return self._variables + + def set_variable_at(self, variable, idx): + self._variables[idx] = variable + def _copy_impl(self): raise NotImplementedError From ed7deb9fda97c5082562f7839c6b16b2bc42a677 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Fri, 4 Jun 2021 09:01:38 -0700 Subject: [PATCH 23/39] Added tests for SE2. --- theseus/geometry/se2.py | 81 ++++++++++---- theseus/geometry/so2.py | 11 +- theseus/geometry/tests/test_lie_group.py | 130 +++++++++++++++++++++++ theseus/geometry/tests/test_so2.py | 66 ------------ 4 files changed, 199 insertions(+), 89 deletions(-) create mode 100644 theseus/geometry/tests/test_lie_group.py delete mode 100644 theseus/geometry/tests/test_so2.py diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 7333053e6..afecf89c4 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -9,6 +9,7 @@ from .vector import Vector +# If data is passed, must be x, y, cos, sin class SE2(LieGroup): def __init__( self, @@ -16,8 +17,6 @@ def __init__( name: Optional[str] = None, ): super().__init__(data=data, name=name) - self.translation = Vector(2, data=self.data[:, :2]) - self.rotation = SO2(data=self.data[:, 2:]) def _init_data(self): self.data = torch.empty(1, 4) # x, y, cos and sin @@ -25,9 +24,19 @@ def _init_data(self): def dim(self) -> int: return 3 + @property + def rotation(self) -> SO2: + return SO2(data=self.data[:, 2:]) + + @property + def translation(self) -> Vector: + return Vector(2, data=self.data[:, :2]) + def update_from_rot_and_trans(self, rotation: SO2, translation: Vector): batch_size = rotation.shape[0] - self.data = torch.empty(batch_size, 4, device=rotation.device) + self.data = torch.empty( + batch_size, 4, device=rotation.device, dtype=rotation.data.dtype + ) self.data[:, :2] = translation.data cosine, sine = rotation.to_cos_sin() self.data[:, 2] = cosine @@ -37,34 +46,36 @@ def update_from_rot_and_trans(self, rotation: SO2, translation: Vector): @staticmethod def _log_map(variable: LieGroup) -> torch.Tensor: se2 = cast(SE2, variable) - theta = se2.rotation.log_map() - half_theta = 0.5 * theta + rotation = se2.rotation - cosine, sine = se2.rotation.to_cos_sin() + theta = rotation.log_map() + half_theta = 0.5 * theta.view(-1) + + cosine, sine = rotation.to_cos_sin() cos_minus_one = cosine - 1 halftheta_by_tan_of_halftheta = -(half_theta * sine) / cos_minus_one # Same as above three lines but for small values idx_small_vals = cos_minus_one.abs() < theseus.constants.EPS - theta_sq_at_idx = theta[idx_small_vals] ** 2 - halftheta_by_tan_of_halftheta[idx_small_vals] = -theta_sq_at_idx / 12 + 1 + if idx_small_vals.any(): + theta_sq_at_idx = theta[idx_small_vals] ** 2 + halftheta_by_tan_of_halftheta[idx_small_vals] = -theta_sq_at_idx / 12 + 1 - v_inv = torch.empty(se2.shape[0], 2, 2) + v_inv = torch.empty(se2.shape[0], 2, 2, dtype=variable.data.dtype) v_inv[:, 0, 0] = halftheta_by_tan_of_halftheta v_inv[:, 0, 1] = half_theta v_inv[:, 1, 0] = -half_theta v_inv[:, 1, 1] = halftheta_by_tan_of_halftheta - tangent_translation = torch.matmul(v_inv, 
se2.translation.data.unsqueeze(-1))
+        tangent_translation = torch.matmul(v_inv, se2.data[:, :2].unsqueeze(-1))
 
-        return torch.cat([tangent_translation.squeeze(), theta], dim=1)
+        return torch.cat([tangent_translation.view(-1, 2), theta], dim=1)
 
     # From https://github.com/strasdat/Sophus/blob/master/sophus/se2.hpp#L558
     @staticmethod
     def exp_map(tangent_vector: torch.Tensor) -> LieGroup:
         u = tangent_vector[:, :2]
-        theta = tangent_vector[:, 2:]
-        rotation = SO2()
-        rotation.update_from_angle(tangent_vector)
+        theta = tangent_vector[:, 2]
+        rotation = SO2(theta=theta)
 
         cosine, sine = rotation.to_cos_sin()
         sin_theta_by_theta = sine / theta
@@ -90,10 +101,10 @@ def exp_map(tangent_vector: torch.Tensor) -> LieGroup:
 
     @staticmethod
     def _adjoint(se2: LieGroup) -> torch.Tensor:
         se2 = cast(SE2, se2)
-        ret = torch.empty(se2.shape[0], 3, 3).to(se2.device)
+        ret = torch.empty(se2.shape[0], 3, 3, dtype=se2.data.dtype).to(se2.device)
         ret[:, :2, :2] = se2.rotation.to_matrix()
-        ret[:, 0, 2] = se2.translation.data[:, 1]
-        ret[:, 1, 2] = -se2.translation.data[:, 0]
+        ret[:, 0, 2] = se2.data[:, 1]
+        ret[:, 1, 2] = -se2.data[:, 0]
         ret[:, 2, 0] = 0
         ret[:, 2, 2] = 1
         return ret
@@ -104,10 +115,14 @@ def _compose(
     ) -> Union[LieGroup, Tuple[LieGroup, Tuple[torch.Tensor, torch.Tensor]]]:
         se2_1 = cast(SE2, se2_1)
         se2_2 = cast(SE2, se2_2)
-        new_rotation = cast(SO2, se2_1.rotation.compose(se2_2.rotation))
+        rotation_1 = se2_1.rotation
+        rotation_2 = se2_2.rotation
+        translation_1 = cast(Vector, se2_1.translation)
+        translation_2 = cast(Vector, se2_2.translation)
+        new_rotation = cast(SO2, rotation_1.compose(rotation_2))
         new_translation = cast(
             Vector,
-            se2_1.translation.compose(se2_1.rotation.rotate(se2_2.translation)),
+            translation_1.compose(rotation_1.rotate(translation_2)),
         )
         composition = SE2(
             data=torch.cat([new_translation.data, new_rotation.data], dim=1)
@@ -120,6 +135,30 @@ def _compose(
 
     @staticmethod
     def _inverse(
-        so2_1: LieGroup, get_jacobian: bool = False
+        se2: LieGroup, get_jacobian: bool = False
     ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]:
-        pass
+        se2 = cast(SE2, se2)
+        inverse_rotation = cast(SO2, se2.rotation.inverse())
+        inverse_translation = inverse_rotation.rotate(-se2.translation)
+        se2_inverse = SE2()
+        se2_inverse.update_from_rot_and_trans(inverse_rotation, inverse_translation)
+        if get_jacobian:
+            return se2_inverse, SE2._inverse_jacobian(se2)
+        return se2_inverse
+
+    def to_matrix(self) -> torch.Tensor:
+        matrix = torch.zeros(self.shape[0], 3, 3, dtype=self.data.dtype)
+        matrix[:, :2, :2] = self.rotation.to_matrix()
+        matrix[:, :2, 2] = self.data[:, :2]
+        matrix[:, 2, 2] = 1.0
+        return matrix
+
+    @staticmethod
+    def hat(tangent_vector: torch.Tensor) -> torch.Tensor:
+        theta = tangent_vector[:, 2]
+        u = tangent_vector[:, :2]
+        matrix = torch.zeros(tangent_vector.shape[0], 3, 3, dtype=tangent_vector.dtype)
+        matrix[:, 0, 1] = -theta
+        matrix[:, 1, 0] = theta
+        matrix[:, :2, 2] = u
+        return matrix
diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py
index 1c9c6cf23..23bfc43aa 100644
--- a/theseus/geometry/so2.py
+++ b/theseus/geometry/so2.py
@@ -18,7 +18,7 @@ def __init__(
         super().__init__(data=data, name=name)
         if theta is not None:
             if theta.ndim == 1:
-                theta.unsqueeze(1)
+                theta = theta.unsqueeze(1)
             if theta.ndim != 2 or theta.shape[1] != 1:
                 raise ValueError(
                     "Argument theta must have ndim = 1, or ndim = 2 and shape[1] = 1."
@@ -93,10 +93,17 @@ def to_cos_sin(self) -> Tuple[torch.Tensor, torch.Tensor]: return self.data[:, 0], self.data[:, 1] def to_matrix(self) -> torch.Tensor: - matrix = torch.Tensor(self.shape[0], 2, 2) + matrix = torch.empty(self.shape[0], 2, 2, dtype=self.data.dtype) cosine, sine = self.to_cos_sin() matrix[:, 0, 0] = cosine matrix[:, 0, 1] = -sine matrix[:, 1, 0] = sine matrix[:, 1, 1] = cosine return matrix + + @staticmethod + def hat(tangent_vector: torch.Tensor) -> torch.Tensor: + matrix = torch.zeros(tangent_vector.shape[0], 2, 2, dtype=tangent_vector.dtype) + matrix[:, 0, 1] = -tangent_vector.view(-1) + matrix[:, 1, 0] = tangent_vector.view(-1) + return matrix diff --git a/theseus/geometry/tests/test_lie_group.py b/theseus/geometry/tests/test_lie_group.py new file mode 100644 index 000000000..4f11238a7 --- /dev/null +++ b/theseus/geometry/tests/test_lie_group.py @@ -0,0 +1,130 @@ +import numpy as np +import pytest # noqa: F401 +import torch + +from theseus.constants import EPS +from theseus.geometry import SE2, SO2, Vector + + +def _check_exp_map(tangent_vector, group_cls): + group = group_cls.exp_map(tangent_vector) + assert torch.allclose( + group_cls.hat(tangent_vector).matrix_exp(), + group.to_matrix(), + atol=EPS, + ) + + +def _check_log_map(tangent_vector, group_cls): + assert torch.allclose( + tangent_vector, group_cls.exp_map(tangent_vector).log_map(), atol=EPS + ) + + +def _check_compose(group_1, group_2): + composition = group_1.compose(group_2, get_jacobians=False) + expected_matrix = group_1.to_matrix() @ group_2.to_matrix() + assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) + # assert torch.allclose(jac1, torch.ones(batch_size, 1, 1)) + # assert torch.allclose(jac2, torch.ones(batch_size, 1, 1)) + + +def _check_inverse(group): + tangent_vector = group.log_map() + inverse_group = group.exp_map(-tangent_vector) + inverse_result = group.inverse(get_jacobian=False) + assert torch.allclose( + inverse_group.to_matrix(), inverse_result.to_matrix(), atol=EPS + ) + # assert torch.allclose(jac, -torch.ones(batch_size, 1, 1)) + + +# ---------------------------------------------------------------------------- +# -------------------------------- SO2 tests --------------------------------- +# ---------------------------------------------------------------------------- +def test_exp_map_so2(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + _check_exp_map(theta, SO2) + + +def test_log_map_so2(): + for batch_size in [1, 2, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + _check_log_map(theta, SO2) + + +def test_compose_so2(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + theta_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + theta_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2_1 = SO2(theta=theta_1) + so2_2 = SO2(theta=theta_2) + _check_compose(so2_1, so2_2) + + +def test_inverse_so2(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2 = SO2(theta=theta) + _check_inverse(so2) + + +def test_rotate_so2(): + batch_size = 20 + rng = torch.Generator() + rng.manual_seed(0) + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + so2 = SO2(theta=theta) + so2.update_from_angle(theta) + vector = Vector(2, data=torch.randn(batch_size, 2)) + rotated_vector = 
so2.rotate(vector) + expected_data = so2.to_matrix() @ vector.data.unsqueeze(2) + assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS) + + +# ---------------------------------------------------------------------------- +# -------------------------------- SE2 tests --------------------------------- +# ---------------------------------------------------------------------------- +def test_exp_map_se2(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) + _check_exp_map(tangent_vector.double(), SE2) + + +def test_log_map_se2(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) + _check_log_map(tangent_vector, SE2) + + +def _create_random_se2(batch_size, rng): + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta], dim=1) + return SE2.exp_map(tangent_vector.double()) + + +def test_compose_se2(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2_1 = _create_random_se2(batch_size, rng) + se2_2 = _create_random_se2(batch_size, rng) + _check_compose(se2_1, se2_2) + + +def test_inverse_se2(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2 = _create_random_se2(batch_size, rng) + _check_inverse(se2) diff --git a/theseus/geometry/tests/test_so2.py b/theseus/geometry/tests/test_so2.py deleted file mode 100644 index 88e58156d..000000000 --- a/theseus/geometry/tests/test_so2.py +++ /dev/null @@ -1,66 +0,0 @@ -import numpy as np -import pytest # noqa: F401 -import torch - -from theseus.constants import EPS -from theseus.geometry.so2 import SO2, Vector - - -# TODO: looking at this test, we could add a kwarg to SO2's constructor to receive theta -def test_exp_map(): - batch_size = 20 - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) - so2 = SO2.exp_map(theta.unsqueeze(1)) - hat_matrices = torch.zeros(batch_size, 2, 2).double() - hat_matrices[:, 0, 1] = -theta - hat_matrices[:, 1, 0] = theta - assert torch.allclose(hat_matrices.matrix_exp().float(), so2.to_matrix(), atol=EPS) - - -def test_log_map(): - batch_size = 20 - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) - so2 = SO2.exp_map(theta) - new_theta = so2.log_map() - assert torch.allclose(theta, new_theta, atol=EPS) - - -def test_compose(): - batch_size = 20 - rng = torch.Generator() - rng.manual_seed(0) - theta_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - theta_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2_1 = SO2(theta=theta_1) - so2_2 = SO2(theta=theta_2) - composition, (jac1, jac2) = so2_1.compose(so2_2, get_jacobians=True) - expected_matrix = so2_1.to_matrix() @ so2_2.to_matrix() - assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) - assert torch.allclose(jac1, torch.ones(batch_size, 1, 1)) - assert torch.allclose(jac2, torch.ones(batch_size, 1, 1)) - - -def test_inverse(): - batch_size = 20 - rng = torch.Generator() - rng.manual_seed(0) - theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2 = SO2(theta=theta) - so2_minus_theta = SO2() - so2_minus_theta.update_from_angle(-theta) - so2_inv, jac = so2.inverse(get_jacobian=True) - assert 
torch.allclose(so2_minus_theta.to_matrix(), so2_inv.to_matrix(), atol=EPS) - assert torch.allclose(jac, -torch.ones(batch_size, 1, 1)) - - -def test_rotate(): - batch_size = 20 - rng = torch.Generator() - rng.manual_seed(0) - theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - so2 = SO2(theta=theta) - so2.update_from_angle(theta) - vector = Vector(2, data=torch.randn(batch_size, 2)) - rotated_vector = so2.rotate(vector) - expected_data = so2.to_matrix() @ vector.data.unsqueeze(2) - assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS) From b4eeef06b4b14f02b3f3501916dbd0a1dafe042d Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Sat, 5 Jun 2021 17:14:45 -0700 Subject: [PATCH 24/39] Added vee operator to SO2 and SE2 and test for adjoint method. --- theseus/geometry/se2.py | 17 ++++++- theseus/geometry/so2.py | 16 +++++- theseus/geometry/tests/test_lie_group.py | 63 +++++++++++++++++------- 3 files changed, 77 insertions(+), 19 deletions(-) diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index afecf89c4..86949d685 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -101,7 +101,7 @@ def exp_map(tangent_vector: torch.Tensor) -> LieGroup: @staticmethod def _adjoint(se2: LieGroup) -> torch.Tensor: se2 = cast(SE2, se2) - ret = torch.empty(se2.shape[0], 3, 3, dtype=se2.data.dtype).to(se2.device) + ret = torch.zeros(se2.shape[0], 3, 3, dtype=se2.data.dtype).to(se2.device) ret[:, :2, :2] = se2.rotation.to_matrix() ret[:, 0, 2] = se2.data[:, 1] ret[:, 1, 2] = -se2.data[:, 0] @@ -162,3 +162,18 @@ def hat(tangent_vector: torch.Tensor) -> torch.Tensor: matrix[:, 1, 0] = theta matrix[:, :2, 2] = u return matrix + + @staticmethod + def vee(matrix: torch.Tensor) -> torch.Tensor: + _check = matrix.ndim == 3 and matrix.shape[1:] == (3, 3) + _check &= torch.allclose(matrix[:, 0, 1], -matrix[:, 1, 0]) + _check &= matrix[:, 0, 0].abs().max().item() < theseus.constants.EPS + _check &= matrix[:, 1, 1].abs().max().item() < theseus.constants.EPS + _check &= matrix[:, 2, 2].abs().max().item() < theseus.constants.EPS + if not _check: + raise ValueError("Invalid hat matrix for SE2.") + batch_size = matrix.shape[0] + tangent_vector = torch.zeros(batch_size, 3, dtype=matrix.dtype) + tangent_vector[:, 2] = matrix[:, 1, 0] + tangent_vector[:, :2] = matrix[:, :2, 2] + return tangent_vector diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 23bfc43aa..4acb7d4a1 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -2,6 +2,8 @@ import torch +import theseus.constants + from .lie_group import LieGroup from .vector import Vector @@ -36,7 +38,9 @@ def dim(self) -> int: @staticmethod def _adjoint(variable: LieGroup) -> torch.Tensor: - return torch.ones(variable.shape[0], 1, 1).to(variable.device) + return torch.ones(variable.shape[0], 1, 1, dtype=variable.data.dtype).to( + variable.device + ) @staticmethod def exp_map(tangent_vector: torch.Tensor) -> LieGroup: @@ -107,3 +111,13 @@ def hat(tangent_vector: torch.Tensor) -> torch.Tensor: matrix[:, 0, 1] = -tangent_vector.view(-1) matrix[:, 1, 0] = tangent_vector.view(-1) return matrix + + @staticmethod + def vee(matrix: torch.Tensor) -> torch.Tensor: + _check = matrix.ndim == 3 and matrix.shape[1:] == (2, 2) + _check &= matrix[:, 0, 0].abs().max().item() < theseus.constants.EPS + _check &= matrix[:, 1, 1].abs().max().item() < theseus.constants.EPS + _check &= torch.allclose(matrix[:, 0, 1], -matrix[:, 1, 0]) + if not _check: + raise ValueError("Invalid hat matrix for SO2.") 
+        return matrix[:, 1, 0].clone().view(-1, 1)
diff --git a/theseus/geometry/tests/test_lie_group.py b/theseus/geometry/tests/test_lie_group.py
index 4f11238a7..46e29d1fb 100644
--- a/theseus/geometry/tests/test_lie_group.py
+++ b/theseus/geometry/tests/test_lie_group.py
@@ -39,9 +39,25 @@ def _check_inverse(group):
     # assert torch.allclose(jac, -torch.ones(batch_size, 1, 1))
 
 
+def _check_adjoint(group, tangent_vector):
+    group_matrix = group.to_matrix()
+    tangent_left = group.__class__.adjoint(group) @ tangent_vector.unsqueeze(2)
+    tangent_right = group.__class__.vee(
+        group_matrix @ group.hat(tangent_vector) @ group.inverse().to_matrix()
+    )
+    assert torch.allclose(tangent_left.squeeze(2), tangent_right, atol=EPS)
+
+
 # ----------------------------------------------------------------------------
 # -------------------------------- SO2 tests ---------------------------------
 # ----------------------------------------------------------------------------
+def _create_random_so2(batch_size, rng):
+    theta = torch.rand(batch_size, 1, generator=rng).double() * 2 * np.pi - np.pi
+    return SO2(theta=theta)
+
+
 def test_exp_map_so2():
     for batch_size in [1, 20, 100]:
         theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1)
@@ -58,10 +74,8 @@ def test_compose_so2():
     rng = torch.Generator()
     rng.manual_seed(0)
     for batch_size in [1, 20, 100]:
-        theta_1 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
-        theta_2 = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
-        so2_1 = SO2(theta=theta_1)
-        so2_2 = SO2(theta=theta_2)
+        so2_1 = _create_random_so2(batch_size, rng)
+        so2_2 = _create_random_so2(batch_size, rng)
         _check_compose(so2_1, so2_2)
 
 
@@ -69,8 +83,7 @@ def test_inverse_so2():
     rng = torch.Generator()
     rng.manual_seed(0)
     for batch_size in [1, 20, 100]:
-        theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
-        so2 = SO2(theta=theta)
+        so2 = _create_random_so2(batch_size, rng)
         _check_inverse(so2)
 
 
@@ -78,18 +91,32 @@ def test_rotate_so2():
     batch_size = 20
     rng = torch.Generator()
     rng.manual_seed(0)
-    theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
-    so2 = SO2(theta=theta)
-    so2.update_from_angle(theta)
-    vector = Vector(2, data=torch.randn(batch_size, 2))
+    so2 = _create_random_so2(batch_size, rng)
+    vector = Vector(2, data=torch.randn(batch_size, 2).double())
     rotated_vector = so2.rotate(vector)
     expected_data = so2.to_matrix() @ vector.data.unsqueeze(2)
     assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS)
 
 
+def test_adjoint_so2():
+    rng = torch.Generator()
+    rng.manual_seed(0)
+    for batch_size in [1, 20, 100]:
+        so2 = _create_random_so2(batch_size, rng)
+        tangent = torch.randn(batch_size, 1).double()
+        _check_adjoint(so2, tangent)
+
+
 # ----------------------------------------------------------------------------
 # -------------------------------- SE2 tests ---------------------------------
 # ----------------------------------------------------------------------------
+def _create_random_se2(batch_size, rng):
+    theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
+    u = torch.randn(batch_size, 2)
+    tangent_vector = torch.cat([u, theta], dim=1)
+    return SE2.exp_map(tangent_vector.double())
+
+
 def test_exp_map_se2():
     for batch_size in [1, 20, 100]:
         theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
         u = torch.randn(batch_size, 2)
         tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1)
         _check_exp_map(tangent_vector.double(), SE2)
 
 
 def test_log_map_se2():
     for batch_size in [1, 20, 100]:
         theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
         u = torch.randn(batch_size, 2)
         tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1)
         _check_log_map(tangent_vector, SE2)
 
 
-def 
_create_random_se2(batch_size, rng): - theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - u = torch.randn(batch_size, 2) - tangent_vector = torch.cat([u, theta], dim=1) - return SE2.exp_map(tangent_vector.double()) - - def test_compose_se2(): rng = torch.Generator() rng.manual_seed(0) @@ -128,3 +148,12 @@ def test_inverse_se2(): for batch_size in [1, 20, 100]: se2 = _create_random_se2(batch_size, rng) _check_inverse(se2) + + +def test_adjoint_se2(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2 = _create_random_se2(batch_size, rng) + tangent = torch.randn(batch_size, 3).double() + _check_adjoint(se2, tangent) From 9ede240d187b22ca6d66a6efaef27130a0e1279a Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 7 Jun 2021 05:55:50 -0700 Subject: [PATCH 25/39] Method _copy_impl() of subclasses of Variable must now copy everything. --- theseus/core/tests/common.py | 6 +++--- theseus/core/tests/test_factor.py | 2 +- theseus/core/tests/test_variable.py | 4 ++-- theseus/core/variable.py | 8 +++----- theseus/geometry/lie_group.py | 4 ++++ theseus/geometry/se2.py | 9 ++++++--- theseus/geometry/so2.py | 10 ++++++---- theseus/geometry/vector.py | 8 ++++++-- 8 files changed, 31 insertions(+), 20 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 2d330ed04..12ae7c849 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -5,7 +5,7 @@ class MockVar(theseus.core.Variable): def __init__(self, length, data=None, name=None): - super().__init__(length, data=None, name=name) + super().__init__(length, data=data, name=name) def _init_data(self, length): self.data = torch.empty(1, length) @@ -21,8 +21,8 @@ def _local(variable1, variable2): def _retract(variable, delta): pass - def _copy_impl(self): - return MockVar(self.data.shape[1]) + def _copy_impl(self, new_name=None): + return MockVar(self.data.shape[1], data=self.data.clone(), name=new_name) class MockPrecision(theseus.core.Precision): diff --git a/theseus/core/tests/test_factor.py b/theseus/core/tests/test_factor.py index faddf6c25..0f998c37c 100644 --- a/theseus/core/tests/test_factor.py +++ b/theseus/core/tests/test_factor.py @@ -38,7 +38,7 @@ def test_default_name(): for var in range(num_vars): name = str(np.random.randint(111111, high=1000000)) names.append(name) - variables.append(MockVar(1, 1, name=name)) + variables.append(MockVar(1, torch.ones(1, 1), name=name)) factor_name = ".".join(names) factor = MockFactor(variables, MockPrecision(torch.ones(1))) assert factor.name == factor_name diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index 6901f28a2..97e38358f 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -27,9 +27,9 @@ def _check_copy(var, new_var, new_name): def test_var_shape(): for sz in range(100): - data = torch.ones(sz) + data = torch.ones(1, sz) var = MockVar(sz, data=data) - assert (1,) + data.shape == var.shape + assert data.shape == var.shape def test_update_shape_check(): diff --git a/theseus/core/variable.py b/theseus/core/variable.py index 7722346ac..f42797f66 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -62,17 +62,15 @@ def local(self, variable2: "Variable") -> torch.Tensor: def retract(self, delta: torch.Tensor) -> "Variable": return self.__class__._retract(self, delta) - # Must copy everything that is not name and data + # Must copy everything @abc.abstractmethod - def _copy_impl(self) -> 
"Variable": + def _copy_impl(self, new_name: Optional[str] = None) -> "Variable": pass def copy(self, new_name: Optional[str] = None) -> "Variable": if not new_name: new_name = f"{self.name}_copy" - var_copy = self._copy_impl() - var_copy.name = new_name - var_copy.data = self.data.clone() + var_copy = self._copy_impl(new_name=new_name) return var_copy def __deepcopy__(self, memo): diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py index 4e12638b9..75cbc71f9 100644 --- a/theseus/geometry/lie_group.py +++ b/theseus/geometry/lie_group.py @@ -107,6 +107,10 @@ def _retract(variable: Variable, delta: torch.Tensor) -> Variable: variable = cast(LieGroup, variable) return cast(LieGroup, variable.compose(variable.exp_map(delta))) + # only added to avoid casting downstream + def copy(self, new_name: Optional[str] = None) -> "LieGroup": + return cast(LieGroup, super().copy(new_name=new_name)) + # Alias for LieGroup.adjoint() def adjoint(variable: LieGroup) -> torch.Tensor: diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 0f8e29706..eeee6d8cb 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -3,7 +3,6 @@ import torch import theseus.constants -from theseus.core import Variable from .lie_group import LieGroup from .so2 import SO2 @@ -125,5 +124,9 @@ def _inverse( ) -> Union[LieGroup, Tuple[LieGroup, torch.Tensor]]: pass - def _copy_impl(self) -> Variable: - return SE2() + def _copy_impl(self, new_name: Optional[str] = None) -> "SE2": + return SE2(data=self.data.clone(), name=new_name) + + # only added to avoid casting downstream + def copy(self, new_name: Optional[str] = None) -> "SE2": + return cast(SE2, super().copy(new_name=new_name)) diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index a68cca036..e81e60229 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -2,8 +2,6 @@ import torch -from theseus.core import Variable - from .lie_group import LieGroup from .vector import Vector @@ -103,5 +101,9 @@ def to_matrix(self) -> torch.Tensor: matrix[:, 1, 1] = cosine return matrix - def _copy_impl(self) -> Variable: - return SO2() + def _copy_impl(self, new_name: Optional[str] = None) -> "SO2": + return SO2(data=self.data.clone(), name=new_name) + + # only added to avoid casting downstream + def copy(self, new_name: Optional[str] = None) -> "SO2": + return cast(SO2, super().copy(new_name=new_name)) diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 437317a21..80bd1ca06 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -195,5 +195,9 @@ def _log_map(variable1: LieGroup) -> torch.Tensor: def __hash__(self): return id(self) - def _copy_impl(self) -> Variable: - return Vector(self.dim()) + def _copy_impl(self, new_name: Optional[str] = None) -> "Vector": + return Vector(self.dim(), data=self.data.clone()) + + # only added to avoid casting downstream + def copy(self, new_name: Optional[str] = None) -> "Vector": + return cast(Vector, super().copy(new_name=new_name)) From 9d47ece03a27a13ebdf45ada1720d9cff274a2e3 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 7 Jun 2021 05:57:15 -0700 Subject: [PATCH 26/39] Method _copy_impl() of subclasses of Precision must now copy everything. 
---
 theseus/core/precision.py    | 16 ++++++++--------
 theseus/core/tests/common.py |  6 +++---
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/theseus/core/precision.py b/theseus/core/precision.py
index adfc8ff0b..ed0e1cece 100644
--- a/theseus/core/precision.py
+++ b/theseus/core/precision.py
@@ -54,15 +54,13 @@ def weight_jacobians_and_error(
     ) -> Tuple[List[torch.Tensor], torch.Tensor]:
         pass
 
-    # Must copy everything that is not name and data
+    # Must copy everything
     @abc.abstractmethod
     def _copy_impl(self) -> "Precision":
         pass
 
     def copy(self) -> "Precision":
-        new_cov = self._copy_impl()
-        new_cov.data = self.data.clone()
-        return new_cov
+        return self._copy_impl()
 
     def __deepcopy__(self, memo):
         if id(self) in memo:
@@ -115,8 +113,10 @@ def weight_jacobians_and_error(
             jac.data.mul_(self.data)
         return jacobians, error
 
-    def _copy_impl(self) -> Precision:
-        return ScalePrecision(self.scale, learnable=False)
+    def _copy_impl(self) -> "ScalePrecision":
+        new_prec = ScalePrecision(self.scale)
+        new_prec.data = self.data.clone()
+        return new_prec
 
 
 # Note that these operations are in-place
@@ -161,5 +161,5 @@ def weight_jacobians_and_error(
         jac.data.mul_(self.data.view(1, -1, 1))
         return jacobians, error
 
-    def _copy_impl(self) -> Precision:
-        return DiagonalPrecision(self.diagonal, data=None, learnable=False)
+    def _copy_impl(self) -> "DiagonalPrecision":
+        return DiagonalPrecision(self.diagonal, data=self.data.clone())
diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py
index 12ae7c849..7b84255cc 100644
--- a/theseus/core/tests/common.py
+++ b/theseus/core/tests/common.py
@@ -38,8 +38,8 @@ def weight_error(self, error):
     def weight_jacobians_and_error(self, jacobians, error):
         pass
 
-    def _copy_impl(self):
-        return MockPrecision(self.data)
+    def _copy_impl(self, new_name=None):
+        return MockPrecision(self.data.clone())
 
 
 class NullPrecision(theseus.core.Precision):
@@ -55,7 +55,7 @@ def weight_error(self, error):
     def weight_jacobians_and_error(self, jacobians, error):
         return jacobians, error
 
-    def _copy_impl(self):
+    def _copy_impl(self, new_name=None):
         return NullPrecision()
 

From 272d9d1726ee97c3b671d46ef14468476d2ff82b Mon Sep 17 00:00:00 2001
From: Luis Pineda
Date: Mon, 7 Jun 2021 05:58:20 -0700
Subject: [PATCH 27/39] Method _copy_impl() of subclasses of Factor must now copy everything.

---
 theseus/core/factor.py                     | 19 +++++--------------
 theseus/core/tests/common.py               |  6 ++++--
 theseus/embodied/measurements/between.py   |  9 +++++----
 theseus/embodied/misc/gaussian_variable.py | 10 ++++++----
 4 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/theseus/core/factor.py b/theseus/core/factor.py
index b6895b275..091ba45fc 100644
--- a/theseus/core/factor.py
+++ b/theseus/core/factor.py
@@ -1,5 +1,4 @@
 import abc
-import copy
 from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple
 
 import torch
@@ -61,14 +60,9 @@ def weighted_jacobians_error(
     def __len__(self):
         return len(self.variables)
 
-    # Must create copies of any member objects except the name and
-    # the variables (the original ones can be kept, as they will be
-    # replaced by factor.copy()).
-    # However, if any internal variables are stored in a container,
-    # another container of the same size must be created and populated
-    # with variables (it's OK if it's the original ones).
+ # Must copy everything @abc.abstractmethod - def _copy_impl(self) -> "Factor": + def _copy_impl(self, new_name: Optional[str] = None) -> "Factor": pass def copy( @@ -76,14 +70,11 @@ def copy( ) -> "Factor": if not new_name: new_name = f"{self.name}_copy" - new_factor = self._copy_impl() - new_factor.name = new_name + new_factor = self._copy_impl(new_name=new_name) new_factor.precision = self.precision.copy() if keep_variable_names: - new_variables = [v.copy(new_name=v.name) for v in self.variables] - else: - new_variables = copy.deepcopy(self.variables) - new_factor.variables = new_variables + for i, v in enumerate(new_factor.variables): + v.name = self.variables[i].name return new_factor def __deepcopy__(self, memo): diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index 7b84255cc..bbbbe1fa3 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -81,8 +81,10 @@ def _get_variables_impl(self): def set_variable_at(self, variable, idx): self._variables[idx] = variable - def _copy_impl(self): - return MockFactor([v for v in self._variables], self.precision.copy()) + def _copy_impl(self, new_name=None): + return MockFactor( + [v.copy() for v in self._variables], self.precision.copy(), name=new_name + ) def create_mock_factors(data=None, precision=NullPrecision()): diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py index a88114f4f..c3677a6ea 100644 --- a/theseus/embodied/measurements/between.py +++ b/theseus/embodied/measurements/between.py @@ -60,10 +60,11 @@ def set_variable_at(self, variable: Variable, idx: int): else: raise ValueError("Index out of range.") - def _copy_impl(self) -> Factor: + def _copy_impl(self, new_name: Optional[str] = None) -> "Between": return Between( - self.v0, - self.v1, + self.v0.copy(), + self.v1.copy(), self.precision.copy(), - self.measurement.copy(), # type: ignore + self.measurement.copy(), + name=new_name, ) diff --git a/theseus/embodied/misc/gaussian_variable.py b/theseus/embodied/misc/gaussian_variable.py index 5aba34502..0682071d2 100644 --- a/theseus/embodied/misc/gaussian_variable.py +++ b/theseus/embodied/misc/gaussian_variable.py @@ -19,11 +19,11 @@ def __init__( "Prior variable type inconsistent with variables for the graph." ) self.var = var - self._prior = prior + self.prior = prior super().__init__(precision, name=name) def error(self) -> torch.Tensor: - return self._prior.local(self.variables[0]) + return self.prior.local(self.variables[0]) def jacobians(self) -> List[torch.Tensor]: return [torch.eye(self.dim()).to(self.variables[0].device)] @@ -46,5 +46,7 @@ def set_variable_at(self, variable: Variable, idx: int): else: raise ValueError("Index out of range.") - def _copy_impl(self) -> Factor: - return GaussianVariable(self.var, self.precision.copy(), self.prior.copy()) # type: ignore + def _copy_impl(self, new_name: Optional[str] = None) -> "GaussianVariable": + return GaussianVariable( # type: ignore + self.var.copy(), self.precision.copy(), self.prior.copy(), name=new_name + ) From 6bc48df336a64029b09603842765e7cfeb3ffec4 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 7 Jun 2021 08:26:19 -0700 Subject: [PATCH 28/39] Added copy tests for lie groups. 
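
The invariant these tests pin down, in short (illustrative usage, assuming the
SO2 API added earlier in this series):

    import torch

    from theseus.geometry import SO2

    so2 = SO2(theta=torch.zeros(1, 1), name="rot")
    twin = so2.copy(new_name="rot_copy")
    assert twin is not so2                      # a fresh object,
    assert twin.data is not so2.data            # with its own storage,
    assert torch.allclose(twin.data, so2.data)  # holding equal values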
--- theseus/geometry/tests/test_lie_group.py | 24 ++++++++++++++++++++++++ theseus/geometry/tests/test_vector.py | 14 ++++++++++++++ theseus/geometry/vector.py | 2 +- 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/theseus/geometry/tests/test_lie_group.py b/theseus/geometry/tests/test_lie_group.py index 46e29d1fb..7eb4a29c3 100644 --- a/theseus/geometry/tests/test_lie_group.py +++ b/theseus/geometry/tests/test_lie_group.py @@ -50,6 +50,17 @@ def _check_adjoint(group, tangent_vector): assert torch.allclose(tangent_left.squeeze(2), tangent_right, atol=EPS) +def _check_copy(group): + group.name = "old" + new_group = group.copy(new_name="new") + assert group is not new_group + assert group.data is not new_group.data + assert torch.allclose(group.data, new_group.data) + assert new_group.name == "new" + new_group_no_name = group.copy() + assert new_group_no_name.name == f"{group.name}_copy" + + # ---------------------------------------------------------------------------- # -------------------------------- SO2 tests --------------------------------- # ---------------------------------------------------------------------------- @@ -107,6 +118,13 @@ def test_adjoint_so2(): _check_adjoint(so2, tangent) +def test_copy_so2(): + rng = torch.Generator() + rng.manual_seed(0) + so2 = _create_random_so2(1, rng) + _check_copy(so2) + + # ---------------------------------------------------------------------------- # -------------------------------- SE2 tests --------------------------------- # ---------------------------------------------------------------------------- @@ -157,3 +175,9 @@ def test_adjoint_se2(): se2 = _create_random_se2(batch_size, rng) tangent = torch.randn(batch_size, 3).double() _check_adjoint(se2, tangent) + + +def test_copy_se2(): + rng = torch.Generator() + se2 = _create_random_se2(1, rng) + _check_copy(se2) diff --git a/theseus/geometry/tests/test_vector.py b/theseus/geometry/tests/test_vector.py index 0a78f6790..9da7b55cf 100644 --- a/theseus/geometry/tests/test_vector.py +++ b/theseus/geometry/tests/test_vector.py @@ -186,3 +186,17 @@ def test_update(): data = torch.rand(batch_size, sz) v.update(data) assert torch.allclose(data, v.data) + + +def test_copy(): + for i in range(1, 4): + for j in range(1, 5): + t1 = torch.rand(i, j) + v = Vector(j, data=t1.clone(), name="v") + v_new = v.copy(new_name="v_new") + assert v_new is not v + assert v_new.data is not v.data + assert torch.allclose(v_new.data, v.data) + assert v_new.name == "v_new" + v_new_no_name = v.copy() + assert v_new_no_name.name == "v_copy" diff --git a/theseus/geometry/vector.py b/theseus/geometry/vector.py index 80bd1ca06..85680385e 100644 --- a/theseus/geometry/vector.py +++ b/theseus/geometry/vector.py @@ -196,7 +196,7 @@ def __hash__(self): return id(self) def _copy_impl(self, new_name: Optional[str] = None) -> "Vector": - return Vector(self.dim(), data=self.data.clone()) + return Vector(self.dim(), data=self.data.clone(), name=new_name) # only added to avoid casting downstream def copy(self, new_name: Optional[str] = None) -> "Vector": From da268bb825d02718ac8406958570a5d191e26671 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Tue, 8 Jun 2021 08:32:25 -0700 Subject: [PATCH 29/39] Added tests for compose and inverse jacobians. 
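
The analytic jacobians are checked against a finite-difference approximation
built from retract() and local() on the tangent space. The underlying scheme is
ordinary central differencing; as a 1-D analogue of what the new
_numeric_jacobian helper computes:

    import torch


    def numeric_derivative(f, x, eps=1e-3):
        # Central differences; the approximation error is O(eps ** 2).
        return (f(x + eps) - f(x - eps)) / (2 * eps)


    # For example, d/dx sin(x) at x = 0.3 is close to cos(0.3):
    x = torch.tensor(0.3)
    assert torch.allclose(numeric_derivative(torch.sin, x), torch.cos(x), atol=1e-4)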
--- theseus/core/variable.py | 2 + theseus/geometry/lie_group.py | 14 ++++--- theseus/geometry/se2.py | 4 +- theseus/geometry/so2.py | 13 +++++- theseus/geometry/tests/test_lie_group.py | 51 +++++++++++++++++++++--- 5 files changed, 72 insertions(+), 12 deletions(-) diff --git a/theseus/core/variable.py b/theseus/core/variable.py index 3ee1eff71..7ffd2dbe9 100644 --- a/theseus/core/variable.py +++ b/theseus/core/variable.py @@ -15,6 +15,8 @@ # - `dim`: # of degrees of freedom of the variable # # Constructor can optionally provide an initial data value as a keyword argument. +# TODO: add dtype property to Variable +# TODO: make dim a property class Variable(abc.ABC): def __init__( self, diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py index 4e12638b9..e447b1992 100644 --- a/theseus/geometry/lie_group.py +++ b/theseus/geometry/lie_group.py @@ -85,14 +85,18 @@ def _compose_jacobians( ) -> Tuple[torch.Tensor, torch.Tensor]: if not type(group1) is type(group2): raise ValueError("Lie groups for compose must be of the same type.") - g1_inverse = cast(LieGroup, group1.inverse()) - jac1 = g1_inverse.adjoint() - jac2 = torch.eye(group2.dim()).repeat(group2.shape[0], 1, 1).to(group2.device) + g2_inverse = cast(LieGroup, group2.inverse()) + jac1 = g2_inverse.adjoint() + jac2 = ( + torch.eye(group2.dim(), dtype=group1.data.dtype) + .repeat(group2.shape[0], 1, 1) + .to(group2.device) + ) return jac1, jac2 @staticmethod - def _inverse_jacobian(group1: "LieGroup") -> torch.Tensor: - return -group1.adjoint() + def _inverse_jacobian(group: "LieGroup") -> torch.Tensor: + return -group.adjoint() @staticmethod def _local(variable1: Variable, variable2: Variable) -> torch.Tensor: diff --git a/theseus/geometry/se2.py b/theseus/geometry/se2.py index 86949d685..e4543a3cd 100644 --- a/theseus/geometry/se2.py +++ b/theseus/geometry/se2.py @@ -59,7 +59,9 @@ def _log_map(variable: LieGroup) -> torch.Tensor: idx_small_vals = cos_minus_one.abs() < theseus.constants.EPS if idx_small_vals.any(): theta_sq_at_idx = theta[idx_small_vals] ** 2 - halftheta_by_tan_of_halftheta[idx_small_vals] = -theta_sq_at_idx / 12 + 1 + halftheta_by_tan_of_halftheta[idx_small_vals] = ( + -theta_sq_at_idx.view(-1) / 12 + 1 + ) v_inv = torch.empty(se2.shape[0], 2, 2, dtype=variable.data.dtype) v_inv[:, 0, 0] = halftheta_by_tan_of_halftheta diff --git a/theseus/geometry/so2.py b/theseus/geometry/so2.py index 4acb7d4a1..1e2e94e70 100644 --- a/theseus/geometry/so2.py +++ b/theseus/geometry/so2.py @@ -86,9 +86,20 @@ def _inverse( return inverse def rotate(self, point: Vector) -> Vector: + if point.dim() != 2: + raise ValueError("SO2 can only rotate 2-D vectors.") + if ( + point.shape[0] != self.shape[0] + and point.shape[0] != 1 + and self.shape[0] != 1 + ): + raise ValueError( + "Input point batch size is not broadcastable with group batch size." 
+ ) + batch_size = max(point.shape[0], self.shape[0]) px, py = point.data[:, 0], point.data[:, 1] cosine, sine = self.to_cos_sin() - new_point_data = torch.empty_like(point.data) + new_point_data = torch.empty(batch_size, 2, dtype=self.data.dtype) new_point_data[:, 0] = cosine * px - sine * py new_point_data[:, 1] = sine * px + cosine * py return Vector(2, data=new_point_data) diff --git a/theseus/geometry/tests/test_lie_group.py b/theseus/geometry/tests/test_lie_group.py index 46e29d1fb..56381561e 100644 --- a/theseus/geometry/tests/test_lie_group.py +++ b/theseus/geometry/tests/test_lie_group.py @@ -5,6 +5,39 @@ from theseus.constants import EPS from theseus.geometry import SE2, SO2, Vector +# Not sure if this is the right name for this test file, considering that Vector is +# also a LieGroup and has its own test file. Merging the two seems undesirable +# since Vector has a lot of additional operations. Maybe we can rename as +# test_lie_group_ops.py and move some of Vector tests here, correspondingly? + + +def _numeric_jacobian(function, group_args): + batch_size = group_args[0].shape[0] + dim = group_args[0].dim() + delta_mag = 1e-3 + + def _compute(group_idx): + jac = torch.zeros(batch_size, dim, dim, dtype=group_args[0].data.dtype) + for d in range(dim): + delta = torch.zeros(1, dim, dtype=group_args[group_idx].data.dtype) + delta[:, d] = delta_mag + + group_plus = group_args[group_idx].retract(delta) + group_minus = group_args[group_idx].retract(-delta) + group_plus_args = [g for g in group_args] + group_plus_args[group_idx] = group_plus + group_minus_args = [g for g in group_args] + group_minus_args[group_idx] = group_minus + + diff = function(group_minus_args).local(function(group_plus_args)) + jac[:, :, d] = diff / (2 * delta_mag) + return jac + + jacs = [] + for group_idx in range(len(group_args)): + jacs.append(_compute(group_idx)) + return jacs + def _check_exp_map(tangent_vector, group_cls): group = group_cls.exp_map(tangent_vector) @@ -22,21 +55,29 @@ def _check_log_map(tangent_vector, group_cls): def _check_compose(group_1, group_2): - composition = group_1.compose(group_2, get_jacobians=False) + composition, (jac1, jac2) = group_1.compose(group_2, get_jacobians=True) expected_matrix = group_1.to_matrix() @ group_2.to_matrix() + expected_jacs = _numeric_jacobian( + lambda groups: groups[0].compose(groups[1], get_jacobians=False), + [group_1, group_2], + ) assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) - # assert torch.allclose(jac1, torch.ones(batch_size, 1, 1)) - # assert torch.allclose(jac2, torch.ones(batch_size, 1, 1)) + assert torch.allclose(jac1, expected_jacs[0]) + assert torch.allclose(jac2, expected_jacs[1]) def _check_inverse(group): tangent_vector = group.log_map() inverse_group = group.exp_map(-tangent_vector) - inverse_result = group.inverse(get_jacobian=False) + inverse_result, jac = group.inverse(get_jacobian=True) + expected_jac = _numeric_jacobian( + lambda groups: groups[0].inverse(get_jacobian=False), + [group], + ) assert torch.allclose( inverse_group.to_matrix(), inverse_result.to_matrix(), atol=EPS ) - # assert torch.allclose(jac, -torch.ones(batch_size, 1, 1)) + assert torch.allclose(jac, expected_jac[0]) def _check_adjoint(group, tangent_vector): From 7e3bb36ee9b926a39141128190906397b16897bf Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 9 Jun 2021 10:32:49 -0700 Subject: [PATCH 30/39] Added code to make copies of precisions learnable if original is also learnable. 
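
A small usage sketch of the intended behavior, mirroring the new tests in this
diff:

    import copy

    import theseus.core as thcore

    p = thcore.precision.ScalePrecision(2.0, learnable=True)
    for q in (p.copy(), copy.deepcopy(p)):
        # Copies detach the data tensor but keep the learnable flag.
        assert q.learnable == p.learnable
        assert q.data is not p.data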
---
 theseus/core/precision.py            | 40 ++++++++++++++--------------
 theseus/core/tests/test_precision.py | 32 ++++++++++++++++------
 theseus/geometry/lie_group.py        |  2 +-
 3 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/theseus/core/precision.py b/theseus/core/precision.py
index ed0e1cece..e034a1862 100644
--- a/theseus/core/precision.py
+++ b/theseus/core/precision.py
@@ -7,7 +7,6 @@
 
 
 # Abstract class for representing precision functions (inverse covariance)
-# (equivalent to LossFunction in minisam).
 # Concrete classes must implement two methods:
 #   - `weight_error`: return an error tensor weighted by the precision
 #   - `weightJacobiansError`: returns jacobians and errors weighted by the precision
@@ -20,6 +19,7 @@ def __init__(
         param_name: Optional[str] = None,
     ):
         self.data: torch.Tensor
+        self.learnable = learnable
         if data is not None:
             self.data = data
         else:
@@ -84,24 +84,23 @@ def update(self, input_batch: torch.Tensor):
         pass
 
 
-# Note that these operations are in-place
-# (consider renaming weight functions as in minisam - or adding underscore suffix)
 class ScalePrecision(Precision):
     _ids = count(0)
 
-    def __init__(self, scale: float, learnable: bool = False):
+    def __init__(
+        self, scale: float, data: Optional[torch.Tensor] = None, learnable: bool = False
+    ):
         # TODO if we keep this _id mechanism, add test for this
         _id = next(self._ids)
         param_name = f"scale_cov_{_id}"
         self.scale = scale
-        super().__init__(scale, learnable=learnable, param_name=param_name)
+        super().__init__(scale, data=data, learnable=learnable, param_name=param_name)
 
     def _init_data(self, scale: float):  # type: ignore
-        self.data = torch.tensor(scale)
+        self.data = torch.tensor(scale).float()
 
     def weight_error(self, error: torch.Tensor) -> torch.Tensor:
-        error.mul_(self.data)
-        return error
+        return error * self.data
 
     def weight_jacobians_and_error(
         self,
@@ -110,17 +109,16 @@ def weight_jacobians_and_error(
     ) -> Tuple[List[torch.Tensor], torch.Tensor]:
         error.mul_(self.data)
-        for jac in jacobians:
-            jac.data.mul_(self.data)
+        jacobians = [jac * self.data for jac in jacobians]
         return jacobians, error
 
     def _copy_impl(self) -> "ScalePrecision":
-        new_prec = ScalePrecision(self.scale)
-        new_prec.data = self.data.clone()
+        new_prec = ScalePrecision(
+            self.scale, data=self.data.detach(), learnable=self.learnable
+        )
         return new_prec
 
 
-# Note that these operations are in-place
-# (consider renaming weight functions as in minisam - or adding underscore suffix)
 class DiagonalPrecision(Precision):
     _ids = count(0)
 
@@ -134,20 +132,20 @@ def __init__(
         _id = next(self._ids)
         param_name = f"diagonal_cov_{_id}"
         self.diagonal = diagonal
-        super().__init__(diagonal, learnable=learnable, param_name=param_name)
+        super().__init__(
+            diagonal, data=data, learnable=learnable, param_name=param_name
+        )
 
     def _init_data(self, diagonal: float):  # type: ignore
         if isinstance(diagonal, torch.Tensor):
             self.data = diagonal
         else:
-            self.data = torch.tensor(diagonal)
+            self.data = torch.tensor(diagonal).float()
         if self.data.ndim != 1:
             raise ValueError("DiagonalPrecision only accepts arrays of dim. 1.")
 
     def weight_error(self, error: torch.Tensor) -> torch.Tensor:
-        # TODO stop making these inplace (safer, and this is probably not faster anyway)
-        error.mul_(self.data)
-        return error
+        return error * self.data
 
     def weight_jacobians_and_error(
         self,
@@ -158,8 +156,10 @@ def weight_jacobians_and_error(
-        for jac in jacobians:
-            # Jacobian is batch_size x factor_dim x var_dim
-            # This left multiplies the weights (inv cov.) to jacobian
-            jac.data.mul_(self.data.view(1, -1, 1))
+        # Jacobian is batch_size x factor_dim x var_dim
+        # This left multiplies the weights (inv cov.) into each jacobian
+        jacobians = [jac * self.data.view(1, -1, 1) for jac in jacobians]
         return jacobians, error
 
     def _copy_impl(self) -> "DiagonalPrecision":
-        return DiagonalPrecision(self.diagonal, data=self.data.clone())
+        return DiagonalPrecision(
+            self.diagonal, data=self.data.detach(), learnable=self.learnable
+        )
diff --git a/theseus/core/tests/test_precision.py b/theseus/core/tests/test_precision.py
index e1fecb96f..edf13dc7a 100644
--- a/theseus/core/tests/test_precision.py
+++ b/theseus/core/tests/test_precision.py
@@ -1,9 +1,10 @@
 import copy
 
-import numpy as np
 import pytest  # noqa: F401
 import torch
 
+import theseus.core as thcore
+
 from .common import MockPrecision
 
 
@@ -14,17 +15,32 @@ def test_cov_shape():
     assert data.shape == precision.shape()
 
 
-def test_copy():
-    def _check_copy(precision, new_precision):
-        np.testing.assert_almost_equal(
-            precision.data.numpy(), new_precision.data.numpy()
-        )
-        assert precision is not new_precision
-        assert precision.data is not new_precision.data
+def _check_copy(precision, new_precision):
+    assert precision is not new_precision
+    assert precision.data is not new_precision.data
+    assert torch.allclose(precision.data, new_precision.data)
+    assert precision.data.requires_grad == new_precision.data.requires_grad
+    assert precision.learnable == new_precision.learnable
 
+
+def test_copy():
     for i in range(100):
         size = torch.randint(low=1, high=21, size=(1,)).item()
         data = torch.rand(size=(size,))
         precision = MockPrecision(data)
         _check_copy(precision, precision.copy())
         _check_copy(precision, copy.deepcopy(precision))
+
+
+def test_copy_scale_precision():
+    for learnable in [True, False]:
+        p1 = thcore.precision.ScalePrecision(2, learnable=learnable)
+        _check_copy(p1, p1.copy())
+        _check_copy(p1, copy.deepcopy(p1))
+
+
+def test_copy_diagonal_precision():
+    for learnable in [True, False]:
+        p1 = thcore.precision.DiagonalPrecision([1, 2, 3], learnable=learnable)
+        _check_copy(p1, p1.copy())
+        _check_copy(p1, copy.deepcopy(p1))
diff --git a/theseus/geometry/lie_group.py b/theseus/geometry/lie_group.py
index 5dd66a863..7f62ef3d3 100644
--- a/theseus/geometry/lie_group.py
+++ b/theseus/geometry/lie_group.py
@@ -107,7 +107,7 @@ def _local(variable1: Variable, variable2: Variable) -> torch.Tensor:
         return composition.log_map()
 
     @staticmethod
-    def _retract(variable: Variable, delta: torch.Tensor) -> Variable:
+    def _retract(variable: Variable, delta: torch.Tensor) -> "LieGroup":
         variable = cast(LieGroup, variable)
         return cast(LieGroup, variable.compose(variable.exp_map(delta)))
 

From 87777488840081e1b34b29b39a67280bc0add056 Mon Sep 17 00:00:00 2001
From: Luis Pineda
Date: Wed, 9 Jun 2021 11:37:36 -0700
Subject: [PATCH 31/39] Added test for Between.copy()

---
 theseus/embodied/measurements/__init__.py |  3 +++
 theseus/embodied/measurements/between.py  |  3 ++-
 .../measurements/tests/test_between.py    | 24 +++++++++++++++++++
 3 files changed, 29 insertions(+), 1 deletion(-)
 create mode 100644 theseus/embodied/measurements/__init__.py
 create mode 100644 theseus/embodied/measurements/tests/test_between.py

diff --git a/theseus/embodied/measurements/__init__.py b/theseus/embodied/measurements/__init__.py
new file mode 100644
index 000000000..67c4866ae
--- /dev/null
+++ b/theseus/embodied/measurements/__init__.py
@@ -0,0 +1,3 @@
+from .between import Between
+
+__all__ = ["Between"]
diff --git a/theseus/embodied/measurements/between.py b/theseus/embodied/measurements/between.py
index c3677a6ea..910455f29 100644
--- a/theseus/embodied/measurements/between.py
+++ 
b/theseus/embodied/measurements/between.py @@ -17,6 +17,7 @@ def __init__( ): self.v0 = v0 self.v1 = v1 + self._variables = [v0, v1] self.measurement = measurement if not isinstance(v0, v1.__class__) or not isinstance( v0, measurement.__class__ @@ -44,7 +45,7 @@ def dim(self) -> int: return self.v0.dim() def _get_variables_impl(self) -> List[Variable]: - return [self.v0, self.v1] + return self._variables # type: ignore def set_variable_at(self, variable: Variable, idx: int): if not isinstance(variable, self.v0.__class__): diff --git a/theseus/embodied/measurements/tests/test_between.py b/theseus/embodied/measurements/tests/test_between.py new file mode 100644 index 000000000..3e7a97935 --- /dev/null +++ b/theseus/embodied/measurements/tests/test_between.py @@ -0,0 +1,24 @@ +import torch + +import theseus.core as thcore +import theseus.embodied.measurements as thmeas +import theseus.geometry as thgeom + + +def test_copy(): + v0 = thgeom.SE2() + v1 = thgeom.SE2() + meas = thgeom.SE2() + factor = thmeas.Between(v0, v1, thcore.ScalePrecision(1), meas, name="b1") + + def _check_var_copy(var1, var2): + assert var1 is not var2 + assert var1.data is not var2.data + assert torch.allclose(var1.data, var2.data) + + factor2 = factor.copy(new_name="b2") + assert factor is not factor2 + _check_var_copy(factor2.v0, v0) + _check_var_copy(factor2.v1, v1) + _check_var_copy(factor2.measurement, meas) + assert factor2.name == "b2" From 8bf765825dbf0ff3eb8550169981a2d4beb1fdf4 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Wed, 9 Jun 2021 14:27:53 -0700 Subject: [PATCH 32/39] Removed duplicate precision copy from factor's copy method --- theseus/core/factor.py | 1 - theseus/embodied/__init__.py | 4 ++ .../measurements/tests/test_between.py | 24 ----------- theseus/embodied/misc/__init__.py | 3 ++ theseus/embodied/tests/test_factors.py | 43 +++++++++++++++++++ 5 files changed, 50 insertions(+), 25 deletions(-) delete mode 100644 theseus/embodied/measurements/tests/test_between.py create mode 100644 theseus/embodied/misc/__init__.py create mode 100644 theseus/embodied/tests/test_factors.py diff --git a/theseus/core/factor.py b/theseus/core/factor.py index 091ba45fc..9cb9686f0 100644 --- a/theseus/core/factor.py +++ b/theseus/core/factor.py @@ -71,7 +71,6 @@ def copy( if not new_name: new_name = f"{self.name}_copy" new_factor = self._copy_impl(new_name=new_name) - new_factor.precision = self.precision.copy() if keep_variable_names: for i, v in enumerate(new_factor.variables): v.name = self.variables[i].name diff --git a/theseus/embodied/__init__.py b/theseus/embodied/__init__.py index e69de29bb..2723e566d 100644 --- a/theseus/embodied/__init__.py +++ b/theseus/embodied/__init__.py @@ -0,0 +1,4 @@ +from .measurements import Between +from .misc import GaussianVariable + +__all__ = ["Between", "GaussianVariable"] diff --git a/theseus/embodied/measurements/tests/test_between.py b/theseus/embodied/measurements/tests/test_between.py deleted file mode 100644 index 3e7a97935..000000000 --- a/theseus/embodied/measurements/tests/test_between.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch - -import theseus.core as thcore -import theseus.embodied.measurements as thmeas -import theseus.geometry as thgeom - - -def test_copy(): - v0 = thgeom.SE2() - v1 = thgeom.SE2() - meas = thgeom.SE2() - factor = thmeas.Between(v0, v1, thcore.ScalePrecision(1), meas, name="b1") - - def _check_var_copy(var1, var2): - assert var1 is not var2 - assert var1.data is not var2.data - assert torch.allclose(var1.data, var2.data) - - factor2 = 
factor.copy(new_name="b2") - assert factor is not factor2 - _check_var_copy(factor2.v0, v0) - _check_var_copy(factor2.v1, v1) - _check_var_copy(factor2.measurement, meas) - assert factor2.name == "b2" diff --git a/theseus/embodied/misc/__init__.py b/theseus/embodied/misc/__init__.py new file mode 100644 index 000000000..82fa961a2 --- /dev/null +++ b/theseus/embodied/misc/__init__.py @@ -0,0 +1,3 @@ +from .gaussian_variable import GaussianVariable + +__all__ = ["GaussianVariable"] diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/tests/test_factors.py new file mode 100644 index 000000000..594982ca3 --- /dev/null +++ b/theseus/embodied/tests/test_factors.py @@ -0,0 +1,43 @@ +import torch + +import theseus.core as thcore +import theseus.embodied as thembod +import theseus.geometry as thgeom + + +def _check_var_copy(var1, var2): + assert var1 is not var2 + assert var1.data is not var2.data + assert torch.allclose(var1.data, var2.data) + + +def test_copy_gaussian_variable(): + v0 = thgeom.SE2() + prior = thgeom.SE2() + factor = thembod.GaussianVariable(v0, thcore.ScalePrecision(1), prior, name="name") + factor2 = factor.copy(new_name="new_name") + assert factor is not factor2 + _check_var_copy(factor2.var, v0) + _check_var_copy(factor2.prior, prior) + assert torch.allclose(factor.precision.data, factor2.precision.data) + assert factor2.name == "new_name" + + +def test_copy_between(): + v0 = thgeom.SE2() + v1 = thgeom.SE2() + meas = thgeom.SE2() + factor = thembod.Between(v0, v1, thcore.ScalePrecision(1), meas, name="name") + + def _check_var_copy(var1, var2): + assert var1 is not var2 + assert var1.data is not var2.data + assert torch.allclose(var1.data, var2.data) + + factor2 = factor.copy(new_name="new_name") + assert factor is not factor2 + _check_var_copy(factor2.v0, v0) + _check_var_copy(factor2.v1, v1) + _check_var_copy(factor2.measurement, meas) + assert torch.allclose(factor.precision.data, factor2.precision.data) + assert factor2.name == "new_name" From cbf69afb5eeab203532265d745e383d9db26c6e9 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 12:15:38 -0700 Subject: [PATCH 33/39] Split SE2 and SO2 test into two files. 
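Note: the _numeric_jacobian helper that moves into theseus/geometry/tests/common.py below estimates Jacobians by central differences in the tangent space, perturbing each group argument with retract and measuring output differences with local. For intuition, here is a minimal Euclidean analogue in plain torch, where retract is addition and local is subtraction; the function f and the name numeric_jacobian_euclidean are illustrative, not part of the library:

    import math

    import torch

    def numeric_jacobian_euclidean(f, x, delta_mag=1e-3):
        # Central differences: jac[:, d] ~ (f(x + delta) - f(x - delta)) / (2 * delta_mag),
        # with delta = delta_mag * e_d. This is the Euclidean special case of the
        # manifold version in common.py, which uses retract/local instead of +/-.
        n, m = x.numel(), f(x).numel()
        jac = torch.zeros(m, n, dtype=x.dtype)
        for d in range(n):
            delta = torch.zeros(n, dtype=x.dtype)
            delta[d] = delta_mag
            jac[:, d] = (f(x + delta) - f(x - delta)) / (2 * delta_mag)
        return jac

    # Sanity check against the analytic Jacobian of f(x) = (x0 * x1, sin(x0)).
    x0, x1 = 0.3, -1.2
    x = torch.tensor([x0, x1], dtype=torch.float64)
    f = lambda v: torch.stack([v[0] * v[1], torch.sin(v[0])])
    expected = torch.tensor([[x1, x0], [math.cos(x0), 0.0]], dtype=torch.float64)
    assert torch.allclose(numeric_jacobian_euclidean(f, x), expected, atol=1e-6)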
--- theseus/geometry/tests/common.py | 94 ++++++++++ theseus/geometry/tests/test_lie_group.py | 224 ----------------------- theseus/geometry/tests/test_se2.py | 69 +++++++ theseus/geometry/tests/test_so2.py | 76 ++++++++ 4 files changed, 239 insertions(+), 224 deletions(-) create mode 100644 theseus/geometry/tests/common.py delete mode 100644 theseus/geometry/tests/test_lie_group.py create mode 100644 theseus/geometry/tests/test_se2.py create mode 100644 theseus/geometry/tests/test_so2.py diff --git a/theseus/geometry/tests/common.py b/theseus/geometry/tests/common.py new file mode 100644 index 000000000..4d8f6a429 --- /dev/null +++ b/theseus/geometry/tests/common.py @@ -0,0 +1,94 @@ +import torch + +from theseus.constants import EPS + + +def _numeric_jacobian(function, group_args): + batch_size = group_args[0].shape[0] + dim = group_args[0].dim() + delta_mag = 1e-3 + + def _compute(group_idx): + jac = torch.zeros(batch_size, dim, dim, dtype=group_args[0].data.dtype) + for d in range(dim): + delta = torch.zeros(1, dim, dtype=group_args[group_idx].data.dtype) + delta[:, d] = delta_mag + + group_plus = group_args[group_idx].retract(delta) + group_minus = group_args[group_idx].retract(-delta) + group_plus_args = [g for g in group_args] + group_plus_args[group_idx] = group_plus + group_minus_args = [g for g in group_args] + group_minus_args[group_idx] = group_minus + + diff = function(group_minus_args).local(function(group_plus_args)) + jac[:, :, d] = diff / (2 * delta_mag) + return jac + + jacs = [] + for group_idx in range(len(group_args)): + jacs.append(_compute(group_idx)) + return jacs + + +def check_exp_map(tangent_vector, group_cls): + group = group_cls.exp_map(tangent_vector) + assert torch.allclose( + group_cls.hat(tangent_vector).matrix_exp(), + group.to_matrix(), + atol=EPS, + ) + + +def check_log_map(tangent_vector, group_cls): + assert torch.allclose( + tangent_vector, group_cls.exp_map(tangent_vector).log_map(), atol=EPS + ) + + +def check_compose(group_1, group_2): + composition, (jac1, jac2) = group_1.compose(group_2, get_jacobians=True) + expected_matrix = group_1.to_matrix() @ group_2.to_matrix() + expected_jacs = _numeric_jacobian( + lambda groups: groups[0].compose(groups[1], get_jacobians=False), + [group_1, group_2], + ) + assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) + assert torch.allclose(jac1, expected_jacs[0]) + assert torch.allclose(jac2, expected_jacs[1]) + + +def check_inverse(group): + tangent_vector = group.log_map() + inverse_group = group.exp_map(-tangent_vector) + inverse_result, jac = group.inverse(get_jacobian=True) + expected_jac = _numeric_jacobian( + lambda groups: groups[0].inverse(get_jacobian=False), + [group], + ) + assert torch.allclose( + inverse_group.to_matrix(), inverse_result.to_matrix(), atol=EPS + ) + assert torch.allclose(jac, expected_jac[0]) + + +def check_adjoint(group, tangent_vector): + group_matrix = group.to_matrix() + tangent_left = group.__class__.adjoint(group) @ tangent_vector.unsqueeze(2) + tangent_right = group.__class__.vee( + group_matrix @ group.hat(tangent_vector) @ group.inverse().to_matrix() + ) + diff = tangent_left.squeeze(2) - tangent_right + print(diff.abs().max(dim=1)) + assert torch.allclose(tangent_left.squeeze(2), tangent_right, atol=EPS) + + +def check_copy(group): + group.name = "old" + new_group = group.copy(new_name="new") + assert group is not new_group + assert group.data is not new_group.data + assert torch.allclose(group.data, new_group.data) + assert new_group.name == "new" + 
new_group_no_name = group.copy() + assert new_group_no_name.name == f"{group.name}_copy" diff --git a/theseus/geometry/tests/test_lie_group.py b/theseus/geometry/tests/test_lie_group.py deleted file mode 100644 index d1d334049..000000000 --- a/theseus/geometry/tests/test_lie_group.py +++ /dev/null @@ -1,224 +0,0 @@ -import numpy as np -import pytest # noqa: F401 -import torch - -from theseus.constants import EPS -from theseus.geometry import SE2, SO2, Vector - -# Not sure if this is the right name for this test file, considering that Vector is -# also a LieGroup and has its own test file. Merging the two seems undesirable -# since Vector has a lot of additional operations. Maybe we can rename as -# test_lie_group_ops.py and move some of Vector tests here, correspondingly? - - -def _numeric_jacobian(function, group_args): - batch_size = group_args[0].shape[0] - dim = group_args[0].dim() - delta_mag = 1e-3 - - def _compute(group_idx): - jac = torch.zeros(batch_size, dim, dim, dtype=group_args[0].data.dtype) - for d in range(dim): - delta = torch.zeros(1, dim, dtype=group_args[group_idx].data.dtype) - delta[:, d] = delta_mag - - group_plus = group_args[group_idx].retract(delta) - group_minus = group_args[group_idx].retract(-delta) - group_plus_args = [g for g in group_args] - group_plus_args[group_idx] = group_plus - group_minus_args = [g for g in group_args] - group_minus_args[group_idx] = group_minus - - diff = function(group_minus_args).local(function(group_plus_args)) - jac[:, :, d] = diff / (2 * delta_mag) - return jac - - jacs = [] - for group_idx in range(len(group_args)): - jacs.append(_compute(group_idx)) - return jacs - - -def _check_exp_map(tangent_vector, group_cls): - group = group_cls.exp_map(tangent_vector) - assert torch.allclose( - group_cls.hat(tangent_vector).matrix_exp(), - group.to_matrix(), - atol=EPS, - ) - - -def _check_log_map(tangent_vector, group_cls): - assert torch.allclose( - tangent_vector, group_cls.exp_map(tangent_vector).log_map(), atol=EPS - ) - - -def _check_compose(group_1, group_2): - composition, (jac1, jac2) = group_1.compose(group_2, get_jacobians=True) - expected_matrix = group_1.to_matrix() @ group_2.to_matrix() - expected_jacs = _numeric_jacobian( - lambda groups: groups[0].compose(groups[1], get_jacobians=False), - [group_1, group_2], - ) - assert torch.allclose(composition.to_matrix(), expected_matrix, atol=EPS) - assert torch.allclose(jac1, expected_jacs[0]) - assert torch.allclose(jac2, expected_jacs[1]) - - -def _check_inverse(group): - tangent_vector = group.log_map() - inverse_group = group.exp_map(-tangent_vector) - inverse_result, jac = group.inverse(get_jacobian=True) - expected_jac = _numeric_jacobian( - lambda groups: groups[0].inverse(get_jacobian=False), - [group], - ) - assert torch.allclose( - inverse_group.to_matrix(), inverse_result.to_matrix(), atol=EPS - ) - assert torch.allclose(jac, expected_jac[0]) - - -def _check_adjoint(group, tangent_vector): - group_matrix = group.to_matrix() - tangent_left = group.__class__.adjoint(group) @ tangent_vector.unsqueeze(2) - tangent_right = group.__class__.vee( - group_matrix @ group.hat(tangent_vector) @ group.inverse().to_matrix() - ) - diff = tangent_left.squeeze(2) - tangent_right - print(diff.abs().max(dim=1)) - assert torch.allclose(tangent_left.squeeze(2), tangent_right, atol=EPS) - - -def _check_copy(group): - group.name = "old" - new_group = group.copy(new_name="new") - assert group is not new_group - assert group.data is not new_group.data - assert torch.allclose(group.data, 
new_group.data) - assert new_group.name == "new" - new_group_no_name = group.copy() - assert new_group_no_name.name == f"{group.name}_copy" - - -# ---------------------------------------------------------------------------- -# -------------------------------- SO2 tests --------------------------------- -# ---------------------------------------------------------------------------- -def _create_random_so2(batch_size, rng): - theta = torch.rand(batch_size, 1, generator=rng).double() * 2 * np.pi - np.pi - return SO2(theta=theta) - - -def test_exp_map_so2(): - for batch_size in [1, 20, 100]: - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) - _check_exp_map(theta, SO2) - - -def test_log_map_so2(): - for batch_size in [1, 2, 100]: - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) - _check_log_map(theta, SO2) - - -def test_compose_so2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 100]: - so2_1 = _create_random_so2(batch_size, rng) - so2_2 = _create_random_so2(batch_size, rng) - _check_compose(so2_1, so2_2) - - -def test_inverse_so2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 100]: - so2 = _create_random_so2(batch_size, rng) - _check_inverse(so2) - - -def test_rotate_so2(): - batch_size = 20 - rng = torch.Generator() - rng.manual_seed(0) - so2 = _create_random_so2(batch_size, rng) - vector = Vector(2, data=torch.randn(batch_size, 2).double()) - rotated_vector = so2.rotate(vector) - expected_data = so2.to_matrix() @ vector.data.unsqueeze(2) - assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS) - - -def test_adjoint_so2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 100]: - so2 = _create_random_so2(batch_size, rng) - tangent = torch.randn(batch_size, 1).double() - _check_adjoint(so2, tangent) - - -def test_copy_so2(): - rng = torch.Generator() - rng.manual_seed(0) - so2 = _create_random_so2(1, rng) - _check_copy(so2) - - -# ---------------------------------------------------------------------------- -# -------------------------------- SE2 tests --------------------------------- -# ---------------------------------------------------------------------------- -def _create_random_se2(batch_size, rng): - theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi - u = torch.randn(batch_size, 2) - tangent_vector = torch.cat([u, theta], dim=1) - return SE2.exp_map(tangent_vector.double()) - - -def test_exp_map_se2(): - for batch_size in [1, 20, 100]: - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) - u = torch.randn(batch_size, 2) - tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) - _check_exp_map(tangent_vector.double(), SE2) - - -def test_log_map_se2(): - for batch_size in [1, 20, 100]: - theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) - u = torch.randn(batch_size, 2) - tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) - _check_log_map(tangent_vector, SE2) - - -def test_compose_se2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 100]: - se2_1 = _create_random_se2(batch_size, rng) - se2_2 = _create_random_se2(batch_size, rng) - _check_compose(se2_1, se2_2) - - -def test_inverse_se2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 100]: - se2 = _create_random_se2(batch_size, rng) - _check_inverse(se2) - - -def test_adjoint_se2(): - rng = torch.Generator() - rng.manual_seed(0) - for batch_size in [1, 20, 
100]: - se2 = _create_random_se2(batch_size, rng) - tangent = torch.randn(batch_size, 3).double() - _check_adjoint(se2, tangent) - - -def test_copy_se2(): - rng = torch.Generator() - se2 = _create_random_se2(1, rng) - _check_copy(se2) diff --git a/theseus/geometry/tests/test_se2.py b/theseus/geometry/tests/test_se2.py new file mode 100644 index 000000000..e6efdd085 --- /dev/null +++ b/theseus/geometry/tests/test_se2.py @@ -0,0 +1,69 @@ +import numpy as np +import pytest # noqa: F401 +import torch + +from theseus.geometry import SE2 + +from .common import ( + check_adjoint, + check_compose, + check_copy, + check_exp_map, + check_inverse, + check_log_map, +) + + +def _create_random_se2(batch_size, rng): + theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta], dim=1) + return SE2.exp_map(tangent_vector.double()) + + +def test_exp_map(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) + check_exp_map(tangent_vector.double(), SE2) + + +def test_log_map(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)) + u = torch.randn(batch_size, 2) + tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1) + check_log_map(tangent_vector, SE2) + + +def test_compose(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2_1 = _create_random_se2(batch_size, rng) + se2_2 = _create_random_se2(batch_size, rng) + check_compose(se2_1, se2_2) + + +def test_inverse(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2 = _create_random_se2(batch_size, rng) + check_inverse(se2) + + +def test_adjoint(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + se2 = _create_random_se2(batch_size, rng) + tangent = torch.randn(batch_size, 3).double() + check_adjoint(se2, tangent) + + +def test_copy(): + rng = torch.Generator() + se2 = _create_random_se2(1, rng) + check_copy(se2) diff --git a/theseus/geometry/tests/test_so2.py b/theseus/geometry/tests/test_so2.py new file mode 100644 index 000000000..bac997a55 --- /dev/null +++ b/theseus/geometry/tests/test_so2.py @@ -0,0 +1,76 @@ +import numpy as np +import pytest # noqa: F401 +import torch + +from theseus.constants import EPS +from theseus.geometry import SO2, Vector + +from .common import ( + check_adjoint, + check_compose, + check_copy, + check_exp_map, + check_inverse, + check_log_map, +) + + +def _create_random_so2(batch_size, rng): + theta = torch.rand(batch_size, 1, generator=rng).double() * 2 * np.pi - np.pi + return SO2(theta=theta) + + +def test_exp_map(): + for batch_size in [1, 20, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + check_exp_map(theta, SO2) + + +def test_log_map(): + for batch_size in [1, 2, 100]: + theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size)).view(-1, 1) + check_log_map(theta, SO2) + + +def test_compose(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + so2_1 = _create_random_so2(batch_size, rng) + so2_2 = _create_random_so2(batch_size, rng) + check_compose(so2_1, so2_2) + + +def test_inverse(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + so2 = _create_random_so2(batch_size, rng) + check_inverse(so2) + + +def test_rotate(): + batch_size = 
20 + rng = torch.Generator() + rng.manual_seed(0) + so2 = _create_random_so2(batch_size, rng) + vector = Vector(2, data=torch.randn(batch_size, 2).double()) + rotated_vector = so2.rotate(vector) + expected_data = so2.to_matrix() @ vector.data.unsqueeze(2) + assert torch.allclose(expected_data.squeeze(2), rotated_vector.data, atol=EPS) + + +def test_adjoint(): + rng = torch.Generator() + rng.manual_seed(0) + for batch_size in [1, 20, 100]: + so2 = _create_random_so2(batch_size, rng) + tangent = torch.randn(batch_size, 1).double() + check_adjoint(so2, tangent) + + +def test_copy(): + rng = torch.Generator() + rng.manual_seed(0) + so2 = _create_random_so2(1, rng) + check_copy(so2) From 810aa86e1d4c6b7ae2a16524f3c39976c2d0153f Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 12:37:32 -0700 Subject: [PATCH 34/39] Renamed GaussianVariable as SquaredVariableDistance. --- setup.cfg | 3 +++ theseus/embodied/__init__.py | 4 ++-- theseus/embodied/misc/__init__.py | 2 +- ...ariable.py => squared_variable_distance.py} | 18 +++++++++--------- theseus/embodied/tests/test_factors.py | 10 ++++++---- 5 files changed, 21 insertions(+), 16 deletions(-) rename theseus/embodied/misc/{gaussian_variable.py => squared_variable_distance.py} (69%) diff --git a/setup.cfg b/setup.cfg index ff30743f5..b366f274a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,10 @@ max-line-length = 100 # E203: whitespace before ":", incompatible with black # W503: line break before binary operator (black also) +# F401: imported but unused ignore=E203, W503 +per-file-ignores = + *__init__.py:F401 [mypy] python_version = 3.7 diff --git a/theseus/embodied/__init__.py b/theseus/embodied/__init__.py index 2723e566d..a2e80284d 100644 --- a/theseus/embodied/__init__.py +++ b/theseus/embodied/__init__.py @@ -1,4 +1,4 @@ from .measurements import Between -from .misc import GaussianVariable +from .misc import SquaredVariableDistance -__all__ = ["Between", "GaussianVariable"] +__all__ = ["Between", "SquaredVariableDistance"] diff --git a/theseus/embodied/misc/__init__.py b/theseus/embodied/misc/__init__.py index 82fa961a2..99513d8de 100644 --- a/theseus/embodied/misc/__init__.py +++ b/theseus/embodied/misc/__init__.py @@ -1,3 +1,3 @@ -from .gaussian_variable import GaussianVariable +from .squared_variable_distance import SquaredVariableDistance __all__ = ["GaussianVariable"] diff --git a/theseus/embodied/misc/gaussian_variable.py b/theseus/embodied/misc/squared_variable_distance.py similarity index 69% rename from theseus/embodied/misc/gaussian_variable.py rename to theseus/embodied/misc/squared_variable_distance.py index 0682071d2..186dd3c9f 100644 --- a/theseus/embodied/misc/gaussian_variable.py +++ b/theseus/embodied/misc/squared_variable_distance.py @@ -6,24 +6,24 @@ from theseus.geometry import LieGroup -class GaussianVariable(Factor): +class SquaredVariableDistance(Factor): def __init__( self, var: LieGroup, precision: Precision, - prior: LieGroup, + target: LieGroup, name: Optional[str] = None, ): - if not isinstance(var, prior.__class__): + if not isinstance(var, target.__class__): raise ValueError( - "Prior variable type inconsistent with variables for the graph." + "Variable for the squared distance inconsistent with the given target." 
) self.var = var - self.prior = prior + self.target = target super().__init__(precision, name=name) def error(self) -> torch.Tensor: - return self.prior.local(self.variables[0]) + return self.target.local(self.variables[0]) def jacobians(self) -> List[torch.Tensor]: return [torch.eye(self.dim()).to(self.variables[0].device)] @@ -46,7 +46,7 @@ def set_variable_at(self, variable: Variable, idx: int): else: raise ValueError("Index out of range.") - def _copy_impl(self, new_name: Optional[str] = None) -> "GaussianVariable": - return GaussianVariable( # type: ignore - self.var.copy(), self.precision.copy(), self.prior.copy(), name=new_name + def _copy_impl(self, new_name: Optional[str] = None) -> "SquaredVariableDistance": + return SquaredVariableDistance( # type: ignore + self.var.copy(), self.precision.copy(), self.target.copy(), name=new_name ) diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/tests/test_factors.py index 594982ca3..b89d892b3 100644 --- a/theseus/embodied/tests/test_factors.py +++ b/theseus/embodied/tests/test_factors.py @@ -11,14 +11,16 @@ def _check_var_copy(var1, var2): assert torch.allclose(var1.data, var2.data) -def test_copy_gaussian_variable(): +def test_copy_squared_variable_distance(): v0 = thgeom.SE2() - prior = thgeom.SE2() - factor = thembod.GaussianVariable(v0, thcore.ScalePrecision(1), prior, name="name") + target = thgeom.SE2() + factor = thembod.SquaredVariableDistance( + v0, thcore.ScalePrecision(1), target, name="name" + ) factor2 = factor.copy(new_name="new_name") assert factor is not factor2 _check_var_copy(factor2.var, v0) - _check_var_copy(factor2.prior, prior) + _check_var_copy(factor2.target, target) assert torch.allclose(factor.precision.data, factor2.precision.data) assert factor2.name == "new_name" From d4b77f7c31003f3ac2f9ddbea04d3672ef803ec3 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 13:00:29 -0700 Subject: [PATCH 35/39] Added a common method to check the copy method of a variable. 
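Note: check_copy_var centralizes assertions that were previously duplicated across test files. As a self-contained sketch of the contract it imposes on a variable type, here is a hypothetical stand-in (FakeVar is not a theseus class; the deepcopy clause assumes the type delegates __deepcopy__ to copy(), which is what the name assertions in the existing tests rely on):

    import copy

    import torch

    class FakeVar:
        # Hypothetical minimal variable: copying clones the data and derives a
        # default "<name>_copy" when no explicit new name is given.
        def __init__(self, data, name):
            self.data = data
            self.name = name

        def copy(self, new_name=None):
            return FakeVar(self.data.clone(), new_name or f"{self.name}_copy")

        def __deepcopy__(self, memo):
            return self.copy()

    def check_copy_var(var):
        # Same assertions as the helper added to theseus/core/tests/common.py below.
        var.name = "old"
        new_var = var.copy(new_name="new")
        assert var is not new_var
        assert var.data is not new_var.data
        assert torch.allclose(var.data, new_var.data)
        assert new_var.name == "new"
        assert copy.deepcopy(var).name == f"{var.name}_copy"

    check_copy_var(FakeVar(torch.rand(1, 3), "v"))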
--- theseus/core/tests/common.py | 20 ++++++++++++++++++++ theseus/core/tests/test_variable.py | 15 ++------------- theseus/embodied/tests/test_factors.py | 23 ++++++----------------- theseus/geometry/tests/common.py | 11 ----------- theseus/geometry/tests/test_se2.py | 4 ++-- theseus/geometry/tests/test_so2.py | 4 ++-- theseus/geometry/tests/test_vector.py | 9 ++------- 7 files changed, 34 insertions(+), 52 deletions(-) diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index bbbbe1fa3..d7de27c67 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -1,3 +1,5 @@ +import copy + import torch import theseus.core @@ -113,3 +115,21 @@ def create_graph_with_mock_factors(data=None, precision=NullPrecision()): graph.add(factor) return graph, factors, names, var_to_factors + + +def check_copy_var(var): + var.name = "old" + new_var = var.copy(new_name="new") + assert var is not new_var + assert var.data is not new_var.data + assert torch.allclose(var.data, new_var.data) + assert new_var.name == "new" + new_var_no_name = copy.deepcopy(var) + assert new_var_no_name.name == f"{var.name}_copy" + + +def check_another_var_is_copy(var, other_var): + assert isinstance(var, other_var.__class__) + assert var is not other_var + assert var.data is not other_var.data + assert torch.allclose(var.data, other_var.data) diff --git a/theseus/core/tests/test_variable.py b/theseus/core/tests/test_variable.py index 97e38358f..302c55323 100644 --- a/theseus/core/tests/test_variable.py +++ b/theseus/core/tests/test_variable.py @@ -1,28 +1,17 @@ -import copy - -import numpy as np import pytest # noqa: F401 import torch import theseus.core -from .common import MockVar +from .common import MockVar, check_copy_var def test_copy(): - def _check_copy(var, new_var, new_name): - np.testing.assert_almost_equal(var.data.numpy(), new_var.data.numpy()) - assert var is not new_var - assert var.data is not new_var.data - assert new_var.name == new_name - assert new_var.shape == var.shape - for i in range(100): size = torch.randint(low=1, high=21, size=(1,)).item() data = torch.rand(size=(1,) + (size,)) var = MockVar(size, data=data, name="var") - _check_copy(var, var.copy("new_name"), "new_name") - _check_copy(var, copy.deepcopy(var), "var_copy") + check_copy_var(var) def test_var_shape(): diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/tests/test_factors.py index b89d892b3..23b0f4285 100644 --- a/theseus/embodied/tests/test_factors.py +++ b/theseus/embodied/tests/test_factors.py @@ -3,12 +3,7 @@ import theseus.core as thcore import theseus.embodied as thembod import theseus.geometry as thgeom - - -def _check_var_copy(var1, var2): - assert var1 is not var2 - assert var1.data is not var2.data - assert torch.allclose(var1.data, var2.data) +from theseus.core.tests.common import check_another_var_is_copy def test_copy_squared_variable_distance(): @@ -19,8 +14,8 @@ def test_copy_squared_variable_distance(): ) factor2 = factor.copy(new_name="new_name") assert factor is not factor2 - _check_var_copy(factor2.var, v0) - _check_var_copy(factor2.target, target) + check_another_var_is_copy(factor2.var, v0) + check_another_var_is_copy(factor2.target, target) assert torch.allclose(factor.precision.data, factor2.precision.data) assert factor2.name == "new_name" @@ -30,16 +25,10 @@ def test_copy_between(): v1 = thgeom.SE2() meas = thgeom.SE2() factor = thembod.Between(v0, v1, thcore.ScalePrecision(1), meas, name="name") - - def _check_var_copy(var1, var2): - assert var1 is not 
var2 - assert var1.data is not var2.data - assert torch.allclose(var1.data, var2.data) - factor2 = factor.copy(new_name="new_name") assert factor is not factor2 - _check_var_copy(factor2.v0, v0) - _check_var_copy(factor2.v1, v1) - _check_var_copy(factor2.measurement, meas) + check_another_var_is_copy(factor2.v0, v0) + check_another_var_is_copy(factor2.v1, v1) + check_another_var_is_copy(factor2.measurement, meas) assert torch.allclose(factor.precision.data, factor2.precision.data) assert factor2.name == "new_name" diff --git a/theseus/geometry/tests/common.py b/theseus/geometry/tests/common.py index 4d8f6a429..9be6f0193 100644 --- a/theseus/geometry/tests/common.py +++ b/theseus/geometry/tests/common.py @@ -81,14 +81,3 @@ def check_adjoint(group, tangent_vector): diff = tangent_left.squeeze(2) - tangent_right print(diff.abs().max(dim=1)) assert torch.allclose(tangent_left.squeeze(2), tangent_right, atol=EPS) - - -def check_copy(group): - group.name = "old" - new_group = group.copy(new_name="new") - assert group is not new_group - assert group.data is not new_group.data - assert torch.allclose(group.data, new_group.data) - assert new_group.name == "new" - new_group_no_name = group.copy() - assert new_group_no_name.name == f"{group.name}_copy" diff --git a/theseus/geometry/tests/test_se2.py b/theseus/geometry/tests/test_se2.py index e6efdd085..e396bb754 100644 --- a/theseus/geometry/tests/test_se2.py +++ b/theseus/geometry/tests/test_se2.py @@ -2,12 +2,12 @@ import pytest # noqa: F401 import torch +from theseus.core.tests.common import check_copy_var from theseus.geometry import SE2 from .common import ( check_adjoint, check_compose, - check_copy, check_exp_map, check_inverse, check_log_map, @@ -66,4 +66,4 @@ def test_adjoint(): def test_copy(): rng = torch.Generator() se2 = _create_random_se2(1, rng) - check_copy(se2) + check_copy_var(se2) diff --git a/theseus/geometry/tests/test_so2.py b/theseus/geometry/tests/test_so2.py index bac997a55..c382152dd 100644 --- a/theseus/geometry/tests/test_so2.py +++ b/theseus/geometry/tests/test_so2.py @@ -3,12 +3,12 @@ import torch from theseus.constants import EPS +from theseus.core.tests.common import check_copy_var from theseus.geometry import SO2, Vector from .common import ( check_adjoint, check_compose, - check_copy, check_exp_map, check_inverse, check_log_map, @@ -73,4 +73,4 @@ def test_copy(): rng = torch.Generator() rng.manual_seed(0) so2 = _create_random_so2(1, rng) - check_copy(so2) + check_copy_var(so2) diff --git a/theseus/geometry/tests/test_vector.py b/theseus/geometry/tests/test_vector.py index 9da7b55cf..9cc0996b1 100644 --- a/theseus/geometry/tests/test_vector.py +++ b/theseus/geometry/tests/test_vector.py @@ -2,6 +2,7 @@ import pytest # noqa: F401 import torch +from theseus.core.tests.common import check_copy_var from theseus.core.variable import local, retract from theseus.geometry.vector import Vector @@ -193,10 +194,4 @@ def test_copy(): for j in range(1, 5): t1 = torch.rand(i, j) v = Vector(j, data=t1.clone(), name="v") - v_new = v.copy(new_name="v_new") - assert v_new is not v - assert v_new.data is not v.data - assert torch.allclose(v_new.data, v.data) - assert v_new.name == "v_new" - v_new_no_name = v.copy() - assert v_new_no_name.name == "v_copy" + check_copy_var(v) From 857335690e03ea648274074ad70264eb528fa2a7 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 20:33:04 -0700 Subject: [PATCH 36/39] Added tests for Between and SquaredDistance jacobians. 
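Note on the testing pattern introduced here: numeric_jacobian differentiates functions whose outputs are group elements (it calls local() on them), while a factor's error() is a plain tangent-space tensor. The new tests therefore wrap the error as target.retract(error) to re-attach it to the manifold before differencing. In the Euclidean case, where retract is addition and local is subtraction, the wrapped SquaredDistance map reduces to the identity, matching the identity Jacobian the factor reports. A small plain-torch sketch of that special case (wrapped_error is an illustrative name):

    import torch

    target = torch.tensor([1.0, 2.0], dtype=torch.float64)

    def wrapped_error(v):
        error = v - target     # SquaredDistance error in the Vector case
        return target + error  # i.e., target.retract(error)

    v = torch.tensor([0.5, -0.7], dtype=torch.float64)
    jac = torch.autograd.functional.jacobian(wrapped_error, v)
    assert torch.allclose(jac, torch.eye(2, dtype=torch.float64))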
--- .../misc/squared_variable_distance.py | 10 +++-- theseus/embodied/tests/test_factors.py | 44 +++++++++++++++++++ theseus/geometry/tests/common.py | 6 +-- theseus/geometry/tests/test_se2.py | 12 ++--- 4 files changed, 60 insertions(+), 12 deletions(-) diff --git a/theseus/embodied/misc/squared_variable_distance.py b/theseus/embodied/misc/squared_variable_distance.py index 186dd3c9f..b9cb55f6b 100644 --- a/theseus/embodied/misc/squared_variable_distance.py +++ b/theseus/embodied/misc/squared_variable_distance.py @@ -23,13 +23,17 @@ def __init__( super().__init__(precision, name=name) def error(self) -> torch.Tensor: - return self.target.local(self.variables[0]) + return self.target.local(self.var) def jacobians(self) -> List[torch.Tensor]: - return [torch.eye(self.dim()).to(self.variables[0].device)] + return [ + torch.eye(self.dim(), dtype=self.var.data.dtype) + .repeat(self.var.shape[0], 1, 1) + .to(self.var.device) + ] def dim(self) -> int: - return self.variables[0].dim() + return self.var.dim() def _get_variables_impl(self) -> List[Variable]: return [self.var] diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/tests/test_factors.py index 23b0f4285..a9af751d0 100644 --- a/theseus/embodied/tests/test_factors.py +++ b/theseus/embodied/tests/test_factors.py @@ -4,6 +4,8 @@ import theseus.embodied as thembod import theseus.geometry as thgeom from theseus.core.tests.common import check_another_var_is_copy +from theseus.geometry.tests.common import numeric_jacobian +from theseus.geometry.tests.test_se2 import create_random_se2 def test_copy_squared_variable_distance(): @@ -20,6 +22,26 @@ def test_copy_squared_variable_distance(): assert factor2.name == "new_name" +def test_jacobian_squared_variable_distance(): + rng = torch.Generator() + rng.manual_seed(0) + precision = thcore.ScalePrecision(1) + for batch_size in [1, 10, 100]: + v0 = create_random_se2(batch_size, rng) + target = create_random_se2(batch_size, rng) + factor = thembod.SquaredVariableDistance(v0, precision, target, name="factor") + + def new_error_fn(groups): + new_factor = thembod.SquaredVariableDistance( + groups[0], precision, target, name="new_factor" + ) + return new_factor.target.retract(new_factor.error()) + + expected_jacs = numeric_jacobian(new_error_fn, [v0]) + jacobians = factor.jacobians() + assert torch.allclose(jacobians[0], expected_jacs[0], atol=1e-8) + + def test_copy_between(): v0 = thgeom.SE2() v1 = thgeom.SE2() @@ -32,3 +54,25 @@ def test_copy_between(): check_another_var_is_copy(factor2.measurement, meas) assert torch.allclose(factor.precision.data, factor2.precision.data) assert factor2.name == "new_name" + + +def test_jacobian_between(): + rng = torch.Generator() + rng.manual_seed(0) + precision = thcore.ScalePrecision(1) + for batch_size in [1, 10, 100]: + v0 = create_random_se2(batch_size, rng) + v1 = create_random_se2(batch_size, rng) + measurement = create_random_se2(batch_size, rng) + factor = thembod.Between(v0, v1, precision, measurement, name="factor") + + def new_error_fn(groups): + new_factor = thembod.Between( + groups[0], groups[1], precision, measurement, name="new_factor" + ) + return new_factor.measurement.retract(new_factor.error()) + + expected_jacs = numeric_jacobian(new_error_fn, [v0, v1]) + jacobians = factor.jacobians() + assert torch.allclose(jacobians[0], expected_jacs[0], atol=1e-8) + assert torch.allclose(jacobians[1], expected_jacs[1], atol=1e-8) diff --git a/theseus/geometry/tests/common.py b/theseus/geometry/tests/common.py index 9be6f0193..80f5bf48b 
100644 --- a/theseus/geometry/tests/common.py +++ b/theseus/geometry/tests/common.py @@ -3,7 +3,7 @@ from theseus.constants import EPS -def _numeric_jacobian(function, group_args): +def numeric_jacobian(function, group_args): batch_size = group_args[0].shape[0] dim = group_args[0].dim() delta_mag = 1e-3 @@ -49,7 +49,7 @@ def check_log_map(tangent_vector, group_cls): def check_compose(group_1, group_2): composition, (jac1, jac2) = group_1.compose(group_2, get_jacobians=True) expected_matrix = group_1.to_matrix() @ group_2.to_matrix() - expected_jacs = _numeric_jacobian( + expected_jacs = numeric_jacobian( lambda groups: groups[0].compose(groups[1], get_jacobians=False), [group_1, group_2], ) @@ -62,7 +62,7 @@ def check_inverse(group): tangent_vector = group.log_map() inverse_group = group.exp_map(-tangent_vector) inverse_result, jac = group.inverse(get_jacobian=True) - expected_jac = _numeric_jacobian( + expected_jac = numeric_jacobian( lambda groups: groups[0].inverse(get_jacobian=False), [group], ) diff --git a/theseus/geometry/tests/test_se2.py b/theseus/geometry/tests/test_se2.py index e396bb754..3bd7a8295 100644 --- a/theseus/geometry/tests/test_se2.py +++ b/theseus/geometry/tests/test_se2.py @@ -14,7 +14,7 @@ ) -def _create_random_se2(batch_size, rng): +def create_random_se2(batch_size, rng): theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi u = torch.randn(batch_size, 2) tangent_vector = torch.cat([u, theta], dim=1) return SE2.exp_map(tangent_vector.double()) @@ -41,8 +41,8 @@ def test_compose(): rng = torch.Generator() rng.manual_seed(0) for batch_size in [1, 20, 100]: - se2_1 = _create_random_se2(batch_size, rng) - se2_2 = _create_random_se2(batch_size, rng) + se2_1 = create_random_se2(batch_size, rng) + se2_2 = create_random_se2(batch_size, rng) check_compose(se2_1, se2_2) @@ -50,7 +50,7 @@ def test_inverse(): rng = torch.Generator() rng.manual_seed(0) for batch_size in [1, 20, 100]: - se2 = _create_random_se2(batch_size, rng) + se2 = create_random_se2(batch_size, rng) check_inverse(se2) @@ -58,12 +58,12 @@ def test_adjoint(): rng = torch.Generator() rng.manual_seed(0) for batch_size in [1, 20, 100]: - se2 = _create_random_se2(batch_size, rng) + se2 = create_random_se2(batch_size, rng) tangent = torch.randn(batch_size, 3).double() check_adjoint(se2, tangent) def test_copy(): rng = torch.Generator() - se2 = _create_random_se2(1, rng) + se2 = create_random_se2(1, rng) check_copy_var(se2) From 25a960722dcc6526d13e1c2e4504c5c7cfcd9598 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 20:36:44 -0700 Subject: [PATCH 37/39] Renamed SquaredVariableDistance as SquaredDistance --- theseus/embodied/__init__.py | 4 ++-- theseus/embodied/misc/__init__.py | 4 ++-- ...ared_variable_distance.py => squared_distance.py} | 6 +++--- theseus/embodied/tests/test_factors.py | 12 +++++------- 4 files changed, 12 insertions(+), 14 deletions(-) rename theseus/embodied/misc/{squared_variable_distance.py => squared_distance.py} (93%) diff --git a/theseus/embodied/__init__.py b/theseus/embodied/__init__.py index a2e80284d..9a2d646f4 100644 --- a/theseus/embodied/__init__.py +++ b/theseus/embodied/__init__.py @@ -1,4 +1,4 @@ from .measurements import Between -from .misc import SquaredVariableDistance +from .misc import SquaredDistance -__all__ = ["Between", "SquaredVariableDistance"] +__all__ = ["Between", "SquaredDistance"] diff --git a/theseus/embodied/misc/__init__.py b/theseus/embodied/misc/__init__.py index 99513d8de..ccbe58967 100644 --- a/theseus/embodied/misc/__init__.py +++ b/theseus/embodied/misc/__init__.py @@
-1,3 +1,3 @@ -from .squared_variable_distance import SquaredVariableDistance +from .squared_distance import SquaredDistance -__all__ = ["GaussianVariable"] +__all__ = ["SquaredDistance"] diff --git a/theseus/embodied/misc/squared_variable_distance.py b/theseus/embodied/misc/squared_distance.py similarity index 93% rename from theseus/embodied/misc/squared_variable_distance.py rename to theseus/embodied/misc/squared_distance.py index b9cb55f6b..54683d108 100644 --- a/theseus/embodied/misc/squared_variable_distance.py +++ b/theseus/embodied/misc/squared_distance.py @@ -6,7 +6,7 @@ from theseus.geometry import LieGroup -class SquaredVariableDistance(Factor): +class SquaredDistance(Factor): def __init__( self, var: LieGroup, @@ -50,7 +50,7 @@ def set_variable_at(self, variable: Variable, idx: int): else: raise ValueError("Index out of range.") - def _copy_impl(self, new_name: Optional[str] = None) -> "SquaredVariableDistance": - return SquaredVariableDistance( # type: ignore + def _copy_impl(self, new_name: Optional[str] = None) -> "SquaredDistance": + return SquaredDistance( # type: ignore self.var.copy(), self.precision.copy(), self.target.copy(), name=new_name ) diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/tests/test_factors.py index a9af751d0..83d7d9bde 100644 --- a/theseus/embodied/tests/test_factors.py +++ b/theseus/embodied/tests/test_factors.py @@ -8,12 +8,10 @@ from theseus.geometry.tests.test_se2 import create_random_se2 -def test_copy_squared_variable_distance(): +def test_copy_squared_distance(): v0 = thgeom.SE2() target = thgeom.SE2() - factor = thembod.SquaredVariableDistance( - v0, thcore.ScalePrecision(1), target, name="name" - ) + factor = thembod.SquaredDistance(v0, thcore.ScalePrecision(1), target, name="name") factor2 = factor.copy(new_name="new_name") assert factor is not factor2 check_another_var_is_copy(factor2.var, v0) @@ -22,17 +20,17 @@ def test_copy_squared_variable_distance(): assert factor2.name == "new_name" -def test_jacobian_squared_variable_distance(): +def test_jacobian_squared_distance(): rng = torch.Generator() rng.manual_seed(0) precision = thcore.ScalePrecision(1) for batch_size in [1, 10, 100]: v0 = create_random_se2(batch_size, rng) target = create_random_se2(batch_size, rng) - factor = thembod.SquaredVariableDistance(v0, precision, target, name="factor") + factor = thembod.SquaredDistance(v0, precision, target, name="factor") def new_error_fn(groups): - new_factor = thembod.SquaredVariableDistance( + new_factor = thembod.SquaredDistance( groups[0], precision, target, name="new_factor" ) return new_factor.target.retract(new_factor.error()) From 236f3a8e80bb96ca73f9d9d6bb4c43471b0fef41 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Mon, 14 Jun 2021 20:43:47 -0700 Subject: [PATCH 38/39] Moved embodied factors tests each to their own test files.
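With this move, each factor's tests live next to the module they cover, so a suite can be run in isolation. For example, a small sketch using pytest's Python entry point with the paths introduced in this patch (equivalent to invoking pytest on these paths from the repository root):

    import pytest

    # Run only the relocated embodied-factor suites.
    pytest.main([
        "theseus/embodied/measurements/tests/test_between.py",
        "theseus/embodied/misc/tests/test_squared_distance.py",
    ])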
--- .../tests/test_between.py} | 32 --------------- .../misc/tests/test_squared_distance.py | 40 +++++++++++++++++++ 2 files changed, 40 insertions(+), 32 deletions(-) rename theseus/embodied/{tests/test_factors.py => measurements/tests/test_between.py} (58%) create mode 100644 theseus/embodied/misc/tests/test_squared_distance.py diff --git a/theseus/embodied/tests/test_factors.py b/theseus/embodied/measurements/tests/test_between.py similarity index 58% rename from theseus/embodied/tests/test_factors.py rename to theseus/embodied/measurements/tests/test_between.py index 83d7d9bde..09eb74683 100644 --- a/theseus/embodied/tests/test_factors.py +++ b/theseus/embodied/measurements/tests/test_between.py @@ -8,38 +8,6 @@ from theseus.geometry.tests.test_se2 import create_random_se2 -def test_copy_squared_distance(): - v0 = thgeom.SE2() - target = thgeom.SE2() - factor = thembod.SquaredDistance(v0, thcore.ScalePrecision(1), target, name="name") - factor2 = factor.copy(new_name="new_name") - assert factor is not factor2 - check_another_var_is_copy(factor2.var, v0) - check_another_var_is_copy(factor2.target, target) - assert torch.allclose(factor.precision.data, factor2.precision.data) - assert factor2.name == "new_name" - - -def test_jacobian_squared_distance(): - rng = torch.Generator() - rng.manual_seed(0) - precision = thcore.ScalePrecision(1) - for batch_size in [1, 10, 100]: - v0 = create_random_se2(batch_size, rng) - target = create_random_se2(batch_size, rng) - factor = thembod.SquaredDistance(v0, precision, target, name="factor") - - def new_error_fn(groups): - new_factor = thembod.SquaredDistance( - groups[0], precision, target, name="new_factor" - ) - return new_factor.target.retract(new_factor.error()) - - expected_jacs = numeric_jacobian(new_error_fn, [v0]) - jacobians = factor.jacobians() - assert torch.allclose(jacobians[0], expected_jacs[0], atol=1e-8) - - def test_copy_between(): v0 = thgeom.SE2() v1 = thgeom.SE2() diff --git a/theseus/embodied/misc/tests/test_squared_distance.py b/theseus/embodied/misc/tests/test_squared_distance.py new file mode 100644 index 000000000..9209e6756 --- /dev/null +++ b/theseus/embodied/misc/tests/test_squared_distance.py @@ -0,0 +1,40 @@ +import torch + +import theseus.core as thcore +import theseus.embodied as thembod +import theseus.geometry as thgeom +from theseus.core.tests.common import check_another_var_is_copy +from theseus.geometry.tests.common import numeric_jacobian +from theseus.geometry.tests.test_se2 import create_random_se2 + + +def test_copy_squared_distance(): + v0 = thgeom.SE2() + target = thgeom.SE2() + factor = thembod.SquaredDistance(v0, thcore.ScalePrecision(1), target, name="name") + factor2 = factor.copy(new_name="new_name") + assert factor is not factor2 + check_another_var_is_copy(factor2.var, v0) + check_another_var_is_copy(factor2.target, target) + assert torch.allclose(factor.precision.data, factor2.precision.data) + assert factor2.name == "new_name" + + +def test_jacobian_squared_distance(): + rng = torch.Generator() + rng.manual_seed(0) + precision = thcore.ScalePrecision(1) + for batch_size in [1, 10, 100]: + v0 = create_random_se2(batch_size, rng) + target = create_random_se2(batch_size, rng) + factor = thembod.SquaredDistance(v0, precision, target, name="factor") + + def new_error_fn(groups): + new_factor = thembod.SquaredDistance( + groups[0], precision, target, name="new_factor" + ) + return new_factor.target.retract(new_factor.error()) + + expected_jacs = numeric_jacobian(new_error_fn, [v0]) + jacobians = 
factor.jacobians() + assert torch.allclose(jacobians[0], expected_jacs[0], atol=1e-8) From 2a6bf231d0da4669915a560dc03a7e65bb7ab1a5 Mon Sep 17 00:00:00 2001 From: Luis Pineda Date: Thu, 17 Jun 2021 04:31:46 -0700 Subject: [PATCH 39/39] Fixed bug in Scale and Diagonal precisions weighting of jacobians, and added unit test. --- theseus/core/precision.py | 14 ++++++----- theseus/core/tests/common.py | 2 +- theseus/core/tests/test_precision.py | 35 +++++++++++++++++++++++++--- 3 files changed, 41 insertions(+), 10 deletions(-) diff --git a/theseus/core/precision.py b/theseus/core/precision.py index e034a1862..8c5c30f69 100644 --- a/theseus/core/precision.py +++ b/theseus/core/precision.py @@ -107,10 +107,11 @@ def weight_jacobians_and_error( jacobians: List[torch.Tensor], error: torch.Tensor, ) -> Tuple[List[torch.Tensor], torch.Tensor]: - error.mul_(self.data) + error = error * self.data + new_jacobians = [] for jac in jacobians: - jac = jac * self.data - return jacobians, error + new_jacobians.append(jac * self.data) + return new_jacobians, error def _copy_impl(self) -> "ScalePrecision": new_prec = ScalePrecision( @@ -152,12 +153,13 @@ def weight_jacobians_and_error( jacobians: List[torch.Tensor], error: torch.Tensor, ) -> Tuple[List[torch.Tensor], torch.Tensor]: - error.mul_(self.data) + error = error * self.data + new_jacobians = [] for jac in jacobians: # Jacobian is batch_size x factor_dim x var_dim # This left multiplies the weights (inv cov.) to jacobian - jac = jac * self.data.view(1, -1, 1) - return jacobians, error + new_jacobians.append(jac * self.data.view(1, -1, 1)) + return new_jacobians, error def _copy_impl(self) -> "DiagonalPrecision": return DiagonalPrecision( diff --git a/theseus/core/tests/common.py b/theseus/core/tests/common.py index d7de27c67..de67d20c1 100644 --- a/theseus/core/tests/common.py +++ b/theseus/core/tests/common.py @@ -72,7 +72,7 @@ def error(self): return mu * torch.ones(self._dim) def jacobians(self): - pass + return [self.error()] * len(self.variables) def dim(self) -> int: return self._dim diff --git a/theseus/core/tests/test_precision.py b/theseus/core/tests/test_precision.py index edf13dc7a..caa9cd568 100644 --- a/theseus/core/tests/test_precision.py +++ b/theseus/core/tests/test_precision.py @@ -5,7 +5,7 @@ import theseus.core as thcore -from .common import MockPrecision +from .common import MockPrecision, create_mock_factors def test_cov_shape(): @@ -34,13 +34,42 @@ def test_copy(): def test_copy_scale_precision(): for learnable in [True, False]: - p1 = thcore.precision.ScalePrecision(2, learnable=learnable) + p1 = thcore.ScalePrecision(2, learnable=learnable) _check_copy(p1, p1.copy()) _check_copy(p1, copy.deepcopy(p1)) def test_copy_diagonal_precision(): for learnable in [True, False]: - p1 = thcore.precision.DiagonalPrecision([1, 2, 3], learnable=learnable) + p1 = thcore.DiagonalPrecision([1, 2, 3], learnable=learnable) _check_copy(p1, p1.copy()) _check_copy(p1, copy.deepcopy(p1)) + + +def test_weight_error_scale_and_diagonal_precision(): + def _check(f1_, f2_, scale_): + j1, e1 = f1_.weighted_jacobians_error() + j2, e2 = f2_.weighted_jacobians_error() + assert torch.allclose(e1 * scale_, e2) + for i in range(len(j1)): + assert torch.allclose(j1[i] * scale_, j2[i]) + + for _ in range(10): + scale = torch.rand(1) + factors, *_ = create_mock_factors( + data=torch.ones(1, 10), precision=thcore.ScalePrecision(1.0) + ) + factors_2x, *_ = create_mock_factors( + data=torch.ones(1, 10), precision=thcore.ScalePrecision(scale) + ) + for f1, f2 in 
zip(factors, factors_2x): + _check(f1, f2, scale) + + factors, *_ = create_mock_factors( + data=torch.ones(1, 10), precision=thcore.DiagonalPrecision([1.0, 1.0]) + ) + factors_2x, *_ = create_mock_factors( + data=torch.ones(1, 10), precision=thcore.DiagonalPrecision([scale, scale]) + ) + for f1, f2 in zip(factors, factors_2x): + _check(f1, f2, scale)
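Editorial note on the bug fixed in this last patch: rebinding a loop variable in Python never writes back into the list being iterated, so the previous jac = jac * self.data discarded the weighting, while the older in-place error.mul_ variant mutated tensors owned by the caller. A self-contained illustration of the broken and fixed patterns:

    import torch

    jacobians = [torch.ones(2, 2), torch.ones(2, 2)]
    scale = torch.tensor(2.0)

    # Buggy pattern (pre-fix): jac is rebound to a new tensor on each iteration;
    # the list itself is never updated.
    for jac in jacobians:
        jac = jac * scale
    assert all(torch.equal(j, torch.ones(2, 2)) for j in jacobians)

    # Fixed pattern (this patch): build and return a new list of weighted
    # jacobians, leaving the caller's tensors untouched.
    new_jacobians = [jac * scale for jac in jacobians]
    assert all(torch.equal(j, 2 * torch.ones(2, 2)) for j in new_jacobians)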