Commit

Merge pull request #41 from fairinternal/lep.fix_case
Renamed methods named with camelCase to use lower_case_underscore
luisenp authored May 17, 2021
2 parents 66d3629 + f5cff90 commit 4939b68
Showing 10 changed files with 29 additions and 29 deletions.
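
For downstream code this is a mechanical rename; the behavior of the methods is unchanged. A sketch of how call sites are affected (`factor` and `graph` are assumed to be an existing Factor and FactorGraph):

# `factor` and `graph` are assumed, already-constructed objects.
# Before this commit (camelCase):
#   weighted = factor.weightedError()
#   jacs, err = factor.weightedJacobiansError()
#   total = graph.errorSquaredNorm()

# After this commit (lower_case_underscore):
weighted = factor.weighted_error()
jacs, err = factor.weighted_jacobians_error()
total = graph.error_squared_norm()
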
8 changes: 4 additions & 4 deletions theseus/core/factor.py
@@ -47,16 +47,16 @@ def jacobians(self) -> List[torch.Tensor]:
     def dim(self) -> int:
         pass

-    def weightedError(self) -> torch.Tensor:
+    def weighted_error(self) -> torch.Tensor:
         error = self.error()
-        return self.precision.weightError(error)
+        return self.precision.weight_error(error)

-    def weightedJacobiansError(
+    def weighted_jacobians_error(
         self,
     ) -> Tuple[List[torch.Tensor], torch.Tensor]:
         err = self.error()
         jacobian = self.jacobians()
-        return self.precision.weightJacobiansAndError(jacobian, err)
+        return self.precision.weight_jacobians_and_error(jacobian, err)

     def __len__(self):
         return len(self.variables)

4 changes: 2 additions & 2 deletions theseus/core/factor_graph.py
@@ -143,11 +143,11 @@ def error(self) -> torch.Tensor:
         error_vector = torch.zeros(self.batch_size, self.dim())
         pos = 0
         for factor in self.factors.values():
-            error_vector[:, pos : pos + factor.dim()] = factor.weightedError()
+            error_vector[:, pos : pos + factor.dim()] = factor.weighted_error()
             pos += factor.dim()
         return error_vector

-    def errorSquaredNorm(self) -> torch.Tensor:
+    def error_squared_norm(self) -> torch.Tensor:
         return (self.error() ** 2).sum(dim=1)

     def copy(self) -> "FactorGraph":

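Spelled out, the two methods in the hunk above relate as follows (a sketch; constructing `graph` is assumed):

# `graph` is an assumed, already-built FactorGraph.
# error() stacks every factor's weighted_error() into a (batch_size, graph.dim()) tensor;
# error_squared_norm() is the squared norm of that stack, taken per batch element.
stacked = graph.error()
per_batch_norm = graph.error_squared_norm()
assert torch.allclose(per_batch_norm, (stacked ** 2).sum(dim=1))
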
14 changes: 7 additions & 7 deletions theseus/core/precision.py
@@ -10,7 +10,7 @@
 # Abstract class for representing precision functions (inverse covariance)
 # (equivalent to LossFunction in minisam).
 # Concrete classes must implement two methods:
-# - `weightError`: return an error tensor weighted by the precision
+# - `weight_error`: return an error tensor weighted by the precision
 # - `weightJacobiansError`: returns jacobians an errors weighted by the precision
 class Precision(abc.ABC):
     def __init__(
@@ -44,11 +44,11 @@ def shape(self) -> torch.Size:
         return self.data.shape

     @abc.abstractmethod
-    def weightError(self, error: torch.Tensor) -> torch.Tensor:
+    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
         pass

     @abc.abstractmethod
-    def weightJacobiansAndError(
+    def weight_jacobians_and_error(
         self,
         jacobians: List[torch.Tensor],
         error: torch.Tensor,
@@ -96,11 +96,11 @@ def __init__(self, scale: float, learnable: bool = False):
     def _init_data(self, scale: float): # type: ignore
         self.data = torch.tensor(scale)

-    def weightError(self, error: torch.Tensor) -> torch.Tensor:
+    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
         error.mul_(self.data)
         return error

-    def weightJacobiansAndError(
+    def weight_jacobians_and_error(
         self,
         jacobians: List[torch.Tensor],
         error: torch.Tensor,
@@ -135,11 +135,11 @@ def _init_data(self, diagonal: float): # type: ignore
         if self.data.ndim != 1:
             raise ValueError("DiagonalPrecision only accepts arrays of dim. 1.")

-    def weightError(self, error: torch.Tensor) -> torch.Tensor:
+    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
         error.mul_(self.data)
         return error

-    def weightJacobiansAndError(
+    def weight_jacobians_and_error(
         self,
         jacobians: List[torch.Tensor],
         error: torch.Tensor,

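To illustrate the contract described in the comment at the top of this file, here is a minimal concrete precision under the renamed API, modeled on the no-op test double in theseus/core/tests/common.py below. It is only a sketch: the class name is hypothetical, and the base-class constructor and `_init_data` signature are assumptions, not part of this diff.

from typing import List, Tuple

import torch

from theseus.core.precision import Precision


class IdentityPrecision(Precision):
    # Hypothetical no-op precision: leaves errors and jacobians unweighted.
    def _init_data(self):  # assumed signature, mirroring the test doubles
        self.data = torch.tensor(1.0)

    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        return error

    def weight_jacobians_and_error(
        self, jacobians: List[torch.Tensor], error: torch.Tensor
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        return jacobians, error
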
8 changes: 4 additions & 4 deletions theseus/core/tests/common.py
@@ -29,10 +29,10 @@ def __init__(self, the_data):
     def _init_data(self, the_data):
         self.data = the_data

-    def weightError(self, error):
+    def weight_error(self, error):
         pass

-    def weightJacobiansAndError(self, jacobians, error):
+    def weight_jacobians_and_error(self, jacobians, error):
         pass


@@ -43,10 +43,10 @@ def __init__(self):
     def _init_data(self):
         pass

-    def weightError(self, error):
+    def weight_error(self, error):
         return error

-    def weightJacobiansAndError(self, jacobians, error):
+    def weight_jacobians_and_error(self, jacobians, error):
         return jacobians, error


2 changes: 1 addition & 1 deletion theseus/core/tests/test_factor_graph.py
@@ -132,7 +132,7 @@ def test_factor_graph_error():

     # Test the squared error function
     squared_error = np.sum(expected_graph_error ** 2)
-    np.testing.assert_almost_equal(graph.errorSquaredNorm().numpy(), squared_error)
+    np.testing.assert_almost_equal(graph.error_squared_norm().numpy(), squared_error)


 def test_get_factors_connected_to_var():

2 changes: 1 addition & 1 deletion theseus/optimizer/dense_linearization.py
@@ -26,7 +26,7 @@ def _linearize_jacobian_impl(self):
             (self.graph.batch_size, self.num_rows), device=self.graph.device
         )
         for factor in self.graph:
-            jacobians, error = factor.weightedJacobiansError()
+            jacobians, error = factor.weighted_jacobians_error()
             num_rows = factor.dim()
             for var_idx_in_factor, var_jacobian in enumerate(jacobians):
                 var_idx_in_order = self.ordering.index_of(

4 changes: 2 additions & 2 deletions theseus/optimizer/nonlinear/nonlinear_optimizer.py
@@ -88,7 +88,7 @@ def _update_info(self, info: NonlinearOptimizerInfo):
     # of the best variables found, but it is optional to avoid unnecessary copying
     # if this is not needed
     def optimize(self, track_best_solution: bool = False) -> NonlinearOptimizerInfo:
-        last_err = self.graph.errorSquaredNorm() / 2
+        last_err = self.graph.error_squared_norm() / 2
         self._err_increase_idx = torch.zeros_like(last_err).bool()
         info = NonlinearOptimizerInfo(
             best_solution=self._maybe_init_best_solution(do_init=track_best_solution),
@@ -110,7 +110,7 @@ def optimize(self, track_best_solution: bool = False) -> NonlinearOptimizerInfo:
                 return info
             # check for convergence
             with torch.no_grad():
-                err = self.graph.errorSquaredNorm() / 2
+                err = self.graph.error_squared_norm() / 2
                 converged_indices = self._check_convergence(err, last_err)
                 self._err_increase_idx |= err > last_err
                 converged_indices &= ~self._err_increase_idx

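The `track_best_solution` flag touched by this hunk pairs with the pattern exercised in the Gauss-Newton test further down; a usage sketch (constructing `optimizer` and `graph` is assumed):

# `optimizer` is an assumed, already-constructed nonlinear optimizer over `graph`.
info = optimizer.optimize(track_best_solution=True)
# info.best_solution tracks the best (lowest-error) variable values seen during the run;
# writing them back into the graph is left to the caller.
graph.update(info.best_solution)
best_err = graph.error_squared_norm() / 2  # same half-squared-norm objective as above
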
4 changes: 2 additions & 2 deletions theseus/optimizer/nonlinear/tests/test_gauss_newton.py
@@ -177,9 +177,9 @@ def test_error_increase_info():
         assert not info.best_solution[var_name].allclose(
             graph.get_variable(var_name).data
         )
-    current_error = graph.errorSquaredNorm()
+    current_error = graph.error_squared_norm()
     graph.update(info.best_solution)
-    best_error = graph.errorSquaredNorm()
+    best_error = graph.error_squared_norm()
     assert (best_error <= current_error).all()
     assert (best_error < current_error).any()

4 changes: 2 additions & 2 deletions theseus/optimizer/tests/test_dense_linearization.py
@@ -71,10 +71,10 @@ def __init__(self, dim, mult, name=None):
     def _init_data(self, dim, mult):
         self.data = torch.eye(dim).unsqueeze(0) * (mult ** 2)

-    def weightError(self, error):
+    def weight_error(self, error):
         return NotImplemented

-    def weightJacobiansAndError(self, jacobians, error):
+    def weight_jacobians_and_error(self, jacobians, error):
         batch_size = error.shape[0]
         sqrt = self.sqrt.repeat((batch_size, 1, 1))
         wjs = []

8 changes: 4 additions & 4 deletions theseus/tests/test_theseus_layer.py
@@ -33,12 +33,12 @@ def _init_data(self, mlp):
     def update(self, input_batch):
         self.input_batch = input_batch

-    def weightError(self, error):
+    def weight_error(self, error):
         weights = self.nn(self.input_batch)
         error.mul_(weights)
         return error

-    def weightJacobiansAndError(self, jacobians, error):
+    def weight_jacobians_and_error(self, jacobians, error):
         weights = self.nn(self.input_batch)
         error.mul_(weights)
         for jac in jacobians:
@@ -73,12 +73,12 @@ def _init_data(self, diagonal):
         else:
             self.data = torch.tensor(diagonal)

-    def weightError(self, error):
+    def weight_error(self, error):
         weights = F.softmax(self.data, dim=0)
         error.mul_(weights)
         return error

-    def weightJacobiansAndError(self, jacobians, error):
+    def weight_jacobians_and_error(self, jacobians, error):
         weights = F.softmax(self.data, dim=0)
         error.mul_(weights)
         for jac in jacobians:

