More data -> tensor renaming (#230)
* Renamed Vectorize containers and methods using the data -> tensor convention.

* Renamed _init_data to _init_tensor for Variable subclasses.

* Renamed input_data to input_tensors in Objective's forward and update methods (see the usage sketch after this list).

* Renamed _data_check to _check_tensor.

* Renamed normalize(data) to normalize(tensor) for Manifold classes.

* Renamed a variable in the DLM forward pass for clarity.
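For downstream users, here is a minimal sketch of the renamed keyword argument in use; the toy variables, cost function, and dimensions below are illustrative and not part of this commit:

    import torch
    import theseus as th

    # one optimization variable "x" and one auxiliary variable "y"
    x = th.Vector(2, name="x")
    y = th.Vector(2, name="y")

    def err_fn(optim_vars, aux_vars):
        # residual x - y, shape (batch_size, 2)
        return optim_vars[0].tensor - aux_vars[0].tensor

    objective = th.Objective()
    objective.add(
        th.AutoDiffCostFunction(
            [x], err_fn, 2, aux_vars=[y], cost_weight=th.ScaleCostWeight(1.0)
        )
    )
    layer = th.TheseusLayer(th.GaussNewton(objective, max_iterations=5))

    inputs = {"x": torch.rand(4, 2), "y": torch.rand(4, 2)}
    # before this commit: objective.update(input_data=inputs), layer.forward(input_data=inputs)
    objective.update(input_tensors=inputs)
    solution, info = layer.forward(input_tensors=inputs)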
luisenp authored Jul 12, 2022
1 parent 5ffb436 commit 9b79b6d
Showing 20 changed files with 195 additions and 193 deletions.
2 changes: 1 addition & 1 deletion examples/bundle_adjustment.py
@@ -208,7 +208,7 @@ def run(cfg: omegaconf.OmegaConf, results_path: pathlib.Path):
theseus_inputs["log_loss_radius"] = loss_radius_tensor.unsqueeze(1).clone()

theseus_outputs, info = theseus_optim.forward(
input_data=theseus_inputs,
input_tensors=theseus_inputs,
optimizer_kwargs={
"verbose": cfg.inner_optim.verbose,
"track_err_history": cfg.inner_optim.track_err_history,
2 changes: 1 addition & 1 deletion examples/pose_graph/pose_graph_cube.py
@@ -94,7 +94,7 @@ def run_batch(batch_idx: int):
log.info(f" ------------------- Batch {batch_idx} ------------------- ")
pg_batch = pg.get_batch_dataset(batch_idx=batch_idx)
theseus_inputs = get_batch_data(pg_batch, pose_indices)
objective.update(input_data=theseus_inputs)
objective.update(input_tensors=theseus_inputs)

start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
2 changes: 1 addition & 1 deletion examples/pose_graph/pose_graph_synthetic.py
@@ -231,7 +231,7 @@ def run_batch(batch_idx: int):
torch.cuda.reset_peak_memory_stats()
pr.enable()
theseus_outputs, _ = theseus_optim.forward(
input_data=theseus_inputs,
input_tensors=theseus_inputs,
optimizer_kwargs={
"verbose": cfg.inner_optim.verbose,
"track_err_history": cfg.inner_optim.track_err_history,
42 changes: 21 additions & 21 deletions theseus/core/objective.py
@@ -359,32 +359,32 @@ def size_aux_vars(self) -> int:

def error(
self,
input_data: Optional[Dict[str, torch.Tensor]] = None,
input_tensors: Optional[Dict[str, torch.Tensor]] = None,
also_update: bool = False,
) -> torch.Tensor:
old_data = {}
if input_data is not None:
old_tensors = {}
if input_tensors is not None:
if not also_update:
for var in self.optim_vars:
old_data[var] = self.optim_vars[var].tensor
self.update(input_data=input_data)
old_tensors[var] = self.optim_vars[var].tensor
self.update(input_tensors=input_tensors)

error_vector = torch.cat(
[cf.weighted_error() for cf in self._get_iterator()], dim=1
)

if input_data is not None and not also_update:
self.update(old_data)
if input_tensors is not None and not also_update:
self.update(old_tensors)
return error_vector

def error_squared_norm(
self,
input_data: Optional[Dict[str, torch.Tensor]] = None,
input_tensors: Optional[Dict[str, torch.Tensor]] = None,
also_update: bool = False,
) -> torch.Tensor:
return (self.error(input_data=input_data, also_update=also_update) ** 2).sum(
dim=1
)
return (
self.error(input_tensors=input_tensors, also_update=also_update) ** 2
).sum(dim=1)

def copy(self) -> "Objective":
new_objective = Objective(dtype=self.dtype)
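To keep the behavior these signatures preserve in view: when input_tensors is given and also_update is False, error() evaluates the objective at the provided tensors and then restores the previous ones; with also_update=True the new tensors are kept. A short sketch of that contract, assuming the toy objective from the sketch after the commit message:

    values = {"x": torch.ones(4, 2), "y": torch.zeros(4, 2)}

    # evaluate without mutating the stored tensors
    err = objective.error(input_tensors=values, also_update=False)   # shape (4, 2)
    sq_norm = objective.error_squared_norm(input_tensors=values)     # shape (4,)
    assert objective.optim_vars["x"].tensor is not values["x"]

    # evaluate and keep the provided tensors
    err = objective.error(input_tensors=values, also_update=True)
    assert objective.optim_vars["x"].tensor is values["x"]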
@@ -442,7 +442,7 @@ def __deepcopy__(self, memo):
memo[id(self)] = the_copy
return the_copy

def update(self, input_data: Optional[Dict[str, torch.Tensor]] = None):
def update(self, input_tensors: Optional[Dict[str, torch.Tensor]] = None):
self._batch_size = None

def _get_batch_size(batch_sizes: Sequence[int]) -> int:
@@ -456,20 +456,20 @@ def _get_batch_size(batch_sizes: Sequence[int]) -> int:
return max_bs
raise ValueError("Provided tensors must be broadcastable.")

input_data = input_data or {}
for var_name, data in input_data.items():
if data.ndim < 2:
input_tensors = input_tensors or {}
for var_name, tensor in input_tensors.items():
if tensor.ndim < 2:
raise ValueError(
f"Input data tensors must have a batch dimension and "
f"one ore more data dimensions, but data.ndim={data.ndim} for "
f"Input tensors must have a batch dimension and "
f"one ore more data dimensions, but tensor.ndim={tensor.ndim} for "
f"tensor with name {var_name}."
)
if var_name in self.optim_vars:
self.optim_vars[var_name].update(data)
self.optim_vars[var_name].update(tensor)
elif var_name in self.aux_vars:
self.aux_vars[var_name].update(data)
self.aux_vars[var_name].update(tensor)
elif var_name in self.cost_weight_optim_vars:
self.cost_weight_optim_vars[var_name].update(data)
self.cost_weight_optim_vars[var_name].update(tensor)
warnings.warn(
"Updated a variable declared as optimization, but it is "
"only associated to cost weights and not to any cost functions. "
@@ -482,7 +482,7 @@ def _get_batch_size(batch_sizes: Sequence[int]) -> int:
"which is not associated to any variable in the objective."
)

# Check that the batch size of all data is consistent after update
# Check that the batch size of all tensors is consistent after update
batch_sizes = [v.tensor.shape[0] for v in self.optim_vars.values()]
batch_sizes.extend([v.tensor.shape[0] for v in self.aux_vars.values()])
self._batch_size = _get_batch_size(batch_sizes)
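The batch-size check retained here means that update() may mix batch sizes as long as each one is either 1 or the shared maximum; anything else raises a ValueError. A small sketch of that behavior, again assuming the toy objective from the sketch after the commit message:

    # batch size 1 broadcasts against batch size 4
    objective.update(input_tensors={"x": torch.rand(4, 2), "y": torch.rand(1, 2)})
    print(objective.batch_size)  # 4

    # mismatched batch sizes (4 vs. 3) are rejected
    try:
        objective.update(input_tensors={"x": torch.rand(4, 2), "y": torch.rand(3, 2)})
    except ValueError as err:
        print(err)  # Provided tensors must be broadcastable.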
10 changes: 5 additions & 5 deletions theseus/core/tests/common.py
@@ -15,16 +15,16 @@ def __init__(self, length, tensor=None, name=None):
super().__init__(length, tensor=tensor, name=name)

@staticmethod
def _init_data(length):
def _init_tensor(length):
return torch.empty(1, length)

@staticmethod
def _data_check_impl(data: torch.Tensor) -> bool:
def _check_tensor_impl(tensor: torch.Tensor) -> bool:
return True

@staticmethod
def normalize(data: torch.Tensor) -> torch.Tensor:
return data
def normalize(tensor: torch.Tensor) -> torch.Tensor:
return tensor

def dof(self):
return 0
@@ -80,7 +80,7 @@ class NullCostWeight(th.CostWeight):
def __init__(self):
super().__init__(name="null_cost_weight")

def _init_data(self):
def _init_tensor(self):
pass

def weight_error(self, error):
8 changes: 4 additions & 4 deletions theseus/core/tests/test_manifold.py
@@ -43,16 +43,16 @@ def __init__(self, tensor=None, name=None):
super().__init__(tensor=tensor, name=name)

@staticmethod
def _init_data():
def _init_tensor():
return torch.ones(1, 1)

@staticmethod
def _data_check_impl(data: torch.Tensor) -> bool:
def _check_tensor_impl(tensor: torch.Tensor) -> bool:
return True

@staticmethod
def normalize(data: torch.Tensor) -> torch.Tensor:
return data
def normalize(tensor: torch.Tensor) -> torch.Tensor:
return tensor

def dof(self):
return 0
72 changes: 36 additions & 36 deletions theseus/core/tests/test_objective.py
@@ -248,25 +248,25 @@ def _check_error_for_data(v1_data_, v2_data_, error_, error_type):
else:
assert error_.allclose(expected_error.norm(dim=1) ** 2)

def _check_variables(objective, input_data, v1_data, v2_data, also_update):
def _check_variables(objective, input_tensors, v1_data, v2_data, also_update):

if also_update:
assert objective.optim_vars["v1"].tensor is input_data["v1"]
assert objective.optim_vars["v2"].tensor is input_data["v2"]
assert objective.optim_vars["v1"].tensor is input_tensors["v1"]
assert objective.optim_vars["v2"].tensor is input_tensors["v2"]
else:
assert objective.optim_vars["v1"].tensor is not input_data["v1"]
assert objective.optim_vars["v2"].tensor is not input_data["v2"]
assert objective.optim_vars["v1"].tensor is not input_tensors["v1"]
assert objective.optim_vars["v2"].tensor is not input_tensors["v2"]

assert objective.optim_vars["v1"].tensor is v1_data
assert objective.optim_vars["v2"].tensor is v2_data

def _check_error_and_variables(
v1_data_, v2_data_, error_, error_type, objective, input_data, also_update
v1_data_, v2_data_, error_, error_type, objective, input_tensors, also_update
):

_check_error_for_data(v1_data_, v2_data_, error_, error_type)

_check_variables(objective, input_data, v1_data, v2_data, also_update)
_check_variables(objective, input_tensors, v1_data, v2_data, also_update)

for _ in range(10):
f1, f2 = np.random.random(), np.random.random()
@@ -299,27 +299,27 @@ def _check_error_and_variables(
v1_data_new = torch.ones(batch_size, dof) * f1 * 0.1
v2_data_new = torch.ones(batch_size, dof) * f2 * 0.1

input_data = {"v1": v1_data_new, "v2": v2_data_new}
input_tensors = {"v1": v1_data_new, "v2": v2_data_new}

error = objective.error(input_data=input_data, also_update=False)
error = objective.error(input_tensors=input_tensors, also_update=False)

_check_error_and_variables(
v1_data_new,
v2_data_new,
error,
"error",
objective,
input_data,
input_tensors,
also_update=False,
)

v1_data_new = torch.ones(batch_size, dof) * f1 * 0.3
v2_data_new = torch.ones(batch_size, dof) * f2 * 0.3

input_data = {"v1": v1_data_new, "v2": v2_data_new}
input_tensors = {"v1": v1_data_new, "v2": v2_data_new}

error_norm_2 = objective.error_squared_norm(
input_data=input_data, also_update=False
input_tensors=input_tensors, also_update=False
)

_check_error_and_variables(
@@ -328,34 +328,34 @@ def _check_error_and_variables(
error_norm_2,
"error_norm_2",
objective,
input_data,
input_tensors,
also_update=False,
)

v1_data_new = torch.ones(batch_size, dof) * f1 * 0.4
v2_data_new = torch.ones(batch_size, dof) * f2 * 0.4

input_data = {"v1": v1_data_new, "v2": v2_data_new}
input_tensors = {"v1": v1_data_new, "v2": v2_data_new}

error = objective.error(input_data=input_data, also_update=True)
error = objective.error(input_tensors=input_tensors, also_update=True)

_check_error_and_variables(
v1_data_new,
v2_data_new,
error,
"error",
objective,
input_data,
input_tensors,
also_update=True,
)

v1_data_new = torch.ones(batch_size, dof) * f1 * 0.4
v2_data_new = torch.ones(batch_size, dof) * f2 * 0.4

input_data = {"v1": v1_data_new, "v2": v2_data_new}
input_tensors = {"v1": v1_data_new, "v2": v2_data_new}

error_norm_2 = objective.error_squared_norm(
input_data=input_data, also_update=True
input_tensors=input_tensors, also_update=True
)

_check_error_and_variables(
Expand All @@ -364,7 +364,7 @@ def _check_error_and_variables(
error_norm_2,
"error_norm_2",
objective,
input_data,
input_tensors,
also_update=True,
)

@@ -466,16 +466,16 @@ def test_update_updates_properly():
MockCostWeight(th.Variable(torch.ones(1), name="cost_weight_aux")),
)

input_data = {}
input_tensors = {}
for var in var_to_cost_functions:
input_data[var.name] = 2 * var.tensor.clone()
input_tensors[var.name] = 2 * var.tensor.clone()
for aux in aux_to_cost_functions:
input_data[aux.name] = 2 * aux.tensor.clone()
input_tensors[aux.name] = 2 * aux.tensor.clone()

objective.update(input_data=input_data)
objective.update(input_tensors=input_tensors)
assert objective.batch_size == 1

for var_name, data in input_data.items():
for var_name, data in input_tensors.items():
if var_name in [v.name for v in var_to_cost_functions]:
var_ = objective.get_optim_var(var_name)
if var_name in [aux.name for aux in aux_to_cost_functions]:
@@ -495,36 +495,36 @@ def test_update_raises_batch_size_error():
MockCostWeight(th.Variable(torch.ones(1), name="cost_weight_aux")),
)

input_data = {}
input_tensors = {}
batch_size = 2
# first check that we can change the current batch size (doubling the size)
for var in var_to_cost_functions:
new_data = torch.ones(batch_size, 1)
input_data[var.name] = new_data
input_tensors[var.name] = new_data
for aux in aux_to_cost_functions:
new_data = torch.ones(batch_size, 1)
input_data[aux.name] = new_data
objective.update(input_data=input_data)
input_tensors[aux.name] = new_data
objective.update(input_tensors=input_tensors)
assert objective.batch_size == batch_size

# change one of the variables, no error since batch_size = 1 is broadcastable
input_data["var1"] = torch.ones(1, 1)
objective.update(input_data=input_data)
input_tensors["var1"] = torch.ones(1, 1)
objective.update(input_tensors=input_tensors)
assert objective.batch_size == batch_size

# change another variable, this time throws errors since found batch size 2 and 3
input_data["var2"] = torch.ones(batch_size + 1, 1)
input_tensors["var2"] = torch.ones(batch_size + 1, 1)
with pytest.raises(ValueError):
objective.update(input_data=input_data)
objective.update(input_tensors=input_tensors)

# change back before testing the aux. variable
input_data["var2"] = torch.ones(batch_size, 1)
objective.update(input_data=input_data) # shouldn't throw error
input_tensors["var2"] = torch.ones(batch_size, 1)
objective.update(input_tensors=input_tensors) # shouldn't throw error

# auxiliary variables should also throw error
input_data["aux1"] = torch.ones(batch_size + 1, 1)
input_tensors["aux1"] = torch.ones(batch_size + 1, 1)
with pytest.raises(ValueError):
objective.update(input_data=input_data)
objective.update(input_tensors=input_tensors)


def test_iterator():