Commit b4dea43

davnov134 authored and facebook-github-bot committed
Support for multi-dimensional list_to_padded/padded_to_list.
Summary: Extends `list_to_padded`/`padded_to_list` to work for tensors with an arbitrary number of input dimensions.

Reviewed By: nikhilaravi, gkioxari

Differential Revision: D23813969

fbshipit-source-id: 52c212a2ecdb3c4dfb6ac47217715e07998f37f1
1 parent 0ba55a8 commit b4dea43
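
To illustrate the change, here is a minimal sketch of how the extended functions can be called after this commit; the tensor shapes are arbitrary examples, not taken from the diff below.

import torch

from pytorch3d.structures.utils import list_to_padded, padded_to_list

# Three 3D tensors whose sizes differ along every dimension.
x = [torch.rand(2, 5, 3), torch.rand(4, 2, 6), torch.rand(1, 7, 2)]

# With pad_size=None each output dimension pads to the per-dimension maximum,
# so the result has shape (3, 4, 7, 6); 2D-only inputs are no longer required.
x_padded = list_to_padded(x, pad_value=0.0)

# Round trip: passing the original per-element shapes as split_size
# recovers the input list.
split_size = [tuple(y.shape) for y in x]
x_list = padded_to_list(x_padded, split_size)
assert all(torch.equal(a, b) for a, b in zip(x, x_list))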

File tree

2 files changed: +132 -82 lines changed


pytorch3d/structures/utils.py

Lines changed: 52 additions & 36 deletions
@@ -1,77 +1,97 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 
-from typing import List, Union
+from typing import List, Sequence, Union
 
 import torch
 
 
 """
-Util functions containing representation transforms for points/verts/faces.
+Util functions for points/verts/faces/volumes.
 """
 
 
 def list_to_padded(
     x: List[torch.Tensor],
-    pad_size: Union[list, tuple, None] = None,
+    pad_size: Union[Sequence[int], None] = None,
     pad_value: float = 0.0,
     equisized: bool = False,
 ) -> torch.Tensor:
     r"""
-    Transforms a list of N tensors each of shape (Mi, Ki) into a single tensor
-    of shape (N, pad_size(0), pad_size(1)), or (N, max(Mi), max(Ki))
-    if pad_size is None.
+    Transforms a list of N tensors each of shape (Si_0, Si_1, ... Si_D)
+    into:
+    - a single tensor of shape (N, pad_size(0), pad_size(1), ..., pad_size(D))
+      if pad_size is provided
+    - or a tensor of shape (N, max(Si_0), max(Si_1), ..., max(Si_D)) if pad_size is None.
 
     Args:
         x: list of Tensors
-        pad_size: list(int) specifying the size of the padded tensor
+        pad_size: list(int) specifying the size of the padded tensor.
+            If `None` (default), the largest size of each dimension
+            is set as the `pad_size`.
         pad_value: float value to be used to fill the padded tensor
         equisized: bool indicating whether the items in x are of equal size
             (sometimes this is known and if provided saves computation)
 
     Returns:
-        x_padded: tensor consisting of padded input tensors
+        x_padded: tensor consisting of padded input tensors stored
+            over the newly allocated memory.
     """
     if equisized:
         return torch.stack(x, 0)
 
+    if not all(torch.is_tensor(y) for y in x):
+        raise ValueError("All items have to be instances of a torch.Tensor.")
+
+    # we set the common number of dimensions to the maximum
+    # of the dimensionalities of the tensors in the list
+    element_ndim = max(y.ndim for y in x)
+
+    # replace empty 1D tensors with empty tensors with a correct number of dimensions
+    x = [
+        (y.new_zeros([0] * element_ndim) if (y.ndim == 1 and y.nelement() == 0) else y)
+        for y in x
+    ]  # pyre-ignore
+
+    if any(y.ndim != x[0].ndim for y in x):
+        raise ValueError("All items have to have the same number of dimensions!")
+
     if pad_size is None:
-        pad_dim0 = max(y.shape[0] for y in x if len(y) > 0)
-        pad_dim1 = max(y.shape[1] for y in x if len(y) > 0)
+        pad_dims = [
+            max(y.shape[dim] for y in x if len(y) > 0) for dim in range(x[0].ndim)
+        ]
     else:
-        if len(pad_size) != 2:
-            raise ValueError("Pad size must contain target size for 1st and 2nd dim")
-        pad_dim0, pad_dim1 = pad_size
+        if any(len(pad_size) != y.ndim for y in x):
+            raise ValueError("Pad size must contain target size for all dimensions.")
+        pad_dims = pad_size
 
     N = len(x)
-    x_padded = torch.full(
-        (N, pad_dim0, pad_dim1), pad_value, dtype=x[0].dtype, device=x[0].device
-    )
+    x_padded = x[0].new_full((N, *pad_dims), pad_value)
     for i, y in enumerate(x):
         if len(y) > 0:
-            if y.ndim != 2:
-                raise ValueError("Supports only 2-dimensional tensor items")
-            x_padded[i, : y.shape[0], : y.shape[1]] = y
+            slices = (i, *(slice(0, y.shape[dim]) for dim in range(y.ndim)))
+            x_padded[slices] = y
     return x_padded
 
 
-def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None):
+def padded_to_list(
+    x: torch.Tensor,
+    split_size: Union[Sequence[int], Sequence[Sequence[int]], None] = None,
+):
     r"""
-    Transforms a padded tensor of shape (N, M, K) into a list of N tensors
-    of shape (Mi, Ki) where (Mi, Ki) is specified in split_size(i), or of shape
-    (M, K) if split_size is None.
-    Support only for 3-dimensional input tensor.
+    Transforms a padded tensor of shape (N, S_1, S_2, ..., S_D) into a list
+    of N tensors of shape:
+    - (Si_1, Si_2, ..., Si_D) where (Si_1, Si_2, ..., Si_D) is specified in split_size(i)
+    - or (S_1, S_2, ..., S_D) if split_size is None
+    - or (Si_1, S_2, ..., S_D) if split_size(i) is an integer.
 
     Args:
         x: tensor
-        split_size: list, tuple or int defining the number of items for each tensor
-            in the output list.
+        split_size: optional 1D or 2D list/tuple of ints defining the number of
+            items for each tensor.
 
     Returns:
-        x_list: a list of tensors
+        x_list: a list of tensors sharing the memory with the input.
     """
-    if x.ndim != 3:
-        raise ValueError("Supports only 3-dimensional input tensors")
-
     x_list = list(x.unbind(0))
 
     if split_size is None:
@@ -84,13 +104,9 @@ def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None)
     for i in range(N):
         if isinstance(split_size[i], int):
             x_list[i] = x_list[i][: split_size[i]]
-        elif len(split_size[i]) == 2:
-            x_list[i] = x_list[i][: split_size[i][0], : split_size[i][1]]
         else:
-            raise ValueError(
-                "Support only for 2-dimensional unbinded tensor. \
-                Split size for more dimensions provided"
-            )
+            slices = tuple(slice(0, s) for s in split_size[i])  # pyre-ignore
+            x_list[i] = x_list[i][slices]
     return x_list
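The generalization above hinges on one idiom: instead of hard-coded `[: y.shape[0], : y.shape[1]]` indexing, a tuple of `slice` objects is built for however many dimensions the item has. A standalone sketch of that idiom (plain PyTorch, independent of the pytorch3d API):

import torch

y = torch.arange(6).reshape(2, 3)
dst = torch.zeros(4, 5)

# Equivalent to dst[:2, :3] = y, but constructed for arbitrary y.ndim.
slices = tuple(slice(0, y.shape[dim]) for dim in range(y.ndim))
dst[slices] = y
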
tests/test_struct_utils.py

Lines changed: 80 additions & 46 deletions
@@ -9,42 +9,74 @@
 
 
 class TestStructUtils(TestCaseMixin, unittest.TestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        torch.manual_seed(43)
+
+    def _check_list_to_padded_slices(self, x, x_padded, ndim):
+        N = len(x)
+        for i in range(N):
+            slices = [i]
+            for dim in range(ndim):
+                if x[i].nelement() == 0 and x[i].ndim == 1:
+                    slice_ = slice(0, 0, 1)
+                else:
+                    slice_ = slice(0, x[i].shape[dim], 1)
+                slices.append(slice_)
+            if x[i].nelement() == 0 and x[i].ndim == 1:
+                x_correct = x[i].new_zeros(*[[0] * ndim])
+            else:
+                x_correct = x[i]
+            self.assertClose(x_padded[slices], x_correct)
+
     def test_list_to_padded(self):
         device = torch.device("cuda:0")
         N = 5
         K = 20
-        ndim = 2
-        x = []
-        for _ in range(N):
-            dims = torch.randint(K, size=(ndim,)).tolist()
-            x.append(torch.rand(dims, device=device))
-        pad_size = [K] * ndim
-        x_padded = struct_utils.list_to_padded(
-            x, pad_size=pad_size, pad_value=0.0, equisized=False
-        )
+        for ndim in [1, 2, 3, 4]:
+            x = []
+            for _ in range(N):
+                dims = torch.randint(K, size=(ndim,)).tolist()
+                x.append(torch.rand(dims, device=device))
 
-        self.assertEqual(x_padded.shape[1], K)
-        self.assertEqual(x_padded.shape[2], K)
-        for i in range(N):
-            self.assertClose(x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i])
-
-        # check for no pad size (defaults to max dimension)
-        x_padded = struct_utils.list_to_padded(x, pad_value=0.0, equisized=False)
-        max_size0 = max(y.shape[0] for y in x)
-        max_size1 = max(y.shape[1] for y in x)
-        self.assertEqual(x_padded.shape[1], max_size0)
-        self.assertEqual(x_padded.shape[2], max_size1)
-        for i in range(N):
-            self.assertClose(x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i])
+            # set 0th element to an empty 1D tensor
+            x[0] = torch.tensor([], dtype=x[0].dtype, device=device)
+
+            # set 1st element to an empty tensor with correct number of dims
+            x[1] = x[1].new_zeros(*[[0] * ndim])
+
+            pad_size = [K] * ndim
+            x_padded = struct_utils.list_to_padded(
+                x, pad_size=pad_size, pad_value=0.0, equisized=False
+            )
+
+            for dim in range(ndim):
+                self.assertEqual(x_padded.shape[dim + 1], K)
 
-        # check for equisized
-        x = [torch.rand((K, 10), device=device) for _ in range(N)]
-        x_padded = struct_utils.list_to_padded(x, equisized=True)
-        self.assertClose(x_padded, torch.stack(x, 0))
+            self._check_list_to_padded_slices(x, x_padded, ndim)
+
+            # check for no pad size (defaults to max dimension)
+            x_padded = struct_utils.list_to_padded(x, pad_value=0.0, equisized=False)
+            max_sizes = (
+                max(
+                    (0 if (y.nelement() == 0 and y.ndim == 1) else y.shape[dim])
+                    for y in x
+                )
+                for dim in range(ndim)
+            )
+            for dim, max_size in enumerate(max_sizes):
+                self.assertEqual(x_padded.shape[dim + 1], max_size)
+
+            self._check_list_to_padded_slices(x, x_padded, ndim)
+
+            # check for equisized
+            x = [torch.rand((K, *([10] * (ndim - 1))), device=device) for _ in range(N)]
+            x_padded = struct_utils.list_to_padded(x, equisized=True)
+            self.assertClose(x_padded, torch.stack(x, 0))
 
         # catch ValueError for invalid dimensions
         with self.assertRaisesRegex(ValueError, "Pad size must"):
-            pad_size = [K] * 4
+            pad_size = [K] * (ndim + 1)
             struct_utils.list_to_padded(
                 x, pad_size=pad_size, pad_value=0.0, equisized=False
             )
@@ -56,7 +88,7 @@ def test_list_to_padded(self):
             dims = torch.randint(K, size=(ndim,)).tolist()
             x.append(torch.rand(dims, device=device))
         pad_size = [K] * 2
-        with self.assertRaisesRegex(ValueError, "Supports only"):
+        with self.assertRaisesRegex(ValueError, "Pad size must"):
             x_padded = struct_utils.list_to_padded(
                 x, pad_size=pad_size, pad_value=0.0, equisized=False
             )
@@ -66,27 +98,29 @@ def test_padded_to_list(self):
         N = 5
         K = 20
         ndim = 2
-        dims = [K] * ndim
-        x = torch.rand([N] + dims, device=device)
 
-        x_list = struct_utils.padded_to_list(x)
-        for i in range(N):
-            self.assertClose(x_list[i], x[i])
+        for ndim in (2, 3, 4):
 
-        split_size = torch.randint(1, K, size=(N,)).tolist()
-        x_list = struct_utils.padded_to_list(x, split_size)
-        for i in range(N):
-            self.assertClose(x_list[i], x[i, : split_size[i]])
+            dims = [K] * ndim
+            x = torch.rand([N] + dims, device=device)
 
-        split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0)
-        x_list = struct_utils.padded_to_list(x, split_size)
-        for i in range(N):
-            self.assertClose(x_list[i], x[i, : split_size[i][0], : split_size[i][1]])
+            x_list = struct_utils.padded_to_list(x)
+            for i in range(N):
+                self.assertClose(x_list[i], x[i])
 
-        with self.assertRaisesRegex(ValueError, "Supports only"):
-            x = torch.rand((N, K, K, K, K), device=device)
-            split_size = torch.randint(1, K, size=(N,)).tolist()
-            struct_utils.padded_to_list(x, split_size)
+            split_size = torch.randint(1, K, size=(N, ndim)).unbind(0)
+            x_list = struct_utils.padded_to_list(x, split_size)
+            for i in range(N):
+                slices = [i]
+                for dim in range(ndim):
+                    slices.append(slice(0, split_size[i][dim], 1))
+                self.assertClose(x_list[i], x[slices])
+
+            # split size is a list of ints
+            split_size = [int(z) for z in torch.randint(1, K, size=(N,)).unbind(0)]
+            x_list = struct_utils.padded_to_list(x, split_size)
+            for i in range(N):
+                self.assertClose(x_list[i], x[i][: split_size[i]])
 
     def test_padded_to_packed(self):
         device = torch.device("cuda:0")
@@ -160,7 +194,7 @@ def test_padded_to_packed(self):
         with self.assertRaisesRegex(ValueError, "Supports only"):
            x = torch.rand((N, K, K, K, K), device=device)
            split_size = torch.randint(1, K, size=(N,)).tolist()
-            struct_utils.padded_to_list(x, split_size)
+            struct_utils.padded_to_packed(x, split_size=split_size)
 
     def test_list_to_packed(self):
         device = torch.device("cuda:0")
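
The tests above pin `cuda:0`, but the empty-tensor convention they exercise is device-independent: an empty 1D tensor in the input list is promoted to an empty tensor of the common dimensionality before padding. A hedged sketch of that behavior on CPU, with expected outputs derived from the diff above:

import torch

from pytorch3d.structures.utils import list_to_padded

# The empty 1D entry is promoted to an empty 2D tensor, so padding
# succeeds and row 0 of the batch is left entirely at pad_value.
x = [torch.tensor([]), torch.rand(3, 4), torch.rand(2, 6)]
x_padded = list_to_padded(x, pad_value=0.0)

print(x_padded.shape)            # torch.Size([3, 3, 6])
print(x_padded[0].abs().sum())   # tensor(0.)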
