
Commit 5ac2f42

bottler authored and facebook-github-bot committed
test & compilation fixes
Summary: Fixes mostly related to the "main" build on CircleCI.
- Avoid an error to do with tuple copy from initializer_list, whose constructor is `explicit` on older compilers.
- Add better reporting to the copyright test.
- Move to PackedTensorAccessor64 from the deprecated PackedTensorAccessor.
- Avoid some warnings about mismatched signed/unsigned comparisons.

The "main" build is the only one that runs the test_build tests. In that area:
- Fix my bad copyright fix D26275931 (facebookresearch@3463f41) / 965c9c.
- Add a test that all tutorials are valid JSON.

Reviewed By: nikhilaravi

Differential Revision: D26366466

fbshipit-source-id: c4ab8b7e6647987069f7cb7144aa6ab7c24bcdac
1 parent e13e63a commit 5ac2f42

5 files changed: +69 −44 lines


projects/nerf/__init__.py

+1-1
@@ -1 +1 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

pytorch3d/csrc/blending/sigmoid_alpha_blend.cu

+16-16
@@ -9,9 +9,9 @@
 template <typename scalar_t>
 __global__ void SigmoidAlphaBlendForwardKernel(
     // clang-format off
-    const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> distances, // (N, H, W, K)
-    const torch::PackedTensorAccessor<int64_t, 4, torch::RestrictPtrTraits, size_t> pix_to_face, // (N, H, W, K)
-    torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> distances, // (N, H, W, K)
+    const torch::PackedTensorAccessor64<int64_t, 4, torch::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
+    torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> alphas, // (N, H, W)
     // clang-format on
     const scalar_t sigma,
     const int N,
@@ -93,9 +93,9 @@ torch::Tensor SigmoidAlphaBlendForwardCuda(
       distances.scalar_type(), "sigmoid_alpha_blend_kernel", ([&] {
         // clang-format off
         SigmoidAlphaBlendForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
-          distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
-          pix_to_face.packed_accessor<int64_t, 4, torch::RestrictPtrTraits, size_t>(),
-          alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
+          distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
+          pix_to_face.packed_accessor64<int64_t, 4, torch::RestrictPtrTraits>(),
+          alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
           sigma,
           N,
           H,
@@ -111,11 +111,11 @@ torch::Tensor SigmoidAlphaBlendForwardCuda(
 template <typename scalar_t>
 __global__ void SigmoidAlphaBlendBackwardKernel(
     // clang-format off
-    const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grad_alphas, // (N, H, W)
-    const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> alphas, // (N, H, W)
-    const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> distances, // (N, H, W, K)
-    const torch::PackedTensorAccessor<int64_t, 4, torch::RestrictPtrTraits, size_t> pix_to_face, // (N, H, W, K)
-    torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grad_distances, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> grad_alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> distances, // (N, H, W, K)
+    const torch::PackedTensorAccessor64<int64_t, 4, torch::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
+    torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> grad_distances, // (N, H, W)
     // clang-format on
     const scalar_t sigma,
     const int N,
@@ -192,11 +192,11 @@ torch::Tensor SigmoidAlphaBlendBackwardCuda(
         SigmoidAlphaBlendBackwardKernel<scalar_t>
             <<<blocks, threads, 0, stream>>>(
                 // clang-format off
-                grad_alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
-                alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
-                distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
-                pix_to_face.packed_accessor<int64_t, 4, torch::RestrictPtrTraits, size_t>(),
-                grad_distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
+                grad_alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
+                alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
+                distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
+                pix_to_face.packed_accessor64<int64_t, 4, torch::RestrictPtrTraits>(),
+                grad_distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
                 // clang-format on
                 sigma,
                 N,
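
For reference, a minimal sketch (not part of this commit) of the accessor change on the host side: the deprecated `packed_accessor` took the index type as a fourth template argument, while `packed_accessor64` fixes 64-bit indexing and drops that argument.

#include <torch/extension.h>

// Hypothetical helper illustrating the migration; `distances` stands in for
// any tensor handed to a CUDA kernel.
void make_accessors(torch::Tensor distances) {
  // Deprecated spelling: index type (size_t) passed explicitly. Recent
  // PyTorch versions emit a deprecation warning for this.
  // auto old_acc =
  //     distances.packed_accessor<float, 4, torch::RestrictPtrTraits, size_t>();

  // Current spelling: 64-bit indexing is part of the name, and there is no
  // index-type template argument.
  auto acc = distances.packed_accessor64<float, 4, torch::RestrictPtrTraits>();

  // `acc` is passed by value to a kernel whose parameter type is
  // torch::PackedTensorAccessor64<float, 4, torch::RestrictPtrTraits>.
  (void)acc;
}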

pytorch3d/csrc/pulsar/pytorch/renderer.cpp

+27-18
@@ -214,65 +214,71 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
     batch_processing = true;
     batch_size = vert_pos.size(0);
     THArgCheck(
-        vert_col.ndimension() == 3 && vert_col.size(0) == batch_size,
+        vert_col.ndimension() == 3 &&
+            vert_col.size(0) == static_cast<int64_t>(batch_size),
         2,
         "vert_col needs to have batch size.");
     THArgCheck(
-        vert_radii.ndimension() == 2 && vert_radii.size(0) == batch_size,
+        vert_radii.ndimension() == 2 &&
+            vert_radii.size(0) == static_cast<int64_t>(batch_size),
         3,
         "vert_radii must be specified per batch.");
     THArgCheck(
-        cam_pos.ndimension() == 2 && cam_pos.size(0) == batch_size,
+        cam_pos.ndimension() == 2 &&
+            cam_pos.size(0) == static_cast<int64_t>(batch_size),
         4,
         "cam_pos must be specified per batch and have the correct batch size.");
     THArgCheck(
         pixel_0_0_center.ndimension() == 2 &&
-            pixel_0_0_center.size(0) == batch_size,
+            pixel_0_0_center.size(0) == static_cast<int64_t>(batch_size),
         5,
         "pixel_0_0_center must be specified per batch.");
     THArgCheck(
-        pixel_vec_x.ndimension() == 2 && pixel_vec_x.size(0) == batch_size,
+        pixel_vec_x.ndimension() == 2 &&
+            pixel_vec_x.size(0) == static_cast<int64_t>(batch_size),
         6,
         "pixel_vec_x must be specified per batch.");
     THArgCheck(
-        pixel_vec_y.ndimension() == 2 && pixel_vec_y.size(0) == batch_size,
+        pixel_vec_y.ndimension() == 2 &&
+            pixel_vec_y.size(0) == static_cast<int64_t>(batch_size),
         7,
         "pixel_vec_y must be specified per batch.");
     THArgCheck(
-        focal_length.ndimension() == 1 && focal_length.size(0) == batch_size,
+        focal_length.ndimension() == 1 &&
+            focal_length.size(0) == static_cast<int64_t>(batch_size),
         8,
         "focal_length must be specified per batch.");
     THArgCheck(
         principal_point_offsets.ndimension() == 2 &&
-            principal_point_offsets.size(0) == batch_size,
+            principal_point_offsets.size(0) == static_cast<int64_t>(batch_size),
         9,
         "principal_point_offsets must be specified per batch.");
     if (opacity.has_value()) {
       THArgCheck(
           opacity.value().ndimension() == 2 &&
-              opacity.value().size(0) == batch_size,
+              opacity.value().size(0) == static_cast<int64_t>(batch_size),
           13,
           "Opacity needs to be specified batch-wise.");
     }
     // Check all parameters are for a matching number of points.
     n_points = vert_pos.size(1);
     THArgCheck(
-        vert_col.size(1) == n_points,
+        vert_col.size(1) == static_cast<int64_t>(n_points),
         2,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex colors (" +
          std::to_string(vert_col.size(1)) + ") doesn't agree.")
             .c_str());
     THArgCheck(
-        vert_radii.size(1) == n_points,
+        vert_radii.size(1) == static_cast<int64_t>(n_points),
         3,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex radii (" +
          std::to_string(vert_col.size(1)) + ") doesn't agree.")
             .c_str());
     if (opacity.has_value()) {
       THArgCheck(
-          opacity.value().size(1) == n_points,
+          opacity.value().size(1) == static_cast<int64_t>(n_points),
           13,
           "Opacity needs to be specified per point.");
     }
@@ -352,22 +358,22 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
     // Check each.
     n_points = vert_pos.size(0);
     THArgCheck(
-        vert_col.size(0) == n_points,
+        vert_col.size(0) == static_cast<int64_t>(n_points),
         2,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex colors (" +
          std::to_string(vert_col.size(0)) + ") doesn't agree.")
             .c_str());
     THArgCheck(
-        vert_radii.size(0) == n_points,
+        vert_radii.size(0) == static_cast<int64_t>(n_points),
         3,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex radii (" +
          std::to_string(vert_col.size(0)) + ") doesn't agree.")
             .c_str());
     if (opacity.has_value()) {
       THArgCheck(
-          opacity.value().size(0) == n_points,
+          opacity.value().size(0) == static_cast<int64_t>(n_points),
           12,
           "Opacity needs to be specified per point.");
     }
@@ -958,12 +964,15 @@ Renderer::backward(
   }
   if (batch_processing) {
     THArgCheck(
-        grad_im.size(0) == batch_size,
+        grad_im.size(0) == static_cast<int64_t>(batch_size),
         1,
         "Gradient image batch size must agree.");
-    THArgCheck(image.size(0) == batch_size, 2, "Image batch size must agree.");
     THArgCheck(
-        forw_info.size(0) == batch_size,
+        image.size(0) == static_cast<int64_t>(batch_size),
+        2,
+        "Image batch size must agree.");
+    THArgCheck(
+        forw_info.size(0) == static_cast<int64_t>(batch_size),
         3,
         "forward info must have batch size.");
   }
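
These casts address the mismatched-comparison warnings from the summary: `torch::Tensor::size()` returns a signed `int64_t`, while `batch_size` and `n_points` are held as unsigned `size_t` (see the `std::tuple<size_t, size_t, bool, torch::Tensor>` return type in the hunk header), so comparing them directly converts the signed operand to unsigned and triggers `-Wsign-compare`. A minimal sketch of the pattern, with hypothetical names:

#include <cstddef>
#include <cstdint>

// Comparing a signed tensor dimension against an unsigned count. Without the
// cast, the int64_t operand would be converted to unsigned and the compiler
// warns (-Wsign-compare); casting size_t to int64_t makes the intent explicit.
bool dim_matches(int64_t tensor_dim, size_t expected) {
  return tensor_dim == static_cast<int64_t>(expected);
}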

pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp

+2-2
@@ -291,8 +291,8 @@ RasterizeMeshesNaiveCpu(
         const float dist_neighbor = std::abs(std::get<2>(neighbor));
         if (dist < dist_neighbor) {
           // Overwrite the neighbor face values.
-          q[idx_top_k] = {
-              pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z};
+          q[idx_top_k] = std::make_tuple(
+              pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z);
         }
       } else {
         // Handle as a normal face.
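
Background on this fix: before C++17, `std::tuple`'s converting constructor is declared `explicit`, so copy-list-initialization like `q[idx_top_k] = {...}` is rejected by older toolchains (C++17 made the constructor conditionally explicit, which is why newer compilers accept it). `std::make_tuple` builds the tuple through direct initialization and compiles everywhere. A minimal sketch:

#include <tuple>

int main() {
  std::tuple<float, int, float> t;

  // Rejected by pre-C++17 standard libraries: copy-list-initialization
  // cannot call the explicit tuple constructor.
  //   t = {1.0f, 2, 3.0f};

  // Portable alternative: make_tuple uses direct initialization.
  t = std::make_tuple(1.0f, 2, 3.0f);
  return 0;
}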

tests/test_build.py

+23-7
@@ -1,4 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+import json
 import os
 import unittest
 from collections import Counter
@@ -39,18 +40,33 @@ def test_copyright(self):
             + " All rights reserved.\n"
         )
 
+        files_missing_copyright_header = []
+
         for extension in extensions:
-            for i in root_dir.glob(f"**/*.{extension}"):
-                if str(i).endswith(
+            for path in root_dir.glob(f"**/*.{extension}"):
+                if str(path).endswith(
                     "pytorch3d/transforms/external/kornia_angle_axis_to_rotation_matrix.py"
                 ):
                     continue
-                if str(i).endswith("pytorch3d/csrc/pulsar/include/fastermath.h"):
+                if str(path).endswith("pytorch3d/csrc/pulsar/include/fastermath.h"):
                     continue
-                with open(i) as f:
+                with open(path) as f:
                     firstline = f.readline()
                     if firstline.startswith(("# -*-", "#!")):
                         firstline = f.readline()
-                    self.assertTrue(
-                        firstline.endswith(expect), f"{i} missing copyright header."
-                    )
+                    if not firstline.endswith(expect):
+                        files_missing_copyright_header.append(str(path))
+
+        if len(files_missing_copyright_header) != 0:
+            self.fail("\n".join(files_missing_copyright_header))
+
+    @unittest.skipIf(in_conda_build, "In conda build")
+    def test_valid_ipynbs(self):
+        # Check that the ipython notebooks are valid json
+        test_dir = Path(__file__).resolve().parent
+        tutorials_dir = test_dir.parent / "docs" / "tutorials"
+        tutorials = sorted(tutorials_dir.glob("*.ipynb"))
+
+        for tutorial in tutorials:
+            with open(tutorial) as f:
+                json.load(f)
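
The reworked test is the "better reporting" item from the summary: instead of asserting inside the loop, which stops at the first offending file, it accumulates every failure and reports them all at once. A language-agnostic sketch of the same collect-then-fail idea, written in C++ with hypothetical data:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  const std::string expect =
      "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.";
  // Hypothetical (path, first line) pairs standing in for the repo scan.
  const std::vector<std::pair<std::string, std::string>> first_lines = {
      {"good.py", expect},
      {"bad.py", "import os"},
  };

  // Accumulate offenders instead of stopping at the first one.
  std::vector<std::string> missing;
  for (const auto& entry : first_lines) {
    if (entry.second != expect) {
      missing.push_back(entry.first);
    }
  }

  // One failure that names every file, like self.fail("\n".join(...)).
  if (!missing.empty()) {
    for (const auto& path : missing) {
      std::cerr << path << " missing copyright header.\n";
    }
    return 1;
  }
  return 0;
}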
