auto updates (Project-MONAI#6153)

Signed-off-by: monai-bot <monai.miccai2019@gmail.com>
Signed-off-by: Wenqi Li <wenqil@nvidia.com>
Co-authored-by: Wenqi Li <wenqil@nvidia.com>
monai-bot and wyli authored Mar 15, 2023
1 parent af46d7b commit 6a113e6
Showing 10 changed files with 15 additions and 11 deletions.
1 change: 1 addition & 0 deletions .github/workflows/codeql-analysis.yml
@@ -64,6 +64,7 @@ jobs:

- name: Build
run: |
+ python -m pip install -U pip wheel
python -m pip install -r requirements-dev.txt
BUILD_MONAI=1 ./runtests.sh --build
1 change: 1 addition & 0 deletions .github/workflows/docker.yml
@@ -32,6 +32,7 @@ jobs:
- shell: bash
run: |
git describe
+ python -m pip install -U pip wheel setuptools
python setup.py build
cat build/lib/monai/_version.py
- name: Upload version
2 changes: 0 additions & 2 deletions monai/bundle/properties.py
@@ -8,7 +8,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The predefined properties for a bundle workflow, other applications can leverage the properties
to interact with the bundle workflow.
@@ -149,7 +148,6 @@
},
}


InferProperties = {
"bundle_root": {
BundleProperty.DESC: "root path of the bundle.",
2 changes: 1 addition & 1 deletion monai/data/utils.py
@@ -667,7 +667,7 @@ def worker_init_fn(worker_id: int) -> None:
"""
worker_info = torch.utils.data.get_worker_info()
- set_rnd(worker_info.dataset, seed=worker_info.seed)
+ set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]


def set_rnd(obj, seed: int) -> int:
2 changes: 2 additions & 0 deletions monai/networks/layers/filtering.py
@@ -49,6 +49,7 @@ class BilateralFilter(torch.autograd.Function):

@staticmethod
def forward(ctx, input, spatial_sigma=5, color_sigma=0.5, fast_approx=True):
"""autograd forward"""
ctx.ss = spatial_sigma
ctx.cs = color_sigma
ctx.fa = fast_approx
@@ -57,6 +58,7 @@ def forward(ctx, input, spatial_sigma=5, color_sigma=0.5, fast_approx=True):

@staticmethod
def backward(ctx, grad_output):
"""autograd backward"""
spatial_sigma, color_sigma, fast_approx = ctx.ss, ctx.cs, ctx.fa
grad_input = _C.bilateral_filter(grad_output, spatial_sigma, color_sigma, fast_approx)
return grad_input, None, None, None
2 changes: 1 addition & 1 deletion monai/networks/utils.py
@@ -610,7 +610,7 @@ def convert_to_torchscript(
for r1, r2 in zip(torch_out, torchscript_out):
if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):
assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose
- assert_fn(r1, r2, rtol=rtol, atol=atol)
+ assert_fn(r1, r2, rtol=rtol, atol=atol) # type: ignore

return script_module

12 changes: 6 additions & 6 deletions monai/transforms/spatial/array.py
@@ -1836,7 +1836,7 @@ def resolve_modes(interp_mode, padding_mode):
elif interp_mode == "bilinear":
_interp_mode = 1 # type: ignore
else:
- _interp_mode = GridSampleMode(interp_mode) # type: ignore
+ _interp_mode = GridSampleMode(interp_mode)
else: # TransformBackends.NUMPY
_interp_mode = int(interp_mode) # type: ignore
_padding_mode = look_up_option(padding_mode, NdimageMode)
@@ -1898,7 +1898,7 @@ def __call__(

if USE_COMPILED or backend == TransformBackends.NUMPY:
grid_t, *_ = convert_to_dst_type(grid[:sr], img_t, dtype=grid.dtype, wrap_sequence=True)
- if hasattr(grid, "storage") and grid_t.storage().data_ptr() == grid.storage().data_ptr():
+ if isinstance(grid, torch.Tensor) and grid_t.data_ptr() == grid.data_ptr():
grid_t = grid_t.clone(memory_format=torch.contiguous_format)
for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
_dim = max(2, dim)
@@ -1928,7 +1928,7 @@ def __call__(
else:
grid_t = moveaxis(grid[list(range(sr - 1, -1, -1))], 0, -1) # type: ignore
grid_t = convert_to_dst_type(grid_t, img_t, wrap_sequence=True)[0].unsqueeze(0)
- if hasattr(grid, "storage") and grid_t.storage().data_ptr() == grid.storage().data_ptr():
+ if isinstance(grid, torch.Tensor) and grid_t.data_ptr() == grid.data_ptr():
grid_t = grid_t.clone(memory_format=torch.contiguous_format)
if self.norm_coords:
for i, dim in enumerate(img_t.shape[sr + 1 : 0 : -1]):
@@ -2079,7 +2079,7 @@ def __call__(
sp_size = fall_back_tuple(self.spatial_size if spatial_size is None else spatial_size, img_size)
_mode = mode if mode is not None else self.mode
_padding_mode = padding_mode if padding_mode is not None else self.padding_mode
- grid, affine = self.affine_grid(spatial_size=sp_size) # type: ignore
+ grid, affine = self.affine_grid(spatial_size=sp_size)

return affine_func( # type: ignore
img,
@@ -2327,15 +2327,15 @@ def __call__(
img = convert_to_tensor(img, track_meta=get_track_meta())
if self.lazy_evaluation:
if self._do_transform:
- affine = self.rand_affine_grid.get_transformation_matrix() # type: ignore
+ affine = self.rand_affine_grid.get_transformation_matrix()
else:
affine = convert_to_dst_type(torch.eye(len(sp_size) + 1), img, dtype=self.rand_affine_grid.dtype)[0]
else:
if grid is None:
grid = self.get_identity_grid(sp_size)
if self._do_transform:
grid = self.rand_affine_grid(grid=grid, randomize=randomize)
- affine = self.rand_affine_grid.get_transformation_matrix() # type: ignore
+ affine = self.rand_affine_grid.get_transformation_matrix()
return affine_func( # type: ignore
img,
affine,
1 change: 1 addition & 0 deletions tests/test_handler_clearml_image.py
@@ -22,6 +22,7 @@

@unittest.skipUnless(has_clearml, "Requires 'clearml' installation")
@unittest.skipUnless(has_tb, "Requires SummaryWriter installation")
+ @unittest.skip("temp mute clearml tests https://github.com/Project-MONAI/MONAI/issues/6148")
class TestHandlerClearMLImageHandler(unittest.TestCase):
def test_task_init(self):
Task.set_offline(offline_mode=True)
1 change: 1 addition & 0 deletions tests/test_handler_clearml_stats.py
@@ -22,6 +22,7 @@

@unittest.skipUnless(has_clearml, "Requires 'clearml' installation")
@unittest.skipUnless(has_tb, "Requires SummaryWriter installation")
+ @unittest.skip("temp mute clearml tests https://github.com/Project-MONAI/MONAI/issues/6148")
class TestHandlerClearMLStatsHandler(unittest.TestCase):
def test_task_init(self):
Task.set_offline(offline_mode=True)
2 changes: 1 addition & 1 deletion tests/test_spacingd.py
@@ -117,7 +117,7 @@ def test_spacingd(self, _, data, kw_args, expected_shape, expected_affine, devic
@parameterized.expand(TESTS_TORCH)
def test_orntd_torch(self, init_param, img: torch.Tensor, track_meta: bool, device):
set_track_meta(track_meta)
- tr = Spacingd(**init_param) # type: ignore
+ tr = Spacingd(**init_param)
call_param = {"data": {"seg": img.to(device)}}
res_data = tr(**call_param) # type: ignore
res = res_data["seg"]
