From 628a8df1c9de02fa7a332c1cbea615db08a01c4e Mon Sep 17 00:00:00 2001
From: Ke Wen
Date: Fri, 28 Apr 2023 17:38:12 +0000
Subject: [PATCH] [c10d] Comment out ddp_hook_with_optimizer_parity tests
 (#100215)

This is a mirror PR of D45339293

Summary:
These tests cause the following errors internally for an unknown reason:
```
AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adam'
AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw'
AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_sgd'
```
Commenting these tests out to unblock other PRs.

Test Plan: Sandcastle

Pull Request resolved: https://github.com/pytorch/pytorch/pull/100215
Approved by: https://github.com/wz337, https://github.com/fduwjj
---
 torch/testing/_internal/distributed/distributed_test.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index edebe103957824..3fc7e4096fadaf 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -74,7 +74,6 @@
     IS_FBCODE,
     NO_MULTIPROCESSING_SPAWN,
     IS_SANDCASTLE,
-    parametrize,
     skip_but_pass_in_sandcastle,
     skip_but_pass_in_sandcastle_if,
 )
@@ -4905,6 +4904,13 @@ def _test_ddp_hook_with_optimizer_parity(
                 )
             dist.barrier()
 
+        """
+        # Commenting out the following 3 tests as they cause Sandcastle jobs to fail
+        # Failure signature:
+        # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw'
+
+        from torch.testing._internal.common_utils import parametrize
+
         @skip_but_pass_in_sandcastle_if(
             BACKEND == "nccl" or BACKEND == "ucc",
             "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
@@ -4973,6 +4979,7 @@ def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset):
                 momentum=sgd_momentum,
                 weight_decay=sgd_weight_decay,
             )
+        """
 
         @skip_if_lt_x_gpu(2)
         def test_get_data_parallel_params(self):
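
Note on the failure signature: the summary above records the root cause as unknown, so the following is only one plausible illustration of how a `parametrize`-decorated test can go missing as a class attribute. It is a minimal sketch, assuming the usual behavior of `parametrize` and `instantiate_parametrized_tests` from `torch.testing._internal.common_utils`; the class name `ParitySketchTests` is hypothetical and merely stands in for `TestDistBackendWithSpawn`.
```python
from torch.testing._internal.common_utils import (
    TestCase,
    instantiate_parametrized_tests,
    parametrize,
)


class ParitySketchTests(TestCase):
    # Hypothetical stand-in for TestDistBackendWithSpawn; this test mirrors
    # the shape of the three tests disabled by this patch.
    @parametrize("optimize_subset", [True, False])
    def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
        self.assertIn(optimize_subset, (True, False))


# instantiate_parametrized_tests() expands the decorated test into one
# concrete variant per parameter value, e.g.:
#   test_ddp_hook_with_optimizer_parity_adam_optimize_subset_True
#   test_ddp_hook_with_optimizer_parity_adam_optimize_subset_False
instantiate_parametrized_tests(ParitySketchTests)

# Assumption: the helper removes the original attribute when it
# instantiates the variants, so the bare, undecorated name no longer
# exists on the class.
assert not hasattr(
    ParitySketchTests, "test_ddp_hook_with_optimizer_parity_adam"
)
```
Under that assumption, any harness that enumerates these tests by their bare, undecorated names would fail with exactly the AttributeError quoted in the summary, which would be consistent with disabling the tests wholesale (and moving the `parametrize` import inside the disabled block) as a quick unblock.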