Skip to content

Commit a91275a

Browse files
authored
add assertion error about config migration to prototype/autoround (#1852)
* Update [ghstack-poisoned] * Update [ghstack-poisoned] * Update [ghstack-poisoned]
1 parent bc509dc commit a91275a

File tree

2 files changed

+12
-2
lines changed

2 files changed

+12
-2
lines changed

test/prototype/test_autoround.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,10 @@ def _check_params_and_buffers_type(module, check_fun):
 
 
 class TestAutoRound(TestCase):
-    @pytest.mark.skip(not TORCH_VERSION_AT_LEAST_2_5, "Requires torch 2.5 or later")
+    @pytest.mark.skip("these tests are broken on main branch")
+    @pytest.mark.skipif(
+        not TORCH_VERSION_AT_LEAST_2_5, reason="Requires torch 2.5 or later"
+    )
     @parametrize("device", _AVAILABLE_DEVICES)
     @torch.no_grad()
     def test_auto_round(self, device: str):
@@ -127,7 +130,10 @@ def test_auto_round(self, device: str):
         after_quant = m(*example_inputs)
         assert after_quant is not None, "Quantized model forward pass failed"
 
-    @pytest.mark.skip(not TORCH_VERSION_AT_LEAST_2_5, "Requires torch 2.5 or later")
+    @pytest.mark.skip("these tests are broken on main branch")
+    @pytest.mark.skipif(
+        not TORCH_VERSION_AT_LEAST_2_5, reason="Requires torch 2.5 or later"
+    )
     @parametrize("device", _AVAILABLE_DEVICES)
     @torch.no_grad()
     def test_wrap_model_with_multi_tensor(self, device: str):

torchao/prototype/autoround/core.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,10 @@ def apply_auto_round():
         More details about the auto-round can be found at https://arxiv.org/abs/2309.05516.
         """
 
+        raise AssertionError(
+            "Please migrate this function to direct configuration, see https://github.com/pytorch/ao/issues/1690 for details"
+        )
+
     def _apply_auto_round(optimized_model: torch.nn.Module):
         """
         The `optimized_model` includes `Linear` layers optimized by auto-round, which includes `qdq_weight`, `scale`, `zp`.

0 commit comments

Comments (0)