We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a1c67b9 commit 69f3795 — Copy full SHA for 69f3795
torchao/quantization/qat/utils.py
@@ -16,14 +16,6 @@
16
_get_per_token_block_size,
17
)
18
19
-# Attribute name representing the forward prehook wrapping the
20
-# linear input in an `AffineFakeQuantizedTensor` on a linear module.
21
-#
22
-# The value of this attribute is a 2-tuple of (prehook, handle).
23
-# The prehook can be disabled by calling `handle.remove()`, and
24
-# re-enabled by calling `module.register_forward_pre_hook(prehook)`.
25
-_QAT_LINEAR_SUBCLASS_INPUT_PREHOOK = "_qat_linear_subclass_input_prehook"
26
-
27
28
class _GenericFakeQuantize(torch.autograd.Function):
29
"""
0 commit comments