Merged
Commits (29)
ec3e065  Add Int4XPUTensorIntZP (liangan1, Aug 22, 2025)
1dc5b2c  Add int4_xpu_tensor (liangan1, Aug 22, 2025)
e63b100  Update int4_xpu_tensor.py (liangan1, Aug 25, 2025)
5ef1ca2  Fix typo (liangan1, Aug 25, 2025)
a28dd89  Fix code format issue (liangan1, Aug 25, 2025)
8a0f124  fix bug (liangan1, Aug 25, 2025)
a0ff36f  Fix code format (liangan1, Aug 25, 2025)
5e9c476  Merge branch 'main' into liangan1/int4_xpu_int_zp (liangan1, Aug 26, 2025)
2c4c2ce  Update int4_xpu_tensor.py (liangan1, Aug 26, 2025)
e48ea0b  change the pack format to plain (liangan1, Aug 26, 2025)
c4e5b9d  fix typo (liangan1, Aug 26, 2025)
7063e56  Update quant_api.py (liangan1, Aug 26, 2025)
5b87d8b  merge main branch (liangan1, Aug 28, 2025)
6076877  Merge branch 'main' into liangan1/int4_xpu_int_zp (liangan1, Aug 27, 2025)
8d2acd2  Update __init__.py (liangan1, Aug 29, 2025)
43acd66  Update __init__.py (liangan1, Aug 29, 2025)
a047c00  change Int4XPUTensorIntZP to Int4PlainInt32 (liangan1, Aug 29, 2025)
3f70b2b  Update __init__.py (liangan1, Aug 29, 2025)
402dd72  Refine code (liangan1, Aug 29, 2025)
282f1a8  Refine code (liangan1, Aug 29, 2025)
cd781fc  Update __init__.py (liangan1, Sep 1, 2025)
afadf69  Update __init__.py (liangan1, Sep 1, 2025)
b68beef  Add more comments about the original weight dtype (liangan1, Sep 1, 2025)
66e05ff  Merge branch 'main' into liangan1/int4_xpu_int_zp (liangan1, Sep 1, 2025)
105b4b9  fix code format issue (liangan1, Sep 1, 2025)
b24ff1a  fix code format issue (liangan1, Sep 1, 2025)
77868bc  skip ut if no xpu (liangan1, Sep 1, 2025)
970aa17  Update test_int4_plain_int32_tensor.py (liangan1, Sep 1, 2025)
78f6bb2  Add assert for the original weight data type (liangan1, Sep 4, 2025)
@@ -0,0 +1,86 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause license found in the
# LICENSE file in the root directory of this source tree.

import tempfile
import unittest

import torch
from torch.testing._internal.common_utils import (
    TestCase,
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
)

from torchao.quantization import (
    Int4WeightOnlyConfig,
    quantize_,
)
from torchao.quantization.utils import compute_error
from torchao.utils import (
    torch_version_at_least,
)


def get_config(group_size):
    return Int4WeightOnlyConfig(
        group_size=group_size,
        packing_format="plain_int32",
        version=2,
    )


@unittest.skipIf(not torch_version_at_least("2.8.0"), "Need pytorch 2.8+")
@unittest.skipIf(not torch.xpu.is_available(), "XPU not available")
class Int4PlainInt32Tensor(TestCase):
Contributor

we probably need more tests like serialization etc. but can add these later

Collaborator Author

Ok, we are working on enabling XPU CI in other PRs. Please refer to #2917.
    @parametrize(
        "sizes",
        [
            ((128,), 256, 128),
            ((32, 128), 512, 128),
            ((2, 32, 128), 256, 12),
        ],
    )
    @parametrize("dtype", [torch.bfloat16, torch.half])
    @parametrize("group_size", [32, 64, 128])
    def test_linear(self, sizes, dtype, group_size):
        device = "xpu"
        M, N, K = sizes
        input = torch.randn(*M, K, dtype=dtype, device=device)
        linear = torch.nn.Linear(K, N, dtype=dtype, device=device)
        original = linear(input)
        quantize_(linear, get_config(group_size))
        quantized = linear(input)
        self.assertTrue(compute_error(original, quantized) > 20)

        compiled_linear = torch.compile(linear)
        quantized_and_compiled = compiled_linear(input)
        self.assertTrue(compute_error(original, quantized_and_compiled) > 20)

    @parametrize("dtype", [torch.bfloat16, torch.half])
    def test_module_path(self, dtype):
        linear = torch.nn.Linear(128, 256, dtype=dtype, device="xpu")
        quantize_(linear, get_config(group_size=128))
        self.assertEqual(
            str(type(linear.weight)),
            "<class 'torchao.quantization.Int4PlainInt32Tensor'>",
        )

        with tempfile.NamedTemporaryFile() as f:
            torch.save(linear.state_dict(), f)
            f.seek(0)
            state_dict = torch.load(f)
            self.assertEqual(
                str(type(state_dict["weight"])),
                "<class 'torchao.quantization.Int4PlainInt32Tensor'>",
            )


instantiate_parametrized_tests(Int4PlainInt32Tensor)


if __name__ == "__main__":
    run_tests()
2 changes: 2 additions & 0 deletions torchao/quantization/__init__.py
@@ -92,6 +92,7 @@
    Float8Tensor,
    Int4MarlinSparseTensor,
    Int4OpaqueTensor,
    Int4PlainInt32Tensor,
    Int4PreshuffledTensor,
    Int4Tensor,
    Int4TilePackedTo4dTensor,
@@ -163,6 +164,7 @@
    "FbgemmConfig",
    # tensor subclasses
    "Int4Tensor",
    "Int4PlainInt32Tensor",
    "Int4PreshuffledTensor",
    "Int4MarlinSparseTensor",
    "IntxOpaqueTensor",
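With the export above, the new subclass is importable from the top-level quantization namespace; a quick sanity check (assuming a torchao build that includes this change):

from torchao.quantization import Int4PlainInt32Tensor

print(Int4PlainInt32Tensor.__module__)  # "torchao.quantization", as set at the bottom of the new tensor file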
8 changes: 7 additions & 1 deletion torchao/quantization/quant_api.py
@@ -74,6 +74,7 @@
    Float8Tensor,
    Int4MarlinSparseTensor,
    Int4OpaqueTensor,
    Int4PlainInt32Tensor,
    Int4PreshuffledTensor,
    Int4Tensor,
    Int4TilePackedTo4dTensor,
@@ -522,7 +523,6 @@ def quantize_(
    torch._C._log_api_usage_once("torchao.quantization.quantize_")

    filter_fn = _is_linear if filter_fn is None else filter_fn

    if isinstance(config, ModuleFqnToConfig):
        _replace_with_custom_fn_if_matches_filter_with_name(
            model,
@@ -1131,6 +1131,12 @@ def _int4_weight_only_quantize_tensor(weight, config):
            block_size,
        )
        return new_weight
    elif packing_format == PackingFormat.PLAIN_INT32:
        new_weight = Int4PlainInt32Tensor.from_hp(
            weight,
            block_size,
        )
        return new_weight
    elif packing_format == PackingFormat.MARLIN_SPARSE:
        new_weight = Int4MarlinSparseTensor.from_hp(
            weight,
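For reference, a minimal usage sketch (not part of the diff) of how the PLAIN_INT32 branch above is reached, mirroring the config built in the new test; the layer shape and group size are illustrative and an XPU device is required:

import torch
from torchao.quantization import Int4WeightOnlyConfig, quantize_

linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16, device="xpu")
# packing_format="plain_int32" routes _int4_weight_only_quantize_tensor to Int4PlainInt32Tensor.from_hp
quantize_(linear, Int4WeightOnlyConfig(group_size=128, packing_format="plain_int32", version=2))
print(type(linear.weight))  # <class 'torchao.quantization.Int4PlainInt32Tensor'>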
6 changes: 6 additions & 0 deletions torchao/quantization/quantize_/common/packing_format.py
@@ -41,6 +41,12 @@ class PackingFormat(str, Enum):
    """
    UNPACKED_TO_INT8 = "unpacked_to_int8"

    """
    plain_int32 refers to the format used by int4 weight-only quantization,
    a groupwise quantization format where two int4 values are stored in a byte and four such bytes (eight int4 values) are stored in an int32.
    """
    PLAIN_INT32 = "plain_int32"

    """
    tile_packed_to_4d is referring to the format used by tinygemm kernels for int4 quantization
    """
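For reference, a rough sketch (not part of the diff) of the byte-level packing described above, mirroring the packing expression in Int4PlainInt32Tensor.from_hp; the tensor and its shape here are made up:

import torch

# Unsigned int4 values (0..15), one row per output channel; shape is illustrative.
int_data = torch.randint(0, 16, (4, 8), dtype=torch.int32)
# Adjacent columns pair up: the odd-indexed column fills the high nibble, the even-indexed one the low nibble.
packed_uint8 = (int_data[:, 1::2] << 4 | int_data[:, ::2]).to(torch.uint8)  # shape (4, 4)
# torch.ops.aten._convert_weight_to_int4pack then stores four such bytes (eight int4 values) per int32 lane.
print(packed_uint8.shape)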
4 changes: 4 additions & 0 deletions torchao/quantization/quantize_/workflows/__init__.py
@@ -8,6 +8,9 @@
from .int4.int4_opaque_tensor import (
    Int4OpaqueTensor,
)
from .int4.int4_plain_int32_tensor import (
    Int4PlainInt32Tensor,
)
from .int4.int4_preshuffled_tensor import (
    Int4PreshuffledTensor,
)
@@ -26,6 +29,7 @@
    "Int4Tensor",
    "Int4PreshuffledTensor",
    "Int4MarlinSparseTensor",
    "Int4PlainInt32Tensor",
    "Int4TilePackedTo4dTensor",
    "Float8Tensor",
    "QuantizeTensorToFloat8Kwargs",
@@ -0,0 +1,182 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause license found in the
# LICENSE file in the root directory of this source tree.


from typing import List

import torch

from torchao.quantization.quant_primitives import (
    MappingType,
    choose_qparams_affine,
    quantize_affine,
)
from torchao.utils import (
    TorchAOBaseTensor,
)

__all__ = [
    "Int4PlainInt32Tensor",
]

aten = torch.ops.aten


class Int4PlainInt32Tensor(TorchAOBaseTensor):
"""
int4 weight-only quantization on XPU with oneDNN as backend (groupwise quantization only)

Tensor Attributes:
qdata: (N, K/8), packed int4 weight, the data type is int32 here with 4*(int4*2), the original data type can be half and bfloat16
scale: (K/group_size, N), dtype is the same as the original Tensor dtype
zero_point: (K/group_size, N), dtype is int8

Non-Tensor Attributes:
block_size: the block size for quantization, representing the granularity.
shape: shape of the original Tensor

"""

tensor_data_names = ["qdata", "scale", "zero_point"]
tensor_attribute_names = ["block_size", "shape"]

def __new__(
cls,
qdata,
scale,
zero_point,
block_size,
shape,
):
kwargs = {}
kwargs["device"] = qdata.device
kwargs["dtype"] = scale.dtype
kwargs["requires_grad"] = False
return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined]

def __init__(self, qdata, scale, zero_point, block_size, shape):
self.qdata = qdata
self.scale = scale
self.zero_point = zero_point
self.block_size = block_size

def _quantization_type(self):
return f"shape={self.shape}, block_size={self.block_size}, device={self.device}"

@classmethod
def from_hp(
cls,
w: torch.Tensor,
block_size: List[int],
):
assert w.ndim == 2 and w.device.type == "xpu", (
f"Expecting 2D tensor on XPU, but got: {w.shape} on {w.device.type}"
)
assert len(block_size) == w.ndim
assert w.dtype in [torch.float16, torch.bfloat16], (
f"Expecting float16 or bfloat16 weight tensor, but got: {w.dtype}"
)
original_shape = w.shape
mapping_type = MappingType.ASYMMETRIC
target_dtype = torch.int32
quant_min = 0
quant_max = 15
eps = 1e-6
scale_dtype = None
zero_point_dtype = torch.int32
scale, zero_point = choose_qparams_affine(
w,
mapping_type,
block_size,
target_dtype,
quant_min,
quant_max,
eps,
scale_dtype,
zero_point_dtype,
)
int_data = quantize_affine(
w,
block_size,
scale,
zero_point,
target_dtype,
quant_min,
quant_max,
)
assert int_data.dtype == torch.int32, (
"torch.ops.aten._convert_weight_to_int4pack expects `int32` dtype"
)
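        # Pack pairs of int4 values into bytes: the odd-indexed column goes to the
        # high nibble, the even-indexed column to the low nibble, before the int32 repack below.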
        packed_weight = (int_data[::, 1::2] << 4 | int_data[::, ::2]).to(torch.uint8)
        packed_weight = torch.ops.aten._convert_weight_to_int4pack(
            packed_weight.contiguous(), 8
        )
        scale = scale.reshape(int_data.shape[0], -1)
        zero_point = zero_point.reshape(int_data.shape[0], -1)
        return Int4PlainInt32Tensor(
            packed_weight,
            scale.transpose(0, 1).contiguous(),
            zero_point.transpose(0, 1).contiguous().to(torch.int8),
            block_size,
            original_shape,
        )


implements = Int4PlainInt32Tensor.implements


@implements([torch.nn.functional.linear, aten.linear.default])
def _(func, types, args, kwargs):
    input_tensor, weight_tensor, bias = (
        args[0],
        args[1],
        args[2] if len(args) > 2 else None,
    )
    assert input_tensor.device.type == "xpu", (
        f"For XPU device only but got: {input_tensor.device}"
    )
    assert isinstance(weight_tensor, Int4PlainInt32Tensor), (
        f"Expected weight_tensor to be Int4PlainInt32Tensor, got: {type(weight_tensor)}"
    )
    assert weight_tensor.block_size[0] == 1, (
        f"Requires groupwise quantization, got block_size: {weight_tensor.block_size}"
    )
    assert input_tensor.shape[-1] == weight_tensor.shape[1], (
        f"Shapes of input and weight do not match, input:{input_tensor.shape}, weight: {weight_tensor.shape}"
    )

    act_mat = input_tensor
    packed_weight = weight_tensor.qdata
    scale = weight_tensor.scale
    zero_point = weight_tensor.zero_point

    orig_act_size = act_mat.size()
    orig_dtype = act_mat.dtype

    # reshape to 2D
    act_mat = act_mat.reshape(-1, act_mat.shape[-1])

    # groupwise int4 quantization
    groupsize = weight_tensor.block_size[1]
    y = torch.ops.aten._weight_int4pack_mm_with_scales_and_zeros(
        act_mat, packed_weight, groupsize, scale, zero_point
    )

    # remove out_feature padding
    assert weight_tensor.ndim == 2
    orig_out_features = weight_tensor.shape[-2]
    y = y[:, :orig_out_features]
    y = y.reshape(*orig_act_size[:-1], orig_out_features)

    if bias is not None:
        y += bias
    return y.to(orig_dtype)


Int4PlainInt32Tensor.__module__ = "torchao.quantization"

# Allow a model with Int4PlainInt32Tensor weights to be loaded with `weights_only=True`
torch.serialization.add_safe_globals([Int4PlainInt32Tensor])
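For completeness, a minimal sketch (not part of the diff) of building the tensor directly with from_hp and hitting the registered linear path; shapes and group size are illustrative and an XPU device is required:

import torch
import torch.nn.functional as F
from torchao.quantization import Int4PlainInt32Tensor

group_size = 128
w = torch.randn(256, 512, dtype=torch.bfloat16, device="xpu")  # (N, K) high-precision weight
qw = Int4PlainInt32Tensor.from_hp(w, [1, group_size])  # groupwise along K
x = torch.randn(4, 512, dtype=torch.bfloat16, device="xpu")
y = F.linear(x, qw)  # dispatched to the aten._weight_int4pack_mm_with_scales_and_zeros path above
print(y.shape)  # torch.Size([4, 256])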