[resubmit] Gemlite fix #1435

Merged 2 commits on Dec 18, 2024
11 changes: 10 additions & 1 deletion test/integration/test_integration.py
@@ -958,11 +958,20 @@ def test_gemlite_layout(self, device, dtype):
        self._test_lin_weight_subclass_api_impl(
            api,
            device,
            15,
            test_shape=test_shape,
            test_dtype=dtype,
        )

        # test that shapes not divisible by 128 don't cause errors
        self._test_lin_weight_subclass_api_impl(
            lambda mod: quantize_(mod, gemlite_uintx_weight_only(None, 4, 32)),
            device,
            15,
            test_shape=[1, 1025, 513],
            test_dtype=dtype,
        )


    @parameterized.expand(COMMON_DEVICE_DTYPE)
    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_3, "int4 requires torch nightly.")
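For context, a rough standalone sketch of the scenario the new test exercises: quantizing a linear layer whose dimensions are not divisible by 128. The `quantize_` and `gemlite_uintx_weight_only` names and the argument values come from the diff itself; the import path, the [batch, in, out] reading of `test_shape`, and the model are assumptions, not part of the PR.

    import torch
    import torch.nn as nn

    # Import path is an assumption; adjust to the torchao version in use.
    from torchao.quantization import quantize_, gemlite_uintx_weight_only

    # Dimensions chosen to mirror the new test_shape=[1, 1025, 513],
    # assuming [batch, in_features, out_features]: neither 1025 nor 513
    # is divisible by 128.
    model = nn.Sequential(nn.Linear(1025, 513)).to(device="cuda", dtype=torch.float16)

    # Same arguments as the test call: group_size=None, 4 bits, 32-bit packing.
    quantize_(model, gemlite_uintx_weight_only(None, 4, 32))

    # With this fix the incompatible layer is skipped with a UserWarning
    # instead of raising, so the forward pass still runs.
    out = model(torch.randn(1, 1025, device="cuda", dtype=torch.float16))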
15 changes: 15 additions & 0 deletions torchao/dtypes/uintx/gemlite_layout.py
@@ -1,3 +1,4 @@
import warnings
from dataclasses import dataclass
from typing import Dict, Optional, Tuple

@@ -76,6 +77,15 @@ def apply_gemlite_quant(
    out_features, in_features = weight.shape
    group_size = in_features if group_size is None else group_size

    # Skip layers Gemlite cannot handle (neither dimension divisible by 128).
    if in_features % 128 != 0 and out_features % 128 != 0:
        warnings.simplefilter("once", UserWarning)
        warnings.warn(
            "Gemlite only works for layers with in_features or out_features divisible by 128; "
            + "some layers have been skipped",
            UserWarning,
        )
        return weight

    quant_kwargs = get_gemlite_quant_kwargs(bit_width, group_size)

    layout = GemlitePackedLayout(
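To make the guard's logic explicit: the `and` means a layer is skipped only when neither dimension is divisible by 128, which matches the "in_features or out_features" wording of the warning. A minimal illustration (the helper name is ours, not torchao's):

    def gemlite_compatible(out_features: int, in_features: int) -> bool:
        # Mirrors the guard above: Gemlite can handle the layer as long as
        # at least one of the two dimensions is divisible by 128.
        return in_features % 128 == 0 or out_features % 128 == 0

    assert gemlite_compatible(512, 1025)      # out_features divisible -> quantized
    assert gemlite_compatible(1025, 512)      # in_features divisible -> quantized
    assert not gemlite_compatible(1025, 513)  # neither divisible -> skipped, warning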
@@ -173,6 +183,11 @@ def from_plain(
            exhaustive=False,
            use_cuda_graph=False,
        )
        # For 4-bit channel-wise (group_size=None) quantization, map FP16
        # inputs to an FP32 accumulator.
        if _layout.group_size is None and _layout.bit_width == 4:
            from gemlite.core import GEMLITE_ACC_DTYPE
            from gemlite.dtypes import DType

            GEMLITE_ACC_DTYPE[DType.FP16] = DType.FP32

        out_features, in_features = int_data.shape
        input_dtype, output_dtype = DType.FP16, DType.FP16
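For intuition on the accumulator override: FP16 stops resolving small increments once a running sum grows large, so long reductions (here, channel-wise 4-bit matmuls reducing over the full in_features dimension) want an FP32 accumulator. A self-contained illustration of the effect, not gemlite code:

    import torch

    one = torch.tensor(1.0, dtype=torch.float16)

    # FP16 has a 10-bit mantissa: above 2048 the spacing between representable
    # values exceeds 1, so adding 1.0 no longer changes the running sum.
    acc16 = torch.tensor(0.0, dtype=torch.float16)
    for _ in range(4096):
        acc16 = acc16 + one
    print(acc16.item())  # 2048.0, not 4096.0

    # The same reduction with an FP32 accumulator is exact.
    acc32 = torch.tensor(0.0, dtype=torch.float32)
    for _ in range(4096):
        acc32 = acc32 + one.float()
    print(acc32.item())  # 4096.0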