Skip to content

Commit 015a059

Browse files
I found a case where this is needed (#10875)
1 parent acfaa5c commit 015a059

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

comfy/quant_ops.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -405,8 +405,8 @@ def quantize(cls, tensor, scale=None, dtype=torch.float8_e4m3fn):
405405

406406
tensor_scaled = tensor * (1.0 / scale).to(tensor.dtype)
407407
# TODO: uncomment this if it's actually needed because the clamp has a small performance penalty
408-
# lp_amax = torch.finfo(dtype).max
409-
# torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled)
408+
lp_amax = torch.finfo(dtype).max
409+
torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled)
410410
qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format)
411411

412412
layout_params = {

0 commit comments

Comments
 (0)