Commit
make style for #10368 (#10370)
* fix bug: torch.uint1-7 are not supported in torch<2.6

* up

---------

Co-authored-by: baymax591 <[email protected]>
yiyixuxu and baymax591 authored Dec 24, 2024
1 parent c1e7fd5 commit 6dfaec3
Showing 1 changed file with 23 additions and 16 deletions.
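
The bug in one line: on torch builds that predate the sub-byte unsigned integer dtypes, merely evaluating torch.uint1 raises AttributeError the moment the defining module is imported, before any quantization code runs. A minimal standalone sketch of the failure mode (not part of the diff; the tuple name SUB_BYTE_UINTS is illustrative):

import torch

try:
    # On torch versions without sub-byte unsigned dtypes, this attribute
    # lookup fails as soon as the defining module is imported.
    SUB_BYTE_UINTS = (torch.uint1, torch.uint2, torch.uint3)
except AttributeError:
    SUB_BYTE_UINTS = ()  # degrade gracefully, as the fix below does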
39 changes: 23 additions & 16 deletions src/diffusers/quantizers/torchao/torchao_quantizer.py
@@ -23,7 +23,7 @@

from packaging import version

-from ...utils import get_module_from_name, is_torch_available, is_torchao_available, logging
+from ...utils import get_module_from_name, is_torch_available, is_torch_version, is_torchao_available, logging
from ..base import DiffusersQuantizer


@@ -35,21 +35,28 @@
import torch
import torch.nn as nn

-SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION = (
-    # At the moment, only int8 is supported for integer quantization dtypes.
-    # In Torch 2.6, int1-int7 will be introduced, so this can be visited in the future
-    # to support more quantization methods, such as intx_weight_only.
-    torch.int8,
-    torch.float8_e4m3fn,
-    torch.float8_e5m2,
-    torch.uint1,
-    torch.uint2,
-    torch.uint3,
-    torch.uint4,
-    torch.uint5,
-    torch.uint6,
-    torch.uint7,
-)
+if is_torch_version(">=", "2.5"):
+    SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION = (
+        # At the moment, only int8 is supported for integer quantization dtypes.
+        # In Torch 2.6, int1-int7 will be introduced, so this can be visited in the future
+        # to support more quantization methods, such as intx_weight_only.
+        torch.int8,
+        torch.float8_e4m3fn,
+        torch.float8_e5m2,
+        torch.uint1,
+        torch.uint2,
+        torch.uint3,
+        torch.uint4,
+        torch.uint5,
+        torch.uint6,
+        torch.uint7,
+    )
+else:
+    SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION = (
+        torch.int8,
+        torch.float8_e4m3fn,
+        torch.float8_e5m2,
+    )

if is_torchao_available():
from torchao.quantization import quantize_
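The same guard can be reproduced outside diffusers with only torch and packaging. A minimal sketch under the diff's own assumption that torch >= 2.5 exposes uint1 through uint7; torch_at_least and SUB_BYTE_UINT_DTYPES are hypothetical names standing in for diffusers' is_torch_version helper and the tuple above:

import torch
from packaging import version

def torch_at_least(minimum: str) -> bool:
    # Compare base versions so pre-release builds such as "2.5.0.dev0"
    # are grouped with their release.
    return version.parse(version.parse(torch.__version__).base_version) >= version.parse(minimum)

if torch_at_least("2.5"):
    # getattr keeps the dtype lookup in one place; the guard ensures it
    # only runs on versions where all seven sub-byte uints are defined.
    SUB_BYTE_UINT_DTYPES = tuple(getattr(torch, f"uint{i}") for i in range(1, 8))
else:
    SUB_BYTE_UINT_DTYPES = ()

Gating at import time matters here: the tuple is built when the module loads, so a check deferred to call time would not prevent the AttributeError.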
