diff --git a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_straight_through_grad.py b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_straight_through_grad.py
index 9cdd40312cc..58be1bc2278 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/quantsim_straight_through_grad.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/quantsim_straight_through_grad.py
@@ -44,7 +44,7 @@
 from aimet_torch.tensor_factory_utils import constant_like
 
 if TYPE_CHECKING:
-    from aimet_torch.tensor_quantizer import LearnedGridTensorQuantizer
+    from aimet_torch.tensor_quantizer import LearnedGridTensorQuantizer # pylint:disable = cyclic-import
 
 
 @dataclass
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/tensor_quantizer.py b/TrainingExtensions/torch/src/python/aimet_torch/tensor_quantizer.py
index acafed7efa6..d5333dde152 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/tensor_quantizer.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/tensor_quantizer.py
@@ -855,7 +855,7 @@ def _set_encoding_min_max_parameters(self, encodings: Union[libpymo.TfEncoding,
         enc_min_param = self.name + '_encoding_min'
         enc_max_param = self.name + '_encoding_max'
         # TODO: refactor to not call internal state of wrapper
-        params = self.wrapper_ref._parameters
+        params = self.wrapper_ref._parameters # pylint:disable = no-member
 
         # TODO: Remove this check when encodings is always a sequence
         if isinstance(encodings, List):
@@ -884,7 +884,7 @@ def freeze_encoding(self):
         enc_min_param = self.name + '_encoding_min'
         enc_max_param = self.name + '_encoding_max'
         # TODO: refactor to not call internal state of wrapper.
-        params = self.wrapper_ref._parameters
+        params = self.wrapper_ref._parameters # pylint:disable = no-member
 
         if params[enc_min_param] is None and params[enc_max_param] is None:
             raise RuntimeError("Encoding can be frozen only when it is not None.")
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/utils.py b/TrainingExtensions/torch/src/python/aimet_torch/utils.py
index 2f16ee210d7..ec41717a0e2 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/utils.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/utils.py
@@ -61,7 +61,7 @@
 from aimet_common.utils import profile as _profile, deprecated, _red # pylint:disable = unused-import
 import aimet_common.libpymo as libpymo
 import aimet_torch.v1.nn.modules.custom as aimet_modules
-from aimet_torch.tensor_quantizer import TensorQuantizer, StaticGridPerChannelQuantizer, StaticGridPerTensorQuantizer
+from aimet_torch.tensor_quantizer import TensorQuantizer, StaticGridPerChannelQuantizer, StaticGridPerTensorQuantizer # pylint:disable = cyclic-import
 
 
 logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
@@ -953,6 +953,7 @@ def get_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: List of parameter, input, and output quantizers
     """
+    # pylint:disable = cyclic-import
     from aimet_torch.v2.nn.base import BaseQuantizationMixin# pylint: disable=import-outside-toplevel
     from aimet_torch.qc_quantize_op import QcQuantizeWrapper# pylint: disable=import-outside-toplevel
     from aimet_torch.qc_quantize_recurrent import QcQuantizeRecurrent# pylint: disable=import-outside-toplevel
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v2/quantsim/quantsim.py b/TrainingExtensions/torch/src/python/aimet_torch/v2/quantsim/quantsim.py
index 87652e8e22f..966acaddf31 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v2/quantsim/quantsim.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v2/quantsim/quantsim.py
@@ -70,7 +70,7 @@ class QuantizationSimModel(V1QuantizationSimModel):
     """
     Overriden QuantizationSimModel that does off-target quantization simulation using v2 quantsim blocks.
     """
-    def __init__(self, # pylint: disable=too-many-arguments
+    def __init__(self, # pylint: disable=too-many-arguments, too-many-locals
                  model: torch.nn.Module,
                  dummy_input: Union[torch.Tensor, Tuple],
                  quant_scheme: Union[str, QuantScheme] = None, # NOTE: Planned to be deprecated
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/v2/utils.py b/TrainingExtensions/torch/src/python/aimet_torch/v2/utils.py
index 7f8d6e1d618..33b66a0224a 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/v2/utils.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/v2/utils.py
@@ -282,7 +282,7 @@ def decorator(fn_or_cls: Callable):
 
 def _map_qmodule(modules, func):
     # pylint: disable=import-outside-toplevel
-    # pylint: disable=protected-access
+    # pylint: disable=protected-access, cyclic-import
     from aimet_torch.v2.nn import BaseQuantizationMixin
     contexts = []
     ctx = _ContextManager(action=lambda: None, cleanup=lambda:[context._cleanup() for context in contexts])
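
For context, the cyclic-import suppressions above annotate a standard Python pattern for breaking import cycles: keep the offending import out of module scope, either under typing.TYPE_CHECKING (used for type hints only) or inside the function that needs it, and silence the pylint checks that flag the workaround. Below is a minimal sketch, assuming two hypothetical modules module_a and module_b that import each other; the names are illustrative only and are not part of AIMET.

# module_a.py -- hypothetical module; illustrates the deferred-import pattern only
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated by type checkers only, never at runtime, so the
    # module-level cycle with module_b does not occur.
    from module_b import Helper  # pylint:disable = cyclic-import


def make_helper() -> "Helper":
    # Function-local import: resolved lazily at call time, after both
    # modules have finished loading.
    from module_b import Helper  # pylint: disable=import-outside-toplevel, cyclic-import
    return Helper()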