
Commit

Resolve pylint failures
Signed-off-by: Michael Tuttle <[email protected]>
quic-mtuttle authored Oct 10, 2024
1 parent 5fa55b3 commit 6b79aaf
Showing 5 changed files with 7 additions and 6 deletions.
@@ -44,7 +44,7 @@
 from aimet_torch.tensor_factory_utils import constant_like
 
 if TYPE_CHECKING:
-    from aimet_torch.tensor_quantizer import LearnedGridTensorQuantizer
+    from aimet_torch.tensor_quantizer import LearnedGridTensorQuantizer # pylint:disable = cyclic-import
 
 
 @dataclass
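Note: the import above sits under a TYPE_CHECKING guard, so it is evaluated only by static type checkers and the cycle pylint reports cannot occur at runtime; the inline disable silences a false positive. A minimal sketch of the pattern (the describe function is hypothetical):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Annotation-only import: never executed at runtime, so the
        # circular dependency pylint flags here is harmless.
        from aimet_torch.tensor_quantizer import LearnedGridTensorQuantizer  # pylint:disable = cyclic-import

    def describe(quantizer: "LearnedGridTensorQuantizer") -> str:
        # The annotation is a string, so the name need not exist at runtime.
        return repr(quantizer)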
@@ -855,7 +855,7 @@ def _set_encoding_min_max_parameters(self, encodings: Union[libpymo.TfEncoding,
         enc_min_param = self.name + '_encoding_min'
         enc_max_param = self.name + '_encoding_max'
         # TODO: refactor to not call internal state of wrapper
-        params = self.wrapper_ref._parameters
+        params = self.wrapper_ref._parameters # pylint:disable = no-member
 
         # TODO: Remove this check when encodings is always a sequence
         if isinstance(encodings, List):
@@ -884,7 +884,7 @@ def freeze_encoding(self):
         enc_min_param = self.name + '_encoding_min'
         enc_max_param = self.name + '_encoding_max'
         # TODO: refactor to not call internal state of wrapper.
-        params = self.wrapper_ref._parameters
+        params = self.wrapper_ref._parameters # pylint:disable = no-member
 
         if params[enc_min_param] is None and params[enc_max_param] is None:
             raise RuntimeError("Encoding can be frozen only when it is not None.")
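Note: torch.nn.Module creates _parameters dynamically inside Module.__init__, so pylint's static analysis cannot prove the attribute exists and reports no-member. A hedged sketch of the false positive (Wrapper here is a stand-in, not the AIMET wrapper class):

    import torch

    class Wrapper(torch.nn.Module):
        pass

    w = Wrapper()
    # Assigning a Parameter registers it in the dynamic _parameters dict.
    w.encoding_min = torch.nn.Parameter(torch.zeros(1))
    params = w._parameters  # pylint:disable = no-member, protected-access
    assert 'encoding_min' in params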
3 changes: 2 additions & 1 deletion TrainingExtensions/torch/src/python/aimet_torch/utils.py
@@ -61,7 +61,7 @@
 from aimet_common.utils import profile as _profile, deprecated, _red # pylint:disable = unused-import
 import aimet_common.libpymo as libpymo
 import aimet_torch.v1.nn.modules.custom as aimet_modules
-from aimet_torch.tensor_quantizer import TensorQuantizer, StaticGridPerChannelQuantizer, StaticGridPerTensorQuantizer
+from aimet_torch.tensor_quantizer import TensorQuantizer, StaticGridPerChannelQuantizer, StaticGridPerTensorQuantizer # pylint:disable = cyclic-import
 
 logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
 
@@ -953,6 +953,7 @@ def get_all_quantizers(model: torch.nn.Module):
     :param model: Root module
     :returns: List of parameter, input, and output quantizers
     """
+    # pylint:disable = cyclic-import
     from aimet_torch.v2.nn.base import BaseQuantizationMixin# pylint: disable=import-outside-toplevel
     from aimet_torch.qc_quantize_op import QcQuantizeWrapper# pylint: disable=import-outside-toplevel
     from aimet_torch.qc_quantize_recurrent import QcQuantizeRecurrent# pylint: disable=import-outside-toplevel
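Note: keeping these imports inside the function defers them to call time, after both modules have finished initializing, which is the standard way to break a genuine import cycle; the added function-level disable covers the cyclic-import warning pylint still raises on the textual cycle. A sketch with hypothetical modules pkg/a.py and pkg/b.py:

    # pkg/a.py (hypothetical)
    from pkg import b  # safe: pkg.b has no top-level import of pkg.a

    VALUE = 1

    # pkg/b.py (hypothetical)
    def uses_a():
        # Deferred until call time, when pkg.a is fully initialized;
        # pylint still sees a cycle, hence the inline suppression.
        from pkg import a  # pylint: disable=import-outside-toplevel, cyclic-import
        return a.VALUE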
@@ -70,7 +70,7 @@ class QuantizationSimModel(V1QuantizationSimModel):
     """
     Overriden QuantizationSimModel that does off-target quantization simulation using v2 quantsim blocks.
     """
-    def __init__(self, # pylint: disable=too-many-arguments
+    def __init__(self, # pylint: disable=too-many-arguments, too-many-locals
                  model: torch.nn.Module,
                  dummy_input: Union[torch.Tensor, Tuple],
                  quant_scheme: Union[str, QuantScheme] = None, # NOTE: Planned to be deprecated
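Note: pylint's default limits are five arguments (max-args) and fifteen locals (max-locals); the edit widens the existing suppression rather than refactoring the constructor. A hypothetical signature showing the combined disable:

    class Example:
        def __init__(self,  # pylint: disable=too-many-arguments, too-many-locals
                     a, b, c, d, e, f):
            # Six parameters exceed max-args; a body with more than
            # fifteen local variables would also trip too-many-locals.
            self.values = (a, b, c, d, e, f)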
@@ -282,7 +282,7 @@ def decorator(fn_or_cls: Callable):
 
 def _map_qmodule(modules, func):
     # pylint: disable=import-outside-toplevel
-    # pylint: disable=protected-access
+    # pylint: disable=protected-access, cyclic-import
     from aimet_torch.v2.nn import BaseQuantizationMixin
     contexts = []
     ctx = _ContextManager(action=lambda: None, cleanup=lambda:[context._cleanup() for context in contexts])
