diff --git a/backends/arm/_passes/decompose_embedding_pass.py b/backends/arm/_passes/decompose_embedding_pass.py index a87b26366d7..158d1f824d7 100644 --- a/backends/arm/_passes/decompose_embedding_pass.py +++ b/backends/arm/_passes/decompose_embedding_pass.py @@ -17,7 +17,6 @@ from .arm_pass_utils import create_node, get_first_fake_tensor logger = logging.getLogger(__name__) -logger.setLevel(logging.WARNING) class DecomposeEmbeddingPass(ArmPass): diff --git a/backends/arm/ethosu/backend.py b/backends/arm/ethosu/backend.py index c2feab6478b..9a2d24a9684 100644 --- a/backends/arm/ethosu/backend.py +++ b/backends/arm/ethosu/backend.py @@ -63,7 +63,7 @@ def _compile_tosa_flatbuffer( binary = vela_compile( tosa_flatbuffer, compile_flags, - verbose=logger.getEffectiveLevel() == logging.INFO, + verbose=logger.getEffectiveLevel() <= logging.INFO, intermediate_path=compile_spec.get_intermediate_path(), ) return binary diff --git a/backends/arm/operator_support/right_shift_support.py b/backends/arm/operator_support/right_shift_support.py index 82c4387fc85..7670edec0a9 100644 --- a/backends/arm/operator_support/right_shift_support.py +++ b/backends/arm/operator_support/right_shift_support.py @@ -48,5 +48,5 @@ def is_node_tosa_supported( """ # TODO MLETORCH-525 Remove warning if tosa_spec.is_U55_subset: - logging.warning(f"{node.target} may introduce one-off errors.") + logger.warning(f"{node.target} may introduce one-off errors.") return True diff --git a/backends/arm/operator_support/slice_copy_support.py b/backends/arm/operator_support/slice_copy_support.py index 14ca505635c..b6c11bb73c7 100644 --- a/backends/arm/operator_support/slice_copy_support.py +++ b/backends/arm/operator_support/slice_copy_support.py @@ -32,6 +32,6 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification) -> args = node.args if len(args) == 5 and (step := args[4]) != 1: - logging.warning(f"{node.target} with step size of {step} not supported.") + logger.warning(f"{node.target} with step size of {step} not supported.") return False return True diff --git a/backends/arm/test/conftest.py b/backends/arm/test/conftest.py index 8a08c74efc4..99e9afc01b4 100644 --- a/backends/arm/test/conftest.py +++ b/backends/arm/test/conftest.py @@ -3,10 +3,8 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import logging import os import random -import sys from typing import Any import pytest @@ -29,8 +27,6 @@ def pytest_configure(config): if config.option.arm_run_tosa_version: pytest._test_options["tosa_version"] = config.option.arm_run_tosa_version - logging.basicConfig(level=logging.INFO, stream=sys.stdout) - def pytest_collection_modifyitems(config, items): pass diff --git a/backends/arm/test/misc/test_debug_feats.py b/backends/arm/test/misc/test_debug_feats.py index 40dccc4197e..6e961457db4 100644 --- a/backends/arm/test/misc/test_debug_feats.py +++ b/backends/arm/test/misc/test_debug_feats.py @@ -23,7 +23,6 @@ ) from executorch.backends.test.harness.stages import StageType - input_t1 = Tuple[torch.Tensor] # Input x @@ -261,14 +260,14 @@ def test_dump_tosa_debug_tosa(test_data: input_t1): @common.parametrize("test_data", Linear.inputs) -def test_dump_tosa_ops(caplog, test_data: input_t1): +def test_dump_tosa_ops(capsys, test_data: input_t1): aten_ops: list[str] = [] exir_ops: list[str] = [] pipeline = TosaPipelineINT[input_t1](Linear(), test_data, aten_ops, exir_ops) pipeline.pop_stage("run_method_and_compare_outputs") pipeline.dump_operator_distribution("to_edge_transform_and_lower") pipeline.run() - assert "TOSA operators:" in caplog.text + assert "TOSA operators:" in capsys.readouterr().out class Add(torch.nn.Module): @@ -282,7 +281,7 @@ def forward(self, x): @common.parametrize("test_data", Add.inputs) @common.XfailIfNoCorstone300 -def test_fail_dump_tosa_ops(caplog, test_data: input_t1): +def test_fail_dump_tosa_ops(capsys, test_data: input_t1): aten_ops: list[str] = [] exir_ops: list[str] = [] pipeline = EthosU55PipelineINT[input_t1]( @@ -290,4 +289,7 @@ def test_fail_dump_tosa_ops(caplog, test_data: input_t1): ) pipeline.dump_operator_distribution("to_edge_transform_and_lower") pipeline.run() - assert "Can not get operator distribution for Vela command stream." in caplog.text + assert ( + "Can not get operator distribution for Vela command stream." + in capsys.readouterr().out + ) diff --git a/backends/arm/test/models/test_deit_tiny_arm.py b/backends/arm/test/models/test_deit_tiny_arm.py index 22685a079bd..f6dc5884688 100644 --- a/backends/arm/test/models/test_deit_tiny_arm.py +++ b/backends/arm/test/models/test_deit_tiny_arm.py @@ -23,7 +23,6 @@ from torchvision import transforms logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) deit_tiny = timm.models.deit.deit_tiny_patch16_224(pretrained=True) diff --git a/backends/arm/test/runner_utils.py b/backends/arm/test/runner_utils.py index ae1fc136ce7..f0a1c540e3d 100644 --- a/backends/arm/test/runner_utils.py +++ b/backends/arm/test/runner_utils.py @@ -763,11 +763,11 @@ def run_tosa_graph( if isinstance(tosa_version, Tosa_1_00): import tosa_reference_model as reference_model # type: ignore[import-untyped] - debug_mode = "ALL" if logger.level <= logging.DEBUG else None + debug_mode = "ALL" if logger.getEffectiveLevel() <= logging.DEBUG else None outputs_np, status = reference_model.run( graph, inputs_np, - verbosity=_tosa_refmodel_loglevel(logger.level), + verbosity=_tosa_refmodel_loglevel(logger.getEffectiveLevel()), initialize_variable_tensor_from_numpy=True, debug_mode=debug_mode, ) diff --git a/backends/arm/test/tester/analyze_output_utils.py b/backends/arm/test/tester/analyze_output_utils.py index 9bea6337655..3bcac603a9e 100644 --- a/backends/arm/test/tester/analyze_output_utils.py +++ b/backends/arm/test/tester/analyze_output_utils.py @@ -312,11 +312,8 @@ def dump_error_output( if __name__ == "__main__": - import sys - logging.basicConfig(stream=sys.stdout, level=logging.INFO) - - """ This is expected to produce the example output of print_diff""" + """This is expected to produce the example output of print_diff""" torch.manual_seed(0) a = torch.rand(3, 3, 2, 2) * 0.01 b = a.clone().detach() diff --git a/backends/arm/test/tester/arm_tester.py b/backends/arm/test/tester/arm_tester.py index 1d9ee42c19e..047f2a4c003 100644 --- a/backends/arm/test/tester/arm_tester.py +++ b/backends/arm/test/tester/arm_tester.py @@ -832,7 +832,7 @@ def _dump_str(to_print: str, path_to_dump: Optional[str] = None): with open(path_to_dump, "a") as fp: fp.write(to_print) else: - logger.info(to_print) + print(to_print) def _format_dict(to_print: dict, print_table: bool = True) -> str: