diff --git a/src/bindings/python/src/openvino/frontend/pytorch/gptq.py b/src/bindings/python/src/openvino/frontend/pytorch/gptq.py
index 2095d624434f5c..0beb7f12ebf601 100644
--- a/src/bindings/python/src/openvino/frontend/pytorch/gptq.py
+++ b/src/bindings/python/src/openvino/frontend/pytorch/gptq.py
@@ -4,8 +4,12 @@
 # flake8: noqa
 # mypy: ignore-errors
 
-import torch
 from functools import partial
+import logging
+import torch
+
+log = logging.getLogger(__name__)
+
 
 # Wraps a single tensor to a module to prevent it from jit.freezing
 # It depends on a tensor dtype whether it will be preserved from freezing. Refer to the decoder code to learn which types will be preserved.
@@ -168,13 +172,18 @@ def unpatch_model(model):
                 del m._openvino_u4_compression_submodule_qweights
                 del m._openvino_u4_compression_submodule_qzeros
             except Exception as error:
-                print('[ WARNING ] Exception raised during GPTQ model unpatching. Depending on the exact issue it may lead to broken original model')
-                print(error)
+                log.warning("Exception raised during GPTQ model unpatching. "
+                            "Depending on the exact issue it may lead to broken "
+                            "original model.\n%s", error)
 
 
 def detect_gptq_model_raw(model):
-    return model and getattr(model, 'config', None) and getattr(model.config, 'quantization_config', None) and model.config.quantization_config.quant_method == 'gptq'
+    return (model and getattr(model, 'config', None) and
+            getattr(model.config, 'quantization_config', None) and
+            model.config.quantization_config.quant_method == 'gptq')
 
 
 def detect_gptq_model(model):
-    return detect_gptq_model_raw(model) or getattr(model, 'model', None) and detect_gptq_model_raw(model.model)
+    return (detect_gptq_model_raw(model) or
+            getattr(model, 'model', None) and
+            detect_gptq_model_raw(model.model))
diff --git a/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py b/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py
index 173ec572cb20ae..908a7f8660a94c 100644
--- a/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py
+++ b/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py
@@ -4,9 +4,12 @@
 # flake8: noqa
 # mypy: ignore-errors
 
+import logging
 import torch
 from openvino.frontend.pytorch import ModuleExtension
 
+log = logging.getLogger(__name__)
+
 
 class no_jit_trace:
     def __enter__(self):
@@ -65,8 +68,10 @@ def new_forward(*args, **kwargs):
     for name, m in model.named_modules():
         if hasattr(m, orig_forward_name):
             # already patched, skipping with a warning because it is unexpected
-            print(f'[ WARNING ] Unexpectedly found already patched module {name} while applying ModuleExtension during PyTorch model conversion. '
-                  'Result of the conversion maybe broken. Depending on the exact issue it may lead to broken original model.')
+            log.warning("Unexpectedly found already patched module %s while applying "
+                        "ModuleExtension during PyTorch model conversion. "
+                        "Result of the conversion maybe broken. Depending on the exact issue "
+                        "it may lead to broken original model.", name)
             continue
         module_patcher(m, name)
 
@@ -78,9 +83,9 @@ def unpatch_model(model, orig_forward_name):
             m.forward = getattr(m, orig_forward_name)
             delattr(m, orig_forward_name)
         except Exception as error:
-            print('[ WARNING ] Exception raised during model unpatching. Depending on the exact issue it may lead to broken original model.')
-            print('Original exception details:')
-            print(error)
+            log.warning("Exception raised during model unpatching. "
+                        "Depending on the exact issue it may lead to broken original model.\n"
+                        "Original exception details:\n%s", error)
 
 
 def __make_16bit_traceable(model: torch.nn.Module):
diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py
index 3a082d8153b7e0..28bd6bbd2dfbb0 100644
--- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py
+++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py
@@ -16,13 +16,15 @@
     graph_has_ops,
 )
 from openvino.runtime import opset11 as ops
-from openvino.frontend.pytorch import gptq
-from openvino.frontend.pytorch import patch_model
+from openvino.frontend.pytorch import gptq, patch_model
 from openvino.frontend.pytorch.module_extension import ModuleExtension
 
+import inspect
+import logging
 import typing
 import torch
-import inspect
+
+log = logging.getLogger(__name__)
 
 
 class TorchScriptPythonDecoder(Decoder):
@@ -57,19 +59,21 @@ def __init__(
             except Exception as e:
                 if example_input is not None:
                     msg = "tracing"
-                    help_msg = "Please check correctness of provided 'example_input'. "
-                    "Sometimes models can be converted in scripted mode, please try running "
-                    "conversion without 'example_input'."
+                    help_msg = "Please check correctness of provided 'example_input'. " \
+                               "Sometimes models can be converted in scripted mode, please try running " \
+                               "conversion without 'example_input'.\n"
                 else:
                     msg = "scripting"
-                    help_msg = "\nTracing sometimes provide better results, please provide valid 'example_input' argument."
+                    help_msg = "Tracing sometimes provide better results, " \
+                               "please provide valid 'example_input' argument.\n"
                 raise RuntimeError(
-                    f"Couldn't get TorchScript module by {msg}. With exception:\n{e}\n{help_msg} "
+                    f"Couldn't get TorchScript module by {msg}.\n{help_msg} "
                     "You can also provide TorchScript module that you obtained"
                     " yourself, please refer to PyTorch documentation: "
                     "https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html."
-                )
+                ) from e
             self.graph_element = pt_module.inlined_graph
+            log.debug("Inlined graph:\n%s", pt_module.inlined_graph)
             self.alias_db = self.graph_element.alias_db()
         else:
             self.graph_element = graph_element
@@ -121,7 +125,8 @@ def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False)
         if example_inputs is None:
             if self.module_extensions:
                 raise RuntimeError(
-                    "ModuleExtension is not supported for scripting. Please provide valid example_input argument to run tracing.")
+                    "ModuleExtension is not supported for scripting. "
+                    "Please provide valid example_input argument to run tracing.")
             scripted = torch.jit.script(pt_module)
             freeze_by_default = True
         else:
@@ -140,10 +145,10 @@ def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False)
                     gptq.patch_model(pt_module)
                     gptq_patched = True
                 except Exception as error:
-                    print(
-                        "[ WARNING ] Failed patching of AutoGPTQ model. Error message:\n", error)
-                    print(
-                        "[ WARNING ] Tracing of the model will likely be unsuccessful or incorrect")
+                    log.warning(
+                        "Failed patching of AutoGPTQ model. Error message:\n%s"
+                        "\nTracing of the model will likely be unsuccessful or incorrect",
+                        error)
                     gptq.unpatch_model(pt_module)
                     gptq_patched = False
 
diff --git a/src/frontends/pytorch/src/translate_session.cpp b/src/frontends/pytorch/src/translate_session.cpp
index d1e8f3d418571d..91eeb52cd6f5cf 100644
--- a/src/frontends/pytorch/src/translate_session.cpp
+++ b/src/frontends/pytorch/src/translate_session.cpp
@@ -211,7 +211,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
                                                      fw_tensor_id);
 
 #ifdef ENABLE_OPENVINO_DEBUG
-                const auto out_type = context.get_output_type(i);
+                const auto out_type = simplified_type_interpret(context.get_output_type(i));
                 if (out_type.is<element::Type>()) {
                     if (!converted_outputs[i].get_element_type().compatible(out_type.as<element::Type>())) {
                         OPENVINO_DEBUG("[WARNING] Produced output type for operation ",
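Because the Python-side changes route all diagnostics through module-level loggers created with `logging.getLogger(__name__)`, conversion scripts can now control verbosity with the standard `logging` machinery instead of receiving unconditional `print` output. Below is a minimal sketch of opting in, assuming the packaged `openvino` module and its `ov.convert_model` entry point; the `torch.nn.Linear` stand-in model is a placeholder used only to trigger a conversion:

```python
import logging

import torch
import openvino as ov

# The frontend modules log under their import paths, e.g.
# "openvino.frontend.pytorch.gptq" and "openvino.frontend.pytorch.ts_decoder".
# Attach a root handler so WARNING-level messages (failed GPTQ patching,
# unexpected already-patched modules) reach stderr.
logging.basicConfig(level=logging.WARNING)

# Opt in to DEBUG for the decoder to also see the inlined TorchScript graph
# emitted by log.debug("Inlined graph:\n%s", ...) above.
logging.getLogger("openvino.frontend.pytorch.ts_decoder").setLevel(logging.DEBUG)

model = torch.nn.Linear(4, 2)  # placeholder model for illustration
ov_model = ov.convert_model(model, example_input=torch.zeros(1, 4))
```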