[PT FE] Use logging instead of print (openvinotoolkit#26739)
### Details:
 - *Use logging instead of print*
 - *Improve logs*

### Tickets:
 - *CVS-136015*
mvafin authored Sep 23, 2024
1 parent 529350d commit 1e9768b
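
The Python files below all adopt the standard module-level logger pattern. A minimal sketch of that pattern, for context only (illustrative names, not code taken verbatim from this commit):

```python
import logging

# One logger per module, named after the module itself
# (e.g. "openvino.frontend.pytorch.gptq"), so messages can be
# filtered or redirected per component.
log = logging.getLogger(__name__)


def unpatch(model):
    try:
        ...  # undo the patching applied before tracing
    except Exception as error:
        # %-style arguments are formatted lazily, only when WARNING is enabled.
        log.warning("Exception raised during unpatching: %s", error)
```

Compared with bare print calls, this lets applications silence, redirect, or raise the verbosity of frontend messages through the usual logging configuration.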
Showing 4 changed files with 44 additions and 25 deletions.
19 changes: 14 additions & 5 deletions src/bindings/python/src/openvino/frontend/pytorch/gptq.py
@@ -4,8 +4,12 @@
# flake8: noqa
# mypy: ignore-errors

import torch
from functools import partial
import logging
import torch

log = logging.getLogger(__name__)


# Wraps a single tensor to a module to prevent it from jit.freezing
# It depends on a tensor dtype whether it will be preserved from freezing. Refer to the decoder code to learn which types will be preserved.
@@ -168,13 +172,18 @@ def unpatch_model(model):
del m._openvino_u4_compression_submodule_qweights
del m._openvino_u4_compression_submodule_qzeros
except Exception as error:
print('[ WARNING ] Exception raised during GPTQ model unpatching. Depending on the exact issue it may lead to broken original model')
print(error)
log.warning("Exception raised during GPTQ model unpatching. "
"Depending on the exact issue it may lead to broken "
"original model.\n%s", error)


def detect_gptq_model_raw(model):
return model and getattr(model, 'config', None) and getattr(model.config, 'quantization_config', None) and model.config.quantization_config.quant_method == 'gptq'
return (model and getattr(model, 'config', None) and
getattr(model.config, 'quantization_config', None) and
model.config.quantization_config.quant_method == 'gptq')


def detect_gptq_model(model):
return detect_gptq_model_raw(model) or getattr(model, 'model', None) and detect_gptq_model_raw(model.model)
return (detect_gptq_model_raw(model) or
getattr(model, 'model', None) and
detect_gptq_model_raw(model.model))
15 changes: 10 additions & 5 deletions src/bindings/python/src/openvino/frontend/pytorch/patch_model.py
@@ -4,9 +4,12 @@
# flake8: noqa
# mypy: ignore-errors

import logging
import torch
from openvino.frontend.pytorch import ModuleExtension

log = logging.getLogger(__name__)


class no_jit_trace:
def __enter__(self):
@@ -65,8 +68,10 @@ def new_forward(*args, **kwargs):
for name, m in model.named_modules():
if hasattr(m, orig_forward_name):
# already patched, skipping with a warning because it is unexpected
print(f'[ WARNING ] Unexpectedly found already patched module {name} while applying ModuleExtension during PyTorch model conversion. '
'Result of the conversion maybe broken. Depending on the exact issue it may lead to broken original model.')
log.warning("Unexpectedly found already patched module %s while applying "
"ModuleExtension during PyTorch model conversion. "
"Result of the conversion maybe broken. Depending on the exact issue "
"it may lead to broken original model.", name)
continue
module_patcher(m, name)

@@ -78,9 +83,9 @@ def unpatch_model(model, orig_forward_name):
m.forward = getattr(m, orig_forward_name)
delattr(m, orig_forward_name)
except Exception as error:
print('[ WARNING ] Exception raised during model unpatching. Depending on the exact issue it may lead to broken original model.')
print('Original exception details:')
print(error)
log.warning("Exception raised during model unpatching. "
"Depending on the exact issue it may lead to broken original model.\n"
"Original exception details:\n%s", error)


def __make_16bit_traceable(model: torch.nn.Module):
33 changes: 19 additions & 14 deletions src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py
@@ -16,13 +16,15 @@
graph_has_ops,
)
from openvino.runtime import opset11 as ops
from openvino.frontend.pytorch import gptq
from openvino.frontend.pytorch import patch_model
from openvino.frontend.pytorch import gptq, patch_model
from openvino.frontend.pytorch.module_extension import ModuleExtension

import inspect
import logging
import typing
import torch
import inspect

log = logging.getLogger(__name__)


class TorchScriptPythonDecoder(Decoder):
@@ -57,19 +59,21 @@ def __init__(
except Exception as e:
if example_input is not None:
msg = "tracing"
help_msg = "Please check correctness of provided 'example_input'. "
"Sometimes models can be converted in scripted mode, please try running "
"conversion without 'example_input'."
help_msg = "Please check correctness of provided 'example_input'. " \
"Sometimes models can be converted in scripted mode, please try running " \
"conversion without 'example_input'.\n"
else:
msg = "scripting"
help_msg = "\nTracing sometimes provide better results, please provide valid 'example_input' argument."
help_msg = "Tracing sometimes provide better results, " \
"please provide valid 'example_input' argument.\n"
raise RuntimeError(
f"Couldn't get TorchScript module by {msg}. With exception:\n{e}\n{help_msg} "
f"Couldn't get TorchScript module by {msg}.\n{help_msg} "
"You can also provide TorchScript module that you obtained"
" yourself, please refer to PyTorch documentation: "
"https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html."
)
) from e
self.graph_element = pt_module.inlined_graph
log.debug("Inlined graph:\n%s", pt_module.inlined_graph)
self.alias_db = self.graph_element.alias_db()
else:
self.graph_element = graph_element
@@ -121,7 +125,8 @@ def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False)
if example_inputs is None:
if self.module_extensions:
raise RuntimeError(
"ModuleExtension is not supported for scripting. Please provide valid example_input argument to run tracing.")
"ModuleExtension is not supported for scripting. "
"Please provide valid example_input argument to run tracing.")
scripted = torch.jit.script(pt_module)
freeze_by_default = True
else:
@@ -140,10 +145,10 @@ def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False)
gptq.patch_model(pt_module)
gptq_patched = True
except Exception as error:
print(
"[ WARNING ] Failed patching of AutoGPTQ model. Error message:\n", error)
print(
"[ WARNING ] Tracing of the model will likely be unsuccessful or incorrect")
log.warning(
"Failed patching of AutoGPTQ model. Error message:\n%s"
"\nTracing of the model will likely be unsuccessful or incorrect",
error)
gptq.unpatch_model(pt_module)
gptq_patched = False

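A side note on the ts_decoder.py hunk above: the RuntimeError is now raised with `from e`. A small, hedged illustration of the effect (not part of the commit):

```python
def trace_or_fail(fn):
    try:
        return fn()
    except Exception as e:
        # Explicit chaining stores the original exception as __cause__, so the
        # printed traceback shows both the user-facing message and the root error.
        raise RuntimeError("Couldn't get TorchScript module by tracing.") from e
```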
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/translate_session.cpp
@@ -211,7 +211,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
fw_tensor_id);

#ifdef ENABLE_OPENVINO_DEBUG
const auto out_type = context.get_output_type(i);
const auto out_type = simplified_type_interpret(context.get_output_type(i));
if (out_type.is<element::Type>()) {
if (!converted_outputs[i].get_element_type().compatible(out_type.as<element::Type>())) {
OPENVINO_DEBUG("[WARNING] Produced output type for operation ",
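
Because these messages now go through the standard `logging` module, warnings still reach stderr via Python's last-resort handler, but the new DEBUG output (such as the inlined-graph dump added in ts_decoder.py) only appears once the application configures logging. A minimal usage sketch, assuming the logger names follow the modules changed above:

```python
import logging

# Show everything, including the "Inlined graph" dump emitted at DEBUG level.
logging.basicConfig(level=logging.DEBUG)

# Or restrict the verbosity of just the PyTorch frontend modules.
logging.getLogger("openvino.frontend.pytorch").setLevel(logging.WARNING)
```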
