From 6b2f6b89d3fbe79b947c62b4e0de65d9c9012123 Mon Sep 17 00:00:00 2001
From: ivkalgin
Date: Tue, 6 Feb 2024 03:53:59 +0600
Subject: [PATCH] Apply suggestions from code review (docstrings)

Co-authored-by: Arseny <82811840+senysenyseny16@users.noreply.github.com>
---
 README.md               | 2 +-
 src/ti_vit/attention.py | 2 +-
 src/ti_vit/export.py    | 6 +++---
 src/ti_vit/mlp.py       | 2 +-
 src/ti_vit/model.py     | 8 ++++----
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index b72fc2d..dd6a7aa 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # TI-ViT
 
-The repository contains script for export pytorch VIT model to onnx format in form that compatible with
+The repository contains a script for exporting a PyTorch ViT model to ONNX format in a form that is compatible with
 [edgeai-tidl-tools](https://github.com/TexasInstruments/edgeai-tidl-tools) (version 8.6.0.5).
 
 ## Installation
diff --git a/src/ti_vit/attention.py b/src/ti_vit/attention.py
index e67ee43..d41b71b 100644
--- a/src/ti_vit/attention.py
+++ b/src/ti_vit/attention.py
@@ -117,7 +117,7 @@ def from_module(
         attention_type: AttentionType = AttentionType.CONV_CONV,
     ) -> "TICompatibleAttention":
         """
-        Create TI compatible attention block from common Vit attention block.
+        Create TI compatible attention block from common ViT attention block.
 
         Parameters
         ----------
diff --git a/src/ti_vit/export.py b/src/ti_vit/export.py
index 9b76f48..04b79e9 100644
--- a/src/ti_vit/export.py
+++ b/src/ti_vit/export.py
@@ -24,11 +24,11 @@ def export(
     Parameters
     ----------
     output_onnx_path : Union[str, Path]
-        Path to the output onnx.
+        Path to the output ONNX file.
     model_type : str
         Type of the final model. Possible values are "npu-max-acc", "npu-max-perf" or "cpu".
     checkpoint_path : Optional[Union[str, Path]] = None
-        Path to the pytorch model checkpoint. If value is None, then ViT_B_16 pretrained torchvision model is used.
+        Path to the PyTorch model checkpoint. If the value is None, then the pretrained torchvision ViT_B_16 model is used.
         Default value is None.
     resolution : int
         Resolution of input image. Default value is 224.
@@ -89,7 +89,7 @@ def export_ti_compatible_vit() -> None:  # pylint: disable=missing-function-docs
         "--checkpoint",
         type=str,
         required=False,
-        help="Path to the Vit checkpoint (optional argument). By default we download the torchvision checkpoint "
+        help="Path to the ViT checkpoint (optional argument). By default the torchvision checkpoint is downloaded "
         "(VIT_B_16).",
         default=None,
     )
diff --git a/src/ti_vit/mlp.py b/src/ti_vit/mlp.py
index 78fe03e..37f8184 100644
--- a/src/ti_vit/mlp.py
+++ b/src/ti_vit/mlp.py
@@ -117,7 +117,7 @@ def from_module(
         gelu_approx_type: GeluApproximationType = GeluApproximationType.NONE,
     ) -> "TICompatibleMLP":
         """
-        Create TI compatible MLP block from common Vit MLP block.
+        Create TI compatible MLP block from common ViT MLP block.
 
         Parameters
         ----------
diff --git a/src/ti_vit/model.py b/src/ti_vit/model.py
index 6ad9179..01399e9 100644
--- a/src/ti_vit/model.py
+++ b/src/ti_vit/model.py
@@ -59,14 +59,14 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:  # pylint: disable=missing-f
 
 
 class TICompatibleVitOrtMaxPerf(_TICompatibleVit):
-    """TI compatible Vit model with maximal performance."""
+    """TI compatible ViT model with maximum performance."""
 
     def __init__(self, model: VisionTransformer, ignore_tidl_errors: bool = False):
         """
         Parameters
         ----------
         model : VisionTransformer
-            Source Vit model.
+            Source ViT model.
         ignore_tidl_errors : bool
             Experimental option.
""" @@ -95,14 +95,14 @@ def _mlp_perf_block_cfg() -> _BlockCfg: class TICompatibleVitOrtMaxAcc(_TICompatibleVit): - """TI compatible Vit model with minimal accuracy drop.""" + """TI compatible ViT model with minimal accuracy drop.""" def __init__(self, model: VisionTransformer): """ Parameters ---------- model : VisionTransformer - Source Vit model. + Source ViT model. """ super().__init__( model=model,