From 0a9818032a1586a444ec0d03257182e491a10725 Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Thu, 14 Nov 2024 18:25:46 +0900 Subject: [PATCH] chore: Initialize shape key as a non-empty string so the no-input-tensor state can be detected --- core/runtime/TRTEngine.h | 2 +- .../dynamo/runtime/_PythonTorchTensorRTModule.py | 2 +- py/torch_tensorrt/dynamo/runtime/_TorchTensorRTModule.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/runtime/TRTEngine.h b/core/runtime/TRTEngine.h index 41db51158b..af71d204a6 100644 --- a/core/runtime/TRTEngine.h +++ b/core/runtime/TRTEngine.h @@ -85,7 +85,7 @@ struct TRTEngine : torch::CustomClassHolder { at::cuda::CUDAStream caller_stream = c10::cuda::getDefaultCUDAStream(); std::vector input_buffers = {}; std::vector output_buffers = {}; - std::string shape_key; + std::string shape_key = "None"; bool cudagraphs_enabled = false; bool use_pre_allocated_outputs = true; std::vector pre_allocated_outputs; diff --git a/py/torch_tensorrt/dynamo/runtime/_PythonTorchTensorRTModule.py b/py/torch_tensorrt/dynamo/runtime/_PythonTorchTensorRTModule.py index afb67d1165..c856ff3be6 100644 --- a/py/torch_tensorrt/dynamo/runtime/_PythonTorchTensorRTModule.py +++ b/py/torch_tensorrt/dynamo/runtime/_PythonTorchTensorRTModule.py @@ -248,7 +248,7 @@ def create_output_tensors(self) -> List[torch.Tensor]: outputs.append(output) return outputs - def set_output_opt(self, enable: bool) -> None: + def set_pre_allocated_outputs(self, enable: bool) -> None: self.use_pre_allocated_outputs = enable def forward(self, *inputs: torch.Tensor) -> torch.Tensor | Tuple[torch.Tensor, ...]: diff --git a/py/torch_tensorrt/dynamo/runtime/_TorchTensorRTModule.py b/py/torch_tensorrt/dynamo/runtime/_TorchTensorRTModule.py index b3ec3258f0..1c9ef0a9c5 100644 --- a/py/torch_tensorrt/dynamo/runtime/_TorchTensorRTModule.py +++ b/py/torch_tensorrt/dynamo/runtime/_TorchTensorRTModule.py @@ -207,7 +207,7 @@ def setup_engine(self) -> None: if self.engine is not None: return 
self.engine = torch.classes.tensorrt.Engine(self._pack_engine_info()) - self.set_output_opt(True) + self.set_pre_allocated_outputs(False) def encode_metadata(self, metadata: Any) -> str: metadata = copy.deepcopy(metadata) @@ -272,7 +272,7 @@ def set_extra_state(self, state: SerializedTorchTensorRTModuleFmt) -> None: self.input_binding_names = state[2] self.output_binding_names = state[3] - def set_output_opt(self, enable: bool) -> None: + def set_pre_allocated_outputs(self, enable: bool) -> None: self.engine.use_pre_allocated_outputs = enable def forward(self, *inputs: Any) -> torch.Tensor | Tuple[torch.Tensor, ...]: