Add an option to dump textual pipeline during compilation
jhalakpatel committed Oct 17, 2024
1 parent 5e4f406 commit b559e1d
Showing 6 changed files with 26 additions and 6 deletions.
@@ -82,7 +82,8 @@ MLIR_CAPI_EXPORTED MTRT_Status mtrtStableHloToExecutableOptionsCreateFromArgs(
MLIR_CAPI_EXPORTED MTRT_Status mtrtStableHloToExecutableOptionsSetDebugOptions(
MTRT_StableHLOToExecutableOptions options, bool enableDebugging,
const char **debugTypes, size_t debugTypeSizes,
const char *dumpIrTreeDir = nullptr, const char *dumpTensorRTDir = nullptr);
const char *dumpIrTreeDir = nullptr, const char *dumpTensorRTDir = nullptr,
bool dumpTextualPipeline = false);

/// Sets the layer metadata callback. The `userData` argument is passed along
/// to the callback when it is invoked.
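For reference, a minimal sketch of how a client of the C API might enable the new dump through the updated entry point. It assumes an options handle was already created (for example via mtrtStableHloToExecutableOptionsCreateFromArgs, whose full signature is not shown in this diff); the debug-type strings mirror the ones used in the test later in this diff, and only the final argument is new.

// Sketch only: request LLVM debug output for two debug types plus a textual
// dump of the compilation pipeline. Check the returned status in real code.
const char *debugTypes[] = {"translate-to-tensorrt", "stablehlo-clustering"};
MTRT_Status status = mtrtStableHloToExecutableOptionsSetDebugOptions(
    options, /*enableDebugging=*/true, debugTypes, /*debugTypeSizes=*/2,
    /*dumpIrTreeDir=*/nullptr, /*dumpTensorRTDir=*/nullptr,
    /*dumpTextualPipeline=*/true);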
@@ -48,12 +48,16 @@ struct DebugOptions {
/// `-debug-types=...` from the command line.
mlir::SmallVector<std::string> llvmDebugTypes = {};

/// Dump textual pipeline passes.
bool dumpTextualPipeline = false;

void addToOptions(mlir::OptionsContext &context) {
context.addOption("mlir-print-ir-tree-dir", dumpIRPath, llvm::cl::init(""));
context.addOption("debug", enableLLVMDebugFlag);
context.addList<std::string>("debug-only", llvmDebugTypes,
llvm::cl::ZeroOrMore,
llvm::cl::CommaSeparated);
context.addOption("dump-textual-pipeline", dumpTextualPipeline);
}
};

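The addOption call above is what turns the new struct member into a --dump-textual-pipeline command-line flag (the flag the updated test passes later in this diff). OptionsContext is an mlir-tensorrt-specific helper whose internals are not shown here; as a rough analogy only, a stand-alone llvm::cl registration of the same switch would look like this:

#include "llvm/Support/CommandLine.h"

// Analogy only: a boolean option registered under the name
// "dump-textual-pipeline", defaulting to false, which command-line parsing
// flips to true when --dump-textual-pipeline is passed.
static llvm::cl::opt<bool> dumpTextualPipelineFlag(
    "dump-textual-pipeline",
    llvm::cl::desc("Print the compilation pass pipeline in textual form"),
    llvm::cl::init(false));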
3 changes: 2 additions & 1 deletion mlir-tensorrt/compiler/lib/CAPI/Compiler/Compiler.cpp
@@ -185,7 +185,7 @@ MTRT_Status mtrtStableHloToExecutableOptionsCreateFromArgs(
MTRT_Status mtrtStableHloToExecutableOptionsSetDebugOptions(
MTRT_StableHLOToExecutableOptions options, bool enableDebugging,
const char **debugTypes, size_t debugTypeSizes, const char *dumpIrTreeDir,
const char *dumpTensorRTDir) {
const char *dumpTensorRTDir, bool dumpTextualPipeline) {

StableHLOToExecutableOptions *cppOpts = unwrap(options);
cppOpts->debugOptions.enableLLVMDebugFlag = enableDebugging;
@@ -195,6 +195,7 @@ MTRT_Status mtrtStableHloToExecutableOptionsSetDebugOptions(
if (dumpIrTreeDir)
cppOpts->debugOptions.dumpIRPath = std::string(dumpIrTreeDir);

cppOpts->debugOptions.dumpTextualPipeline = dumpTextualPipeline;
return mtrtStatusGetOk();
}

3 changes: 3 additions & 0 deletions mlir-tensorrt/compiler/lib/Compiler/StableHloToExecutable.cpp
@@ -485,6 +485,9 @@ StableHloToExecutableTask::compileStableHLOToExecutable(
runner = pm.get();
}

if (options.debugOptions.dumpTextualPipeline)
runner->printAsTextualPipeline(llvm::dbgs());

// Setup pass manager
if (failed(runner->run(module)))
return getInternalErrorStatus(
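printAsTextualPipeline is the standard MLIR OpPassManager/PassManager facility for rendering the nested pass structure in its textual pipeline form, which is why the updated test can FileCheck for builtin.module. A self-contained sketch of the same pattern, assuming runner above points at an mlir::PassManager; the passes chosen here are purely illustrative:

#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: build a small pass manager and print its textual pipeline.
// The output is roughly of the form `builtin.module(canonicalize,cse)` and
// can be fed back to mlir tools via --pass-pipeline.
void dumpPipelineExample() {
  mlir::MLIRContext context;
  mlir::PassManager pm(&context);
  pm.addPass(mlir::createCanonicalizerPass());
  pm.addPass(mlir::createCSEPass());
  pm.printAsTextualPipeline(llvm::errs());
  llvm::errs() << "\n";
}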
9 changes: 6 additions & 3 deletions mlir-tensorrt/python/bindings/Compiler/CompilerPyBind.cpp
@@ -261,7 +261,8 @@ PYBIND11_MODULE(_api, m) {
[](PyStableHLOToExecutableOptions &self, bool enabled,
std::vector<std::string> debugTypes,
std::optional<std::string> dumpIrTreeDir,
std::optional<std::string> dumpTensorRTDir) {
std::optional<std::string> dumpTensorRTDir,
std::optional<bool> dumpTextualPipeline) {
// The strings are copied by the CAPI call, so we just need to
// reference the C-strings temporarily.
std::vector<const char *> literals;
@@ -270,12 +271,14 @@
THROW_IF_MTRT_ERROR(mtrtStableHloToExecutableOptionsSetDebugOptions(
self, enabled, literals.data(), literals.size(),
dumpIrTreeDir ? dumpIrTreeDir->c_str() : nullptr,
dumpTensorRTDir ? dumpTensorRTDir->c_str() : nullptr));
dumpTensorRTDir ? dumpTensorRTDir->c_str() : nullptr,
dumpTextualPipeline ? *dumpTextualPipeline : false));
},
py::arg("enabled"),
py::arg("debug_types") = std::vector<std::string>{},
py::arg("dump_ir_tree_dir") = py::none(),
py::arg("dump_tensorrt_dir") = py::none())
py::arg("dump_tensorrt_dir") = py::none(),
py::arg("dump_textual_pipeline") = py::none())

#ifdef MLIR_TRT_TARGET_TENSORRT
.def(
@@ -1,4 +1,4 @@
# RUN: %PYTHON %s 2>&1
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# REQUIRES: host-has-at-least-1-gpus
import os
import tempfile
@@ -39,6 +39,7 @@ def compile_asm(ASM):
"--tensorrt-strongly-typed=false",
"--debug=true",
"--debug-only=translate-to-tensorrt,stablehlo-clustering",
"--dump-textual-pipeline",
f"--mlir-print-ir-tree-dir={mlir_tree_path.name}",
f"--tensorrt-layer-info-dir={trt_path.name}",
f"--tensorrt-engines-dir={trt_path.name}",
@@ -50,3 +51,10 @@ def compile_asm(ASM):


compile_asm(ASM)

# CHECK: builtin.module
# CHECK: [translate-to-tensorrt] TranslateToTensorRTEnginePass is generating a new TensorRT builder
# CHECK: [translate-to-tensorrt] timing cache path was not specified, creating a fresh timing cache
# CHECK: [translate-to-tensorrt] deserializing TensorRT builder timing cache (0 bytes)
# CHECK: [translate-to-tensorrt] Setting builder optimization level to 3
# CHECK: [translate-to-tensorrt] replacing cache with updated data
