Merge pull request #59 from NVIDIA/python_api
Implementation of a Python API for TRTorch
narendasan authored May 17, 2020
2 parents 227dea3 + 639c2a3 commit 5f84977
Showing 379 changed files with 84,670 additions and 1,354 deletions.
11 changes: 10 additions & 1 deletion .bazelrc
@@ -14,7 +14,7 @@
# limitations under the License.
#
# File: DL4AGX/.bazelrc
# Description: Default bazel settings and toolchain configuration
##########################################################################

# +------------------------------------------------------------+
@@ -24,3 +24,12 @@
build --cxxopt="-fdiagnostics-color=always"
build --cxxopt='-std=c++14'
#build --linkopt="-Wl,--no-as-needed"


build:python --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
build:python --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
build:python --define=abi=pre_cxx11_abi

build:pre_cxx11_abi --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
build:pre_cxx11_abi --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
build:pre_cxx11_abi --define=abi=pre_cxx11_abi
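
Both new configs force the pre-C++11 ABI, which is the ABI the pip-distributed PyTorch binaries are built against. A minimal sketch of selecting one of these configs on the command line (assuming a top-level `//:libtrtorch` library target; substitute your own target):

```
# Build TRTorch against the pre-C++11 ABI libtorch, matching pip-installed PyTorch
bazel build //:libtrtorch --config pre_cxx11_abi
```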
12 changes: 10 additions & 2 deletions .gitignore
@@ -26,5 +26,13 @@ tests/accuracy/datasets/data/*
*.tar.gz
*.tgz
docsrc/_build
docsrc/_api
docsrc/_tmp
docsrc/_cpp_api
docsrc/_tmp
*.so
__pycache__
*.egg-info
dist
bdist
py/trtorch/_version.py
py/wheelhouse
py/.eggs
10 changes: 10 additions & 0 deletions README.md
@@ -137,6 +137,16 @@ A tarball with the include files and library can then be found in bazel-bin
bazel run //cpp/trtorchexec -- $(realpath <PATH TO GRAPH>) <input-size>
```

## Compiling the Python Package

To compile the Python package for your local machine, run `python3 setup.py install` in the `//py` directory.
To build wheel files for different Python versions, first build the Dockerfile in `//py`, then run the following
command:
```
docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh
```
Python compilation expects you to use the tarball-based compilation strategy described above.

## How do I add support for a new op...

### In TRTorch?
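After `setup.py install` (or installing a built wheel), a quick import is a handy smoke test (a minimal sketch; it assumes `trtorch.__version__` is populated by the generated `py/trtorch/_version.py` noted in the `.gitignore` change above):

```
python3 -c "import trtorch; print(trtorch.__version__)"
```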
42 changes: 32 additions & 10 deletions WORKSPACE
@@ -3,17 +3,17 @@ workspace(name = "TRTorch")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")


http_archive(
git_repository(
name = "rules_python",
url = "https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
remote = "https://github.com/bazelbuild/rules_python.git",
commit = "4fcc24fd8a850bdab2ef2e078b1de337eea751a6",
shallow_since = "1589292086 -0400"
)

load("@rules_python//python:repositories.bzl", "py_repositories")
py_repositories()
# Only needed if using the packaging rules.
load("@rules_python//python:pip.bzl", "pip_repositories", "pip_import")

load("@rules_python//python:pip.bzl", "pip_repositories", "pip3_import")
pip_repositories()

http_archive(
@@ -32,6 +32,14 @@ new_local_repository(
build_file = "@//third_party/cuda:BUILD",
)

http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
sha256 = "ea8de17c5f70015583f3a7a43c7a5cdf91a1d4bd19a6a7bc11f074ef6cd69e27",
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.5.0.zip"],
)

http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
@@ -40,20 +48,34 @@ http_archive(
sha256 = "0efdd4e709ab11088fa75f0501c19b0e294404231442bab1d1fb953924feb6b5"
)

pip3_import(
name = "trtorch_py_deps",
requirements = "//py:requirements.txt"
)

load("@trtorch_py_deps//:requirements.bzl", "pip_install")
pip_install()

pip3_import(
name = "py_test_deps",
requirements = "//tests/py:requirements.txt"
)

load("@py_test_deps//:requirements.bzl", "pip_install")
pip_install()

# Downloaded distributions to use with --distdir
http_archive(
name = "cudnn",
urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/7.6.5.32/Production/10.2_20191118/cudnn-10.2-linux-x64-v7.6.5.32.tgz",],

urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/7.6.5.32/Production/10.2_20191118/cudnn-10.2-linux-x64-v7.6.5.32.tgz"],
build_file = "@//third_party/cudnn/archive:BUILD",
sha256 = "600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20",
strip_prefix = "cuda"
)

http_archive(
name = "tensorrt",
urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.0/7.0.0.11/tars/TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz",],

urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.0/7.0.0.11/tars/TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz"],
build_file = "@//third_party/tensorrt/archive:BUILD",
sha256 = "c7d73b2585b18aae68b740249efa8c8ba5ae852abe9a023720595432a8eb4efd",
strip_prefix = "TensorRT-7.0.0.11"
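The two `pip3_import` rules resolve their pinned requirements files into external repositories when the workspace loads. One way to sanity-check that resolution succeeded is to query the generated repository (a sketch; the repository name comes from the `pip3_import` above, and the first run needs network access to download packages):

```
# List the targets generated from py/requirements.txt
bazel query @trtorch_py_deps//...
```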
13 changes: 11 additions & 2 deletions core/BUILD
@@ -1,5 +1,12 @@
package(default_visibility = ["//visibility:public"])

config_setting(
name = "use_pre_cxx11_abi",
values = {
"define": "abi=pre_cxx11_abi",
}
)

cc_library(
name = "core",
hdrs = [
@@ -13,9 +20,11 @@ cc_library(
"//core/execution",
"//core/lowering",
"//core/util/logging",
"@libtorch//:libtorch",
"@tensorrt//:nvinfer"
],
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
alwayslink=True,
)

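The `config_setting` above matches on `--define abi=pre_cxx11_abi`, so the `select()` swaps the dependency to `@libtorch_pre_cxx11_abi` whenever that define is set. Passing the define directly triggers the switch, though `--config pre_cxx11_abi` is usually what you want since it also applies the matching `-D_GLIBCXX_USE_CXX11_ABI=0` compile and link options from `.bazelrc` (a sketch):

```
# Selects the pre-C++11 ABI libtorch dependency; unlike --config pre_cxx11_abi,
# this alone does not set the ABI compile/link options from .bazelrc
bazel build //core --define abi=pre_cxx11_abi
```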
14 changes: 11 additions & 3 deletions core/conversion/BUILD
@@ -1,5 +1,12 @@
package(default_visibility = ["//visibility:public"])

config_setting(
name = "use_pre_cxx11_abi",
values = {
"define": "abi=pre_cxx11_abi",
}
)

cc_library(
name = "conversion",
hdrs = [
@@ -8,17 +15,18 @@ cc_library(
srcs = [
"conversion.cpp",
"conversion_blacklist.cpp",
"string_to_type_lut.cpp",
"InterfaceTypes.cpp"
],
deps = [
"@tensorrt//:nvinfer",
"@libtorch//:libtorch",
"//core/conversion/conversionctx",
"//core/conversion/converters",
"//core/conversion/evaluators",
"//core/util:prelude"
]
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
)

load("@rules_pkg//:pkg.bzl", "pkg_tar")
13 changes: 6 additions & 7 deletions core/conversion/InterfaceTypes.cpp
@@ -10,10 +10,9 @@ namespace conversion {
GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs,
std::vector<at::Tensor> params) {
GraphParams named_params;
auto type_lut = torch::jit::script::string_to_type_lut();
auto param_it = params.begin();
for (auto in : inputs) {
if (in->type() != type_lut["Tensor"] \
if (in->type() != c10::TensorType::get() \
&& in->isCompleteTensor() && param_it != params.end()) {
named_params[in] = *param_it;
++param_it;
@@ -35,7 +34,7 @@ InputRange::InputRange(std::vector<int64_t> d) {
min = util::toDims(d);
max = util::toDims(d);
input_shape = util::toDims(d);

}


@@ -48,14 +47,14 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_
sizes.insert(min_shape.size());
sizes.insert(opt_shape.size());
sizes.insert(max_shape.size());

if (sizes.size() != 1) {
LOG_ERROR("Expected all input sizes have the same dimensions, but found dimensions: min(" \
<< min_shape.size() << "), opt("
<< opt_shape.size() << "), max("
<< max_shape.size() << ")");
}

min = util::toDimsPad(min_shape, 4);
opt = util::toDimsPad(opt_shape, 4);
max = util::toDimsPad(max_shape, 4);
@@ -72,9 +71,9 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_
dyn_shape.push_back(opt_shape[i]);
}
}

input_shape = util::toDimsPad(dyn_shape, 4);

}

} // namespace conversion
4 changes: 1 addition & 3 deletions core/conversion/conversion.cpp
@@ -117,16 +117,14 @@ void AddInputs(ConversionCtx* ctx,
at::ArrayRef<const torch::jit::Value*> inputs,
std::vector<InputRange>& input_dims) {

auto type_lut = torch::jit::script::string_to_type_lut();
std::vector<const torch::jit::Value*> input_tensors;
for (auto in : inputs) {
// Disregarding inputs that are not tensors
//
// Ex.
// self.1:__torch__.alexnet -> ignored
// input.1:Tensor -> used
auto pt_type = in->type();
if (pt_type == type_lut["Tensor"]) {
if (in->type()->isSubtypeOf(c10::TensorType::get()) && ctx->evaluated_value_map.find(in) == ctx->evaluated_value_map.end()) {
input_tensors.push_back(in);
}
}
8 changes: 0 additions & 8 deletions core/conversion/conversion.h
@@ -6,14 +6,6 @@
#include "torch/csrc/jit/ir/ir.h"
#include "core/conversion/conversionctx/ConversionCtx.h"

namespace torch {
namespace jit {
namespace script {
const std::unordered_map<std::string, c10::TypePtr>& string_to_type_lut();
}
}
}

namespace trtorch {
namespace core {
namespace conversion {
1 change: 1 addition & 0 deletions core/conversion/conversion_blacklist.cpp
@@ -19,6 +19,7 @@ const std::unordered_set<std::string>& get_non_convertable_nodes() {
"prim::device",
"prim::GetAttr",
"prim::CallMethod",
"prim::Drop",
"aten:dropout",
};
return nonconvertable_nodes;
13 changes: 11 additions & 2 deletions core/conversion/conversionctx/BUILD
@@ -1,5 +1,12 @@
package(default_visibility = ["//visibility:public"])

config_setting(
name = "use_pre_cxx11_abi",
values = {
"define": "abi=pre_cxx11_abi",
}
)

cc_library(
name = "conversionctx",
hdrs = [
@@ -10,9 +17,11 @@ cc_library(
],
deps = [
"@tensorrt//:nvinfer",
"@libtorch//:libtorch",
"//core/util:prelude",
]
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
)

load("@rules_pkg//:pkg.bzl", "pkg_tar")
Expand Down
15 changes: 12 additions & 3 deletions core/conversion/converters/BUILD
@@ -1,5 +1,12 @@
package(default_visibility = ["//visibility:public"])

config_setting(
name = "use_pre_cxx11_abi",
values = {
"define": "abi=pre_cxx11_abi",
}
)

cc_library(
name = "converters",
hdrs = [
@@ -24,11 +31,13 @@ cc_library(
"impl/unary.cpp",
],
deps = [
"@libtorch//:libtorch",
"@tensorrt//:nvinfer",
"//core/util:prelude",
"//core/conversion/conversionctx"
],
"//core/conversion/conversionctx",
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
alwayslink = True,
)

13 changes: 11 additions & 2 deletions core/conversion/evaluators/BUILD
@@ -1,5 +1,12 @@
package(default_visibility = ["//visibility:public"])

config_setting(
name = "use_pre_cxx11_abi",
values = {
"define": "abi=pre_cxx11_abi",
}
)

cc_library(
name = "evaluators",
hdrs = [
@@ -10,9 +17,11 @@ cc_library(
"prim.cpp",
],
deps = [
"@libtorch//:libtorch",
"//core/util:prelude",
],
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
alwayslink = True,
)

Expand Down
Loading

0 comments on commit 5f84977

Please sign in to comment.