Skip to content

Commit

Permalink
22.02 onnx-TensorRT release (#801)
Browse files Browse the repository at this point in the history
Signed-off-by: Kevin Chen <[email protected]>
  • Loading branch information
kevinch-nv authored Feb 4, 2022
1 parent e9456d5 commit 8b6144a
Show file tree
Hide file tree
Showing 5 changed files with 55 additions and 42 deletions.
76 changes: 42 additions & 34 deletions ConditionalHelpers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,14 +115,14 @@ Status addConditionalInputIfNeeded(IImporterContext* ctx, nvinfer1::IIfCondition

// Add IConditionalInputLayers to `layer`'s inputs.
Status addIfInputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditional, InputsMap& inputsMap,
const ::ONNX_NAMESPACE::GraphProto& subgraph, const std::vector<nvinfer1::ILayer*>& newLayers)
const std::vector<nvinfer1::ILayer*>& newLayers)
{
// Find all of the tensors entering the subgraph.
// The node-names are from the ONNX context.
using NodeName = std::string;
using InputIndex = int32_t;
std::unordered_map<NodeName, std::set<InputIndex>> subgraphInputsMap;
getSubgraphInputs(subgraph, subgraphInputsMap);
getSubgraphInputs(newLayers, subgraphInputsMap);

// Add a ConditionalInputLayer in front of each input that is external to the subgraph.
for (const auto& layer : newLayers)
Expand Down Expand Up @@ -158,10 +158,10 @@ Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* condit

std::vector<std::string> thenReportedOutputs;
getReportedOutputs(thenGraph, thenReportedOutputs);
getSubgraphOutputs(thenGraph, thenOutputs, thenReportedOutputs);
getSubgraphOutputs(thenLayers, thenOutputs, thenReportedOutputs);
std::vector<std::string> elseReportedOutputs;
getReportedOutputs(thenGraph, elseReportedOutputs);
getSubgraphOutputs(elseGraph, elseOutputs, elseReportedOutputs);
getReportedOutputs(elseGraph, elseReportedOutputs);
getSubgraphOutputs(elseLayers, elseOutputs, elseReportedOutputs);

// Retrieve the output tensors of a subgraph (tensors exiting the subgraph).
auto getSubgraphOutputTensors
Expand Down Expand Up @@ -212,59 +212,67 @@ Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* condit
}

// Given a subgraph, find all of its external inputs/outputs (tensors entering/exiting the subgraph).
Status getSubgraphTensors(const ::ONNX_NAMESPACE::GraphProto& graph,
Status getSubgraphTensors(const std::vector<nvinfer1::ILayer*>& newLayers,
std::unordered_map<std::string, std::set<int32_t>>& externalOutputs, bool extractOutputs,
const std::vector<std::string>* reportedOutputs = nullptr)
{
std::vector<size_t> topoOrder;
ASSERT(toposort(graph.node(), &topoOrder) && "Failed to sort the model topologically.", ErrorCode::kINVALID_GRAPH);
using NodeName = std::string;
using TensorName = std::string;
using PortIndex = int32_t;
using Port = std::pair<NodeName, PortIndex>;
std::unordered_set<TensorName> outputTensors;
std::unordered_set<TensorName> inputTensors;
using TensorsSet = std::unordered_set<nvinfer1::ITensor*>;
TensorsSet outputTensors;
TensorsSet inputTensors;

// To determine which tensors are entering or exiting the given graph, we first collect the sets of all input and
// output tensors. Then we categorize the tensors according to this logic:
// Entering tensors := {inputs} - {outputs}
// Exiting tensors := {outputs} - {inputs}

// Collect all input and output tensors belonging to nodes in the graph.
for (const auto& nodeIndex : topoOrder)
{
const auto& node = graph.node(nodeIndex);
for (const auto& outputName : node.output())
{
outputTensors.insert(outputName);
}
for (const auto& inputName : node.input())

auto getTensors = [](nvinfer1::ILayer const* l, bool const input, auto inserter) {
auto const count = input ? l->getNbInputs() : l->getNbOutputs();
for (int32_t i = 0; i < count; i++)
{
inputTensors.insert(inputName);
inserter(input ? l->getInput(i) : l->getOutput(i));
}
};

for (const auto& l : newLayers)
{
getTensors(l, false, [&](nvinfer1::ITensor* t) { outputTensors.insert(t); });
getTensors(l, true, [&](nvinfer1::ITensor* t) { inputTensors.insert(t); });
}

using NodeProto = const ::ONNX_NAMESPACE::NodeProto;
auto getOutputs = [](NodeProto& node) { return node.output(); };
auto getInputs = [](NodeProto& node) { return node.input(); };
using TensorsVec = std::vector<nvinfer1::ITensor*>;
auto getOutputs = [&](nvinfer1::ILayer const* l, TensorsVec res) {
getTensors(l, false, [&](nvinfer1::ITensor* t) { res.emplace_back(t); });
};

auto getInputs = [&](nvinfer1::ILayer const* l, TensorsVec res) {
getTensors(l, true, [&](nvinfer1::ITensor* t) { res.emplace_back(t); });
};

// Retrieve the list of tensors either exiting or entering the subgraph.
std::unordered_map<TensorName, std::vector<Port>> externalPortsMap;
auto filterTensors = [&](std::unordered_set<TensorName> tensors, auto nodeAccessor) {
for (const auto& nodeIndex : topoOrder)
auto filterTensors = [&](TensorsSet const& tensors, auto getNodeAccessor) {
for (nvinfer1::ILayer const* l : newLayers)
{
const auto& node = graph.node(nodeIndex);
const auto& nodeName = getNodeName(node);
const auto& nodeName = l->getName();
PortIndex i = 0;

for (const auto& tensorName : nodeAccessor(node))
TensorsVec nodeAccessor;
getNodeAccessor(l, nodeAccessor);
for (const auto& tensor : nodeAccessor)
{
if (tensorName.empty())
if (tensor == nullptr)
{
continue;
}
if (tensors.count(tensorName) == 0)
if (tensors.count(tensor) == 0)
{
TensorName tensorName = tensor->getName();
auto prefixFound = false;
if (reportedOutputs)
{
Expand Down Expand Up @@ -314,17 +322,17 @@ Status getSubgraphTensors(const ::ONNX_NAMESPACE::GraphProto& graph,
return Status::success();
}

Status getSubgraphOutputs(const ::ONNX_NAMESPACE::GraphProto& graph,
Status getSubgraphOutputs(const std::vector<nvinfer1::ILayer*>& newLayers,
std::unordered_map<std::string, std::set<int32_t>>& externalOutputs,
const std::vector<std::string>& reportedOutputs)
{
return getSubgraphTensors(graph, externalOutputs, true, &reportedOutputs);
return getSubgraphTensors(newLayers, externalOutputs, true, &reportedOutputs);
}

Status getSubgraphInputs(
const ::ONNX_NAMESPACE::GraphProto& graph, std::unordered_map<std::string, std::set<int32_t>>& externalInputs)
Status getSubgraphInputs(const std::vector<nvinfer1::ILayer*>& newLayers,
std::unordered_map<std::string, std::set<int32_t>>& externalInputs)
{
return getSubgraphTensors(graph, externalInputs, false);
return getSubgraphTensors(newLayers, externalInputs, false);
}

} // namespace onnx2trt
7 changes: 4 additions & 3 deletions ConditionalHelpers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,13 @@ namespace onnx2trt
// The result is returned in `subgraphInputs`, which is a map indexed by layer-name and with values indicating a set
// of external input indices.
Status getSubgraphInputs(
const ::ONNX_NAMESPACE::GraphProto& graph, std::unordered_map<std::string, std::set<int32_t>>& subgraphInputs);
const std::vector<nvinfer1::ILayer*>& newLayers,
std::unordered_map<std::string, std::set<int32_t>>& subgraphInputs);

// Given a subgraph, find all of its external outputs (tensors exiting the subgraph).
// The result is returned in `subgraphInputs`, which is a map indexed by layer-name and with values indicating a set
// of external outputs indices.
Status getSubgraphOutputs(const ::ONNX_NAMESPACE::GraphProto& graph,
Status getSubgraphOutputs(const std::vector<nvinfer1::ILayer*>& newLayers,
std::unordered_map<std::string, std::set<int32_t>>& subgraphOutputs,
const std::vector<std::string>& reportedOutputs);

Expand All @@ -40,7 +41,7 @@ using InputsMap = std::unordered_map<std::string, nvinfer1::IIfConditionalInputL

// Add IIfConditionalInputLayers to the inputs of the subgraph indicated by `subgraph`.
onnx2trt::Status addIfInputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditional, InputsMap& inputsMap,
const ::ONNX_NAMESPACE::GraphProto& subgraph, const std::vector<nvinfer1::ILayer*>& newLayers);
const std::vector<nvinfer1::ILayer*>& newLayers);

// Add IIfConditionalOutputLayers to the outputs of the subgraph indicated by `subgraph`.
onnx2trt::Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditional,
Expand Down
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ For press and other inquiries, please contact Hector Marinez at hmarinez@nvidia.

## Supported TensorRT Versions

Development on the Master branch is for the latest version of [TensorRT 8.2.1.8](https://developer.nvidia.com/nvidia-tensorrt-download) with full-dimensions and dynamic shape support.
Development on the Master branch is for the latest version of [TensorRT 8.2.3.0](https://developer.nvidia.com/nvidia-tensorrt-download) with full-dimensions and dynamic shape support.

For previous versions of TensorRT, refer to their respective branches.

Expand Down Expand Up @@ -48,8 +48,8 @@ Current supported ONNX operators are found in the [operator support matrix](docs
### Dependencies

- [Protobuf >= 3.0.x](https://github.com/google/protobuf/releases)
- [TensorRT 8.2.1.8](https://developer.nvidia.com/tensorrt)
- [TensorRT 8.2.1.8 open source libaries (master branch)](https://github.com/NVIDIA/TensorRT/)
- [TensorRT 8.2.3.0](https://developer.nvidia.com/tensorrt)
- [TensorRT 8.2.3.0 open source libraries (master branch)](https://github.com/NVIDIA/TensorRT/)

### Building

Expand Down
4 changes: 2 additions & 2 deletions builtin_op_importers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2170,8 +2170,8 @@ DEFINE_BUILTIN_OP_IMPORTER(If)

using InputsMap = std::unordered_map<std::string, nvinfer1::IIfConditionalInputLayer*>;
InputsMap inputsMap;
CHECK(addIfInputLayers(ctx, conditional, inputsMap, thenGraph, thenLayers));
CHECK(addIfInputLayers(ctx, conditional, inputsMap, elseGraph, elseLayers));
CHECK(addIfInputLayers(ctx, conditional, inputsMap, thenLayers));
CHECK(addIfInputLayers(ctx, conditional, inputsMap, elseLayers));
CHECK(addIfOutputLayers(ctx, conditional, thenGraph, thenLayers, elseGraph, elseLayers, graphOutputs));

return {graphOutputs};
Expand Down
4 changes: 4 additions & 0 deletions docs/Changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@

# ONNX-TensorRT Changelog

## 22.02 Container Release - 2022-02-03
### Fixes
- Fixed naming issue in parsing `If` conditional graphs

## TensorRT 8.2 GA Release - 2021-11-23

### Added
Expand Down

0 comments on commit 8b6144a

Please sign in to comment.