tensorNet.h
/*
* http://github.com/dusty-nv/jetson-inference
*/
#ifndef __TENSOR_NET_H__
#define __TENSOR_NET_H__

#include "NvInfer.h"
#include "NvCaffeParser.h"

#include <sstream>
#include <string>
#include <vector>

/**
* Abstract class for loading a tensor network with TensorRT.
* For example implementations, @see imageNet and @see detectNet
*/
class tensorNet
{
public:
	/**
	 * Destructor.
	 */
	virtual ~tensorNet();

	/**
	 * Load a new network instance.
	 * @param prototxt File path to the deployable network prototxt
	 * @param model File path to the caffemodel
	 * @param mean File path to the mean value binary proto (NULL if none)
	 * @param input_blob Name of the network's input layer blob
	 * @param output_blob Name of the network's output layer blob
	 */
	bool LoadNetwork( const char* prototxt, const char* model, const char* mean=NULL,
	                  const char* input_blob="data", const char* output_blob="prob" );
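
	// Illustrative sketch (not part of the original header):  a derived class such as
	// imageNet typically calls the single-output overload from its own init routine.
	// The class name and file paths below are hypothetical placeholders.
	//
	//     bool myNet::init()
	//     {
	//         return LoadNetwork( "networks/deploy.prototxt", "networks/snapshot.caffemodel",
	//                             NULL, "data", "prob" );
	//     }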

	/**
	 * Load a new network instance with multiple output layers.
	 * @param prototxt File path to the deployable network prototxt
	 * @param model File path to the caffemodel
	 * @param mean File path to the mean value binary proto (NULL if none)
	 * @param input_blob Name of the network's input layer blob
	 * @param output_blobs Names of the network's output layer blobs
	 */
	bool LoadNetwork( const char* prototxt, const char* model, const char* mean,
	                  const char* input_blob, const std::vector<std::string>& output_blobs );
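
	// Sketch of the multi-output overload (the output blob names are hypothetical;
	// a detection network might expose separate coverage and bounding-box layers):
	//
	//     std::vector<std::string> outputs;
	//     outputs.push_back("coverage");
	//     outputs.push_back("bboxes");
	//
	//     if( !LoadNetwork("deploy.prototxt", "model.caffemodel", NULL, "data", outputs) )
	//         printf("failed to load network\n");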

	/**
	 * Query for half-precision FP16 support.
	 */
	inline bool HasFP16() const		{ return mEnableFP16; }

protected:

	/**
	 * Constructor.
	 */
	tensorNet();

	/**
	 * Create and output an optimized network model.
	 * @note this function is automatically used by LoadNetwork, but can also
	 *       be used on its own to build the optimized network offline.
	 * @param deployFile name of the network prototxt
	 * @param modelFile name of the caffemodel
	 * @param outputs network output blob names
	 * @param maxBatchSize maximum batch size
	 * @param modelStream output stream that receives the serialized model
	 */
	bool ProfileModel( const std::string& deployFile, const std::string& modelFile,
	                   const std::vector<std::string>& outputs,
	                   uint32_t maxBatchSize, std::ostream& modelStream );
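
	// Offline usage sketch (hypothetical; LoadNetwork() normally drives this internally):
	// serialize the optimized engine into a stream that could then be written to disk.
	//
	//     std::stringstream gieModelStream;
	//     std::vector<std::string> outputs;
	//     outputs.push_back("prob");
	//
	//     if( !ProfileModel("deploy.prototxt", "model.caffemodel", outputs, 2, gieModelStream) )
	//         printf("failed to build the optimized network\n");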

	/**
	 * Prefix used for tagging printed log output.
	 */
	#define LOG_GIE "[GIE] "

	/**
	 * Logger class for GIE info/warning/error messages.
	 */
	class Logger : public nvinfer1::ILogger
	{
		void log( Severity severity, const char* msg ) override
		{
			// suppress informational messages, print warnings and errors
			if( severity != Severity::kINFO )
				printf(LOG_GIE "%s\n", msg);
		}
	} gLogger;
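
	// The logger instance is what gets handed to the TensorRT factory functions,
	// e.g. (sketch):
	//
	//     mInfer = nvinfer1::createInferRuntime(gLogger);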

	/* Member Variables */
	std::string mPrototxtPath;		// path to the network prototxt
	std::string mModelPath;			// path to the caffemodel
	std::string mMeanPath;			// path to the mean binary proto (may be empty)
	std::string mInputBlobName;		// name of the network's input blob

	nvinfer1::IRuntime*          mInfer;	// TensorRT runtime
	nvinfer1::ICudaEngine*       mEngine;	// optimized CUDA engine
	nvinfer1::IExecutionContext* mContext;	// execution context used to run the engine

	uint32_t mWidth;		// input layer width
	uint32_t mHeight;		// input layer height
	uint32_t mInputSize;	// size of the input buffer
	float*   mInputCPU;		// CPU pointer to the input buffer
	float*   mInputCUDA;	// GPU pointer to the input buffer
	bool     mEnableFP16;	// true if half-precision FP16 is supported

	nvinfer1::Dims3 mInputDims;		// dimensions of the input layer

	/**
	 * Info about an output layer, including the buffers that hold its results.
	 */
	struct outputLayer
	{
		std::string name;		// name of the output blob
		nvinfer1::Dims3 dims;	// dimensions of the output layer
		uint32_t size;			// size of the output buffer
		float* CPU;				// CPU pointer to the output buffer
		float* CUDA;			// GPU pointer to the output buffer
	};

	std::vector<outputLayer> mOutputs;
};
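
/*
 * Inference sketch (illustrative only; see imageNet and detectNet for the real
 * implementations).  After LoadNetwork() succeeds, a subclass binds the shared
 * CUDA buffers and runs the engine through the execution context:
 *
 *     // assumes the input image has already been pre-processed into mInputCUDA
 *     void* bindings[] = { mInputCUDA, mOutputs[0].CUDA };
 *     mContext->execute( 1, bindings );   // batch size of 1
 */
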
#endif