IENetwork.h
#ifndef __DEVICE_PLUGIN_H
#define __DEVICE_PLUGIN_H
#include <ie_cnn_network.h>
#include <ie_core.hpp>
#include <ie_executable_network.hpp>
#include <ie_infer_request.hpp>
#include <ie_input_info.hpp>
#include <vector>
#include "utils.h"
// #include "ie_blob.h"
// #include "ie_common.h"
// #include "ie_core.hpp"
// #include "inference_engine.hpp"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {
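// Interface for wrapping an Inference Engine network: load it on a device,
// configure input/output blobs, and run inference.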
class IIENetwork {
public:
virtual ~IIENetwork() {}
virtual bool loadNetwork() = 0;
virtual InferenceEngine::InferRequest getInferRequest() = 0;
virtual void infer() = 0;
virtual void queryState() = 0;
virtual InferenceEngine::TBlob<float>::Ptr getBlob(const std::string& outName) = 0;
virtual void prepareInput(InferenceEngine::Precision precision,
InferenceEngine::Layout layout) = 0;
virtual void prepareOutput(InferenceEngine::Precision precision,
InferenceEngine::Layout layout) = 0;
virtual void setBlob(const std::string& inName,
const InferenceEngine::Blob::Ptr& inputBlob) = 0;
};
// Base implementation of IIENetwork, meant to be abstracted for all accelerators.
class IENetwork : public IIENetwork {
private:
std::shared_ptr<InferenceEngine::CNNNetwork> mNetwork;
InferenceEngine::ExecutableNetwork mExecutableNw;
InferenceEngine::InferRequest mInferRequest;
InferenceEngine::InputsDataMap mInputInfo;
InferenceEngine::OutputsDataMap mOutputInfo;
public:
IENetwork() : IENetwork(nullptr) {}
IENetwork(std::shared_ptr<InferenceEngine::CNNNetwork> network) : mNetwork(network) {}
virtual bool loadNetwork();
void prepareInput(InferenceEngine::Precision precision, InferenceEngine::Layout layout);
void prepareOutput(InferenceEngine::Precision precision, InferenceEngine::Layout layout);
void setBlob(const std::string& inName, const InferenceEngine::Blob::Ptr& inputBlob);
InferenceEngine::TBlob<float>::Ptr getBlob(const std::string& outName);
InferenceEngine::InferRequest getInferRequest() { return mInferRequest; }
void queryState() {}
void infer();
};
} // namespace nnhal
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif  // __DEVICE_PLUGIN_H
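
The matching IENetwork.cpp is not shown here, so the sketch below is only an illustration of how the declared methods could map onto the classic InferenceEngine API. The use of InferenceEngine::Core and the "CPU" device name are assumptions for the sketch, not the repository's actual implementation.

// Hypothetical sketch only; not the repository's IENetwork.cpp.
#include "IENetwork.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

bool IENetwork::loadNetwork() {
    if (!mNetwork) return false;
    InferenceEngine::Core ie;
    // "CPU" is an assumed device name; the real plugin may target another accelerator.
    mExecutableNw = ie.LoadNetwork(*mNetwork, "CPU");
    mInferRequest = mExecutableNw.CreateInferRequest();
    return true;
}

void IENetwork::prepareInput(InferenceEngine::Precision precision,
                             InferenceEngine::Layout layout) {
    // Apply the requested precision/layout to every network input.
    mInputInfo = mNetwork->getInputsInfo();
    for (auto& input : mInputInfo) {
        input.second->setPrecision(precision);
        input.second->setLayout(layout);
    }
}

void IENetwork::prepareOutput(InferenceEngine::Precision precision,
                              InferenceEngine::Layout layout) {
    // Apply the requested precision/layout to every network output.
    mOutputInfo = mNetwork->getOutputsInfo();
    for (auto& output : mOutputInfo) {
        output.second->setPrecision(precision);
        output.second->setLayout(layout);
    }
}

void IENetwork::setBlob(const std::string& inName,
                        const InferenceEngine::Blob::Ptr& inputBlob) {
    mInferRequest.SetBlob(inName, inputBlob);
}

InferenceEngine::TBlob<float>::Ptr IENetwork::getBlob(const std::string& outName) {
    // as<> performs a checked downcast; it returns nullptr if the blob is not float.
    return InferenceEngine::as<InferenceEngine::TBlob<float>>(mInferRequest.GetBlob(outName));
}

void IENetwork::infer() { mInferRequest.Infer(); }

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

Under these assumptions, a caller would construct IENetwork with a parsed CNNNetwork, call loadNetwork(), configure I/O with prepareInput()/prepareOutput(), bind inputs via setBlob(), run infer(), and read results through getBlob().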