diff --git a/BasePreparedModel.cpp b/BasePreparedModel.cpp index b18ed0fa1..3c2513aff 100644 --- a/BasePreparedModel.cpp +++ b/BasePreparedModel.cpp @@ -21,6 +21,7 @@ #include #include #include "ExecutionBurstServer.h" +#include "Utils.h" #include "ValidateHal.h" #define DISABLE_ALL_QUANT @@ -49,7 +50,7 @@ T getScalarData(const RunTimeOperandInfo& info) { return data[0]; } -bool BasePreparedModel::initialize() { +bool BasePreparedModel::initialize(const Model& model) { ALOGV("Entering %s", __func__); return true; } @@ -64,6 +65,11 @@ static Return notify(const sp& callback, const E return callback->notify_1_2(status, outputShapes, timing); } +static Return notify(const sp& callback, const ErrorStatus& status, + const hidl_vec& outputShapes, Timing timing) { + return callback->notify_1_3(convertToV1_3(status), outputShapes, timing); +} + static void floatToUint8(const float* src, uint8_t* dst, size_t size) { for (uint32_t i = 0; i < size; ++i) { dst[i] = static_cast(src[i]); @@ -99,7 +105,7 @@ Return executeBase(const Request& request, MeasureTiming measure, ALOGE("invalid callback passed to execute"); return ErrorStatus::INVALID_ARGUMENT; } - if (!validateRequest(request, preparedModel->getModelInfo()->getModel())) { + if (!validateRequest(request, convertToV1_2(preparedModel->getModelInfo()->getModel()))) { notify(callback, ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); return ErrorStatus::INVALID_ARGUMENT; } @@ -337,8 +343,8 @@ static std::tuple, Timing> executeSynch .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; return {ErrorStatus::NONE, modelInfo->getOutputShapes(), timing}; } - return {ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming}; ALOGV("Exiting %s", __func__); + return {ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming}; } Return BasePreparedModel::executeSynchronously(const Request& request, MeasureTiming measure, @@ -347,7 +353,7 @@ Return BasePreparedModel::executeSynchronously(const Request& request, Mea 
time_point driverStart; if (measure == MeasureTiming::YES) driverStart = now(); - if (!validateRequest(request, mModelInfo->getModel())) { + if (!validateRequest(request, convertToV1_2(mModelInfo->getModel()))) { cb(ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); return Void(); } @@ -358,6 +364,25 @@ Return BasePreparedModel::executeSynchronously(const Request& request, Mea return Void(); } +Return BasePreparedModel::executeSynchronously_1_3( + const V1_3::Request& request, V1_2::MeasureTiming measure, + const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) { + ALOGV("Entering %s", __func__); + time_point driverStart; + if (measure == MeasureTiming::YES) driverStart = now(); + + if (!validateRequest(convertToV1_0(request), convertToV1_2(mModelInfo->getModel()))) { + cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); + return Void(); + } + auto [status, outputShapes, timing] = + executeSynchronouslyBase(convertToV1_0(request), measure, this, driverStart); + cb(convertToV1_3(status), std::move(outputShapes), timing); + ALOGV("Exiting %s", __func__); + return Void(); +} + Return BasePreparedModel::configureExecutionBurst( const sp& callback, const MQDescriptorSync& requestChannel, @@ -373,6 +398,7 @@ Return BasePreparedModel::configureExecutionBurst( cb(ErrorStatus::NONE, burst); ALOGI("%s burst created", __func__); } + ALOGV("Exiting %s", __func__); return Void(); } @@ -388,6 +414,26 @@ Return BasePreparedModel::execute_1_2(const Request& request, Measu return executeBase(request, measure, this, callback); } +Return BasePreparedModel::execute_1_3( + const V1_3::Request& request, V1_2::MeasureTiming measure, + const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + const sp& callback) { + ALOGV("Entering %s", __func__); + return convertToV1_3(executeBase(convertToV1_0(request), measure, this, callback)); +} + +Return 
BasePreparedModel::executeFenced( + const V1_3::Request& request, const hidl_vec& waitFor, V1_2::MeasureTiming measure, + const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) { + ALOGV("Entering %s", __func__); + // TODO: Add support + ALOGV("Exiting %s", __func__); + return Void(); +} + } // namespace nnhal } // namespace neuralnetworks } // namespace hardware diff --git a/BasePreparedModel.h b/BasePreparedModel.h old mode 100644 new mode 100755 index aa2cf6f13..4ee0eabfe --- a/BasePreparedModel.h +++ b/BasePreparedModel.h @@ -19,6 +19,10 @@ #include #include +#include +#include +#include +#include #include #include #include @@ -48,7 +52,7 @@ template using vec = std::vector; typedef uint8_t* memory; -class BasePreparedModel : public V1_2::IPreparedModel { +class BasePreparedModel : public V1_3::IPreparedModel { public: BasePreparedModel(const Model& model) : mTargetDevice(IntelDeviceType::CPU) { mModelInfo = std::make_shared(model); @@ -63,15 +67,28 @@ class BasePreparedModel : public V1_2::IPreparedModel { const sp& callback) override; Return execute_1_2(const Request& request, MeasureTiming measure, const sp& callback) override; - Return executeSynchronously(const Request& request, MeasureTiming measure, + Return execute_1_3(const V1_3::Request& request, V1_2::MeasureTiming measure, + const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + const sp& callback) override; + Return executeSynchronously(const Request& request, V1_2::MeasureTiming measure, executeSynchronously_cb cb) override; + Return executeSynchronously_1_3(const V1_3::Request& request, V1_2::MeasureTiming measure, + const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + executeSynchronously_1_3_cb cb) override; Return configureExecutionBurst( const sp& callback, const MQDescriptorSync& 
requestChannel, const MQDescriptorSync& resultChannel, configureExecutionBurst_cb cb) override; + Return executeFenced(const V1_3::Request& request, const hidl_vec& waitFor, + V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline, + const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + const V1_3::OptionalTimeoutDuration& duration, + executeFenced_cb cb) override; - virtual bool initialize(); + virtual bool initialize(const Model& model); std::shared_ptr getModelInfo() { return mModelInfo; } diff --git a/Driver.cpp b/Driver.cpp index 9eb80e75c..a20590c7c 100644 --- a/Driver.cpp +++ b/Driver.cpp @@ -15,6 +15,7 @@ */ #include "Driver.h" +#include #include #include @@ -33,17 +34,18 @@ namespace nnhal { using namespace android::nn; -hidl_vec nonExtensionOperandPerformance(PerformanceInfo perf) { - using OpPerf = Capabilities::OperandPerformance; +hidl_vec nonExtensionOperandPerformanceV1_2( + V1_0::PerformanceInfo perf) { + using OpPerf = V1_2::Capabilities::OperandPerformance; // Note: range presents enumerators in declaration order, not in numerical order. - static constexpr ::android::hardware::hidl_enum_range kOperandTypeRange; + static constexpr ::android::hardware::hidl_enum_range kOperandTypeRange; hidl_vec ret(kOperandTypeRange.end() - kOperandTypeRange.begin()); std::transform(kOperandTypeRange.begin(), kOperandTypeRange.end(), ret.begin(), - [perf](OperandType type) { - return Capabilities::OperandPerformance{type, perf}; + [perf](V1_2::OperandType type) { + return V1_2::Capabilities::OperandPerformance{(type), perf}; }); std::sort(ret.begin(), ret.end(), [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; }); @@ -51,38 +53,110 @@ hidl_vec nonExtensionOperandPerformance(Perfor return ret; } +hidl_vec nonExtensionOperandPerformance( + V1_0::PerformanceInfo perf) { + using OpPerf = Capabilities::OperandPerformance; + + // Note: range presents enumerators in declaration order, not in numerical order. 
+ static constexpr hidl_enum_range kOperandTypeRange; + + std::vector ret; + ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin()); + for (OperandType type : kOperandTypeRange) { + if (static_cast(type) != OperandType::SUBGRAPH) { + ret.push_back(OpPerf{type, perf}); + } + } + std::sort(ret.begin(), ret.end(), + [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; }); + hidl_vec ret1; + ret1 = ret; + + return ret1; +} + +static sp ModelFactory(IntelDeviceType deviceType, const Model& model) { + sp driverPreparedModel = NULL; + + if (deviceType == IntelDeviceType::CPU) + driverPreparedModel = new CpuPreparedModel(model); + else if (deviceType == IntelDeviceType::GNA) + driverPreparedModel = new GnaPreparedModel(model); + return driverPreparedModel; +} // For HAL-1.0 version Return Driver::getCapabilities(getCapabilities_cb cb) { ALOGV("Entering %s", __func__); - - return Void(); + return getCapabilities_1_3( + [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) { + cb(convertToV1_0(error), convertToV1_0(capabilities)); + }); } Return Driver::getSupportedOperations(const V1_0_Model& model, getSupportedOperations_cb cb) { ALOGV("Entering %s", __func__); - - return Void(); + if (!validateModel(model)) { + ALOGE("NNERR: %s failed at line no: %d\n", __func__, __LINE__); + cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); + return Void(); + } + return getSupportedOperations_1_3( + convertToV1_3(model), [&](V1_3::ErrorStatus status, const hidl_vec& supported) { + cb(convertToV1_0(status), supported); + }); } Return Driver::prepareModel(const V1_0_Model& model, const sp& callback) { ALOGV("Entering %s", __func__); + if (callback.get() == nullptr) { + ALOGE("invalid callback passed to prepareModel"); + return ErrorStatus::INVALID_ARGUMENT; + } + if (!validateModel(model)) { + callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); + return ErrorStatus::INVALID_ARGUMENT; + } + sp driverPreparedModel = ModelFactory(mDeviceType, 
convertToV1_3(model)); + if (driverPreparedModel == NULL) { + ALOGE("failed to create preparedmodel"); + return ErrorStatus::INVALID_ARGUMENT; + } + for (auto opn : model.operations) dumpOperation(opn); + + if (!driverPreparedModel->initialize(convertToV1_3(model))) { + ALOGE("failed to initialize preparedmodel"); + callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); + return ErrorStatus::NONE; + } + + callback->notify(ErrorStatus::NONE, driverPreparedModel); + ALOGV("Exiting %s", __func__); return ErrorStatus::NONE; } // For HAL-1.1 version Return Driver::getCapabilities_1_1(getCapabilities_1_1_cb cb) { ALOGV("Entering %s", __func__); - - return Void(); + return getCapabilities_1_3( + [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) { + cb(convertToV1_0(error), convertToV1_1(capabilities)); + }); } Return Driver::getSupportedOperations_1_1(const V1_1_Model& model, getSupportedOperations_1_1_cb cb) { ALOGV("Entering %s", __func__); - - return Void(); + if (!validateModel(model)) { + ALOGE("NNERR: %s failed at line no: %d\n", __func__, __LINE__); + cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); + return Void(); + } + return getSupportedOperations_1_3( + convertToV1_3(model), [&](V1_3::ErrorStatus status, const hidl_vec& supported) { + cb(convertToV1_0(status), supported); + }); } Return Driver::prepareModel_1_1(const V1_1_Model& model, @@ -90,6 +164,30 @@ Return Driver::prepareModel_1_1(const V1_1_Model& model, const sp& callback) { ALOGV("Entering %s", __func__); + if (callback.get() == nullptr) { + ALOGE("invalid callback passed to prepareModel"); + return ErrorStatus::INVALID_ARGUMENT; + } + if (!validateModel(model) || !validateExecutionPreference(preference)) { + callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); + return ErrorStatus::INVALID_ARGUMENT; + } + + sp driverPreparedModel = ModelFactory(mDeviceType, convertToV1_3(model)); + if (driverPreparedModel == NULL) { + ALOGE("failed to create preparedmodel"); + return 
ErrorStatus::INVALID_ARGUMENT; + } + for (auto opn : model.operations) dumpOperation(opn); + + if (!driverPreparedModel->initialize(convertToV1_3(model))) { + ALOGE("failed to initialize preparedmodel"); + callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); + return ErrorStatus::NONE; + } + + callback->notify(ErrorStatus::NONE, driverPreparedModel); + ALOGV("Exiting %s", __func__); return ErrorStatus::NONE; } @@ -99,72 +197,49 @@ Return Driver::getCapabilities_1_2(getCapabilities_1_2_cb cb) { if (mDeviceType == IntelDeviceType::CPU) { ALOGI("CPU driver getCapabilities()"); // Setting operandPerformance value to base value for all operand types - Capabilities capabilities = { + V1_2::Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.9f, .powerUsage = 0.9f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.9f, .powerUsage = 0.9f}, - .operandPerformance = nonExtensionOperandPerformance({0.9f, 0.9f})}; + .operandPerformance = nonExtensionOperandPerformanceV1_2({0.9f, 0.9f})}; ALOGI("CPU MKLDNN driver Capabilities .execTime = 0.9f, .powerUsage = 0.9f"); cb(ErrorStatus::NONE, capabilities); } else if (mDeviceType == IntelDeviceType::GPU) { ALOGI("GPU driver getCapabilities()"); - Capabilities capabilities = { + V1_2::Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.95f, .powerUsage = 0.85f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.95f, .powerUsage = 0.85f}, - .operandPerformance = nonExtensionOperandPerformance({0.95f, 0.95f})}; + .operandPerformance = nonExtensionOperandPerformanceV1_2({0.95f, 0.95f})}; ALOGI("GPU clDNN driver Capabilities .execTime = 0.95f, .powerUsage = 0.85f"); cb(ErrorStatus::NONE, capabilities); } else if (mDeviceType == IntelDeviceType::GNA) { ALOGI("GPU driver getCapabilities()"); - Capabilities capabilities = { + V1_2::Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.8f, .powerUsage = 0.8f}, 
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.8f, .powerUsage = 0.8f}, - .operandPerformance = nonExtensionOperandPerformance({0.8f, 0.8f})}; + .operandPerformance = nonExtensionOperandPerformanceV1_2({0.8f, 0.8f})}; ALOGI("GPU clDNN driver Capabilities .execTime = 0.95f, .powerUsage = 0.85f"); cb(ErrorStatus::NONE, capabilities); } else if (mDeviceType == IntelDeviceType::VPU) { ALOGI("Myriad driver getCapabilities()"); - Capabilities capabilities = { + V1_2::Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.1f, .powerUsage = 1.1f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.1f, .powerUsage = 1.1f}, - .operandPerformance = nonExtensionOperandPerformance({1.1f, 1.1f})}; + .operandPerformance = nonExtensionOperandPerformanceV1_2({1.1f, 1.1f})}; - ALOGI("Myriad driver Capabilities .execTime = 1.1f, .powerUsage = 1.1f"); + ALOGI("Driver Capabilities .execTime = 1.1f, .powerUsage = 1.1f"); cb(ErrorStatus::NONE, capabilities); } else { - Capabilities capabilities; + V1_2::Capabilities capabilities; cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities); } ALOGV("Exiting %s", __func__); return Void(); } -Return Driver::getStatus() { - ALOGI("DeviceStatus::AVAILABLE"); - return DeviceStatus::AVAILABLE; -} - -Return Driver::getVersionString(getVersionString_cb cb) { - ALOGV("Entering %s", __func__); - cb(ErrorStatus::NONE, "intel_nn_hal"); - return Void(); -} - -Return Driver::getType(getType_cb cb) { - ALOGV("Entering %s", __func__); - cb(ErrorStatus::NONE, V1_2::DeviceType::CPU); - return Void(); -} - -Return Driver::getSupportedExtensions(getSupportedExtensions_cb cb) { - ALOGV("Entering %s", __func__); - cb(ErrorStatus::NONE, {/* No extensions. 
*/}); - return Void(); -} - -Return Driver::getSupportedOperations_1_2(const Model& model, +Return Driver::getSupportedOperations_1_2(const V1_2_Model& model, getSupportedOperations_1_2_cb cb) { ALOGV("Entering %s", __func__); @@ -177,7 +252,7 @@ Return Driver::getSupportedOperations_1_2(const Model& model, return Void(); } - auto modelInfo = std::make_shared(model); + auto modelInfo = std::make_shared(convertToV1_3(model)); NgraphNetworkCreator ngraphCreatorInst(modelInfo, mDeviceType); ngraphCreatorInst.getSupportedOperations(supported); @@ -186,20 +261,10 @@ Return Driver::getSupportedOperations_1_2(const Model& model, return Void(); } -static sp ModelFactory(IntelDeviceType deviceType, const Model& model) { - sp driverPreparedModel = NULL; - - if (deviceType == IntelDeviceType::CPU) - driverPreparedModel = new CpuPreparedModel(model); - else if (deviceType == IntelDeviceType::GNA) - driverPreparedModel = new GnaPreparedModel(model); - return driverPreparedModel; -} - -Return Driver::prepareModel_1_2(const Model& model, ExecutionPreference preference, - const hidl_vec& modelCache, - const hidl_vec& dataCache, - const HidlToken& token, +Return Driver::prepareModel_1_2(const V1_2_Model& model, + ExecutionPreference preference, + const hidl_vec&, + const hidl_vec&, const HidlToken&, const sp& callback) { ALOGV("Entering %s", __func__); @@ -213,14 +278,14 @@ Return Driver::prepareModel_1_2(const Model& model, ExecutionPrefer } // TODO: make asynchronous later - sp driverPreparedModel = ModelFactory(mDeviceType, model); + sp driverPreparedModel = ModelFactory(mDeviceType, convertToV1_3(model)); if (driverPreparedModel == NULL) { ALOGE("failed to create preparedmodel"); return ErrorStatus::INVALID_ARGUMENT; } for (auto opn : model.operations) dumpOperation(opn); - if (!driverPreparedModel->initialize()) { + if (!driverPreparedModel->initialize(convertToV1_3(model))) { ALOGE("failed to initialize preparedmodel"); callback->notify(ErrorStatus::INVALID_ARGUMENT, 
nullptr); return ErrorStatus::NONE; @@ -246,6 +311,172 @@ Return Driver::prepareModelFromCache( return ErrorStatus::GENERAL_FAILURE; } +// For HAL-1.3 version +Return Driver::getCapabilities_1_3(getCapabilities_1_3_cb cb) { + ALOGV("Entering %s", __func__); + if (mDeviceType == IntelDeviceType::CPU) { + ALOGI("CPU driver getCapabilities()"); + // Setting operandPerformance value to base value for all operand types + Capabilities capabilities = { + .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.9f, .powerUsage = 0.9f}, + .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.9f, .powerUsage = 0.9f}, + .operandPerformance = nonExtensionOperandPerformance({0.9f, 0.9f}), + .ifPerformance = {.execTime = 0.9f, .powerUsage = 0.9f}, + .whilePerformance = {.execTime = 0.9f, .powerUsage = 0.9f}}; + + ALOGI("CPU MKLDNN driver Capabilities .execTime = 0.9f, .powerUsage = 0.9f"); + cb(V1_3::ErrorStatus::NONE, capabilities); + } else if (mDeviceType == IntelDeviceType::GPU) { + ALOGI("GPU driver getCapabilities()"); + V1_3::Capabilities capabilities = { + .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.95f, .powerUsage = 0.85f}, + .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.95f, .powerUsage = 0.85f}, + .operandPerformance = nonExtensionOperandPerformance({0.95f, 0.95f}), + .ifPerformance = {.execTime = 0.95f, .powerUsage = 0.85f}, + .whilePerformance = {.execTime = 0.95f, .powerUsage = 0.85f}}; + + ALOGI("GPU clDNN driver Capabilities .execTime = 0.95f, .powerUsage = 0.85f"); + cb(V1_3::ErrorStatus::NONE, capabilities); + } else if (mDeviceType == IntelDeviceType::GNA) { + ALOGI("GNA driver getCapabilities()"); + Capabilities capabilities = { + .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.8f, .powerUsage = 0.8f}, + .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.8f, .powerUsage = 0.8f}, + .operandPerformance = nonExtensionOperandPerformance({0.8f, 0.8f}), + .ifPerformance = {.execTime = 0.8f, .powerUsage = 0.8f}, 
+ .whilePerformance = {.execTime = 0.8f, .powerUsage = 0.8f}}; + + ALOGI("GNA driver Capabilities .execTime = 0.8f, .powerUsage = 0.8f"); + cb(V1_3::ErrorStatus::NONE, capabilities); + } else if (mDeviceType == IntelDeviceType::VPU) { + ALOGI("Driver getCapabilities()"); + Capabilities capabilities = { + .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.1f, .powerUsage = 1.1f}, + .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.1f, .powerUsage = 1.1f}, + .operandPerformance = nonExtensionOperandPerformance({1.1f, 1.1f}), + .ifPerformance = {.execTime = 1.1f, .powerUsage = 1.1f}, + .whilePerformance = {.execTime = 1.1f, .powerUsage = 1.1f}}; + + ALOGI("Driver Capabilities .execTime = 1.1f, .powerUsage = 1.1f"); + cb(V1_3::ErrorStatus::NONE, capabilities); + } else { + Capabilities capabilities; + cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, capabilities); + } + ALOGV("Exiting %s", __func__); + return Void(); +} + +Return Driver::getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) { + ALOGV("Entering %s", __func__); + + int count = model.main.operations.size(); + std::vector supported(count, true); + + if (!validateModel(model)) { + ALOGE("NNERR: %s failed at line no: %d\n", __func__, __LINE__); + cb(V1_3::ErrorStatus::INVALID_ARGUMENT, supported); + return Void(); + } + + auto modelInfo = std::make_shared(model); + NgraphNetworkCreator ngraphCreatorInst(modelInfo, mDeviceType); + ngraphCreatorInst.getSupportedOperations(supported); + + cb(V1_3::ErrorStatus::NONE, supported); + ALOGV("Exiting %s", __func__); + return Void(); +} + +Return Driver::prepareModel_1_3( + const Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, + const V1_3::OptionalTimePoint&, + const android::hardware::hidl_vec&, + const android::hardware::hidl_vec&, const HidlToken&, + const android::sp& cb) { + ALOGV("Entering %s", __func__); + + if (cb.get() == nullptr) { + ALOGI("invalid callback passed to prepareModel"); + 
return V1_3::ErrorStatus::INVALID_ARGUMENT; + } + + if (!validateModel(model) || !validateExecutionPreference(preference) || + !validatePriority(priority)) { + cb->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); + ALOGI("validatemodel failed"); + return V1_3::ErrorStatus::INVALID_ARGUMENT; + } + + // TODO: make asynchronous later + sp driverPreparedModel = ModelFactory(mDeviceType, model); + if (driverPreparedModel == nullptr || !driverPreparedModel->initialize(model)) { + ALOGI("Failed to initialize prepared model"); + cb->notify_1_3(convertToV1_3(ErrorStatus::INVALID_ARGUMENT), nullptr); + return V1_3::ErrorStatus::NONE; + } + cb->notify_1_3((V1_3::ErrorStatus::NONE), driverPreparedModel); + ALOGV("Exiting %s", __func__); + + return convertToV1_3(ErrorStatus::NONE); +} + +Return Driver::prepareModelFromCache_1_3( + const V1_3::OptionalTimePoint& timing, + const android::hardware::hidl_vec&, + const android::hardware::hidl_vec&, const HidlToken&, + const sp& callback) { + ALOGV("V1_3::Driver::prepareModelFromCache_1_3()"); + + if (callback.get() == nullptr) { + ALOGI("invalid callback passed to prepareModel"); + return V1_3::ErrorStatus::INVALID_ARGUMENT; + } + + const auto ret = callback->notify_1_3(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr); + if (!ret.isOk()) { + ALOGE("Error when calling IPreparedModelCallback::notify_1_3: %s", + ret.description().c_str()); + } + ALOGV("Exiting %s", __func__); + return V1_3::ErrorStatus::GENERAL_FAILURE; +} + +Return Driver::allocate(const V1_3::BufferDesc& desc, + const hidl_vec>& preparedModels, + const hidl_vec& inputRoles, + const hidl_vec& outputRoles, + V1_3::IDevice::allocate_cb cb) { + ALOGV("Entering %s", __func__); + cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0); + ALOGV("Exiting %s", __func__); + return Void(); +} + +Return Driver::getStatus() { + ALOGI("DeviceStatus::AVAILABLE"); + return DeviceStatus::AVAILABLE; +} + +Return Driver::getVersionString(getVersionString_cb cb) { + ALOGV("Entering %s", __func__); + 
cb(ErrorStatus::NONE, "intel_nn_hal"); + return Void(); +} + +Return Driver::getType(getType_cb cb) { + ALOGV("Entering %s", __func__); + cb(ErrorStatus::NONE, V1_2::DeviceType::CPU); + return Void(); +} + +Return Driver::getSupportedExtensions(getSupportedExtensions_cb cb) { + ALOGV("Entering %s", __func__); + cb(ErrorStatus::NONE, {/* No extensions. */}); + return Void(); +} + } // namespace nnhal } // namespace neuralnetworks } // namespace hardware diff --git a/Driver.h b/Driver.h old mode 100644 new mode 100755 index 8d31cbbdd..8f1f26286 --- a/Driver.h +++ b/Driver.h @@ -28,8 +28,15 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include +#include "Utils.h" namespace android { namespace hardware { @@ -43,6 +50,8 @@ using namespace ::android::hardware::neuralnetworks::V1_0; using V1_0_Model = ::android::hardware::neuralnetworks::V1_0::Model; using V1_0_Operation = ::android::hardware::neuralnetworks::V1_0::Operation; using V1_0_Capabilities = ::android::hardware::neuralnetworks::V1_0::Capabilities; +using Request = ::android::hardware::neuralnetworks::V1_0::Request; +using ErrorStatus = ::android::hardware::neuralnetworks::V1_0::ErrorStatus; // For HAL-1.1 version using namespace ::android::hardware::neuralnetworks::V1_1; @@ -52,12 +61,22 @@ using V1_1_Capabilities = ::android::hardware::neuralnetworks::V1_1::Capabilitie // For HAL-1.2 version using namespace ::android::hardware::neuralnetworks::V1_2; -using Model = ::android::hardware::neuralnetworks::V1_2::Model; -using Operand = ::android::hardware::neuralnetworks::V1_2::Operand; -using Operation = ::android::hardware::neuralnetworks::V1_2::Operation; -using OperationType = ::android::hardware::neuralnetworks::V1_2::OperationType; -using OperandType = ::android::hardware::neuralnetworks::V1_2::OperandType; -using Capabilities = ::android::hardware::neuralnetworks::V1_2::Capabilities; +using V1_2_Model = ::android::hardware::neuralnetworks::V1_2::Model; +using 
V1_2_Operand = ::android::hardware::neuralnetworks::V1_2::Operand; +using V1_2_Operation = ::android::hardware::neuralnetworks::V1_2::Operation; +using V1_2_OperationType = ::android::hardware::neuralnetworks::V1_2::OperationType; +using V1_2_OperandType = ::android::hardware::neuralnetworks::V1_2::OperandType; +using V1_2_Capabilities = ::android::hardware::neuralnetworks::V1_2::Capabilities; + +// For HAL-1.3 version +using namespace ::android::hardware::neuralnetworks::V1_3; +using Model = ::android::hardware::neuralnetworks::V1_3::Model; +using Operand = ::android::hardware::neuralnetworks::V1_3::Operand; +using OperationType = ::android::hardware::neuralnetworks::V1_3::OperationType; +using OperandType = ::android::hardware::neuralnetworks::V1_3::OperandType; +using OperandLifeTime = ::android::hardware::neuralnetworks::V1_3::OperandLifeTime; +using Operation = ::android::hardware::neuralnetworks::V1_3::Operation; +using Capabilities = ::android::hardware::neuralnetworks::V1_3::Capabilities; using ::android::hardware::MQDescriptorSync; using HidlToken = android::hardware::hidl_array; @@ -67,7 +86,7 @@ using HidlToken = android::hardware::hidl_array; // // Since these drivers simulate hardware, they must run the computations // on the CPU. An actual driver would not do that. 
-class Driver : public ::android::hardware::neuralnetworks::V1_2::IDevice { +class Driver : public ::android::hardware::neuralnetworks::V1_3::IDevice { public: Driver() {} Driver(IntelDeviceType device) : mDeviceType(device) {} @@ -90,16 +109,37 @@ class Driver : public ::android::hardware::neuralnetworks::V1_2::IDevice { // For HAL-1.2 version Return getCapabilities_1_2(getCapabilities_1_2_cb cb) override; - Return getSupportedOperations_1_2(const Model& model, + Return getSupportedOperations_1_2(const V1_2_Model& model, getSupportedOperations_1_2_cb cb) override; - Return prepareModel_1_2(const Model& model, ExecutionPreference preference, - const hidl_vec& modelCache, - const hidl_vec& dataCache, - const HidlToken& token, - const sp& callback) override; + Return prepareModel_1_2( + const V1_2_Model& model, ExecutionPreference preference, + const hidl_vec& modelCache, const hidl_vec& dataCache, + const HidlToken& token, const sp& callback) override; Return prepareModelFromCache( const hidl_vec& modelCache, const hidl_vec& dataCache, const HidlToken& token, const sp& callback) override; + + // For HAL-1.3 version + Return getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + Return getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) override; + Return prepareModel_1_3( + const Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, + const V1_3::OptionalTimePoint&, + const android::hardware::hidl_vec&, + const android::hardware::hidl_vec&, const HidlToken&, + const android::sp& cb) override; + Return prepareModelFromCache_1_3( + const V1_3::OptionalTimePoint&, + const android::hardware::hidl_vec&, + const android::hardware::hidl_vec&, const HidlToken&, + const sp& callback) override; + Return allocate(const V1_3::BufferDesc& desc, + const hidl_vec>& preparedModels, + const hidl_vec& inputRoles, + const hidl_vec& outputRoles, + V1_3::IDevice::allocate_cb cb) override; + Return getStatus() override; Return 
getVersionString(getVersionString_cb cb) override; Return getType(getType_cb cb) override; diff --git a/ModelManager.cpp b/ModelManager.cpp old mode 100644 new mode 100755 index b2eda39e6..3ec23e1b6 --- a/ModelManager.cpp +++ b/ModelManager.cpp @@ -26,19 +26,19 @@ bool NnapiModelInfo::updateOutputshapes(size_t outputIndex, std::vector& bool NnapiModelInfo::initializeRunTimeOperandInfo() { // initialize runtime operand info from model. - const size_t count = mModel.operands.size(); + const size_t count = mModel.main.operands.size(); ALOGD("Operand size = %zu\n", count); if (!count) { ALOGE("NNERR:Operand Count is 0"); return false; } mOperands.resize(count); - mOutputShapes.resize(mModel.outputIndexes.size()); + mOutputShapes.resize(mModel.main.outputIndexes.size()); // Start by setting the runtime info to what's in the model. for (size_t i = 0; i < count; i++) { - const Operand& from = mModel.operands[i]; - dumpOperand(i, mModel); + const Operand& from = mModel.main.operands[i]; + dumpOperand(i, mModel.main); RunTimeOperandInfo& to = mOperands[i]; to.dimensions.resize(from.dimensions.size()); for (size_t j = 0; j < from.dimensions.size(); j++) { @@ -95,8 +95,8 @@ bool NnapiModelInfo::initializeRunTimeOperandInfo() { to.numberOfUsesLeft = 0; break; } - case OperandLifeTime::MODEL_INPUT: - case OperandLifeTime::MODEL_OUTPUT: + case OperandLifeTime::SUBGRAPH_INPUT: + case OperandLifeTime::SUBGRAPH_OUTPUT: case OperandLifeTime::NO_VALUE: to.buffer = nullptr; to.numberOfUsesLeft = 0; @@ -107,8 +107,8 @@ bool NnapiModelInfo::initializeRunTimeOperandInfo() { } } - for (uint32_t i = 0; i < mModel.outputIndexes.size(); i++) { - const uint32_t operandIndex = mModel.outputIndexes[i]; + for (uint32_t i = 0; i < mModel.main.outputIndexes.size(); i++) { + const uint32_t operandIndex = mModel.main.outputIndexes[i]; const RunTimeOperandInfo& from = mOperands[operandIndex]; mOutputShapes[i].dimensions = from.dimensions; mOutputShapes[i].isSufficient = true; @@ -131,7 +131,7 @@ T 
NnapiModelInfo::GetConstFromBuffer(const uint8_t* buf, uint32_t len) { const uint8_t* NnapiModelInfo::GetOperandMemory(int index, uint32_t& lenOut) { ALOGV("%s", __func__); - const auto op = mModel.operands[index]; + const auto op = mModel.main.operands[index]; lenOut = op.location.length; if (op.lifetime == OperandLifeTime::CONSTANT_COPY) { ALOGV("operand lifetime OperandLifeTime::CONSTANT_COPY"); @@ -146,8 +146,8 @@ const uint8_t* NnapiModelInfo::GetOperandMemory(int index, uint32_t& lenOut) { auto& r = mPoolInfos[poolIndex]; return (const_cast(r.buffer + op.location.offset)); } else if (op.lifetime == OperandLifeTime::TEMPORARY_VARIABLE || - op.lifetime == OperandLifeTime::MODEL_INPUT || - op.lifetime == OperandLifeTime::MODEL_OUTPUT || + op.lifetime == OperandLifeTime::SUBGRAPH_INPUT || + op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT || op.lifetime == OperandLifeTime::NO_VALUE) { // ALOGD( // "operand lifetime " @@ -164,7 +164,7 @@ const uint8_t* NnapiModelInfo::GetOperandMemory(int index, uint32_t& lenOut) { Blob::Ptr NnapiModelInfo::GetInOutOperandAsBlob(RunTimeOperandInfo& op, const uint8_t* buf, uint32_t& len) { if (op.type == OperandType::TENSOR_FLOAT32 || op.type == OperandType::FLOAT32) { - if (op.lifetime == OperandLifeTime::MODEL_INPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_INPUT) { ALOGD("Create input blob !!!!"); vec order; InferenceEngine::Layout layout; @@ -197,7 +197,7 @@ Blob::Ptr NnapiModelInfo::GetInOutOperandAsBlob(RunTimeOperandInfo& op, const ui InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, inputOpDims, layout); if (buf == nullptr) { - ALOGD("MODEL_INPUT buf is NULL !!!!!!!!!!!!!!!"); + ALOGD("SUBGRAPH_INPUT buf is NULL !!!!!!!!!!!!!!!"); InferenceEngine::TBlob::Ptr blob = std::make_shared>(td); blob->allocate(); @@ -207,7 +207,7 @@ Blob::Ptr NnapiModelInfo::GetInOutOperandAsBlob(RunTimeOperandInfo& op, const ui std::make_shared>(td, (float*)buf, len); return blob; } - } else if (op.lifetime == 
OperandLifeTime::MODEL_OUTPUT) { + } else if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { ALOGD("Create output blob !!!!"); vec order; InferenceEngine::Layout layout; @@ -226,7 +226,7 @@ Blob::Ptr NnapiModelInfo::GetInOutOperandAsBlob(RunTimeOperandInfo& op, const ui InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, toDims(op.dimensions), layout); // nhwc if (buf == nullptr) { - ALOGD("MODEL_OUTPUT buf is NULL !!!!!!!!!!!!!!!"); + ALOGD("SUBGRAPH_OUTPUT buf is NULL !!!!!!!!!!!!!!!"); InferenceEngine::TBlob::Ptr blob = std::make_shared>(td); blob->allocate(); @@ -306,7 +306,7 @@ Blob::Ptr NnapiModelInfo::GetInOutOperandAsBlob(RunTimeOperandInfo& op, const ui } IRBlob::Ptr NnapiModelInfo::GetConstOperandAsTensor(int operand_idx, int operation_idx) { - const auto op = mModel.operands[operand_idx]; + const auto op = mModel.main.operands[operand_idx]; uint32_t len; const uint8_t* buf = GetOperandMemory(operand_idx, len); @@ -392,7 +392,7 @@ IRBlob::Ptr NnapiModelInfo::GetConstOperandAsTensor(int operand_idx, int operati // Redundant.. 
Remove the code IRBlob::Ptr NnapiModelInfo::GetConstWeightsOperandAsTensor(uint32_t index) { - const auto op = mModel.operands[index]; + const auto op = mModel.main.operands[index]; uint32_t len; const uint8_t* buf = GetOperandMemory(index, len); ALOGD("NnapiModelInfo:: Operand: index: %d, len: %d, buf: %p", index, len, buf); @@ -494,7 +494,7 @@ bool NnapiModelInfo::setRunTimePoolInfosFromHidlMemories(const hidl_vec(buf, len); } - const auto& getOperations() { return mModel.operations; } + const auto& getOperations() { return mModel.main.operations; } const auto& getOperationOutput(int operationIndex, uint32_t outputIndex) { - return mModel.operations[operationIndex].outputs[outputIndex]; + return mModel.main.operations[operationIndex].outputs[outputIndex]; } const auto& getOperationInput(int operationIndex, uint32_t inputIndex) { - return mModel.operations[operationIndex].inputs[inputIndex]; + return mModel.main.operations[operationIndex].inputs[inputIndex]; } size_t getOperationInputsSize(int operationIndex) { - return mModel.operations[operationIndex].inputs.size(); + return mModel.main.operations[operationIndex].inputs.size(); } size_t getOperationOutputsSize(int operationIndex) { - return mModel.operations[operationIndex].outputs.size(); + return mModel.main.operations[operationIndex].outputs.size(); } - size_t getOperationsSize() { return mModel.operations.size(); } + size_t getOperationsSize() { return mModel.main.operations.size(); } - const auto& getOperationType(int index) { return mModel.operations[index].type; } + const auto& getOperationType(int index) { return mModel.main.operations[index].type; } - const Operand& getOperand(int index) { return mModel.operands[index]; } + const Operand& getOperand(int index) { return mModel.main.operands[index]; } - size_t getOperandsSize() { return mModel.operands.size(); } + size_t getOperandsSize() { return mModel.main.operands.size(); } float getOperandScale(int index) { auto operand = getOperand(index); @@ 
-105,13 +105,13 @@ class NnapiModelInfo { } RunTimeOperandInfo& getRuntimeOperand(uint32_t index) { - return mOperands[mModel.inputIndexes[index]]; + return mOperands[mModel.main.inputIndexes[index]]; } bool isConstOperand(int index) { ALOGD("---------------------------------------------"); ALOGD("Operand index: %d", index); - const auto op = mModel.operands[index]; + const auto op = mModel.main.operands[index]; ALOGD(" %s", toString(op).c_str()); bool ret = (op.lifetime == OperandLifeTime::CONSTANT_COPY || op.lifetime == OperandLifeTime::CONSTANT_REFERENCE); @@ -127,11 +127,11 @@ class NnapiModelInfo { template T ParseOperationInput(int operationIndex, uint32_t index) { - uint32_t inputIndex = mModel.operations[operationIndex].inputs[index]; - const auto operand = mModel.operands[inputIndex]; + uint32_t inputIndex = mModel.main.operations[operationIndex].inputs[index]; + const auto operand = mModel.main.operands[inputIndex]; const auto value = GetConstOperand(inputIndex); ALOGV("Operation input index: %d, operand index: %d", index, inputIndex); - ALOGV("Operation: %s", toString(mModel.operations[operationIndex]).c_str()); + ALOGV("Operation: %s", toString(mModel.main.operations[operationIndex]).c_str()); printHelper::print(value, toString(operand).c_str()); return value; diff --git a/cpu/CpuPreparedModel.cpp b/cpu/CpuPreparedModel.cpp old mode 100644 new mode 100755 index 70bdeb9f4..da9eb0961 --- a/cpu/CpuPreparedModel.cpp +++ b/cpu/CpuPreparedModel.cpp @@ -24,7 +24,7 @@ void CpuPreparedModel::deinitialize() { ALOGV("Exiting %s", __func__); } -bool CpuPreparedModel::initialize() { +bool CpuPreparedModel::initialize(const Model& model) { ALOGV("Entering %s", __func__); if (!mModelInfo->initRuntimeInfo()) { ALOGE("Failed to initialize Model runtime parameters!!"); diff --git a/cpu/CpuPreparedModel.h b/cpu/CpuPreparedModel.h old mode 100644 new mode 100755 index ce587b02e..cbcba673c --- a/cpu/CpuPreparedModel.h +++ b/cpu/CpuPreparedModel.h @@ -37,7 +37,7 @@ class 
CpuPreparedModel : public BasePreparedModel { CpuPreparedModel(const Model& model) : BasePreparedModel(IntelDeviceType::CPU, model) {} ~CpuPreparedModel() { deinitialize(); } - bool initialize() override; + bool initialize(const Model& model) override; Return configureExecutionBurst( const sp& callback, const MQDescriptorSync& requestChannel, diff --git a/gna/GnaPreparedModel.cpp b/gna/GnaPreparedModel.cpp old mode 100644 new mode 100755 index 87b20f3ec..d9e8cf50f --- a/gna/GnaPreparedModel.cpp +++ b/gna/GnaPreparedModel.cpp @@ -24,7 +24,7 @@ void GnaPreparedModel::deinitialize() { ALOGV("Exiting %s", __func__); } -bool GnaPreparedModel::initialize() { +bool GnaPreparedModel::initialize(const Model& model) { ALOGV("Entering %s", __func__); if (!mModelInfo->initRuntimeInfo()) { ALOGE("Failed to initialize Model runtime parameters!!"); diff --git a/gna/GnaPreparedModel.h b/gna/GnaPreparedModel.h old mode 100644 new mode 100755 index 95b69ea56..e90a614c8 --- a/gna/GnaPreparedModel.h +++ b/gna/GnaPreparedModel.h @@ -37,7 +37,7 @@ class GnaPreparedModel : public BasePreparedModel { GnaPreparedModel(const Model& model) : BasePreparedModel(IntelDeviceType::GNA, model) {} ~GnaPreparedModel() { deinitialize(); } - bool initialize() override; + bool initialize(const Model& model) override; Return configureExecutionBurst( const sp& callback, const MQDescriptorSync& requestChannel, diff --git a/ngraph_creator/operations/src/Cast.cpp b/ngraph_creator/operations/src/Cast.cpp old mode 100644 new mode 100755 index b18c6bfda..a7a83e151 --- a/ngraph_creator/operations/src/Cast.cpp +++ b/ngraph_creator/operations/src/Cast.cpp @@ -63,7 +63,7 @@ std::shared_ptr Cast::createNode() { mNgraphNodes->setOutputAtOperandIndex(outputIndex, outputNode); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(mDefaultOutputIndex, outputNode); } diff --git 
a/ngraph_creator/operations/src/LSTM.cpp b/ngraph_creator/operations/src/LSTM.cpp old mode 100644 new mode 100755 index 5d64029bd..833a68b63 --- a/ngraph_creator/operations/src/LSTM.cpp +++ b/ngraph_creator/operations/src/LSTM.cpp @@ -380,7 +380,7 @@ std::shared_ptr LSTM::createNode() { auto outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, i); mNgraphNodes->setOutputAtOperandIndex(outputIndex, LstmOutputs[i]); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(outputIndex, LstmOutputs[i]); } } diff --git a/ngraph_creator/operations/src/OperationsBase.cpp b/ngraph_creator/operations/src/OperationsBase.cpp old mode 100644 new mode 100755 index 338fd67eb..b7c66d785 --- a/ngraph_creator/operations/src/OperationsBase.cpp +++ b/ngraph_creator/operations/src/OperationsBase.cpp @@ -56,7 +56,7 @@ void OperationsBase::connectOperationToGraph() { if (op.type == OperandType::TENSOR_QUANT8_ASYMM) { outputNode = QuantizeNode(outputNode, mDefaultOutputIndex, ngraph::element::u8); } - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(mDefaultOutputIndex, outputNode); } mNgraphNodes->setOutputAtOperandIndex(mDefaultOutputIndex, outputNode->get_default_output()); diff --git a/ngraph_creator/operations/src/Quantize.cpp b/ngraph_creator/operations/src/Quantize.cpp old mode 100644 new mode 100755 index 41e36013c..f04c67608 --- a/ngraph_creator/operations/src/Quantize.cpp +++ b/ngraph_creator/operations/src/Quantize.cpp @@ -34,7 +34,7 @@ std::shared_ptr Quantize::createNode() { mNgraphNodes->setOutputAtOperandIndex(outputIndex, outputNode); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(mDefaultOutputIndex, outputNode); } diff --git 
a/ngraph_creator/operations/src/RNN.cpp b/ngraph_creator/operations/src/RNN.cpp old mode 100644 new mode 100755 index 3978d3ebb..f1f69dc8a --- a/ngraph_creator/operations/src/RNN.cpp +++ b/ngraph_creator/operations/src/RNN.cpp @@ -68,7 +68,7 @@ std::shared_ptr RNN::createNode() { mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); ALOGD("%s Set Output index %d", __func__, outputIndex); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(outputIndex, outNode); ALOGD("%s Add result %d", __func__, outputIndex); } diff --git a/ngraph_creator/operations/src/Split.cpp b/ngraph_creator/operations/src/Split.cpp old mode 100644 new mode 100755 index 5c28c5879..15784ba55 --- a/ngraph_creator/operations/src/Split.cpp +++ b/ngraph_creator/operations/src/Split.cpp @@ -59,7 +59,7 @@ std::shared_ptr Split::createNode() { mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(outputIndex, outNode); } } diff --git a/ngraph_creator/operations/src/Topk_V2.cpp b/ngraph_creator/operations/src/Topk_V2.cpp old mode 100644 new mode 100755 index da074c07f..4c274d294 --- a/ngraph_creator/operations/src/Topk_V2.cpp +++ b/ngraph_creator/operations/src/Topk_V2.cpp @@ -65,7 +65,7 @@ std::shared_ptr Topk_V2::createNode() { mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); ALOGD("%s Set Output index %d", __func__, outputIndex); const auto op = sModelInfo->getOperand(outputIndex); - if (op.lifetime == OperandLifeTime::MODEL_OUTPUT) { + if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { addResultNode(outputIndex, outNode); ALOGD("%s Add result %d", __func__, outputIndex); } diff --git a/utils.h b/utils.h old mode 100644 new mode 100755 index 8c7277748..b7eaf57a3 --- 
a/utils.h +++ b/utils.h @@ -149,7 +149,7 @@ struct RunTimeOperandInfo { uint32_t length; OperandLifeTime lifetime; uint32_t numberOfUsesLeft; - Operand::ExtraParams extraParams; + V1_2::Operand::ExtraParams extraParams; Shape shape() const { return { .type = type,