Skip to content
This repository has been archived by the owner on Jul 18, 2024. It is now read-only.

Commit

Permalink
Added support for v1.3 interfaces
Browse files Browse the repository at this point in the history
- Supports v1.3 interfaces
- Supports v1.0 and v1.1 interfaces

Signed-off-by: Vijeetkumar Benni <[email protected]>
  • Loading branch information
vbenni authored and JeevakaPrabu committed Aug 17, 2021
1 parent 8f8db22 commit 05893b1
Show file tree
Hide file tree
Showing 18 changed files with 472 additions and 138 deletions.
54 changes: 50 additions & 4 deletions BasePreparedModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include <log/log.h>
#include <thread>
#include "ExecutionBurstServer.h"
#include "Utils.h"
#include "ValidateHal.h"

#define DISABLE_ALL_QUANT
Expand Down Expand Up @@ -49,7 +50,7 @@ T getScalarData(const RunTimeOperandInfo& info) {
return data[0];
}

bool BasePreparedModel::initialize() {
// Default initialization hook for the base prepared model.
// Declared virtual in the header, so backend-specific subclasses are expected
// to override this to build their network from `model`; the base
// implementation performs no work and always reports success.
// `model` is intentionally unused here.
bool BasePreparedModel::initialize(const Model& model) {
ALOGV("Entering %s", __func__);
return true;
}
Expand All @@ -64,6 +65,11 @@ static Return<void> notify(const sp<V1_2::IExecutionCallback>& callback, const E
return callback->notify_1_2(status, outputShapes, timing);
}

// Relays an asynchronous execution result to a v1.3 client callback.
// The driver-internal (v1.0-style) ErrorStatus is widened to its v1.3
// equivalent before delivery, mirroring the v1.0/v1.2 notify overloads above.
static Return<void> notify(const sp<V1_3::IExecutionCallback>& callback, const ErrorStatus& status,
                           const hidl_vec<OutputShape>& outputShapes, Timing timing) {
    const auto status13 = convertToV1_3(status);
    return callback->notify_1_3(status13, outputShapes, timing);
}

static void floatToUint8(const float* src, uint8_t* dst, size_t size) {
for (uint32_t i = 0; i < size; ++i) {
dst[i] = static_cast<uint8_t>(src[i]);
Expand Down Expand Up @@ -99,7 +105,7 @@ Return<ErrorStatus> executeBase(const Request& request, MeasureTiming measure,
ALOGE("invalid callback passed to execute");
return ErrorStatus::INVALID_ARGUMENT;
}
if (!validateRequest(request, preparedModel->getModelInfo()->getModel())) {
if (!validateRequest(request, convertToV1_2(preparedModel->getModelInfo()->getModel()))) {
notify(callback, ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
return ErrorStatus::INVALID_ARGUMENT;
}
Expand Down Expand Up @@ -337,8 +343,8 @@ static std::tuple<ErrorStatus, hidl_vec<V1_2::OutputShape>, Timing> executeSynch
.timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
return {ErrorStatus::NONE, modelInfo->getOutputShapes(), timing};
}
return {ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming};
ALOGV("Exiting %s", __func__);
return {ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming};
}

Return<void> BasePreparedModel::executeSynchronously(const Request& request, MeasureTiming measure,
Expand All @@ -347,7 +353,7 @@ Return<void> BasePreparedModel::executeSynchronously(const Request& request, Mea
time_point driverStart;
if (measure == MeasureTiming::YES) driverStart = now();

if (!validateRequest(request, mModelInfo->getModel())) {
if (!validateRequest(request, convertToV1_2(mModelInfo->getModel()))) {
cb(ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
return Void();
}
Expand All @@ -358,6 +364,25 @@ Return<void> BasePreparedModel::executeSynchronously(const Request& request, Mea
return Void();
}

// Synchronous execution entry point for the v1.3 interface.
// Downgrades the v1.3 request to the v1.0 representation, validates it against
// the v1.2 view of the model, runs the shared synchronous execution path, and
// reports the result (status widened back to v1.3) through `cb`.
// NOTE(review): `deadline` and `loopTimeoutDuration` are currently ignored —
// confirm whether deadline-abort support is required for this driver.
Return<void> BasePreparedModel::executeSynchronously_1_3(
    const V1_3::Request& request, V1_2::MeasureTiming measure,
    const V1_3::OptionalTimePoint& deadline,
    const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
    ALOGV("Entering %s", __func__);
    time_point driverStart;
    const bool wantTiming = (measure == MeasureTiming::YES);
    if (wantTiming) driverStart = now();

    const auto requestV1_0 = convertToV1_0(request);
    if (!validateRequest(requestV1_0, convertToV1_2(mModelInfo->getModel()))) {
        cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
        return Void();
    }
    auto [status, outputShapes, timing] =
        executeSynchronouslyBase(requestV1_0, measure, this, driverStart);
    cb(convertToV1_3(status), std::move(outputShapes), timing);
    ALOGV("Exiting %s", __func__);
    return Void();
}

Return<void> BasePreparedModel::configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
Expand All @@ -373,6 +398,7 @@ Return<void> BasePreparedModel::configureExecutionBurst(
cb(ErrorStatus::NONE, burst);
ALOGI("%s burst created", __func__);
}
ALOGV("Exiting %s", __func__);
return Void();
}

Expand All @@ -388,6 +414,26 @@ Return<ErrorStatus> BasePreparedModel::execute_1_2(const Request& request, Measu
return executeBase(request, measure, this, callback);
}

// Asynchronous execution entry point for the v1.3 interface.
// Delegates to the shared executeBase() path with the request downgraded to
// v1.0; the launch status is widened back to a v1.3 ErrorStatus for the
// caller, while results are delivered later through `callback`.
// NOTE(review): `deadline` and `loopTimeoutDuration` are not forwarded —
// confirm whether deadline handling is required for this driver.
Return<V1_3::ErrorStatus> BasePreparedModel::execute_1_3(
    const V1_3::Request& request, V1_2::MeasureTiming measure,
    const V1_3::OptionalTimePoint& deadline,
    const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
    const sp<V1_3::IExecutionCallback>& callback) {
    ALOGV("Entering %s", __func__);
    const auto launchStatus = executeBase(convertToV1_0(request), measure, this, callback);
    return convertToV1_3(launchStatus);
}

// Fenced execution entry point for the v1.3 interface (not yet supported).
// The HIDL contract requires `cb` to be invoked exactly once on every call;
// the previous stub returned without calling it, which would leave a v1.3
// client waiting forever. Until fenced execution is implemented, report
// failure with an empty sync-fence handle and no fenced-execution callback.
Return<void> BasePreparedModel::executeFenced(
    const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor, V1_2::MeasureTiming measure,
    const V1_3::OptionalTimePoint& deadline,
    const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
    const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) {
    ALOGV("Entering %s", __func__);
    // TODO: Add support for fenced execution.
    cb(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
    ALOGV("Exiting %s", __func__);
    return Void();
}

} // namespace nnhal
} // namespace neuralnetworks
} // namespace hardware
Expand Down
23 changes: 20 additions & 3 deletions BasePreparedModel.h
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,10 @@

#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <sys/mman.h>
Expand Down Expand Up @@ -48,7 +52,7 @@ template <class T>
using vec = std::vector<T>;
typedef uint8_t* memory;

class BasePreparedModel : public V1_2::IPreparedModel {
class BasePreparedModel : public V1_3::IPreparedModel {
public:
BasePreparedModel(const Model& model) : mTargetDevice(IntelDeviceType::CPU) {
mModelInfo = std::make_shared<NnapiModelInfo>(model);
Expand All @@ -63,15 +67,28 @@ class BasePreparedModel : public V1_2::IPreparedModel {
const sp<V1_0::IExecutionCallback>& callback) override;
Return<ErrorStatus> execute_1_2(const Request& request, MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback) override;
Return<void> executeSynchronously(const Request& request, MeasureTiming measure,
Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, V1_2::MeasureTiming measure,
const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const sp<V1_3::IExecutionCallback>& callback) override;
Return<void> executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
executeSynchronously_cb cb) override;
Return<void> executeSynchronously_1_3(const V1_3::Request& request, V1_2::MeasureTiming measure,
const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb) override;
Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override;
Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const V1_3::OptionalTimeoutDuration& duration,
executeFenced_cb cb) override;

virtual bool initialize();
virtual bool initialize(const Model& model);

std::shared_ptr<NnapiModelInfo> getModelInfo() { return mModelInfo; }

Expand Down
Loading

0 comments on commit 05893b1

Please sign in to comment.