diff --git a/android/src/main/cpp/AudioConstants.h b/android/src/main/cpp/AudioConstants.h
index d05d43c..4919684 100644
--- a/android/src/main/cpp/AudioConstants.h
+++ b/android/src/main/cpp/AudioConstants.h
@@ -6,11 +6,33 @@
 #define AUDIOPLAYBACK_AUDIOCONSTANTS_H
 
 #include <cstdint>
+#include <string>
+#include <optional>
 
 struct AudioProperties {
     int32_t channelCount;
     int32_t sampleRate;
 };
 
+struct SetupAudioStreamResult {
+    std::optional<std::string> error;
+};
+
+struct OpenAudioStreamResult {
+    std::optional<std::string> error;
+};
+
+struct CloseAudioStreamResult {
+    std::optional<std::string> error;
+};
+
+struct LoadSoundResult {
+    std::optional<std::string> id;
+    std::optional<std::string> error;
+};
+
+struct UnloadSoundResult {
+    std::optional<std::string> error;
+};
 
 #endif //AUDIOPLAYBACK_AUDIOCONSTANTS_H
diff --git a/android/src/main/cpp/AudioEngine.cpp b/android/src/main/cpp/AudioEngine.cpp
index c980784..a0b258a 100644
--- a/android/src/main/cpp/AudioEngine.cpp
+++ b/android/src/main/cpp/AudioEngine.cpp
@@ -8,10 +8,9 @@
 #include "audio/AAssetDataSource.h"
 
-void AudioEngine::setupAudioStream(double sampleRate, double channelCount) {
+SetupAudioStreamResult AudioEngine::setupAudioStream(double sampleRate, double channelCount) {
     if(mAudioStream) {
-        LOGD("Setting up an audio stream while one is already available");
-        return;
+        return { .error = "Setting up an audio stream while one is already available"};
     }
 
     mDesiredSampleRate = static_cast<int32_t>(sampleRate);
@@ -32,49 +31,50 @@ void AudioEngine::setupAudioStream(double sampleRate, double channelCount) {
     oboe::Result result = builder.openStream(mAudioStream);
     if( result != oboe::Result::OK) {
-        LOGE("Failed to open stream. 
Error: %s", convertToText(result));
+        auto error = "Failed to open stream:" + std::string (convertToText(result));
+        return { .error = error};
     } else {
-        LOGE("Opened stream successfully");
+        return {.error = std::nullopt};
     }
-
 }
 
-void AudioEngine::openAudioStream() {
+OpenAudioStreamResult AudioEngine::openAudioStream() {
     if(!mAudioStream) {
-        LOGD("There is no audio stream to start");
-        return;
+        return {.error = "There is no audio stream to start" };
    }
 
     auto streamState = mAudioStream->getState();
     if(streamState == oboe::StreamState::Starting || streamState == oboe::StreamState::Started) {
-        LOGD("Audio stream was requested to start but it is already started");
-        return;
+        return {.error = "Audio stream was requested to start but it is already started"};
     }
 
     oboe::Result result = mAudioStream->requestStart();
     if(result != oboe::Result::OK) {
-        LOGE("Failed to start stream, Error: %s", oboe::convertToText(result));
+        auto error = "Failed to start stream, Error: " + std::string(oboe::convertToText(result));
+        return {.error = error};
+    } else {
+        return {.error = std::nullopt};
     }
 }
 
-void AudioEngine::closeAudioStream() {
+CloseAudioStreamResult AudioEngine::closeAudioStream() {
     if(!mAudioStream) {
-        LOGD("There is no audio stream to close");
-        return;
+        return { .error = "There is no audio stream to close" };
     }
 
     auto streamState = mAudioStream->getState();
     if(streamState == oboe::StreamState::Closing || streamState == oboe::StreamState::Closed) {
-        LOGD("Audio stream was requested to close but it is already closed");
-        return;
+        return { .error = "Audio stream was requested to close but it is already closed" };
     }
 
     oboe::Result result = mAudioStream->close();
     if(result != oboe::Result::OK) {
-        LOGE("Failed to start stream, Error: %s", oboe::convertToText(result));
+        auto error ="Failed to close stream: " + std::string(oboe::convertToText(result));
+        return {.error = error};
     }
 
     mAudioStream = nullptr;
+    return {.error = std::nullopt};
 }
 
 oboe::DataCallbackResult
@@ 
-115,7 +115,7 @@ void AudioEngine::seekSoundsTo(const std::vector> } } -std::optional AudioEngine::loadSound(int fd, int offset, int length) { +LoadSoundResult AudioEngine::loadSound(int fd, int offset, int length) { auto playersSize = mPlayers.size(); LOGD("Loading audio with already %d sounds loaded", playersSize); @@ -125,30 +125,28 @@ std::optional AudioEngine::loadSound(int fd, int offset, int length .sampleRate = mDesiredSampleRate }; - std::shared_ptr mClapSource { - AAssetDataSource::newFromCompressedAsset(fd, offset, length, targetProperties) - }; - if(mClapSource == nullptr) { - LOGE("Could not load source data for clap sound"); - return std::nullopt; + auto compressedAssetResult = AAssetDataSource::newFromCompressedAsset(fd, offset, length, targetProperties); + + if(compressedAssetResult.error) { + return {.id = std::nullopt, .error = compressedAssetResult.error}; + } else if(compressedAssetResult.dataSource == nullptr) { + return {.id = std::nullopt, .error = "An unknown error occurred while loading the audio file. 
Please create an issue with a reproducible"}; } std::string id = uuid::generate_uuid_v4(); - mPlayers[id] = std::make_unique(std::move(mClapSource)); - return id; + mPlayers[id] = std::make_unique(compressedAssetResult.dataSource); + return {.id = id, .error = std::nullopt}; } -void AudioEngine::unloadSound(const std::string &playerId) { - auto playersSize = mPlayers.size(); - - LOGD("Unloading audio with already %d sounds loaded", playersSize); - +UnloadSoundResult AudioEngine::unloadSound(const std::string &playerId) { auto it = mPlayers.find(playerId); if(it != mPlayers.end()) { mPlayers.erase(it); } else { - LOGE("Player with identifier: %s not found", playerId.c_str()); + return {.error = "Audio file could not be unloaded because it is not found"}; } + + return {.error = std::nullopt}; } diff --git a/android/src/main/cpp/AudioEngine.h b/android/src/main/cpp/AudioEngine.h index 7ed8307..0c07b37 100644 --- a/android/src/main/cpp/AudioEngine.h +++ b/android/src/main/cpp/AudioEngine.h @@ -11,18 +11,19 @@ #include #include "audio/Player.h" +#include "AudioConstants.h" #include class AudioEngine : public oboe::AudioStreamDataCallback{ public: - void setupAudioStream(double sampleRate, double channelCount); - void openAudioStream(); - void closeAudioStream(); - std::optional loadSound(int fd, int offset, int length); - void unloadSound(const std::string &playerId); + SetupAudioStreamResult setupAudioStream(double sampleRate, double channelCount); + OpenAudioStreamResult openAudioStream(); + CloseAudioStreamResult closeAudioStream(); void playSounds(const std::vector>&); void loopSounds(const std::vector>&); void seekSoundsTo(const std::vector>&); + LoadSoundResult loadSound(int fd, int offset, int length); + UnloadSoundResult unloadSound(const std::string &playerId); oboe::DataCallbackResult onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames) override; diff --git a/android/src/main/cpp/audio/AAssetDataSource.cpp 
b/android/src/main/cpp/audio/AAssetDataSource.cpp index cfecdf7..2ea0daf 100644 --- a/android/src/main/cpp/audio/AAssetDataSource.cpp +++ b/android/src/main/cpp/audio/AAssetDataSource.cpp @@ -24,7 +24,7 @@ constexpr int kMaxCompressionRatio { 12 }; -AAssetDataSource * +NewFromCompressedAssetResult AAssetDataSource::newFromCompressedAsset(int fd, int offset, int length, AudioProperties targetProperties) { @@ -35,9 +35,12 @@ AAssetDataSource::newFromCompressedAsset(int fd, int offset, const long maximumDataSizeInBytes = kMaxCompressionRatio * length * sizeof(int16_t); auto decodedData = new uint8_t[maximumDataSizeInBytes]; - int64_t bytesDecoded = NDKExtractor::decodeFileDescriptor(fd, offset, length, decodedData, targetProperties); - auto numSamples = bytesDecoded / sizeof(int16_t); - LOGE("Number of samples: %lld", numSamples); + auto decodeResult = NDKExtractor::decodeFileDescriptor(fd, offset, length, decodedData, targetProperties); + if(decodeResult.error) { + return {.dataSource = nullptr, .error = decodeResult.error }; + } + + auto numSamples = decodeResult.bytesRead / sizeof(int16_t); // Now we know the exact number of samples we can create a float array to hold the audio data auto outputBuffer = std::make_unique(numSamples); @@ -46,13 +49,14 @@ AAssetDataSource::newFromCompressedAsset(int fd, int offset, oboe::convertPcm16ToFloat( reinterpret_cast(decodedData), outputBuffer.get(), - bytesDecoded / sizeof(int16_t)); + decodeResult.bytesRead / sizeof(int16_t)); delete[] decodedData; - // TODO: handle closing asset -// AAsset_close(asset); - return new AAssetDataSource(std::move(outputBuffer), - numSamples, - targetProperties); + return { + .dataSource = new AAssetDataSource(std::move(outputBuffer), + numSamples, + targetProperties), + .error = std::nullopt + }; } diff --git a/android/src/main/cpp/audio/AAssetDataSource.h b/android/src/main/cpp/audio/AAssetDataSource.h index e08e861..547c0a7 100644 --- a/android/src/main/cpp/audio/AAssetDataSource.h +++ 
b/android/src/main/cpp/audio/AAssetDataSource.h @@ -17,18 +17,26 @@ #ifndef AUDIOPLAYBACK_AASSETDATASOURCE_H #define AUDIOPLAYBACK_AASSETDATASOURCE_H +#include #include #include #include "DataSource.h" +class AAssetDataSource; + +struct NewFromCompressedAssetResult { + AAssetDataSource *dataSource; + std::optional error; +}; + class AAssetDataSource : public DataSource { public: - int64_t getSize() const override { return mBufferSize; } - AudioProperties getProperties() const override { return mProperties; } - const float* getData() const override { return mBuffer.get(); } + [[nodiscard]] int64_t getSize() const override { return mBufferSize; } + [[nodiscard]] AudioProperties getProperties() const override { return mProperties; } + [[nodiscard]] const float* getData() const override { return mBuffer.get(); } - static AAssetDataSource* newFromCompressedAsset( + static NewFromCompressedAssetResult newFromCompressedAsset( int fd, int offset, int length, AudioProperties targetProperties); diff --git a/android/src/main/cpp/audio/NDKExtractor.cpp b/android/src/main/cpp/audio/NDKExtractor.cpp index d7d3c17..2f55668 100644 --- a/android/src/main/cpp/audio/NDKExtractor.cpp +++ b/android/src/main/cpp/audio/NDKExtractor.cpp @@ -17,6 +17,8 @@ #include #include +#include +#include #include #include @@ -24,182 +26,7 @@ #include "NDKExtractor.h" -int32_t NDKExtractor::decode(AAsset *asset, uint8_t *targetData, AudioProperties targetProperties) { - - LOGD("Using NDK decoder"); - - // open asset as file descriptor - off_t start, length; - int fd = AAsset_openFileDescriptor(asset, &start, &length); - - // Extract the audio frames - AMediaExtractor *extractor = AMediaExtractor_new(); - media_status_t amresult = AMediaExtractor_setDataSourceFd(extractor, fd, - static_cast(start), - static_cast(length)); - if (amresult != AMEDIA_OK){ - LOGE("Error setting extractor data source, err %d", amresult); - return 0; - } - - // Specify our desired output format by creating it from our source - 
AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, 0); - - int32_t sampleRate; - if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &sampleRate)){ - LOGD("Source sample rate %d", sampleRate); - if (sampleRate != targetProperties.sampleRate){ - LOGE("Input (%d) and output (%d) sample rates do not match. " - "NDK decoder does not support resampling.", - sampleRate, - targetProperties.sampleRate); - return 0; - } - } else { - LOGE("Failed to get sample rate"); - return 0; - }; - - int32_t channelCount; - if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channelCount)){ - LOGD("Got channel count %d", channelCount); - if (channelCount != targetProperties.channelCount){ - LOGE("NDK decoder does not support different " - "input (%d) and output (%d) channel counts", - channelCount, - targetProperties.channelCount); - } - } else { - LOGE("Failed to get channel count"); - return 0; - } - - const char *formatStr = AMediaFormat_toString(format); - LOGD("Output format %s", formatStr); - - const char *mimeType; - if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mimeType)) { - LOGD("Got mime type %s", mimeType); - } else { - LOGE("Failed to get mime type"); - return 0; - } - - // Obtain the correct decoder - AMediaCodec *codec = nullptr; - AMediaExtractor_selectTrack(extractor, 0); - codec = AMediaCodec_createDecoderByType(mimeType); - AMediaCodec_configure(codec, format, nullptr, nullptr, 0); - AMediaCodec_start(codec); - - // DECODE - - bool isExtracting = true; - bool isDecoding = true; - int32_t bytesWritten = 0; - - while(isExtracting || isDecoding){ - - if (isExtracting){ - - // Obtain the index of the next available input buffer - ssize_t inputIndex = AMediaCodec_dequeueInputBuffer(codec, 2000); - //LOGV("Got input buffer %d", inputIndex); - - // The input index acts as a status if its negative - if (inputIndex < 0){ - if (inputIndex == AMEDIACODEC_INFO_TRY_AGAIN_LATER){ - // LOGV("Codec.dequeueInputBuffer try again 
later"); - } else { - LOGE("Codec.dequeueInputBuffer unknown error status"); - } - } else { - - // Obtain the actual buffer and read the encoded data into it - size_t inputSize; - uint8_t *inputBuffer = AMediaCodec_getInputBuffer(codec, inputIndex, &inputSize); - //LOGV("Sample size is: %d", inputSize); - - ssize_t sampleSize = AMediaExtractor_readSampleData(extractor, inputBuffer, inputSize); - auto presentationTimeUs = AMediaExtractor_getSampleTime(extractor); - - if (sampleSize > 0){ - - // Enqueue the encoded data - AMediaCodec_queueInputBuffer(codec, inputIndex, 0, sampleSize, - presentationTimeUs, - 0); - AMediaExtractor_advance(extractor); - - } else { - LOGD("End of extractor data stream"); - isExtracting = false; - - // We need to tell the codec that we've reached the end of the stream - AMediaCodec_queueInputBuffer(codec, inputIndex, 0, 0, - presentationTimeUs, - AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM); - } - } - } - - if (isDecoding){ - // Dequeue the decoded data - AMediaCodecBufferInfo info; - ssize_t outputIndex = AMediaCodec_dequeueOutputBuffer(codec, &info, 0); - - if (outputIndex >= 0){ - - // Check whether this is set earlier - if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM){ - LOGD("Reached end of decoding stream"); - isDecoding = false; - } - - // Valid index, acquire buffer - size_t outputSize; - uint8_t *outputBuffer = AMediaCodec_getOutputBuffer(codec, outputIndex, &outputSize); - - /*LOGV("Got output buffer index %d, buffer size: %d, info size: %d writing to pcm index %d", - outputIndex, - outputSize, - info.size, - m_writeIndex);*/ - - // copy the data out of the buffer - memcpy(targetData + bytesWritten, outputBuffer, info.size); - bytesWritten+=info.size; - AMediaCodec_releaseOutputBuffer(codec, outputIndex, false); - } else { - - // The outputIndex doubles as a status return if its value is < 0 - switch(outputIndex){ - case AMEDIACODEC_INFO_TRY_AGAIN_LATER: - LOGD("dequeueOutputBuffer: try again later"); - break; - case 
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED: - LOGD("dequeueOutputBuffer: output buffers changed"); - break; - case AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED: - LOGD("dequeueOutputBuffer: output outputFormat changed"); - format = AMediaCodec_getOutputFormat(codec); - LOGD("outputFormat changed to: %s", AMediaFormat_toString(format)); - break; - } - } - } - } - - // Clean up - AMediaFormat_delete(format); - AMediaCodec_delete(codec); - AMediaExtractor_delete(extractor); - - return bytesWritten; -} - -int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8_t* targetData, AudioProperties targetProperties) { - +DecodeFileDescriptorResult NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8_t* targetData, AudioProperties targetProperties) { LOGD("Using NDK decoder"); // Extract the audio frames @@ -209,7 +36,7 @@ int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8 static_cast(length)); if (amresult != AMEDIA_OK){ LOGE("Error setting extractor data source, err %d", amresult); - return 0; + return {.bytesRead = 0, .error = "Decoding sound file failed"}; } // Specify our desired output format by creating it from our source @@ -217,42 +44,44 @@ int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8 int32_t sampleRate; if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &sampleRate)){ - LOGD("Source sample rate %d", sampleRate); + LOGD("File sample rate: %d", sampleRate); if (sampleRate != targetProperties.sampleRate){ - LOGE("Input (%d) and output (%d) sample rates do not match. " - "NDK decoder does not support resampling.", - sampleRate, - targetProperties.sampleRate); - return 0; + std::stringstream error; + error + << "Resampling audio files is not supported." 
+ << "The sample rate of the audio file, " + << sampleRate + << ", doesn't match the sample rate of the stream, " + << targetProperties.sampleRate << "."; + + return {.bytesRead = 0, .error = error.str()}; } } else { - LOGE("Failed to get sample rate"); - return 0; + return {.bytesRead = 0, .error = "Failed to load sound file: could not determine sample rate"}; }; int32_t channelCount; if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channelCount)){ - LOGD("Got channel count %d", channelCount); + LOGD("File channel count: %d", channelCount); if (channelCount != targetProperties.channelCount){ - LOGE("NDK decoder does not support different " - "input (%d) and output (%d) channel counts", - channelCount, - targetProperties.channelCount); + std::stringstream error; + error + << "The channel count of the audio file, " + << channelCount + << ", doesn't match the channel count of the stream, " + << targetProperties.channelCount << "."; + + return {.bytesRead = 0, .error = error.str()}; } } else { - LOGE("Failed to get channel count"); - return 0; + return {.bytesRead = 0, .error = "Failed to load sound file: could not determine channel count"}; } - const char *formatStr = AMediaFormat_toString(format); - LOGD("Output format %s", formatStr); - const char *mimeType; if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mimeType)) { - LOGD("Got mime type %s", mimeType); + LOGD("File mime type: %s", mimeType); } else { - LOGE("Failed to get mime type"); - return 0; + return {.bytesRead = 0, .error = "Failed to load sound file: could not determine mimeType"}; } // Obtain the correct decoder @@ -288,7 +117,6 @@ int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8 // Obtain the actual buffer and read the encoded data into it size_t inputSize; uint8_t *inputBuffer = AMediaCodec_getInputBuffer(codec, inputIndex, &inputSize); - //LOGV("Sample size is: %d", inputSize); ssize_t sampleSize = AMediaExtractor_readSampleData(extractor, 
inputBuffer, inputSize); auto presentationTimeUs = AMediaExtractor_getSampleTime(extractor); @@ -330,12 +158,6 @@ int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8 size_t outputSize; uint8_t *outputBuffer = AMediaCodec_getOutputBuffer(codec, outputIndex, &outputSize); - /*LOGV("Got output buffer index %d, buffer size: %d, info size: %d writing to pcm index %d", - outputIndex, - outputSize, - info.size, - m_writeIndex);*/ - // copy the data out of the buffer memcpy(targetData + bytesWritten, outputBuffer, info.size); bytesWritten+=info.size; @@ -365,6 +187,6 @@ int32_t NDKExtractor::decodeFileDescriptor(int fd, int offset, int length, uint8 AMediaCodec_delete(codec); AMediaExtractor_delete(extractor); - return bytesWritten; + return {.bytesRead = bytesWritten, .error = std::nullopt}; } diff --git a/android/src/main/cpp/audio/NDKExtractor.h b/android/src/main/cpp/audio/NDKExtractor.h index 3cbe495..c634177 100644 --- a/android/src/main/cpp/audio/NDKExtractor.h +++ b/android/src/main/cpp/audio/NDKExtractor.h @@ -23,14 +23,15 @@ #include #include "utils/logging.h" +struct DecodeFileDescriptorResult { + int32_t bytesRead; + std::optional error; +}; -class NDKExtractor { +class NDKExtractor { public: - static int32_t decode(AAsset *asset, uint8_t *targetData, AudioProperties targetProperties); - - static int32_t decodeFileDescriptor(int fd, int offset, int length, uint8_t *targetData, - AudioProperties targetProperties); + static DecodeFileDescriptorResult decodeFileDescriptor(int fd, int offset, int length, uint8_t *targetData, AudioProperties targetProperties); }; #endif //AUDIOPLAYBACK_NDKMEDIAEXTRACTOR_H diff --git a/android/src/main/cpp/audio/Player.h b/android/src/main/cpp/audio/Player.h index f0965a9..82b15ed 100644 --- a/android/src/main/cpp/audio/Player.h +++ b/android/src/main/cpp/audio/Player.h @@ -41,8 +41,8 @@ class Player : public IRenderableAudio{ * * @param source */ - explicit Player(std::shared_ptr source) - : 
mSource(std::move(source)) + explicit Player(DataSource *source) + : mSource(source) {}; void renderAudio(float *targetData, int32_t numFrames) override; @@ -54,7 +54,7 @@ class Player : public IRenderableAudio{ int32_t mReadFrameIndex = 0; std::atomic mIsPlaying { false }; std::atomic mIsLooping { false }; - std::shared_ptr mSource; + std::unique_ptr mSource; }; #endif //AUDIOPLAYBACK_PLAYER_H diff --git a/android/src/main/cpp/native-lib.cpp b/android/src/main/cpp/native-lib.cpp index f237813..a67af54 100644 --- a/android/src/main/cpp/native-lib.cpp +++ b/android/src/main/cpp/native-lib.cpp @@ -67,30 +67,79 @@ std::string jstringToStdString(JNIEnv* env, jstring jStr) { } extern "C" { -JNIEXPORT void JNICALL +JNIEXPORT jobject JNICALL Java_com_audioplayback_AudioPlaybackModule_setupAudioStreamNative(JNIEnv *env, jobject thiz, jdouble sample_rate, jdouble channel_count) { - audioEngine->setupAudioStream(sample_rate, channel_count); + auto result = audioEngine->setupAudioStream(sample_rate, channel_count); + + jclass structClass = env->FindClass("com/audioplayback/models/SetupAudioStreamResult"); + jmethodID constructor = env->GetMethodID(structClass, "", "(Ljava/lang/String;)V"); + + jstring jError = result.error.has_value() ? env->NewStringUTF(result.error->c_str()): nullptr; + jobject returnValue = env->NewObject(structClass, constructor, jError); + + if(jError) { + env->DeleteLocalRef(jError); + } + + return returnValue; } -JNIEXPORT void JNICALL +JNIEXPORT jobject JNICALL Java_com_audioplayback_AudioPlaybackModule_openAudioStreamNative(JNIEnv *env, jobject thiz) { - audioEngine->openAudioStream(); + auto result = audioEngine->openAudioStream(); + + jclass structClass = env->FindClass("com/audioplayback/models/OpenAudioStreamResult"); + jmethodID constructor = env->GetMethodID(structClass, "", "(Ljava/lang/String;)V"); + + jstring jError = result.error.has_value() ? 
env->NewStringUTF(result.error->c_str()): nullptr; + jobject returnValue = env->NewObject(structClass, constructor, jError); + + if(jError) { + env->DeleteLocalRef(jError); + } + + return returnValue; } -JNIEXPORT void JNICALL +JNIEXPORT jobject JNICALL Java_com_audioplayback_AudioPlaybackModule_closeAudioStreamNative(JNIEnv *env, jobject thiz) { - audioEngine->closeAudioStream(); + auto result = audioEngine->closeAudioStream(); + + jclass structClass = env->FindClass("com/audioplayback/models/CloseAudioStreamResult"); + jmethodID constructor = env->GetMethodID(structClass, "", "(Ljava/lang/String;)V"); + + jstring jError = result.error.has_value() ? env->NewStringUTF(result.error->c_str()): nullptr; + jobject returnValue = env->NewObject(structClass, constructor, jError); + + if(jError) { + env->DeleteLocalRef(jError); + } + + return returnValue; } -JNIEXPORT void JNICALL +JNIEXPORT jobject JNICALL Java_com_audioplayback_AudioPlaybackModule_unloadSoundNative(JNIEnv *env, jobject instance, jstring playerId) { - audioEngine->unloadSound(jstringToStdString(env, playerId)); + auto result = audioEngine->unloadSound(jstringToStdString(env, playerId)); + + jclass structClass = env->FindClass("com/audioplayback/models/UnloadSoundResult"); + jmethodID constructor = env->GetMethodID(structClass, "", "(Ljava/lang/String;)V"); + + jstring jError = result.error.has_value() ? 
env->NewStringUTF(result.error->c_str()): nullptr; + jobject returnValue = env->NewObject(structClass, constructor, jError); + + if(jError) { + env->DeleteLocalRef(jError); + } + + return returnValue; } -JNIEXPORT jstring JNICALL +JNIEXPORT jobject JNICALL Java_com_audioplayback_AudioPlaybackModule_loadSoundNative(JNIEnv *env, jobject instance, jint fd, jint fileLength, jint fileOffset) { - auto id = audioEngine->loadSound(fd, fileOffset, fileLength); + auto result = audioEngine->loadSound(fd, fileOffset, fileLength); + // Once done, close the file descriptor if (close(fd) == -1) { LOGE("Error closing file descriptor: %s", strerror(errno)); @@ -98,11 +147,19 @@ Java_com_audioplayback_AudioPlaybackModule_loadSoundNative(JNIEnv *env, jobject LOGD("File descriptor closed"); } - if(id) { - return env->NewStringUTF(id->c_str()); - } else { - return nullptr; - } + jclass structClass = env->FindClass("com/audioplayback/models/LoadSoundResult"); + jmethodID constructor = env->GetMethodID(structClass, "", "(Ljava/lang/String;Ljava/lang/String;)V"); + + jstring jError = result.error.has_value() ? env->NewStringUTF(result.error->c_str()): nullptr; + jstring jId = result.id.has_value() ? 
env->NewStringUTF(result.id->c_str()): nullptr;
+    jobject returnValue = env->NewObject(structClass, constructor, jError, jId);
+
+    if(jError) {
+        env->DeleteLocalRef(jError);
+    }
+    if(jId) {
+        env->DeleteLocalRef(jId);
+    }
+
+    return returnValue;
 }
diff --git a/android/src/main/java/com/audioplayback/AudioPlaybackModule.kt b/android/src/main/java/com/audioplayback/AudioPlaybackModule.kt
index bcdd1a2..c93d318 100644
--- a/android/src/main/java/com/audioplayback/AudioPlaybackModule.kt
+++ b/android/src/main/java/com/audioplayback/AudioPlaybackModule.kt
@@ -1,13 +1,19 @@
 package com.audioplayback
 
 import android.net.Uri
-import android.util.Log
+import com.audioplayback.models.CloseAudioStreamResult
 import com.facebook.react.bridge.Promise
 import com.facebook.react.bridge.ReactApplicationContext
 import com.facebook.react.bridge.ReactMethod
 import com.facebook.react.bridge.ReadableArray
 import com.facebook.react.bridge.ReadableType
 import com.audioplayback.models.FileDescriptorProps
+import com.audioplayback.models.LoadSoundResult
+import com.audioplayback.models.OpenAudioStreamResult
+import com.audioplayback.models.SetupAudioStreamResult
+import com.audioplayback.models.UnloadSoundResult
+import com.facebook.react.bridge.Arguments
+import com.facebook.react.bridge.WritableMap
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.launch
@@ -22,21 +28,31 @@ class AudioPlaybackModule internal constructor(context: ReactApplicationContext)
         return NAME
     }
 
-    @ReactMethod
-    override fun setupAudioStream(sampleRate: Double, channelCount: Double) {
-        setupAudioStreamNative(sampleRate, channelCount)
+    @ReactMethod(isBlockingSynchronousMethod = true)
+    override fun setupAudioStream(sampleRate: Double, channelCount: Double): WritableMap {
+        val result = setupAudioStreamNative(sampleRate, channelCount)
+        val map = Arguments.createMap()
+        result.error?.let { map.putString("error", it) } ?: map.putNull("error")
+        return map
     }
 
-    @ReactMethod
-    
override fun closeAudioStream() { - closeAudioStreamNative() + @ReactMethod(isBlockingSynchronousMethod = true) + override fun openAudioStream(): WritableMap { + val result = openAudioStreamNative() + val map = Arguments.createMap() + result.error?.let { map.putString("error", it) } ?: map.putNull("error") + return map } - @ReactMethod - override fun openAudioStream() { - openAudioStreamNative() + @ReactMethod(isBlockingSynchronousMethod = true) + override fun closeAudioStream(): WritableMap { + val result = closeAudioStreamNative() + val map = Arguments.createMap() + result.error?.let { map.putString("error", it) } ?: map.putNull("error") + return map } + @ReactMethod override fun loopSounds(arg: ReadableArray) { val (ids, values) = readableArrayToStringBooleanArray(arg) @@ -56,17 +72,25 @@ class AudioPlaybackModule internal constructor(context: ReactApplicationContext) } - @ReactMethod - override fun unloadSound(id: String) { - unloadSoundNative(id) + @ReactMethod(isBlockingSynchronousMethod = true) + override fun unloadSound(id: String): WritableMap { + val result = unloadSoundNative(id) + val map = Arguments.createMap() + result.error?.let { map.putString("error", it) } ?: map.putNull("error") + return map } @ReactMethod override fun loadSound(uri: String, promise: Promise) { + val map = Arguments.createMap() + val scheme = Uri.parse(uri).scheme if( scheme == null) { val fileDescriptorProps = FileDescriptorProps.fromLocalResource(reactApplicationContext, uri) - return promise.resolve(loadSoundNative(fileDescriptorProps.id, fileDescriptorProps.length, fileDescriptorProps.offset)) + val result = loadSoundNative(fileDescriptorProps.id, fileDescriptorProps.length, fileDescriptorProps.offset); + result.error?.let { map.putString("error", it) } ?: map.putNull("error") + result.id?.let { map.putString("id", it) } ?: map.putNull("id") + promise.resolve(map) } else { CoroutineScope(Dispatchers.Main).launch { withContext(Dispatchers.IO) { @@ -74,9 +98,13 @@ class 
AudioPlaybackModule internal constructor(context: ReactApplicationContext)
                     val fileDescriptorProps = FileDescriptorProps.getFileDescriptorPropsFromUrl(reactApplicationContext, url)
                     if(fileDescriptorProps == null) {
-                        promise.resolve(null)
-                        Log.d("Could not get file descriptor info from uri", LOG)
+                        map.putString("error", "Failed to load sound file")
+                        map.putNull("id")
+                        promise.resolve(map)
                     } else {
-                        promise.resolve(loadSoundNative(fileDescriptorProps.id, fileDescriptorProps.length, fileDescriptorProps.offset))
+                        val result = loadSoundNative(fileDescriptorProps.id, fileDescriptorProps.length, fileDescriptorProps.offset);
+                        result.error?.let { map.putString("error", it) } ?: map.putNull("error")
+                        result.id?.let { map.putString("id", it) } ?: map.putNull("id")
+                        promise.resolve(map)
                     }
                 }
             }
@@ -144,14 +172,14 @@ class AudioPlaybackModule internal constructor(context: ReactApplicationContext)
         closeAudioStreamNative()
     }
 
-    private external fun setupAudioStreamNative(sampleRate: Double, channelCount: Double)
-    private external fun openAudioStreamNative()
-    private external fun closeAudioStreamNative()
+    private external fun setupAudioStreamNative(sampleRate: Double, channelCount: Double): SetupAudioStreamResult
+    private external fun openAudioStreamNative(): OpenAudioStreamResult
+    private external fun closeAudioStreamNative(): CloseAudioStreamResult
     private external fun playSoundsNative(ids: Array<String>, values: BooleanArray)
     private external fun loopSoundsNative(ids: Array<String>, values: BooleanArray)
     private external fun seekSoundsToNative(ids: Array<String>, values: DoubleArray)
-    private external fun loadSoundNative(fd: Int, fileLength: Int, fileOffset: Int): String? 
- private external fun unloadSoundNative(playerId: String): Unit + private external fun loadSoundNative(fd: Int, fileLength: Int, fileOffset: Int): LoadSoundResult + private external fun unloadSoundNative(playerId: String): UnloadSoundResult // Example method // See https://reactnative.dev/docs/native-modules-android diff --git a/android/src/main/java/com/audioplayback/models/AudioPlaybackMethodsResults.kt b/android/src/main/java/com/audioplayback/models/AudioPlaybackMethodsResults.kt new file mode 100644 index 0000000..9b87d6e --- /dev/null +++ b/android/src/main/java/com/audioplayback/models/AudioPlaybackMethodsResults.kt @@ -0,0 +1,7 @@ +package com.audioplayback.models + +data class SetupAudioStreamResult(val error: String?) +data class OpenAudioStreamResult(val error: String?) +data class CloseAudioStreamResult(val error: String?) +data class LoadSoundResult(val error: String?, val id: String?) +data class UnloadSoundResult(val error: String?) diff --git a/android/src/oldarch/AudioPlaybackSpec.kt b/android/src/oldarch/AudioPlaybackSpec.kt index d0b63dc..ac93961 100644 --- a/android/src/oldarch/AudioPlaybackSpec.kt +++ b/android/src/oldarch/AudioPlaybackSpec.kt @@ -1,21 +1,20 @@ package com.audioplayback -import com.facebook.proguard.annotations.DoNotStrip import com.facebook.react.bridge.Promise import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.bridge.ReactContextBaseJavaModule -import com.facebook.react.bridge.ReactMethod import com.facebook.react.bridge.ReadableArray +import com.facebook.react.bridge.WritableMap abstract class AudioPlaybackSpec internal constructor(context: ReactApplicationContext) : ReactContextBaseJavaModule(context) { - abstract fun setupAudioStream(sampleRate: Double, channelCount: Double) + abstract fun setupAudioStream(sampleRate: Double, channelCount: Double): WritableMap - abstract fun openAudioStream() + abstract fun openAudioStream(): WritableMap - abstract fun closeAudioStream() + abstract fun 
closeAudioStream(): WritableMap abstract fun loopSounds(arg: ReadableArray) @@ -23,7 +22,7 @@ abstract class AudioPlaybackSpec internal constructor(context: ReactApplicationC abstract fun seekSoundsTo(arg: ReadableArray) - abstract fun unloadSound(id: String) + abstract fun unloadSound(id: String): WritableMap abstract fun loadSound(uri: String, promise: Promise) } diff --git a/example/ios/Podfile.lock b/example/ios/Podfile.lock index 07af4e9..4282245 100644 --- a/example/ios/Podfile.lock +++ b/example/ios/Podfile.lock @@ -1242,7 +1242,7 @@ PODS: - ReactCommon/turbomodule/bridging - ReactCommon/turbomodule/core - Yoga - - react-native-audio-playback (0.1.0): + - react-native-audio-playback (0.2.0-alpha.3): - DoubleConversion - glog - hermes-engine @@ -1772,7 +1772,7 @@ SPEC CHECKSUMS: React-logger: 81d58ca6f1d93fca9a770bda6cc1c4fbfcc99c9c React-Mapbuffer: 726951e68f4bb1c2513d322f2548798b2a3d628d React-microtasksnativemodule: 7a69a9b8fded72ea3cf81923ecf75cad5558ed26 - react-native-audio-playback: eaff56b0cd9f5d4ca98d2a3809f1493cdddd8244 + react-native-audio-playback: 90ecd527297c496404fbad379c672d59a53bb6cc React-nativeconfig: 470fce6d871c02dc5eff250a362d56391b7f52d6 React-NativeModulesApple: 6297fc3136c1fd42884795c51d7207de6312b606 React-perflogger: f2c94413cfad44817c96cab33753831e73f0d0dd diff --git a/example/src/App.tsx b/example/src/App.tsx index 02d3e5d..994999b 100644 --- a/example/src/App.tsx +++ b/example/src/App.tsx @@ -11,9 +11,6 @@ import { } from 'react-native'; import { Player, AudioManager } from 'react-native-audio-playback'; -AudioManager.shared.setupAudioStream(44100, 2); -AudioManager.shared.openAudioStream(); - export default function App() { const [players, setPlayers] = useState< Array<{ player: Player; title: string }> @@ -21,26 +18,26 @@ export default function App() { useEffect(() => { // loads the sounds - const players = Array<{ player: Player; title: string }>(); + const effectPlayers = Array<{ player: Player; title: string }>(); (async () 
=> { const player1 = await AudioManager.shared.loadSound( require('./assets/bamboo.mp3') ); if (player1) { - players.push({ player: player1, title: 'Bamboo' }); + effectPlayers.push({ player: player1, title: 'Bamboo' }); } const player2 = await AudioManager.shared.loadSound( require('./assets/swords.mp3') ); if (player2) { - players.push({ player: player2, title: 'Swords' }); + effectPlayers.push({ player: player2, title: 'Swords' }); } const player3 = await AudioManager.shared.loadSound( require('./assets/coins.mp3') ); if (player3) { - players.push({ player: player3, title: 'Coins' }); + effectPlayers.push({ player: player3, title: 'Coins' }); } const player4 = await AudioManager.shared.loadSound( @@ -48,99 +45,105 @@ export default function App() { ); if (player4) { - players.push({ player: player4, title: 'Axe' }); + effectPlayers.push({ player: player4, title: 'Axe' }); } - setPlayers(players); + setPlayers(effectPlayers); })(); return () => { - players.forEach((player) => player.player.unloadSound()); + effectPlayers.forEach((player) => player.player.unloadSound()); }; }, []); + const playersExceptAxe = players + .filter((p) => p.title !== 'Axe') + .map((player) => player.player); + return ( - {players.map((player) => ( - +