From e4c9cb270c0c7942d08249d766883df1e31db167 Mon Sep 17 00:00:00 2001 From: cloudwebrtc Date: Thu, 15 Aug 2024 16:57:56 +0800 Subject: [PATCH] Add dummy I/O support. --- BUILD.gn | 45 +++- include/rtc_audio_frame.h | 94 +------- include/rtc_audio_source.h | 16 ++ include/rtc_audio_track.h | 19 ++ include/rtc_desktop_device.h | 4 +- include/rtc_peerconnection_factory.h | 20 +- include/rtc_video_source.h | 11 + src/audio_device_dummy.cc | 318 +++++++++++++++++++++++++ src/audio_device_dummy.h | 115 +++++++++ src/rtc_audio_frame_impl.cc | 62 +++++ src/rtc_audio_frame_impl.h | 37 +++ src/rtc_audio_source_impl.cc | 69 ++++++ src/rtc_audio_track_impl.cc | 28 +++ src/rtc_audio_track_impl.h | 16 +- src/rtc_peerconnection_factory_impl.cc | 31 ++- src/rtc_peerconnection_factory_impl.h | 34 ++- src/rtc_video_frame_impl.h | 2 + src/rtc_video_source_impl.cc | 52 ++++ src/rtc_video_source_impl.h | 1 + 19 files changed, 861 insertions(+), 113 deletions(-) create mode 100644 src/audio_device_dummy.cc create mode 100644 src/audio_device_dummy.h create mode 100644 src/rtc_audio_frame_impl.cc create mode 100644 src/rtc_audio_frame_impl.h diff --git a/BUILD.gn b/BUILD.gn index bcb7ce7ac1..967f2a12e0 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2,7 +2,9 @@ import("../webrtc.gni") declare_args() { libwebrtc_intel_media_sdk = false - libwebrtc_desktop_capture = true + libwebrtc_desktop_capture = false + libwebrtc_video_capture = false + libwebrtc_dummy_audio_device = true } if (is_android) { @@ -41,9 +43,14 @@ rtc_shared_library("libwebrtc") { defines = [ "USE_LIBYUV", - "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE", ] + if(libwebrtc_dummy_audio_device) { + defines += [ "LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE" ] + } else { + defines += [ "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE" ] + } + if (is_win) { defines += [ "LIB_WEBRTC_API_EXPORTS", @@ -69,6 +76,7 @@ rtc_shared_library("libwebrtc") { "include/base/scoped_ref_ptr.h", "include/libwebrtc.h", "include/rtc_audio_device.h", + "include/rtc_audio_frame.h", 
"include/rtc_audio_source.h", "include/rtc_audio_track.h", "include/rtc_data_channel.h", @@ -88,7 +96,6 @@ rtc_shared_library("libwebrtc") { "include/rtc_rtp_transceiver.h", "include/rtc_session_description.h", "include/rtc_types.h", - "include/rtc_video_device.h", "include/rtc_video_frame.h", "include/rtc_video_renderer.h", "include/rtc_video_source.h", @@ -96,13 +103,11 @@ rtc_shared_library("libwebrtc") { "include/helper.h", "src/helper.cc", "src/base/portable.cc", - "src/internal/vcm_capturer.cc", - "src/internal/vcm_capturer.h", - "src/internal/video_capturer.cc", - "src/internal/video_capturer.h", "src/libwebrtc.cc", "src/rtc_audio_device_impl.cc", "src/rtc_audio_device_impl.h", + "src/rtc_audio_frame_impl.cc", + "src/rtc_audio_frame_impl.h", "src/rtc_audio_source_impl.cc", "src/rtc_audio_source_impl.h", "src/rtc_audio_track_impl.cc", @@ -137,8 +142,6 @@ rtc_shared_library("libwebrtc") { "src/rtc_rtp_transceiver_impl.h", "src/rtc_session_description_impl.cc", "src/rtc_session_description_impl.h", - "src/rtc_video_device_impl.cc", - "src/rtc_video_device_impl.h", "src/rtc_video_frame_impl.cc", "src/rtc_video_frame_impl.h", "src/rtc_video_sink_adapter.cc", @@ -149,6 +152,28 @@ rtc_shared_library("libwebrtc") { "src/rtc_video_track_impl.h", ] + if(libwebrtc_dummy_audio_device) { + sources += [ + "src/audio_device_dummy.cc", + "src/audio_device_dummy.h", + ] + } + + + # video capture device + if (libwebrtc_video_capture) { + defines += [ "RTC_VIDEO_CAPTURE_DEVICE" ] + sources += [ + "include/rtc_video_capturer.h", + "src/internal/video_capturer.h", + "src/internal/video_capturer.cc", + "src/internal/vcm_capturer.cc", + "src/internal/vcm_capturer.h", + "src/rtc_video_device_impl.cc", + "src/rtc_video_device_impl.h", + ] + } + # intel media sdk if (is_win && libwebrtc_intel_media_sdk) { sources += [ @@ -235,7 +260,7 @@ rtc_shared_library("libwebrtc") { # screen capture device if (libwebrtc_desktop_capture) { - defines += [ "RTC_DESKTOP_DEVICE" ] + defines += [ 
"RTC_DESKTOP_CAPTURE_DEVICE" ] sources += [ "include/rtc_desktop_capturer.h", "include/rtc_desktop_device.h", diff --git a/include/rtc_audio_frame.h b/include/rtc_audio_frame.h index 3f276a1676..b591e4dc66 100644 --- a/include/rtc_audio_frame.h +++ b/include/rtc_audio_frame.h @@ -1,108 +1,40 @@ -#ifndef AUDIO_FRAME_HXX -#define AUDIO_FRAME_HXX +#ifndef LIB_WEBRTC_RTC_AUDIO_FRAME_HXX +#define LIB_WEBRTC_RTC_AUDIO_FRAME_HXX -#include "media_manager_types.h" +#include "rtc_types.h" -namespace b2bua { +namespace libwebrtc { -class AudioFrame { +class RTCAudioFrame : public RefCountInterface { public: - /** - * @brief Creates a new instance of AudioFrame. - * @return AudioFrame*: a pointer to the newly created AudioFrame. - */ - MEDIA_MANAGER_API static AudioFrame* Create(); + LIB_WEBRTC_API static scoped_refptr Create(); - /** - * @brief Creates a new instance of AudioFrame with specified parameters. - * @param id: the unique identifier of the frame. - * @param timestamp: the timestamp of the frame. - * @param data: a pointer to the audio data buffer. - * @param samples_per_channel: the number of samples per channel. - * @param sample_rate_hz: the sample rate in Hz. - * @param num_channels: the number of audio channels. - * @return AudioFrame*: a pointer to the newly created AudioFrame. - */ - MEDIA_MANAGER_API static AudioFrame* Create(int id, uint32_t timestamp, - const int16_t* data, - size_t samples_per_channel, - int sample_rate_hz, - size_t num_channels = 1); - - /** - * @brief Releases the memory of this AudioFrame. - */ - virtual void Release() = 0; + LIB_WEBRTC_API static scoped_refptr Create( + uint32_t timestamp, const int16_t* data, size_t samples_per_channel, + int sample_rate_hz, size_t num_channels = 1); public: - /** - * @brief Updates the audio frame with specified parameters. - * @param id: the unique identifier of the frame. - * @param timestamp: the timestamp of the frame. - * @param data: a pointer to the audio data buffer. 
- * @param samples_per_channel: the number of samples per channel. - * @param sample_rate_hz: the sample rate in Hz. - * @param num_channels: the number of audio channels. - */ - virtual void UpdateFrame(int id, uint32_t timestamp, const int16_t* data, + virtual void UpdateFrame(uint32_t timestamp, const int16_t* data, size_t samples_per_channel, int sample_rate_hz, size_t num_channels = 1) = 0; - /** - * @brief Copies the contents of another AudioFrame. - * @param src: the source AudioFrame to copy from. - */ - virtual void CopyFrom(const AudioFrame& src) = 0; + virtual void CopyFrom(const scoped_refptr src) = 0; - /** - * @brief Adds another AudioFrame to this one. - * @param frame_to_add: the AudioFrame to add. - */ - virtual void Add(const AudioFrame& frame_to_add) = 0; + virtual void Add(const scoped_refptr frame_to_add) = 0; - /** - * @brief Mutes the audio data in this AudioFrame. - */ virtual void Mute() = 0; - /** - * @brief Returns a pointer to the audio data buffer. - * @return const int16_t*: a pointer to the audio data buffer. - */ virtual const int16_t* data() = 0; - /** - * @brief Returns the number of samples per channel. - * @return size_t: the number of samples per channel. - */ virtual size_t samples_per_channel() = 0; - /** - * @brief Returns the sample rate in Hz. - * @return int: the sample rate in Hz. - */ virtual int sample_rate_hz() = 0; - /** - * @brief Returns the number of audio channels. - * @return size_t: the number of audio channels. - */ virtual size_t num_channels() = 0; - /** - * @brief Returns the timestamp of the AudioFrame. - * @return uint32_t: the timestamp of the AudioFrame. - */ virtual uint32_t timestamp() = 0; - - /** - * @brief Returns the unique identifier of the AudioFrame. - * @return int: the unique identifier of the AudioFrame. 
- */ - - virtual int id() = 0; }; -}; // namespace b2bua +} // namespace libwebrtc #endif diff --git a/include/rtc_audio_source.h b/include/rtc_audio_source.h index 43e39fd801..f4427694de 100644 --- a/include/rtc_audio_source.h +++ b/include/rtc_audio_source.h @@ -2,6 +2,7 @@ #define LIB_WEBRTC_RTC_AUDIO_SOURCE_HXX #include "rtc_types.h" +#include "rtc_audio_frame.h" namespace libwebrtc { @@ -20,6 +21,21 @@ class RTCAudioSource : public RefCountInterface { virtual ~RTCAudioSource() {} }; +class VirtualAudioCapturer : public RefCountInterface { + public: + LIB_WEBRTC_API static scoped_refptr Create(); + + virtual void OnFrame(scoped_refptr data) = 0; + + virtual void OnData(const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames) = 0; + + virtual scoped_refptr source() = 0; +}; + } // namespace libwebrtc #endif // LIB_WEBRTC_RTC_AUDIO_TRACK_HXX diff --git a/include/rtc_audio_track.h b/include/rtc_audio_track.h index c64e4bc4a5..c3d40b60b1 100644 --- a/include/rtc_audio_track.h +++ b/include/rtc_audio_track.h @@ -1,11 +1,24 @@ #ifndef LIB_WEBRTC_RTC_AUDIO_TRACK_HXX #define LIB_WEBRTC_RTC_AUDIO_TRACK_HXX +#include "rtc_audio_frame.h" #include "rtc_media_track.h" #include "rtc_types.h" namespace libwebrtc { +template +class RTCAudioRenderer { + public: + virtual void OnFrame(AudioFrameT frame) = 0; + + virtual void OnData(const void* audio_data, int bits_per_sample, + int sample_rate, size_t number_of_channels, + size_t number_of_frames) = 0; + protected: + virtual ~RTCAudioRenderer() {} +}; + /** * The RTCAudioTrack class represents an audio track in WebRTC. * Audio tracks are used to transmit audio data over a WebRTC peer connection. 
@@ -17,6 +30,12 @@ class RTCAudioTrack : public RTCMediaTrack { // volume in [0-10] virtual void SetVolume(double volume) = 0; + virtual void AddAudioSink( + RTCAudioRenderer>* sink) = 0; + + virtual void RemoveAudioSink( + RTCAudioRenderer>* sink) = 0; + protected: /** * The destructor for the RTCAudioTrack class. diff --git a/include/rtc_desktop_device.h b/include/rtc_desktop_device.h index e3e4c6fab1..cd5a326b99 100644 --- a/include/rtc_desktop_device.h +++ b/include/rtc_desktop_device.h @@ -1,5 +1,5 @@ -#ifndef LIB_WEBRTC_RTC_DESKTOP_DEVICE_HXX -#define LIB_WEBRTC_RTC_DESKTOP_DEVICE_HXX +#ifndef LIB_WEBRTC_RTC_DESKTOP_CAPTURE_DEVICE_HXX +#define LIB_WEBRTC_RTC_DESKTOP_CAPTURE_DEVICE_HXX #include "rtc_types.h" diff --git a/include/rtc_peerconnection_factory.h b/include/rtc_peerconnection_factory.h index cb024672c2..be52805893 100644 --- a/include/rtc_peerconnection_factory.h +++ b/include/rtc_peerconnection_factory.h @@ -4,7 +4,7 @@ #include "rtc_audio_source.h" #include "rtc_audio_track.h" #include "rtc_types.h" -#ifdef RTC_DESKTOP_DEVICE +#ifdef RTC_DESKTOP_CAPTURE_DEVICE #include "rtc_desktop_device.h" #endif #include "rtc_media_stream.h" @@ -31,24 +31,28 @@ class RTCPeerConnectionFactory : public RefCountInterface { virtual void Delete(scoped_refptr peerconnection) = 0; +#if !defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) virtual scoped_refptr GetAudioDevice() = 0; - - virtual scoped_refptr GetVideoDevice() = 0; -#ifdef RTC_DESKTOP_DEVICE - virtual scoped_refptr GetDesktopDevice() = 0; #endif - virtual scoped_refptr CreateAudioSource( - const string audio_source_label) = 0; +#ifdef RTC_VIDEO_CAPTURE_DEVICE + virtual scoped_refptr GetVideoDevice() = 0; virtual scoped_refptr CreateVideoSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) = 0; -#ifdef RTC_DESKTOP_DEVICE +#endif + +#ifdef RTC_DESKTOP_CAPTURE_DEVICE + virtual scoped_refptr GetDesktopDevice() = 0; virtual scoped_refptr CreateDesktopSource( scoped_refptr capturer, 
const string video_source_label, scoped_refptr constraints) = 0; #endif + + virtual scoped_refptr CreateAudioSource( + const string audio_source_label) = 0; + virtual scoped_refptr CreateAudioTrack( scoped_refptr source, const string track_id) = 0; diff --git a/include/rtc_video_source.h b/include/rtc_video_source.h index cb61abbb49..3343966265 100644 --- a/include/rtc_video_source.h +++ b/include/rtc_video_source.h @@ -2,6 +2,7 @@ #define LIB_WEBRTC_RTC_VIDEO_SOURCE_HXX #include "rtc_types.h" +#include "rtc_video_frame.h" namespace libwebrtc { @@ -9,6 +10,16 @@ class RTCVideoSource : public RefCountInterface { public: ~RTCVideoSource() {} }; + +class VirtualVideoCapturer : public RefCountInterface { + public: + LIB_WEBRTC_API static scoped_refptr Create(); + + virtual void OnFrameCaptured(scoped_refptr frame) = 0; + + virtual scoped_refptr source() = 0; +}; + } // namespace libwebrtc #endif // LIB_WEBRTC_RTC_VIDEO_SOURCE_HXX diff --git a/src/audio_device_dummy.cc b/src/audio_device_dummy.cc new file mode 100644 index 0000000000..3f33b1a194 --- /dev/null +++ b/src/audio_device_dummy.cc @@ -0,0 +1,318 @@ +#include "audio_device_dummy.h" + +const int kSampleRate = 48000; +const int kChannels = 2; +const int kBytesPerSample = kChannels * sizeof(int16_t); +const int kSamplesPer10Ms = kSampleRate / 100; + +namespace libwebrtc { + +AudioDeviceDummy::AudioDeviceDummy(webrtc::TaskQueueFactory* task_queue_factory) + : data_(kSamplesPer10Ms * kChannels), + task_queue_factory_(task_queue_factory) {} + +AudioDeviceDummy::~AudioDeviceDummy() { + Terminate(); +} + +int32_t AudioDeviceDummy::ActiveAudioLayer(AudioLayer* audioLayer) const { + *audioLayer = AudioLayer::kDummyAudio; + return 0; +} + +int32_t AudioDeviceDummy::RegisterAudioCallback(webrtc::AudioTransport* transport) { + webrtc::MutexLock lock(&mutex_); + audio_transport_ = transport; + return 0; +} + +int32_t AudioDeviceDummy::Init() { + webrtc::MutexLock lock(&mutex_); + if (initialized_) + return 0; + + 
audio_queue_ = task_queue_factory_->CreateTaskQueue( + "AudioDevice", webrtc::TaskQueueFactory::Priority::NORMAL); + + audio_task_ = + webrtc::RepeatingTaskHandle::Start(audio_queue_.get(), [this]() { + webrtc::MutexLock lock(&mutex_); + + if (playing_) { + int64_t elapsed_time_ms = -1; + int64_t ntp_time_ms = -1; + size_t n_samples_out = 0; + void* data = data_.data(); + + // Request the AudioData, otherwise WebRTC will ignore the packets. + // 10ms of audio data. + audio_transport_->NeedMorePlayData( + kSamplesPer10Ms, kBytesPerSample, kChannels, kSampleRate, data, + n_samples_out, &elapsed_time_ms, &ntp_time_ms); + } + + return webrtc::TimeDelta::Millis(10); + }); + + initialized_ = true; + return 0; +} + +int32_t AudioDeviceDummy::Terminate() { + { + webrtc::MutexLock lock(&mutex_); + if (!initialized_) + return 0; + + initialized_ = false; + } + audio_queue_ = nullptr; + return 0; +} + +bool AudioDeviceDummy::Initialized() const { + webrtc::MutexLock lock(&mutex_); + return initialized_; +} + +int16_t AudioDeviceDummy::PlayoutDevices() { + return 0; +} + +int16_t AudioDeviceDummy::RecordingDevices() { + return 0; +} + +int32_t AudioDeviceDummy::PlayoutDeviceName(uint16_t index, + char name[webrtc::kAdmMaxDeviceNameSize], + char guid[webrtc::kAdmMaxGuidSize]) { + return 0; +} + +int32_t AudioDeviceDummy::RecordingDeviceName( + uint16_t index, + char name[webrtc::kAdmMaxDeviceNameSize], + char guid[webrtc::kAdmMaxGuidSize]) { + return 0; +} + +int32_t AudioDeviceDummy::SetPlayoutDevice(uint16_t index) { + return 0; +} + +int32_t AudioDeviceDummy::SetPlayoutDevice(WindowsDeviceType device) { + return 0; +} + +int32_t AudioDeviceDummy::SetRecordingDevice(uint16_t index) { + return 0; +} + +int32_t AudioDeviceDummy::SetRecordingDevice(WindowsDeviceType device) { + return 0; +} + +int32_t AudioDeviceDummy::PlayoutIsAvailable(bool* available) { + return 0; +} + +int32_t AudioDeviceDummy::InitPlayout() { + return 0; +} + +bool AudioDeviceDummy::PlayoutIsInitialized() 
const { + return false; +} + +int32_t AudioDeviceDummy::RecordingIsAvailable(bool* available) { + return 0; +} + +int32_t AudioDeviceDummy::InitRecording() { + return 0; +} + +bool AudioDeviceDummy::RecordingIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::StartPlayout() { + webrtc::MutexLock lock(&mutex_); + playing_ = true; + return 0; +} + +int32_t AudioDeviceDummy::StopPlayout() { + webrtc::MutexLock lock(&mutex_); + playing_ = false; + return 0; +} + +bool AudioDeviceDummy::Playing() const { + webrtc::MutexLock lock(&mutex_); + return playing_; +} + +int32_t AudioDeviceDummy::StartRecording() { + return 0; +} + +int32_t AudioDeviceDummy::StopRecording() { + return 0; +} + +bool AudioDeviceDummy::Recording() const { + return false; +} + +int32_t AudioDeviceDummy::InitSpeaker() { + return 0; +} + +bool AudioDeviceDummy::SpeakerIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::InitMicrophone() { + return 0; +} + +bool AudioDeviceDummy::MicrophoneIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::SpeakerVolumeIsAvailable(bool* available) { + return 0; +} + +int32_t AudioDeviceDummy::SetSpeakerVolume(uint32_t volume) { + return 0; +} + +int32_t AudioDeviceDummy::SpeakerVolume(uint32_t* volume) const { + return 0; +} + +int32_t AudioDeviceDummy::MaxSpeakerVolume(uint32_t* maxVolume) const { + return 0; +} + +int32_t AudioDeviceDummy::MinSpeakerVolume(uint32_t* minVolume) const { + return 0; +} + +int32_t AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool* available) { + return 0; +} + +int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t volume) { + return 0; +} + +int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t* volume) const { + return 0; +} + +int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t* maxVolume) const { + return 0; +} + +int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t* minVolume) const { + return 0; +} + +int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool* available) { 
+ return 0; +} + +int32_t AudioDeviceDummy::SetSpeakerMute(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::SpeakerMute(bool* enabled) const { + return 0; +} + +int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool* available) { + return 0; +} + +int32_t AudioDeviceDummy::SetMicrophoneMute(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::MicrophoneMute(bool* enabled) const { + return 0; +} + +int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool* available) const { + *available = true; + return 0; +} + +int32_t AudioDeviceDummy::SetStereoPlayout(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::StereoPlayout(bool* enabled) const { + return 0; +} + +int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool* available) const { + return 0; +} + +int32_t AudioDeviceDummy::SetStereoRecording(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::StereoRecording(bool* enabled) const { + *enabled = true; + return 0; +} + +int32_t AudioDeviceDummy::PlayoutDelay(uint16_t* delayMS) const { + return 0; +} + +bool AudioDeviceDummy::BuiltInAECIsAvailable() const { + return false; +} + +bool AudioDeviceDummy::BuiltInAGCIsAvailable() const { + return false; +} + +bool AudioDeviceDummy::BuiltInNSIsAvailable() const { + return false; +} + +int32_t AudioDeviceDummy::EnableBuiltInAEC(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::EnableBuiltInAGC(bool enable) { + return 0; +} + +int32_t AudioDeviceDummy::EnableBuiltInNS(bool enable) { + return 0; +} + +#if defined(WEBRTC_IOS) +int AudioDeviceDummy::GetPlayoutAudioParameters( + webrtc::AudioParameters* params) const { + return 0; +} + +int AudioDeviceDummy::GetRecordAudioParameters( + webrtc::AudioParameters* params) const { + return 0; +} +#endif // WEBRTC_IOS + +int32_t AudioDeviceDummy::SetAudioDeviceSink(webrtc::AudioDeviceSink* sink) const { + return 0; +} + +} // namespace libwebrtc diff --git a/src/audio_device_dummy.h b/src/audio_device_dummy.h new file mode 100644 index 
0000000000..00d344420a --- /dev/null +++ b/src/audio_device_dummy.h @@ -0,0 +1,115 @@ +#ifndef AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_ +#define AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_ + +#include + +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/include/audio_device.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/task_utils/repeating_task.h" + +namespace libwebrtc { + + class AudioDeviceDummy : public webrtc::AudioDeviceModule { + public: + AudioDeviceDummy(webrtc::TaskQueueFactory* task_queue_factory); + ~AudioDeviceDummy() override; + + int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override; + int32_t RegisterAudioCallback(webrtc::AudioTransport* transport) override; + + int32_t Init() override; + int32_t Terminate() override; + bool Initialized() const override; + + int16_t PlayoutDevices() override; + int16_t RecordingDevices() override; + int32_t PlayoutDeviceName(uint16_t index, + char name[webrtc::kAdmMaxDeviceNameSize], + char guid[webrtc::kAdmMaxGuidSize]) override; + + int32_t RecordingDeviceName(uint16_t index, + char name[webrtc::kAdmMaxDeviceNameSize], + char guid[webrtc::kAdmMaxGuidSize]) override; + + int32_t SetPlayoutDevice(uint16_t index) override; + int32_t SetPlayoutDevice(WindowsDeviceType device) override; + int32_t SetRecordingDevice(uint16_t index) override; + int32_t SetRecordingDevice(WindowsDeviceType device) override; + + int32_t PlayoutIsAvailable(bool* available) override; + int32_t InitPlayout() override; + bool PlayoutIsInitialized() const override; + int32_t RecordingIsAvailable(bool* available) override; + int32_t InitRecording() override; + bool RecordingIsInitialized() const override; + + int32_t StartPlayout() override; + int32_t StopPlayout() override; + bool Playing() const override; + int32_t StartRecording() override; + int32_t StopRecording() override; + bool Recording() const override; + + int32_t InitSpeaker() override; + bool SpeakerIsInitialized() const override; + 
int32_t InitMicrophone() override; + bool MicrophoneIsInitialized() const override; + + int32_t SpeakerVolumeIsAvailable(bool* available) override; + int32_t SetSpeakerVolume(uint32_t volume) override; + int32_t SpeakerVolume(uint32_t* volume) const override; + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override; + int32_t MinSpeakerVolume(uint32_t* minVolume) const override; + + int32_t MicrophoneVolumeIsAvailable(bool* available) override; + int32_t SetMicrophoneVolume(uint32_t volume) override; + int32_t MicrophoneVolume(uint32_t* volume) const override; + int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override; + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override; + + int32_t SpeakerMuteIsAvailable(bool* available) override; + int32_t SetSpeakerMute(bool enable) override; + int32_t SpeakerMute(bool* enabled) const override; + + int32_t MicrophoneMuteIsAvailable(bool* available) override; + int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool* enabled) const override; + + int32_t StereoPlayoutIsAvailable(bool* available) const override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool* enabled) const override; + int32_t StereoRecordingIsAvailable(bool* available) const override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool* enabled) const override; + + int32_t PlayoutDelay(uint16_t* delayMS) const override; + + bool BuiltInAECIsAvailable() const override; + bool BuiltInAGCIsAvailable() const override; + bool BuiltInNSIsAvailable() const override; + + int32_t EnableBuiltInAEC(bool enable) override; + int32_t EnableBuiltInAGC(bool enable) override; + int32_t EnableBuiltInNS(bool enable) override; + +#if defined(WEBRTC_IOS) + int GetPlayoutAudioParameters(webrtc::AudioParameters* params) const override; + int GetRecordAudioParameters(webrtc::AudioParameters* params) const override; +#endif // WEBRTC_IOS + + int32_t 
SetAudioDeviceSink(webrtc::AudioDeviceSink* sink) const override;
+
+ private:
+  mutable webrtc::Mutex mutex_;
+  std::vector<int16_t> data_;
+  std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter> audio_queue_;
+  webrtc::RepeatingTaskHandle audio_task_;
+  webrtc::AudioTransport* audio_transport_;
+  webrtc::TaskQueueFactory* task_queue_factory_;
+  bool playing_{false};
+  bool initialized_{false};
+};
+}  // namespace libwebrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
diff --git a/src/rtc_audio_frame_impl.cc b/src/rtc_audio_frame_impl.cc
new file mode 100644
index 0000000000..754e0b4b11
--- /dev/null
+++ b/src/rtc_audio_frame_impl.cc
@@ -0,0 +1,62 @@
+#include "rtc_audio_frame_impl.h"
+
+#include "audio/utility/audio_frame_operations.h"
+
+namespace libwebrtc {
+
+scoped_refptr<RTCAudioFrame> RTCAudioFrame::Create() {
+  return scoped_refptr<RTCAudioFrame>(
+      new RefCountedObject<RTCAudioFrameImpl>());
+}
+
+scoped_refptr<RTCAudioFrame> RTCAudioFrame::Create(
+    uint32_t timestamp, const int16_t* data, size_t samples_per_channel,
+    int sample_rate_hz, size_t num_channels /*= 1*/) {
+  auto audio_frame =
+      scoped_refptr<RTCAudioFrameImpl>(new RefCountedObject<RTCAudioFrameImpl>());
+  audio_frame->UpdateFrame(timestamp, data, samples_per_channel, sample_rate_hz,
+                           num_channels);
+  return audio_frame;
+}
+
+RTCAudioFrameImpl::RTCAudioFrameImpl(webrtc::AudioFrame& buffer) {
+  buffer_.CopyFrom(buffer);
+}
+
+RTCAudioFrameImpl::~RTCAudioFrameImpl() {}
+
+void RTCAudioFrameImpl::UpdateFrame(uint32_t timestamp, const int16_t* data,
+                                    size_t samples_per_channel,
+                                    int sample_rate_hz,
+                                    size_t num_channels /*= 1*/) {
+  buffer_.UpdateFrame(timestamp, data, samples_per_channel, sample_rate_hz,
+                      webrtc::AudioFrame::kNormalSpeech,
+                      webrtc::AudioFrame::kVadUnknown, num_channels);
+}
+
+void RTCAudioFrameImpl::CopyFrom(const scoped_refptr<RTCAudioFrame> src) {
+  RTCAudioFrameImpl* frame = static_cast<RTCAudioFrameImpl*>(src.get());
+  buffer_.CopyFrom(frame->buffer_);
+}
+
+void RTCAudioFrameImpl::Add(const scoped_refptr<RTCAudioFrame> frame_to_add) {
+  RTCAudioFrameImpl* frame =
+      static_cast<RTCAudioFrameImpl*>(frame_to_add.get());
+  webrtc::AudioFrameOperations::Add(frame->buffer_, &buffer_);
+}
+
+void RTCAudioFrameImpl::Mute() { buffer_.Mute(); }
+
+const int16_t* RTCAudioFrameImpl::data() { return buffer_.data(); }
+
+size_t RTCAudioFrameImpl::samples_per_channel() {
+  return buffer_.samples_per_channel_;
+}
+
+int RTCAudioFrameImpl::sample_rate_hz() { return buffer_.sample_rate_hz_; }
+
+size_t RTCAudioFrameImpl::num_channels() { return buffer_.num_channels_; }
+
+uint32_t RTCAudioFrameImpl::timestamp() { return buffer_.timestamp_; }
+
+}  // namespace libwebrtc
diff --git a/src/rtc_audio_frame_impl.h b/src/rtc_audio_frame_impl.h
new file mode 100644
index 0000000000..5fe9f1b18b
--- /dev/null
+++ b/src/rtc_audio_frame_impl.h
@@ -0,0 +1,37 @@
+#include "api/audio/audio_frame.h"
+#include "include/rtc_audio_frame.h"
+
+namespace libwebrtc {
+
+class RTCAudioFrameImpl : public RTCAudioFrame {
+ public:
+  RTCAudioFrameImpl() {}
+  RTCAudioFrameImpl(webrtc::AudioFrame& buffer);
+  ~RTCAudioFrameImpl();
+
+ public:
+  virtual void UpdateFrame(uint32_t timestamp, const int16_t* data,
+                           size_t samples_per_channel, int sample_rate_hz,
+                           size_t num_channels = 1) override;
+
+  virtual void CopyFrom(const scoped_refptr<RTCAudioFrame> src) override;
+
+  virtual void Add(const scoped_refptr<RTCAudioFrame> frame_to_add) override;
+
+  virtual void Mute() override;
+
+  virtual const int16_t* data() override;
+
+  virtual size_t samples_per_channel() override;
+
+  virtual int sample_rate_hz() override;
+
+  virtual size_t num_channels() override;
+
+  virtual uint32_t timestamp() override;
+
+ private:
+  webrtc::AudioFrame buffer_;
+};
+
+}  // namespace libwebrtc
diff --git a/src/rtc_audio_source_impl.cc b/src/rtc_audio_source_impl.cc
index a9e2526c80..72bfcd4ca7 100644
--- a/src/rtc_audio_source_impl.cc
+++ b/src/rtc_audio_source_impl.cc
@@ -1,5 +1,7 @@
 #include "rtc_audio_source_impl.h"
 
+#include "pc/local_audio_source.h"
+
 namespace libwebrtc {
 
 RTCAudioSourceImpl::RTCAudioSourceImpl(
@@ -12,4 +14,71 @@ RTCAudioSourceImpl::~RTCAudioSourceImpl() {
   RTC_LOG(LS_INFO) << __FUNCTION__ << ": dtor ";
 }
 
+class AdaptedVirtualAudioCapturer : public webrtc::LocalAudioSource {
+ public:
+  AdaptedVirtualAudioCapturer() {}
+  ~AdaptedVirtualAudioCapturer() {}
+
+  void AddSink(webrtc::AudioTrackSinkInterface* sink) override {
+    webrtc::MutexLock lock(&mutex_);
+    sinks_.push_back(sink);
+  }
+
+  void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override {
+    webrtc::MutexLock lock(&mutex_);
+    sinks_.erase(std::remove(sinks_.begin(), sinks_.end(), sink), sinks_.end());
+  }
+
+  void OnFrame(scoped_refptr<RTCAudioFrame> frame) {
+    webrtc::MutexLock lock(&mutex_);
+    for (auto sink : sinks_) {
+      sink->OnData((const void*)frame->data(), 16, frame->sample_rate_hz(),
+                   frame->num_channels(), frame->samples_per_channel());
+    }
+  }
+
+  void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
+              size_t number_of_channels, size_t number_of_frames) {
+    webrtc::MutexLock lock(&mutex_);
+    for (auto sink : sinks_) {
+      sink->OnData(audio_data, bits_per_sample, sample_rate, number_of_channels,
+                   number_of_frames);
+    }
+  }
+
+ private:
+  mutable webrtc::Mutex mutex_;
+  std::vector<webrtc::AudioTrackSinkInterface*> sinks_;
+};
+
+class VirtualAudioCapturerImpl : public VirtualAudioCapturer {
+ public:
+  VirtualAudioCapturerImpl() {}
+  virtual ~VirtualAudioCapturerImpl() {}
+
+  virtual void OnFrame(scoped_refptr<RTCAudioFrame> frame) override {
+    adapted_source_->OnFrame(frame);
+  }
+
+  virtual void OnData(const void* audio_data, int bits_per_sample,
+                      int sample_rate, size_t number_of_channels,
+                      size_t number_of_frames) override {
+    adapted_source_->OnData(audio_data, bits_per_sample, sample_rate,
+                            number_of_channels, number_of_frames);
+  }
+
+  virtual scoped_refptr<RTCAudioSource> source() override {
+    return rtc_audio_source_;
+  }
+
+ private:
+  scoped_refptr<RTCAudioSourceImpl> rtc_audio_source_;
+  rtc::scoped_refptr<AdaptedVirtualAudioCapturer> adapted_source_;
+};
+
+scoped_refptr<VirtualAudioCapturer> VirtualAudioCapturer::Create() {
+  return scoped_refptr<VirtualAudioCapturer>(
+      new RefCountedObject<VirtualAudioCapturerImpl>());
+}
+
 }  // namespace libwebrtc
diff --git a/src/rtc_audio_track_impl.cc b/src/rtc_audio_track_impl.cc
index 802a3e272a..0024b2a2b2 100644
--- a/src/rtc_audio_track_impl.cc +++ b/src/rtc_audio_track_impl.cc @@ -18,4 +18,32 @@ void AudioTrackImpl::SetVolume(double volume) { rtc_track_->GetSource()->SetVolume(volume); } +void AudioTrackImpl::AddAudioSink( + RTCAudioRenderer>* sink) { + webrtc::MutexLock lock(&mutex_); + renderers_.push_back(sink); +} + +void AudioTrackImpl::RemoveAudioSink( + RTCAudioRenderer>* sink) { + webrtc::MutexLock lock(&mutex_); + renderers_.erase( + std::remove_if( + renderers_.begin(), renderers_.end(), + [sink](const RTCAudioRenderer>* sink_) { + return sink_ == sink; + }), + renderers_.end()); +} + +void AudioTrackImpl::OnData(const void* audio_data, int bits_per_sample, + int sample_rate, size_t number_of_channels, + size_t number_of_frames) { + webrtc::MutexLock lock(&mutex_); + for (auto sink : renderers_) { + sink->OnData(audio_data, bits_per_sample, sample_rate, + number_of_channels, number_of_frames); + } +} + } // namespace libwebrtc diff --git a/src/rtc_audio_track_impl.h b/src/rtc_audio_track_impl.h index 1fd120ea9f..ce00c2b350 100644 --- a/src/rtc_audio_track_impl.h +++ b/src/rtc_audio_track_impl.h @@ -14,7 +14,7 @@ namespace libwebrtc { -class AudioTrackImpl : public RTCAudioTrack { +class AudioTrackImpl : public RTCAudioTrack, public webrtc::AudioTrackSinkInterface { public: AudioTrackImpl(rtc::scoped_refptr audio_track); @@ -22,6 +22,18 @@ class AudioTrackImpl : public RTCAudioTrack { virtual void SetVolume(double volume) override; + virtual void AddAudioSink( + RTCAudioRenderer>* sink) override; + + virtual void RemoveAudioSink( + RTCAudioRenderer>* sink) override; + + virtual void OnData(const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames) override; + virtual const string kind() const override { return kind_; } virtual const string id() const override { return id_; } @@ -43,6 +55,8 @@ class AudioTrackImpl : public RTCAudioTrack { private: rtc::scoped_refptr rtc_track_; string id_, kind_; + 
mutable webrtc::Mutex mutex_; + std::list>*> renderers_; }; } // namespace libwebrtc diff --git a/src/rtc_peerconnection_factory_impl.cc b/src/rtc_peerconnection_factory_impl.cc index 7c7fe2d452..d3a84809da 100644 --- a/src/rtc_peerconnection_factory_impl.cc +++ b/src/rtc_peerconnection_factory_impl.cc @@ -12,7 +12,11 @@ #include "rtc_mediaconstraints_impl.h" #include "rtc_peerconnection_impl.h" #include "rtc_rtp_capabilities_impl.h" + +#ifdef RTC_VIDEO_CAPTURE_DEVICE #include "rtc_video_device_impl.h" +#endif + #include "rtc_video_source_impl.h" #if defined(USE_INTEL_MEDIA_SDK) #include "src/win/mediacapabilities.h" @@ -23,6 +27,9 @@ #include "engine/sdk/objc/Framework/Classes/videotoolboxvideocodecfactory.h" #endif #include +#if defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) +#include "src/audio_device_dummy.h" +#endif namespace libwebrtc { @@ -87,8 +94,12 @@ bool RTCPeerConnectionFactoryImpl::Initialize() { bool RTCPeerConnectionFactoryImpl::Terminate() { worker_thread_->BlockingCall([&] { +#if !defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) audio_device_impl_ = nullptr; +#endif + #ifdef RTC_VIDEO_CAPTURE_DEVICE video_device_impl_ = nullptr; + #endif }); rtc_peerconnection_factory_ = NULL; if (audio_device_module_) { @@ -100,9 +111,15 @@ bool RTCPeerConnectionFactoryImpl::Terminate() { void RTCPeerConnectionFactoryImpl::CreateAudioDeviceModule_w() { if (!audio_device_module_) +#if defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) + audio_device_module_ = + rtc::make_ref_counted( + task_queue_factory_.get()); +#else audio_device_module_ = webrtc::AudioDeviceModule::Create( webrtc::AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory_.get()); +#endif } void RTCPeerConnectionFactoryImpl::DestroyAudioDeviceModule_w() { @@ -131,6 +148,7 @@ void RTCPeerConnectionFactoryImpl::Delete( peerconnections_.end()); } +#if !defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) scoped_refptr RTCPeerConnectionFactoryImpl::GetAudioDevice() { if (!audio_device_module_) { 
worker_thread_->BlockingCall([this] { CreateAudioDeviceModule_w(); }); @@ -143,7 +161,9 @@ scoped_refptr RTCPeerConnectionFactoryImpl::GetAudioDevice() { return audio_device_impl_; } +#endif +#ifdef RTC_VIDEO_CAPTURE_DEVICE scoped_refptr RTCPeerConnectionFactoryImpl::GetVideoDevice() { if (!video_device_impl_) video_device_impl_ = scoped_refptr( @@ -151,6 +171,8 @@ scoped_refptr RTCPeerConnectionFactoryImpl::GetVideoDevice() { return video_device_impl_; } +#endif + scoped_refptr RTCPeerConnectionFactoryImpl::CreateAudioSource( const string audio_source_label) { @@ -162,7 +184,7 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateAudioSource( return source; } -#ifdef RTC_DESKTOP_DEVICE +#ifdef RTC_DESKTOP_CAPTURE_DEVICE scoped_refptr RTCPeerConnectionFactoryImpl::GetDesktopDevice() { if (!desktop_device_impl_) { @@ -173,6 +195,7 @@ RTCPeerConnectionFactoryImpl::GetDesktopDevice() { } #endif +#ifdef RTC_VIDEO_CAPTURE_DEVICE scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) { @@ -188,7 +211,10 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource( return CreateVideoSource_s( capturer, to_std_string(video_source_label).c_str(), constraints); } +#endif + +#ifdef RTC_VIDEO_CAPTURE_DEVICE scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource_s( scoped_refptr capturer, const char* video_source_label, scoped_refptr constraints) { @@ -204,8 +230,9 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource_s( new RefCountedObject(rtc_source_track)); return source; } +#endif -#ifdef RTC_DESKTOP_DEVICE +#ifdef RTC_DESKTOP_CAPTURE_DEVICE scoped_refptr RTCPeerConnectionFactoryImpl::CreateDesktopSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) { diff --git a/src/rtc_peerconnection_factory_impl.h b/src/rtc_peerconnection_factory_impl.h index f87bba67ff..b66396234b 100644 --- a/src/rtc_peerconnection_factory_impl.h +++ 
b/src/rtc_peerconnection_factory_impl.h @@ -10,9 +10,12 @@ #include "rtc_base/thread.h" #include "rtc_peerconnection.h" #include "rtc_peerconnection_factory.h" + +#ifdef RTC_VIDEO_CAPTURE_DEVICE #include "rtc_video_device_impl.h" +#endif -#ifdef RTC_DESKTOP_DEVICE +#ifdef RTC_DESKTOP_CAPTURE_DEVICE #include "rtc_desktop_capturer_impl.h" #include "rtc_desktop_device_impl.h" #include "src/internal/desktop_capturer.h" @@ -36,28 +39,36 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { void Delete(scoped_refptr peerconnection) override; +#if !defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) scoped_refptr GetAudioDevice() override; +#endif +#ifdef RTC_VIDEO_CAPTURE_DEVICE scoped_refptr GetVideoDevice() override; +#endif virtual scoped_refptr CreateAudioSource( const string audio_source_label) override; + virtual scoped_refptr CreateAudioTrack( + scoped_refptr source, const string track_id) override; + + virtual scoped_refptr CreateVideoTrack( + scoped_refptr source, const string track_id) override; + +#ifdef RTC_VIDEO_CAPTURE_DEVICE virtual scoped_refptr CreateVideoSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) override; -#ifdef RTC_DESKTOP_DEVICE +#endif + +#ifdef RTC_DESKTOP_CAPTURE_DEVICE virtual scoped_refptr GetDesktopDevice() override; virtual scoped_refptr CreateDesktopSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) override; #endif - virtual scoped_refptr CreateAudioTrack( - scoped_refptr source, const string track_id) override; - - virtual scoped_refptr CreateVideoTrack( - scoped_refptr source, const string track_id) override; virtual scoped_refptr CreateStream( const string stream_id) override; @@ -83,7 +94,7 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { scoped_refptr CreateVideoSource_s( scoped_refptr capturer, const char* video_source_label, scoped_refptr constraints); -#ifdef RTC_DESKTOP_DEVICE +#ifdef 
RTC_DESKTOP_CAPTURE_DEVICE scoped_refptr CreateDesktopSource_d( scoped_refptr capturer, const char* video_source_label, @@ -95,10 +106,15 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { std::unique_ptr network_thread_; rtc::scoped_refptr rtc_peerconnection_factory_; + rtc::scoped_refptr audio_device_module_; +#if !defined(LIB_WEBRTC_USE_DUMMY_AUDIO_DEVICE) scoped_refptr audio_device_impl_; +#endif +#ifdef RTC_VIDEO_CAPTURE_DEVICE scoped_refptr video_device_impl_; -#ifdef RTC_DESKTOP_DEVICE +#endif +#ifdef RTC_DESKTOP_CAPTURE_DEVICE scoped_refptr desktop_device_impl_; #endif std::list> peerconnections_; diff --git a/src/rtc_video_frame_impl.h b/src/rtc_video_frame_impl.h index 3af6845abb..0fbe094ee1 100644 --- a/src/rtc_video_frame_impl.h +++ b/src/rtc_video_frame_impl.h @@ -51,6 +51,8 @@ class VideoFrameBufferImpl : public RTCVideoFrame { void set_rotation(webrtc::VideoRotation rotation) { rotation_ = rotation; } + webrtc::VideoRotation rtc_rotation() { return rotation_; } + private: rtc::scoped_refptr buffer_; int64_t timestamp_us_ = 0; diff --git a/src/rtc_video_source_impl.cc b/src/rtc_video_source_impl.cc index ef4ec9c5a6..df3c438f2b 100644 --- a/src/rtc_video_source_impl.cc +++ b/src/rtc_video_source_impl.cc @@ -6,6 +6,58 @@ namespace libwebrtc { +class AdaptedVirtualVideoCapturer : public rtc::AdaptedVideoTrackSource { + public: + AdaptedVirtualVideoCapturer() {} + ~AdaptedVirtualVideoCapturer() override {} + + bool is_screencast() const override { return false; } + + absl::optional needs_denoising() const override { return false; } + + SourceState state() const override { return kLive; } + + bool remote() const override { return false; } + + void OnFrameCaptured(scoped_refptr frame) { + VideoFrameBufferImpl* impl = + static_cast(frame.get()); + auto newFrame = webrtc::VideoFrame::Builder() + .set_video_frame_buffer(impl->buffer()) + .set_rotation(impl->rtc_rotation()) + .set_timestamp_us(impl->timestamp_us()) + .build(); + 
OnFrame(newFrame); + } +}; + +class VirtualVideoCapturerImpl : public VirtualVideoCapturer { + public: + VirtualVideoCapturerImpl() { + adapted_source_ = new rtc::RefCountedObject(); + rtc_source_ = scoped_refptr( + new RefCountedObject(adapted_source_)); + } + virtual ~VirtualVideoCapturerImpl() {} + + virtual scoped_refptr source() override { + return rtc_source_; + } + + virtual void OnFrameCaptured(scoped_refptr frame) override { + adapted_source_->OnFrameCaptured(frame); + } + + private: + rtc::scoped_refptr adapted_source_; + scoped_refptr rtc_source_; +}; + +scoped_refptr VirtualVideoCapturer::Create() { + return scoped_refptr( + new RefCountedObject()); +} + RTCVideoSourceImpl::RTCVideoSourceImpl( rtc::scoped_refptr rtc_source_track) : rtc_source_track_(rtc_source_track) { diff --git a/src/rtc_video_source_impl.h b/src/rtc_video_source_impl.h index 568355701c..9433379d3f 100644 --- a/src/rtc_video_source_impl.h +++ b/src/rtc_video_source_impl.h @@ -2,6 +2,7 @@ #define LIB_WEBRTC_VIDEO_SOURCE_IMPL_HXX #include "api/media_stream_interface.h" +#include "media/base/adapted_video_track_source.h" #include "media/base/video_broadcaster.h" #include "media/base/video_source_base.h" #include "rtc_peerconnection_factory_impl.h"