Update to 8.7.0
xaxtix committed Apr 16, 2022
1 parent 0abe454 commit 1e50785
Showing 306 changed files with 22,968 additions and 3,966 deletions.
4 changes: 2 additions & 2 deletions TMessagesProj/build.gradle
@@ -300,7 +300,7 @@ android {
}
}

- defaultConfig.versionCode = 2600
+ defaultConfig.versionCode = 2622

applicationVariants.all { variant ->
variant.outputs.all { output ->
@@ -319,7 +319,7 @@ android {
defaultConfig {
minSdkVersion 16
targetSdkVersion 30
- versionName "8.6.2"
+ versionName "8.7.0"

vectorDrawables.generatedDensities = ['mdpi', 'hdpi', 'xhdpi', 'xxhdpi']

4 changes: 4 additions & 0 deletions TMessagesProj/jni/voip/CMakeLists.txt
@@ -457,6 +457,10 @@ add_library(tgcalls STATIC
voip/tgcalls/v2/NativeNetworkingImpl.cpp
voip/tgcalls/v2/Signaling.cpp
voip/tgcalls/v2/SignalingEncryption.cpp
+ voip/tgcalls/v2/ContentNegotiation.cpp
+ voip/tgcalls/v2/InstanceV2ReferenceImpl.cpp
+ voip/tgcalls/v2_4_0_0/InstanceV2_4_0_0Impl.cpp
+ voip/tgcalls/v2_4_0_0/Signaling_4_0_0.cpp
voip/webrtc/rtc_base/bitstream_reader.cc
voip/webrtc/rtc_base/async_invoker.cc
voip/webrtc/rtc_base/system_time.cc
@@ -24,12 +24,14 @@
#include "libtgvoip/os/android/JNIUtilities.h"
#include "tgcalls/VideoCaptureInterface.h"
#include "tgcalls/v2/InstanceV2Impl.h"
+ #include "tgcalls/v2_4_0_0/InstanceV2_4_0_0Impl.h"

using namespace tgcalls;

const auto RegisterTag = Register<InstanceImpl>();
const auto RegisterTagLegacy = Register<InstanceImplLegacy>();
- const auto RegisterTagV2 = Register<InstanceV2Impl>();
+ const auto RegisterTagV2_4_0_0 = Register<InstanceV2_4_0_0Impl>();
+ const auto RegisterTagV2_4_0_1 = Register<InstanceV2Impl>();

jclass TrafficStatsClass;
jclass FingerprintClass;
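
The two new registrations keep protocol compatibility: `InstanceV2_4_0_0Impl` registers a snapshot of the v2 call implementation under the older 4.0.0 signaling version, while 4.0.1 and newer stay on the current `InstanceV2Impl`, so calls can negotiate whichever both sides support. `Register<T>()` is the project's own mechanism; a minimal sketch of how a version-keyed factory like this can be structured (all names below are illustrative, not the tgcalls API):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct Instance { virtual ~Instance() = default; };

    // Illustrative registry: each implementation declares the protocol
    // versions it can speak, and call setup instantiates whichever entry
    // matches the version negotiated with the remote peer.
    class VersionRegistry {
    public:
        using Factory = std::function<std::unique_ptr<Instance>()>;

        static VersionRegistry &shared() {
            static VersionRegistry registry;
            return registry;
        }

        void add(const std::vector<std::string> &versions, Factory factory) {
            for (const auto &version : versions) {
                _factories[version] = factory;
            }
        }

        std::unique_ptr<Instance> make(const std::string &version) const {
            auto it = _factories.find(version);
            return it != _factories.end() ? it->second() : nullptr;
        }

    private:
        std::map<std::string, Factory> _factories;
    };
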
2 changes: 1 addition & 1 deletion TMessagesProj/jni/voip/tgcalls/MediaManager.cpp
@@ -408,7 +408,7 @@ _platformContext(platformContext) {
rtc::scoped_refptr<webrtc::AudioDeviceModule> MediaManager::createAudioDeviceModule() {
const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) {
#ifdef WEBRTC_IOS
- return rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, false);
+ return rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, false, 1);
#else
return webrtc::AudioDeviceModule::Create(
layer,
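
The extra trailing argument to the iOS audio device module appears to be a channel count (1, i.e. mono, for one-to-one calls; the group-call variant further down passes 2 when recording is disabled). On other platforms the module comes from `webrtc::AudioDeviceModule::Create`, which takes an audio-layer enum and a task-queue factory. A hedged sketch of that creation pattern, with an illustrative fallback layer rather than the project's exact logic:

    #include "api/task_queue/task_queue_factory.h"
    #include "modules/audio_device/include/audio_device.h"

    // Try the platform-default audio layer first and fall back to a
    // specific backend if that fails; Create returns null on failure.
    rtc::scoped_refptr<webrtc::AudioDeviceModule> createAdm(
            webrtc::TaskQueueFactory *taskQueueFactory) {
        const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) {
            return webrtc::AudioDeviceModule::Create(layer, taskQueueFactory);
        };
        if (auto adm = create(webrtc::AudioDeviceModule::kPlatformDefaultAudio)) {
            return adm;
        }
        return create(webrtc::AudioDeviceModule::kDummyAudio);
    }
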
1 change: 1 addition & 0 deletions TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp
@@ -104,6 +104,7 @@ NetworkManager::~NetworkManager() {
_portAllocator.reset();
_networkManager.reset();
_socketFactory.reset();
+ _networkMonitorFactory.reset();
}

void NetworkManager::start() {
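
The added reset makes teardown order explicit: the port allocator, network manager, and socket factory that may reference the monitor factory are destroyed first, and the factory last. A self-contained sketch of the same idea with placeholder types (not the tgcalls classes):

    #include <memory>

    struct MonitorFactory {};

    // A consumer holding a non-owning pointer into the factory.
    struct Consumer {
        explicit Consumer(MonitorFactory *factory) : factory(factory) {}
        MonitorFactory *factory;
    };

    class Owner {
    public:
        Owner()
            : _monitorFactory(std::make_unique<MonitorFactory>()),
              _consumer(std::make_unique<Consumer>(_monitorFactory.get())) {}

        ~Owner() {
            // Destroy consumers before the object they point into; the
            // explicit resets document the required order instead of
            // relying on implicit (reverse-declaration) member teardown.
            _consumer.reset();
            _monitorFactory.reset();
        }

    private:
        std::unique_ptr<MonitorFactory> _monitorFactory;
        std::unique_ptr<Consumer> _consumer;
    };
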
@@ -104,6 +104,9 @@ _avIoContext(std::move(fileData)) {

_frame = av_frame_alloc();

+ #if LIBAVFORMAT_VERSION_MAJOR >= 59
+ const
+ #endif
AVInputFormat *inputFormat = av_find_input_format(container.c_str());
if (!inputFormat) {
_didReadToEnd = true;
@@ -144,7 +147,7 @@ _avIoContext(std::move(fileData)) {

_streamId = i;

- _durationInMilliseconds = (int)((inStream->duration + inStream->first_dts) * 1000 / 48000);
+ _durationInMilliseconds = (int)(inStream->duration * av_q2d(inStream->time_base) * 1000);

if (inStream->metadata) {
AVDictionaryEntry *entry = av_dict_get(inStream->metadata, "TG_META", nullptr, 0);
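
The old duration formula hard-coded a 48 kHz time base and folded `first_dts` into the duration; the replacement converts the stream's actual `time_base` to seconds with `av_q2d`, which stays correct for streams not stamped at 48 kHz. The same computation as a standalone helper against FFmpeg's public API:

    extern "C" {
    #include <libavformat/avformat.h>
    }
    #include <cstdint>

    // AVStream::duration counts in time_base units; av_q2d converts the
    // rational time base to seconds per unit, so duration * q2d * 1000 is
    // milliseconds regardless of the sample rate. Callers should treat
    // AV_NOPTS_VALUE as "duration unknown".
    static int64_t streamDurationMs(const AVStream *stream) {
        return (int64_t)(stream->duration * av_q2d(stream->time_base) * 1000.0);
    }
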
@@ -32,7 +32,7 @@ class AudioStreamingPartPersistentDecoderState {
AudioStreamingPartPersistentDecoderState(AVCodecParameters const *codecParameters, AVRational timeBase) :
_codecParameters(codecParameters),
_timeBase(timeBase) {
- AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
+ const AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
if (codec) {
_codecContext = avcodec_alloc_context3(codec);
int ret = avcodec_parameters_to_context(_codecContext, codecParameters);
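
Both `const` additions track FFmpeg 5.0 (libavformat major 59), where the lookup functions return pointers to const. The `AVCodec` pointer can be const unconditionally, since pointer-to-const also accepts the older non-const return value, while the `AVInputFormat` pointer above is version-guarded, most likely because pre-5.0 `avformat_open_input` takes a non-const `AVInputFormat *`. A sketch of the combined pattern (the "ogg"/Opus choice is illustrative):

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    }

    static int openOggInput(AVFormatContext **ctx, const char *path) {
        // const only where every supported FFmpeg major accepts it.
    #if LIBAVFORMAT_VERSION_MAJOR >= 59
        const
    #endif
        AVInputFormat *inputFormat = av_find_input_format("ogg");

        const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_OPUS);
        (void)codec; // decoder lookup shown only for the const convention

        return avformat_open_input(ctx, path, inputFormat, nullptr);
    }
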
10 changes: 8 additions & 2 deletions TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp
@@ -1514,7 +1514,13 @@ class GroupInstanceCustomInternal : public sigslot::has_slots<>, public std::ena
std::unique_ptr<AudioCapturePostProcessor> audioProcessor = nullptr;
#endif
if (_videoContentType != VideoContentType::Screencast) {
- PlatformInterface::SharedInstance()->configurePlatformAudio();
+ int numChannels = 1;
+ #ifdef WEBRTC_IOS
+ if (_disableAudioInput) {
+ numChannels = 2;
+ }
+ #endif
+ PlatformInterface::SharedInstance()->configurePlatformAudio(numChannels);

#if USE_RNNOISE
audioProcessor = std::make_unique<AudioCapturePostProcessor>([weak, threads = _threads](GroupLevelValue const &level) {
@@ -3297,7 +3303,7 @@ class GroupInstanceCustomInternal : public sigslot::has_slots<>, public std::ena
#endif
const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) {
#ifdef WEBRTC_IOS
- return rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, disableRecording);
+ return rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, disableRecording, disableRecording ? 2 : 1);
#else
return webrtc::AudioDeviceModule::Create(
layer,
24 changes: 24 additions & 0 deletions TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp
@@ -423,6 +423,27 @@ class StreamingMediaContextPrivate : public std::enable_shared_from_this<Streami

if (numChannels == 1) {
frameOut.UpdateFrame(0, audioChannels[0].pcmData.data(), audioChannels[0].pcmData.size(), 48000, webrtc::AudioFrame::SpeechType::kNormalSpeech, webrtc::AudioFrame::VADActivity::kVadActive, numChannels);
+ } else if (numChannels == _audioRingBufferNumChannels) {
+ bool skipFrame = false;
+ int numSamples = (int)audioChannels[0].pcmData.size();
+ for (int i = 1; i < numChannels; i++) {
+ if (audioChannels[i].pcmData.size() != numSamples) {
+ skipFrame = true;
+ break;
+ }
+ }
+ if (skipFrame) {
+ break;
+ }
+ if (_stereoShuffleBuffer.size() < numChannels * numSamples) {
+ _stereoShuffleBuffer.resize(numChannels * numSamples);
+ }
+ for (int i = 0; i < numSamples; i++) {
+ for (int j = 0; j < numChannels; j++) {
+ _stereoShuffleBuffer[i * numChannels + j] = audioChannels[j].pcmData[i];
+ }
+ }
+ frameOut.UpdateFrame(0, _stereoShuffleBuffer.data(), numSamples, 48000, webrtc::AudioFrame::SpeechType::kNormalSpeech, webrtc::AudioFrame::VADActivity::kVadActive, numChannels);
} else {
bool skipFrame = false;
int numSamples = (int)audioChannels[0].pcmData.size();
@@ -483,6 +504,9 @@ class StreamingMediaContextPrivate : public std::enable_shared_from_this<Streami
RTC_LOG(LS_INFO) << "render: discarding video frames at the end of a segment (displayed " << segment->video[0]->_displayedFrames << " frames)";
}
}
+ if (!segment->unified.empty() && segment->unified[0]->videoPart->hasRemainingFrames()) {
+ RTC_LOG(LS_INFO) << "render: discarding video frames at the end of a segment (displayed " << segment->unified[0]->_displayedFrames << " frames)";
+ }

_availableSegments.erase(_availableSegments.begin());
}
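
The first added block handles segments whose channel count matches the ring buffer's (`_audioRingBufferNumChannels`): per-channel, planar PCM is interleaved sample-by-sample into `_stereoShuffleBuffer`, since `webrtc::AudioFrame::UpdateFrame` expects interleaved samples. The second block extends the end-of-segment logging to unified video parts. The interleaving step as a standalone sketch (16-bit PCM assumed):

    #include <cstdint>
    #include <vector>

    // Interleave N planar channels (ch0: s0 s1 s2 ..., ch1: s0 s1 s2 ...)
    // into a single buffer (s0c0 s0c1 s1c0 s1c1 ...). All channels must
    // carry the same number of samples, mirroring the skipFrame check in
    // the diff above.
    static std::vector<int16_t> interleave(
            const std::vector<std::vector<int16_t>> &channels) {
        const size_t numChannels = channels.size();
        const size_t numSamples = channels.empty() ? 0 : channels[0].size();
        std::vector<int16_t> out(numChannels * numSamples);
        for (size_t i = 0; i < numSamples; i++) {
            for (size_t j = 0; j < numChannels; j++) {
                out[i * numChannels + j] = channels[j][i];
            }
        }
        return out;
    }
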
65 changes: 41 additions & 24 deletions TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp
@@ -87,20 +87,16 @@ class Frame {
return _frame;
}

- double pts(AVStream *stream) {
+ double pts(AVStream *stream, double &firstFramePts) {
int64_t framePts = _frame->pts;
double spf = av_q2d(stream->time_base);
- return ((double)framePts) * spf;
}

- double duration(AVStream *stream) {
- int64_t frameDuration = _frame->pkt_duration;
- double spf = av_q2d(stream->time_base);
- if (frameDuration != 0) {
- return ((double)frameDuration) * spf;
- } else {
- return spf;
+ double value = ((double)framePts) * spf;

+ if (firstFramePts < 0.0) {
+ firstFramePts = value;
}

+ return value - firstFramePts;
}

private:
@@ -280,6 +276,9 @@ class VideoStreamingPartInternal {

int ret = 0;

+ #if LIBAVFORMAT_VERSION_MAJOR >= 59
+ const
+ #endif
AVInputFormat *inputFormat = av_find_input_format(container.c_str());
if (!inputFormat) {
_didReadToEnd = true;
@@ -323,7 +322,7 @@ class VideoStreamingPartInternal {
}

if (videoCodecParameters && videoStream) {
- AVCodec *codec = avcodec_find_decoder(videoCodecParameters->codec_id);
+ const AVCodec *codec = avcodec_find_decoder(videoCodecParameters->codec_id);
if (codec) {
_codecContext = avcodec_alloc_context3(codec);
ret = avcodec_parameters_to_context(_codecContext, videoCodecParameters);
@@ -410,7 +409,7 @@ class VideoStreamingPartInternal {
.set_rotation(_rotation)
.build();

- return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream), _frame.duration(_videoStream), _frameIndex);
+ return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream, _firstFramePts), _frameIndex);
} else {
return absl::nullopt;
}
@@ -490,6 +489,7 @@ class VideoStreamingPartInternal {
std::vector<VideoStreamingPartFrame> _finalFrames;

int _frameIndex = 0;
+ double _firstFramePts = -1.0;
bool _didReadToEnd = false;
};

@@ -566,25 +566,33 @@ class VideoStreamingPartState {

absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(double timestamp) {
while (true) {
- if (!_currentFrame) {
+ while (_availableFrames.size() >= 2) {
+ if (timestamp >= _availableFrames[1].pts) {
+ _availableFrames.erase(_availableFrames.begin());
+ } else {
+ break;
+ }
+ }
+
+ if (_availableFrames.size() < 2) {
if (!_parsedVideoParts.empty()) {
auto result = _parsedVideoParts[0]->getNextFrame();
if (result) {
- _currentFrame = result;
- _relativeTimestamp += result->duration;
+ _availableFrames.push_back(result.value());
} else {
_parsedVideoParts.erase(_parsedVideoParts.begin());
continue;
}
+ continue;
}
}

- if (_currentFrame) {
- if (timestamp <= _relativeTimestamp) {
- return _currentFrame;
- } else {
- _currentFrame = absl::nullopt;
+ if (!_availableFrames.empty()) {
+ for (size_t i = 1; i < _availableFrames.size(); i++) {
+ if (timestamp < _availableFrames[i].pts) {
+ return _availableFrames[i - 1];
}
}
+ return _availableFrames[_availableFrames.size() - 1];
} else {
return absl::nullopt;
}
@@ -598,6 +606,10 @@ class VideoStreamingPartState {
return absl::nullopt;
}
}

+ bool hasRemainingFrames() const {
+ return !_parsedVideoParts.empty();
+ }

int getAudioRemainingMilliseconds() {
while (!_parsedAudioParts.empty()) {
@@ -626,8 +638,7 @@ class VideoStreamingPartState {
private:
absl::optional<VideoStreamInfo> _videoStreamInfo;
std::vector<std::unique_ptr<VideoStreamingPartInternal>> _parsedVideoParts;
- absl::optional<VideoStreamingPartFrame> _currentFrame;
- double _relativeTimestamp = 0.0;
+ std::vector<VideoStreamingPartFrame> _availableFrames;

std::vector<std::unique_ptr<AudioStreamingPart>> _parsedAudioParts;
};
@@ -656,6 +667,12 @@ absl::optional<std::string> VideoStreamingPart::getActiveEndpointId() const {
: absl::nullopt;
}

+ bool VideoStreamingPart::hasRemainingFrames() const {
+ return _state
+ ? _state->hasRemainingFrames()
+ : false;
+ }

int VideoStreamingPart::getAudioRemainingMilliseconds() {
return _state
? _state->getAudioRemainingMilliseconds()
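
The net effect in this file: per-frame durations (previously taken from `pkt_duration`, which can be zero or inaccurate and made `_relativeTimestamp` drift) are gone. Each decoded frame now carries a pts normalized so the part's first frame sits at 0.0, the state keeps a small queue of decoded frames, and lookup returns the frame whose pts bracket contains the requested timestamp. The selection rule reduced to a sketch, with `Frame` standing in for `VideoStreamingPartFrame`:

    #include <vector>

    struct Frame {
        double pts = 0.0; // seconds, relative to the first frame of the part
    };

    // Return the frame covering `timestamp`: the last frame whose pts does
    // not exceed it. The real code also erases frames that can no longer
    // be returned and decodes ahead to keep at least two frames queued.
    static const Frame *frameAt(const std::vector<Frame> &frames, double timestamp) {
        if (frames.empty()) {
            return nullptr;
        }
        for (size_t i = 1; i < frames.size(); i++) {
            if (timestamp < frames[i].pts) {
                return &frames[i - 1];
            }
        }
        return &frames.back();
    }
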
5 changes: 2 additions & 3 deletions TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h
@@ -19,14 +19,12 @@ struct VideoStreamingPartFrame {
std::string endpointId;
webrtc::VideoFrame frame;
double pts = 0;
- double duration = 0.0;
int index = 0;

- VideoStreamingPartFrame(std::string endpointId_, webrtc::VideoFrame const &frame_, double pts_, double duration_, int index_) :
+ VideoStreamingPartFrame(std::string endpointId_, webrtc::VideoFrame const &frame_, double pts_, int index_) :
endpointId(endpointId_),
frame(frame_),
pts(pts_),
- duration(duration_),
index(index_) {
}
};
@@ -52,6 +50,7 @@ class VideoStreamingPart {

absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(double timestamp);
absl::optional<std::string> getActiveEndpointId() const;
+ bool hasRemainingFrames() const;

int getAudioRemainingMilliseconds();
std::vector<AudioStreamingPart::StreamingPartChannel> getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder);
@@ -297,7 +297,7 @@ class PlatformInterface {
static PlatformInterface *SharedInstance();
virtual ~PlatformInterface() = default;

- virtual void configurePlatformAudio() {
+ virtual void configurePlatformAudio(int numChannels = 1) {
}

virtual std::unique_ptr<rtc::NetworkMonitorFactory> createNetworkMonitorFactory() {
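
One caution about the `= 1` on a virtual function: default arguments are bound to the static type of the pointer or reference used in the call, not the dynamic type, which is why the Android override below repeats the same default instead of choosing a different one. A minimal demonstration of the pitfall:

    #include <iostream>

    struct Base {
        virtual void f(int n = 1) { std::cout << "Base " << n << "\n"; }
        virtual ~Base() = default;
    };

    struct Derived : Base {
        // If this default were = 2, calling through a Base* would still
        // pass 1: the default comes from the caller's static type.
        void f(int n = 1) override { std::cout << "Derived " << n << "\n"; }
    };

    int main() {
        Derived d;
        Base *b = &d;
        b->f(); // prints "Derived 1": Derived's body, Base's default
    }
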
@@ -23,7 +23,7 @@

namespace tgcalls {

- void AndroidInterface::configurePlatformAudio() {
+ void AndroidInterface::configurePlatformAudio(int numChannels) {

}

@@ -9,7 +9,7 @@ namespace tgcalls {

class AndroidInterface : public PlatformInterface {
public:
- void configurePlatformAudio() override;
+ void configurePlatformAudio(int numChannels = 1) override;
std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding = false, bool isScreencast = false) override;
std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) override;
bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) override;
