Repo created
This commit is contained in:
parent
81b91f4139
commit
f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions
64
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp
Normal file
64
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
#include "AVIOContextImpl.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
|
||||
#include <string>
|
||||
#include <set>
|
||||
#include <map>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
int AVIOContextImplRead(void *opaque, unsigned char *buffer, int bufferSize) {
|
||||
AVIOContextImpl *instance = static_cast<AVIOContextImpl *>(opaque);
|
||||
|
||||
int bytesToRead = std::min(bufferSize, ((int)instance->_fileData.size()) - instance->_fileReadPosition);
|
||||
if (bytesToRead < 0) {
|
||||
bytesToRead = 0;
|
||||
}
|
||||
|
||||
if (bytesToRead > 0) {
|
||||
memcpy(buffer, instance->_fileData.data() + instance->_fileReadPosition, bytesToRead);
|
||||
instance->_fileReadPosition += bytesToRead;
|
||||
|
||||
return bytesToRead;
|
||||
} else {
|
||||
return AVERROR_EOF;
|
||||
}
|
||||
}
|
||||
|
||||
// FFmpeg seek callback for the in-memory file.
// 0x10000 is AVSEEK_SIZE: FFmpeg is asking for the total stream size rather
// than requesting a seek.
// NOTE(review): for real seeks, `offset` is treated as absolute (SEEK_SET);
// `whence` values SEEK_CUR/SEEK_END are not handled — presumably FFmpeg only
// issues SEEK_SET here, but confirm against avio usage.
int64_t AVIOContextImplSeek(void *opaque, int64_t offset, int whence) {
    AVIOContextImpl *instance = static_cast<AVIOContextImpl *>(opaque);

    if (whence == 0x10000) {
        // AVSEEK_SIZE query: report the full buffer size, do not move the cursor.
        return (int64_t)instance->_fileData.size();
    } else {
        // Clamp the target position into [0, size] before storing it.
        int64_t seekOffset = std::min(offset, (int64_t)instance->_fileData.size());
        if (seekOffset < 0) {
            seekOffset = 0;
        }
        instance->_fileReadPosition = (int)seekOffset;
        return seekOffset;
    }
}
|
||||
|
||||
}
|
||||
|
||||
// Takes ownership of the raw media bytes and wraps them in a read-only
// AVIOContext (write_flag = 0) backed by the callbacks above.
// NOTE(review): the 4 KiB scratch buffer handed to avio_alloc_context is
// owned by the _buffer vector, not by av_malloc; FFmpeg can internally
// reallocate an AVIO buffer in some code paths, which would be invalid on
// vector-owned memory — confirm this is safe for the formats used here.
AVIOContextImpl::AVIOContextImpl(std::vector<uint8_t> &&fileData) :
_fileData(std::move(fileData)) {
    _buffer.resize(4 * 1024);

    _context = avio_alloc_context(_buffer.data(), (int)_buffer.size(), 0, this, &AVIOContextImplRead, NULL, &AVIOContextImplSeek);
}
|
||||
|
||||
AVIOContextImpl::~AVIOContextImpl() {
    // Frees the AVIOContext struct (safe if _context is nullptr) and resets
    // the pointer. The scratch buffer is owned by _buffer and is released by
    // the vector's own destructor.
    avio_context_free(&_context);
}
|
||||
|
||||
// Returns a borrowed pointer to the FFmpeg I/O context; ownership stays with
// this object. May be nullptr if avio_alloc_context failed in the constructor.
// Fix: removed the stray semicolon after the function body (non-standard;
// warns under -Wextra-semi).
AVIOContext *AVIOContextImpl::getContext() const {
    return _context;
}
|
||||
|
||||
}
|
||||
40
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h
Normal file
40
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
#ifndef TGCALLS_AVIOCONTEXTIMPL_H
|
||||
#define TGCALLS_AVIOCONTEXTIMPL_H
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include <vector>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "api/video/video_frame.h"
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
// Fix build on Windows - this should appear before FFmpeg timestamp include.
|
||||
#define _USE_MATH_DEFINES
|
||||
#include <math.h>
|
||||
|
||||
extern "C" {
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
}
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class AVIOContextImpl {
|
||||
public:
|
||||
AVIOContextImpl(std::vector<uint8_t> &&fileData);
|
||||
~AVIOContextImpl();
|
||||
|
||||
AVIOContext *getContext() const;
|
||||
|
||||
public:
|
||||
std::vector<uint8_t> _fileData;
|
||||
int _fileReadPosition = 0;
|
||||
|
||||
std::vector<uint8_t> _buffer;
|
||||
AVIOContext *_context = nullptr;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
181
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp
Normal file
181
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
#include "AudioStreamingPart.h"
|
||||
|
||||
#include "AudioStreamingPartInternal.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
|
||||
#include <string>
|
||||
#include <bitset>
|
||||
#include <set>
|
||||
#include <map>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
// Decodes one downloaded audio "part" and serves it to the caller in 10 ms
// slices, demultiplexed per SSRC. The channel-remapping bookkeeping below is
// order-sensitive, so this review only adds documentation.
class AudioStreamingPartState {
    // Associates an RTP SSRC with the decoded-channel index that currently
    // carries its audio. The mapping can change between frames (see
    // updateCurrentMapping).
    struct ChannelMapping {
        uint32_t ssrc = 0;
        int channelIndex = 0;

        ChannelMapping(uint32_t ssrc_, int channelIndex_) :
        ssrc(ssrc_), channelIndex(channelIndex_) {
        }
    };

public:
    AudioStreamingPartState(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel) :
    _isSingleChannel(isSingleChannel),
    _parsedPart(std::move(data), container) {
        // In multi-channel mode the part is useless without SSRC/channel
        // metadata, so mark it as already exhausted.
        if (_parsedPart.getChannelUpdates().size() == 0 && !isSingleChannel) {
            _didReadToEnd = true;
            return;
        }

        _remainingMilliseconds = _parsedPart.getDurationInMilliseconds();

        // Collect every SSRC that ever appears; get10msPerChannel emits one
        // output channel per known SSRC, padding with silence when unmapped.
        for (const auto &it : _parsedPart.getChannelUpdates()) {
            _allSsrcs.insert(it.ssrc);
        }
    }

    ~AudioStreamingPartState() {
    }

    std::map<std::string, int32_t> getEndpointMapping() const {
        return _parsedPart.getEndpointMapping();
    }

    int getRemainingMilliseconds() const {
        return _remainingMilliseconds;
    }

    // Decodes the next 10 ms and returns one StreamingPartChannel per SSRC
    // (or per decoded channel, in single-channel mode). Returns an empty
    // vector once the part is exhausted.
    std::vector<AudioStreamingPart::StreamingPartChannel> get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
        if (_didReadToEnd) {
            return {};
        }

        // Apply any SSRC->channel remappings that take effect at this frame.
        for (const auto &update : _parsedPart.getChannelUpdates()) {
            if (update.frameIndex == _frameIndex) {
                updateCurrentMapping(update.ssrc, update.id);
            }
        }

        auto readResult = _parsedPart.readPcm(persistentDecoder, _pcm10ms);
        if (readResult.numSamples <= 0) {
            _didReadToEnd = true;
            return {};
        }

        std::vector<AudioStreamingPart::StreamingPartChannel> resultChannels;

        if (_isSingleChannel) {
            // No SSRC metadata: expose decoded channel i as synthetic SSRC i+1.
            for (int i = 0; i < readResult.numChannels; i++) {
                AudioStreamingPart::StreamingPartChannel emptyPart;
                emptyPart.ssrc = i + 1;
                resultChannels.push_back(emptyPart);
            }

            // De-interleave _pcm10ms (samples are channel-interleaved).
            for (int i = 0; i < readResult.numChannels; i++) {
                auto channel = resultChannels.begin() + i;
                int sourceChannelIndex = i;
                for (int j = 0; j < readResult.numSamples; j++) {
                    channel->pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]);
                }
                channel->numSamples += readResult.numSamples;
            }
        } else {
            // One output slot per SSRC ever seen in this part.
            for (const auto ssrc : _allSsrcs) {
                AudioStreamingPart::StreamingPartChannel emptyPart;
                emptyPart.ssrc = ssrc;
                resultChannels.push_back(emptyPart);
            }

            for (auto &channel : resultChannels) {
                auto mappedChannelIndex = getCurrentMappedChannelIndex(channel.ssrc);

                if (mappedChannelIndex) {
                    // De-interleave the mapped source channel into this SSRC.
                    int sourceChannelIndex = mappedChannelIndex.value();
                    for (int j = 0; j < readResult.numSamples; j++) {
                        channel.pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]);
                    }
                    channel.numSamples += readResult.numSamples;
                } else {
                    // SSRC currently has no channel: emit silence to keep all
                    // output channels the same length.
                    for (int j = 0; j < readResult.numSamples; j++) {
                        channel.pcmData.push_back(0);
                    }
                    channel.numSamples += readResult.numSamples;
                }
            }
        }

        _remainingMilliseconds -= 10;
        if (_remainingMilliseconds < 0) {
            _remainingMilliseconds = 0;
        }
        _frameIndex++;

        return resultChannels;
    }

private:
    // Returns the decoded-channel index currently mapped to `ssrc`, if any.
    absl::optional<int> getCurrentMappedChannelIndex(uint32_t ssrc) {
        for (const auto &it : _currentChannelMapping) {
            if (it.ssrc == ssrc) {
                return it.channelIndex;
            }
        }
        return absl::nullopt;
    }

    // Installs (ssrc -> channelIndex), evicting any entry that conflicts on
    // either key so both sides of the mapping stay unique. Iterates backwards
    // because entries are erased in place.
    void updateCurrentMapping(uint32_t ssrc, int channelIndex) {
        for (int i = (int)_currentChannelMapping.size() - 1; i >= 0; i--) {
            const auto &entry = _currentChannelMapping[i];
            if (entry.ssrc == ssrc && entry.channelIndex == channelIndex) {
                // Exact mapping already present.
                return;
            } else if (entry.ssrc == ssrc || entry.channelIndex == channelIndex) {
                _currentChannelMapping.erase(_currentChannelMapping.begin() + i);
            }
        }
        _currentChannelMapping.emplace_back(ssrc, channelIndex);
    }

private:
    bool _isSingleChannel = false;            // no SSRC metadata mode
    AudioStreamingPartInternal _parsedPart;   // demuxer/decoder for the part
    std::set<uint32_t> _allSsrcs;             // every SSRC seen in metadata

    std::vector<int16_t> _pcm10ms;            // interleaved 10 ms scratch buffer
    std::vector<ChannelMapping> _currentChannelMapping;
    int _frameIndex = 0;                      // 10 ms frames emitted so far
    int _remainingMilliseconds = 0;

    bool _didReadToEnd = false;
};
|
||||
|
||||
// Constructs the facade. Empty input leaves _state null; every public method
// then degrades to a benign default (empty map, 0, empty vector).
AudioStreamingPart::AudioStreamingPart(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel) {
    if (!data.empty()) {
        _state = new AudioStreamingPartState(std::move(data), container, isSingleChannel);
    }
}
|
||||
|
||||
AudioStreamingPart::~AudioStreamingPart() {
    // _state may be null (empty input, or this object was moved from);
    // delete on a null pointer is a no-op, so no guard is required.
    delete _state;
}
|
||||
|
||||
std::map<std::string, int32_t> AudioStreamingPart::getEndpointMapping() const {
|
||||
return _state ? _state->getEndpointMapping() : std::map<std::string, int32_t>();
|
||||
}
|
||||
|
||||
int AudioStreamingPart::getRemainingMilliseconds() const {
|
||||
return _state ? _state->getRemainingMilliseconds() : 0;
|
||||
}
|
||||
|
||||
// Forwards to the state object; returns an empty vector when no state exists
// (empty input) so callers can treat that the same as end-of-part.
std::vector<AudioStreamingPart::StreamingPartChannel> AudioStreamingPart::get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    if (_state == nullptr) {
        return std::vector<AudioStreamingPart::StreamingPartChannel>();
    }
    return _state->get10msPerChannel(persistentDecoder);
}
|
||||
|
||||
}
|
||||
45
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h
Normal file
45
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#ifndef TGCALLS_AUDIO_STREAMING_PART_H
|
||||
#define TGCALLS_AUDIO_STREAMING_PART_H
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "AudioStreamingPartPersistentDecoder.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class AudioStreamingPartState;
|
||||
|
||||
class AudioStreamingPart {
|
||||
public:
|
||||
struct StreamingPartChannel {
|
||||
uint32_t ssrc = 0;
|
||||
std::vector<int16_t> pcmData;
|
||||
int numSamples = 0;
|
||||
};
|
||||
|
||||
explicit AudioStreamingPart(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel);
|
||||
~AudioStreamingPart();
|
||||
|
||||
AudioStreamingPart(const AudioStreamingPart&) = delete;
|
||||
AudioStreamingPart(AudioStreamingPart&& other) {
|
||||
_state = other._state;
|
||||
other._state = nullptr;
|
||||
}
|
||||
AudioStreamingPart& operator=(const AudioStreamingPart&) = delete;
|
||||
AudioStreamingPart& operator=(AudioStreamingPart&&) = delete;
|
||||
|
||||
std::map<std::string, int32_t> getEndpointMapping() const;
|
||||
int getRemainingMilliseconds() const;
|
||||
std::vector<StreamingPartChannel> get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder);
|
||||
|
||||
private:
|
||||
AudioStreamingPartState *_state = nullptr;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
@ -0,0 +1,404 @@
|
|||
#include "AudioStreamingPartInternal.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
|
||||
extern "C" {
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
}
|
||||
|
||||
#include <string>
|
||||
#include <bitset>
|
||||
#include <set>
|
||||
#include <map>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
int16_t sampleFloatToInt16(float sample) {
|
||||
return av_clip_int16 (static_cast<int32_t>(lrint(sample*32767)));
|
||||
}
|
||||
|
||||
// Parses a base-10 unsigned integer; on any parse failure the default of 0
// is returned (lenient by design — metadata fields may be absent/garbled).
uint32_t stringToUInt32(std::string const &string) {
    uint32_t parsed = 0;
    std::stringstream reader(string);
    reader >> parsed;
    return parsed;
}
|
||||
|
||||
// Splits `s` on `delim`, writing each token through the output iterator.
// Consecutive delimiters produce empty tokens; a trailing delimiter does not.
template <typename Out>
void splitString(const std::string &s, char delim, Out result) {
    std::istringstream tokenStream(s);
    for (std::string token; std::getline(tokenStream, token, delim);) {
        *result++ = token;
    }
}

// Convenience overload collecting the tokens into a vector.
std::vector<std::string> splitString(const std::string &s, char delim) {
    std::vector<std::string> tokens;
    splitString(s, delim, std::back_inserter(tokens));
    return tokens;
}
|
||||
|
||||
// Reads a 32-bit value (host byte order — the stream producer presumably
// matches it; TODO confirm) at `offset`, advancing `offset` on success.
// Returns nullopt when fewer than 4 bytes remain.
// Fixes: the bounds check compared the signed `offset + 4` against the
// unsigned length, silently relying on wraparound to reject negative offsets;
// it is now explicit. The redundant `static` (already internal linkage inside
// the anonymous namespace) is dropped.
absl::optional<uint32_t> readInt32(std::string const &data, int &offset) {
    if (offset < 0 || static_cast<size_t>(offset) + 4 > data.length()) {
        return absl::nullopt;
    }

    int32_t value = 0;
    memcpy(&value, data.data() + offset, 4);
    offset += 4;

    return value;
}
|
||||
|
||||
// Parses the decoded TG_META payload: a channel count, an update count, then
// `count` (frameIndex, channelId, ssrc) triples. Returns an empty vector on
// any truncated read.
std::vector<AudioStreamingPartInternal::ChannelUpdate> parseChannelUpdates(std::string const &data, int &offset) {
    std::vector<AudioStreamingPartInternal::ChannelUpdate> result;

    // Channel count is read only to advance the offset; its value is unused.
    auto channels = readInt32(data, offset);
    if (!channels) {
        return {};
    }

    auto count = readInt32(data, offset);
    if (!count) {
        return {};
    }

    // NOTE(review): count is uint32_t compared against a signed i; a count
    // above INT_MAX would loop excessively, but readInt32 fails fast once the
    // payload is exhausted, bounding the work to the data size.
    for (int i = 0; i < count.value(); i++) {
        auto frameIndex = readInt32(data, offset);
        if (!frameIndex) {
            return {};
        }

        auto channelId = readInt32(data, offset);
        if (!channelId) {
            return {};
        }

        auto ssrc = readInt32(data, offset);
        if (!ssrc) {
            return {};
        }

        AudioStreamingPartInternal::ChannelUpdate update;
        update.frameIndex = frameIndex.value();
        update.id = channelId.value();
        update.ssrc = ssrc.value();

        result.push_back(update);
    }

    return result;
}
|
||||
|
||||
}
|
||||
|
||||
// Opens the in-memory part with the named FFmpeg demuxer, locates the first
// audio stream, copies its codec parameters, and parses the Telegram-specific
// stream metadata (TG_META channel updates, ACTIVE_MASK / ENDPOINTS video
// endpoint mapping). Any failure marks the part as already read to end.
AudioStreamingPartInternal::AudioStreamingPartInternal(std::vector<uint8_t> &&fileData, std::string const &container) :
_avIoContext(std::move(fileData)) {
    int ret = 0;

    _frame = av_frame_alloc();

    // FFmpeg >= 5.0 (libavformat 59) returns const AVInputFormat*.
#if LIBAVFORMAT_VERSION_MAJOR >= 59
    const
#endif
    AVInputFormat *inputFormat = av_find_input_format(container.c_str());
    if (!inputFormat) {
        _didReadToEnd = true;
        return;
    }

    _inputFormatContext = avformat_alloc_context();
    if (!_inputFormatContext) {
        _didReadToEnd = true;
        return;
    }

    // Route all demuxer I/O through the in-memory AVIOContext.
    _inputFormatContext->pb = _avIoContext.getContext();

    // On failure avformat_open_input frees the context and nulls the pointer.
    if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) {
        _didReadToEnd = true;
        return;
    }

    if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) {
        _didReadToEnd = true;

        avformat_close_input(&_inputFormatContext);
        _inputFormatContext = nullptr;
        return;
    }

    // Use only the first audio stream; ignore everything else.
    for (int i = 0; i < _inputFormatContext->nb_streams; i++) {
        AVStream *inStream = _inputFormatContext->streams[i];

        AVCodecParameters *inCodecpar = inStream->codecpar;
        if (inCodecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
            continue;
        }

        // Keep a private copy so decoding can outlive the stream object.
        _audioCodecParameters = avcodec_parameters_alloc();
        avcodec_parameters_copy(_audioCodecParameters, inCodecpar);

        _streamId = i;

        _durationInMilliseconds = (int)(inStream->duration * av_q2d(inStream->time_base) * 1000);

        if (inStream->metadata) {
            // TG_META: base64-encoded binary blob with SSRC/channel updates.
            AVDictionaryEntry *entry = av_dict_get(inStream->metadata, "TG_META", nullptr, 0);
            if (entry && entry->value) {
                std::string result;
                size_t data_used = 0;
                std::string sourceBase64 = (const char *)entry->value;
                rtc::Base64::Decode(sourceBase64, rtc::Base64::DO_LAX, &result, &data_used);

                if (result.size() != 0) {
                    int offset = 0;
                    _channelUpdates = parseChannelUpdates(result, offset);
                }
            }

            // ACTIVE_MASK: bitmask of active video channels (decimal string).
            uint32_t videoChannelMask = 0;
            entry = av_dict_get(inStream->metadata, "ACTIVE_MASK", nullptr, 0);
            if (entry && entry->value) {
                std::string sourceString = (const char *)entry->value;
                videoChannelMask = stringToUInt32(sourceString);
            }

            // ENDPOINTS: space-separated endpoint ids, one per set mask bit.
            std::vector<std::string> endpointList;
            entry = av_dict_get(inStream->metadata, "ENDPOINTS", nullptr, 0);
            if (entry && entry->value) {
                std::string sourceString = (const char *)entry->value;
                endpointList = splitString(sourceString, ' ');
            }

            // Pair the i-th set bit with the i-th endpoint; only when the
            // counts agree (otherwise the metadata is inconsistent - skip).
            std::bitset<32> videoChannels(videoChannelMask);
            size_t endpointIndex = 0;
            if (videoChannels.count() == endpointList.size()) {
                for (size_t i = 0; i < videoChannels.size(); i++) {
                    if (videoChannels[i]) {
                        _endpointMapping.insert(std::make_pair(endpointList[endpointIndex], i));
                        endpointIndex++;
                    }
                }
            }
        }

        break;
    }

    if (_streamId == -1) {
        // No audio stream found.
        _didReadToEnd = true;
    }
}
|
||||
|
||||
AudioStreamingPartInternal::~AudioStreamingPartInternal() {
    // Release FFmpeg resources in the reverse order of acquisition; each
    // free call nulls its pointer, so partial construction is handled.
    if (_frame) {
        av_frame_free(&_frame);
    }
    if (_inputFormatContext) {
        avformat_close_input(&_inputFormatContext);
    }
    if (_audioCodecParameters) {
        avcodec_parameters_free(&_audioCodecParameters);
    }
}
|
||||
|
||||
// Copies the next 10 ms (480 samples/channel at 48 kHz) of interleaved PCM
// into outPcm, refilling the internal decode buffer as needed. Returns the
// number of samples per channel actually produced (may be short at EOF) and
// the channel count. The offset arithmetic is order-sensitive; documentation
// only.
AudioStreamingPartInternal::ReadPcmResult AudioStreamingPartInternal::readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector<int16_t> &outPcm) {
    if (_didReadToEnd) {
        return AudioStreamingPartInternal::ReadPcmResult();
    }

    int outPcmSampleOffset = 0;
    ReadPcmResult result;

    // Prime the buffer first: _channelCount is only known after the first
    // decoded frame, and the sizing below depends on it.
    if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
        fillPcmBuffer(persistentDecoder);
    }

    if (outPcm.size() != 480 * _channelCount) {
        outPcm.resize(480 * _channelCount);
    }
    int readSamples = 0;
    if (_channelCount != 0) {
        readSamples = (int)outPcm.size() / _channelCount;
    }

    while (outPcmSampleOffset < readSamples) {
        if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
            fillPcmBuffer(persistentDecoder);

            // Still empty after a refill attempt: stream exhausted.
            if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
                break;
            }
        }

        // Copy as much as the buffer holds, up to what's still needed.
        int readFromPcmBufferSamples = std::min(_pcmBufferSampleSize - _pcmBufferSampleOffset, readSamples - outPcmSampleOffset);
        if (readFromPcmBufferSamples != 0) {
            std::copy(_pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount, _pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount + readFromPcmBufferSamples * _channelCount, outPcm.begin() + outPcmSampleOffset * _channelCount);
            _pcmBufferSampleOffset += readFromPcmBufferSamples;
            outPcmSampleOffset += readFromPcmBufferSamples;
            result.numSamples += readFromPcmBufferSamples;
            _readSampleCount += readFromPcmBufferSamples;
        }
    }

    result.numChannels = _channelCount;

    // Uncomment for debugging incomplete frames
    /*if (result.numSamples != 480 && result.numSamples != 0) {
        RTC_LOG(LS_INFO) << "result.numSamples = " << result.numSamples << ", _readSampleCount = " << _readSampleCount << ", duration = " << _inputFormatContext->streams[_streamId]->duration;
    }*/

    return result;
}
|
||||
|
||||
// Stream duration computed from the container's stream time base; 0 if no
// audio stream was found.
int AudioStreamingPartInternal::getDurationInMilliseconds() const {
    return _durationInMilliseconds;
}
|
||||
|
||||
// SSRC/channel remapping events parsed from TG_META; reference stays valid
// for the lifetime of this object.
std::vector<AudioStreamingPartInternal::ChannelUpdate> const &AudioStreamingPartInternal::getChannelUpdates() const {
    return _channelUpdates;
}
|
||||
|
||||
// Endpoint-id -> video channel index built from ACTIVE_MASK/ENDPOINTS
// metadata; empty when the metadata was absent or inconsistent.
std::map<std::string, int32_t> AudioStreamingPartInternal::getEndpointMapping() const {
    return _endpointMapping;
}
|
||||
|
||||
// Decodes the next audio frame of our stream into _pcmBuffer as interleaved
// int16 samples, converting from the decoder's sample format. On any error
// (EOF, decode failure, unexpected layout) marks the part as read to end.
// Fix: the AV_SAMPLE_FMT_FLT case cast `&_frame->data[0]` (the address of the
// pointer-array slot, a uint8_t**) to float*, so it read the pointer bytes
// themselves as samples and overran the 8-entry pointer array; it must read
// from the buffer `_frame->data[0]`, as the S16 memcpy cases already do.
void AudioStreamingPartInternal::fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    _pcmBufferSampleSize = 0;
    _pcmBufferSampleOffset = 0;

    if (_didReadToEnd) {
        return;
    }
    if (!_inputFormatContext) {
        _didReadToEnd = true;
        return;
    }

    int ret = 0;
    // Pull packets until one from our audio stream decodes into a frame.
    while (true) {
        ret = av_read_frame(_inputFormatContext, &_packet);
        if (ret < 0) {
            _didReadToEnd = true;
            return;
        }

        if (_packet.stream_index != _streamId) {
            av_packet_unref(&_packet);
            continue;
        }

        ret = persistentDecoder.decode(_audioCodecParameters, _inputFormatContext->streams[_streamId]->time_base, _packet, _frame);
        av_packet_unref(&_packet);

        // EAGAIN: decoder needs more input before producing a frame.
        if (ret == AVERROR(EAGAIN)) {
            continue;
        }

        break;
    }

    if (ret != 0) {
        _didReadToEnd = true;
        return;
    }

    // Latch the channel count from the first decoded frame.
    if (_channelCount == 0) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
        _channelCount = _frame->ch_layout.nb_channels;
#else
        _channelCount = _frame->channels;
#endif
    }

    if (_channelCount == 0) {
        _didReadToEnd = true;
        return;
    }

    // Reject mid-stream channel-count changes and absurd layouts.
#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_frame->ch_layout.nb_channels != _channelCount || _frame->ch_layout.nb_channels > 8) {
#else
    if (_frame->channels != _channelCount || _frame->channels > 8) {
#endif
        _didReadToEnd = true;
        return;
    }

#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_pcmBuffer.size() < _frame->nb_samples * _frame->ch_layout.nb_channels) {
        _pcmBuffer.resize(_frame->nb_samples * _frame->ch_layout.nb_channels);
    }
#else
    if (_pcmBuffer.size() < _frame->nb_samples * _frame->channels) {
        _pcmBuffer.resize(_frame->nb_samples * _frame->channels);
    }
#endif

    // Convert to interleaved int16. Packed formats (S16, FLT) live entirely
    // in data[0]; planar formats (S16P, FLTP) keep one plane per channel.
    switch (_frame->format) {
        case AV_SAMPLE_FMT_S16: {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
            memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * _frame->ch_layout.nb_channels);
#else
            memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * _frame->channels);
#endif
        } break;

        case AV_SAMPLE_FMT_S16P: {
            int16_t *to = _pcmBuffer.data();
            for (int sample = 0; sample < _frame->nb_samples; ++sample) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
                for (int channel = 0; channel < _frame->ch_layout.nb_channels; ++channel) {
#else
                for (int channel = 0; channel < _frame->channels; ++channel) {
#endif
                    int16_t *shortChannel = (int16_t*)_frame->data[channel];
                    *to++ = shortChannel[sample];
                }
            }
        } break;

        case AV_SAMPLE_FMT_FLT: {
            // Packed float: all samples interleaved in the first data plane.
            float *floatData = (float *)_frame->data[0];
#if LIBAVFORMAT_VERSION_MAJOR >= 59
            for (int i = 0; i < _frame->nb_samples * _frame->ch_layout.nb_channels; i++) {
                _pcmBuffer[i] = sampleFloatToInt16(floatData[i]);
            }
#else
            for (int i = 0; i < _frame->nb_samples * _frame->channels; i++) {
                _pcmBuffer[i] = sampleFloatToInt16(floatData[i]);
            }
#endif
        } break;

        case AV_SAMPLE_FMT_FLTP: {
            int16_t *to = _pcmBuffer.data();
            for (int sample = 0; sample < _frame->nb_samples; ++sample) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
                for (int channel = 0; channel < _frame->ch_layout.nb_channels; ++channel) {
#else
                for (int channel = 0; channel < _frame->channels; ++channel) {
#endif
                    float *floatChannel = (float*)_frame->data[channel];
                    *to++ = sampleFloatToInt16(floatChannel[sample]);
                }
            }
        } break;

        default: {
            RTC_FATAL() << "Unexpected sample_fmt";
        } break;
    }

    _pcmBufferSampleSize = _frame->nb_samples;
    _pcmBufferSampleOffset = 0;
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
#ifndef TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H
|
||||
#define TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "AVIOContextImpl.h"
|
||||
#include "AudioStreamingPartPersistentDecoder.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
// FFmpeg-backed demuxer/decoder for one in-memory audio part. Produces
// interleaved int16 PCM via readPcm and exposes the Telegram metadata parsed
// from the container.
class AudioStreamingPartInternal {
public:
    // Result of one readPcm() call.
    struct ReadPcmResult {
        int numSamples = 0;   // samples per channel actually delivered
        int numChannels = 0;  // channel count of the decoded stream
    };

    // One ssrc -> channel-slot remapping event, effective from frameIndex.
    struct ChannelUpdate {
        int frameIndex = 0;
        int id = 0;           // decoded-channel index the ssrc maps to
        uint32_t ssrc = 0;
    };

public:
    // `container` is the FFmpeg demuxer name for the byte stream.
    AudioStreamingPartInternal(std::vector<uint8_t> &&fileData, std::string const &container);
    ~AudioStreamingPartInternal();

    ReadPcmResult readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector<int16_t> &outPcm);
    int getDurationInMilliseconds() const;
    std::vector<ChannelUpdate> const &getChannelUpdates() const;
    std::map<std::string, int32_t> getEndpointMapping() const;

private:
    // Decodes the next frame of the audio stream into _pcmBuffer.
    void fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder);

private:
    AVIOContextImpl _avIoContext;  // in-memory I/O for the demuxer

    AVFormatContext *_inputFormatContext = nullptr;  // owned
    AVPacket _packet;                                // scratch; filled by av_read_frame
    AVFrame *_frame = nullptr;                       // owned decode target
    AVCodecParameters *_audioCodecParameters = nullptr;  // owned copy

    bool _didReadToEnd = false;

    int _durationInMilliseconds = 0;
    int _streamId = -1;      // index of the audio stream, -1 if none
    int _channelCount = 0;   // latched from the first decoded frame

    std::vector<ChannelUpdate> _channelUpdates;      // from TG_META
    std::map<std::string, int32_t> _endpointMapping; // from ACTIVE_MASK/ENDPOINTS

    std::vector<int16_t> _pcmBuffer;   // interleaved decoded samples
    int _pcmBufferSampleOffset = 0;    // consumed samples (per channel)
    int _pcmBufferSampleSize = 0;      // valid samples (per channel)
    int _readSampleCount = 0;          // lifetime total, for debugging
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
#include "AudioStreamingPartPersistentDecoder.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
// Takes a deep copy of the codec parameters so later equality checks do not
// depend on the source object's lifetime.
// NOTE(review): the return values of avcodec_parameters_alloc/copy are not
// checked — presumably allocation failure is considered fatal here; confirm.
WrappedCodecParameters::WrappedCodecParameters(AVCodecParameters const *codecParameters) {
    _value = avcodec_parameters_alloc();
    avcodec_parameters_copy(_value, codecParameters);
}
|
||||
|
||||
WrappedCodecParameters::~WrappedCodecParameters() {
    // Frees the copied parameters and nulls the pointer (safe on nullptr).
    avcodec_parameters_free(&_value);
}
|
||||
|
||||
// Compares only the fields that require rebuilding the decoder when they
// change: codec id, sample format, and channel count. Other parameter fields
// are deliberately ignored.
bool WrappedCodecParameters::isEqual(AVCodecParameters const *other) {
    if (_value->codec_id != other->codec_id) {
        return false;
    }
    if (_value->format != other->format) {
        return false;
    }
    // FFmpeg >= 5.0 replaced `channels` with the ch_layout struct.
#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_value->ch_layout.nb_channels != other->ch_layout.nb_channels) {
#else
    if (_value->channels != other->channels) {
#endif
        return false;
    }

    return true;
}
|
||||
|
||||
// One concrete decoder instance bound to a specific (codec parameters,
// time base) pair. Rebuilt by AudioStreamingPartPersistentDecoder whenever
// the stream configuration changes.
class AudioStreamingPartPersistentDecoderState {
public:
    AudioStreamingPartPersistentDecoderState(AVCodecParameters const *codecParameters, AVRational timeBase) :
    _codecParameters(codecParameters),
    _timeBase(timeBase) {
        const AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
        if (codec) {
            _codecContext = avcodec_alloc_context3(codec);
            int ret = avcodec_parameters_to_context(_codecContext, codecParameters);
            if (ret < 0) {
                // Failed setup: tear down so _codecContext stays null.
                avcodec_free_context(&_codecContext);
                _codecContext = nullptr;
            } else {
                _codecContext->pkt_timebase = timeBase;

#if LIBAVFORMAT_VERSION_MAJOR >= 59
                _channelCount = _codecContext->ch_layout.nb_channels;
#else
                _channelCount = _codecContext->channels;
#endif

                ret = avcodec_open2(_codecContext, codec, nullptr);
                if (ret < 0) {
                    avcodec_free_context(&_codecContext);
                    _codecContext = nullptr;
                }
            }
        }
    }

    ~AudioStreamingPartPersistentDecoderState() {
        if (_codecContext) {
            // NOTE(review): avcodec_close before avcodec_free_context is
            // redundant in modern FFmpeg (free closes too) — harmless here.
            avcodec_close(_codecContext);
            avcodec_free_context(&_codecContext);
        }
    }

    // Sends one packet and receives one frame. Returns 0 on success, an
    // AVERROR (including AVERROR(EAGAIN)) from FFmpeg, or -1 for unsupported
    // sample sizes (only 16-bit and 32-bit samples are handled downstream).
    // NOTE(review): _codecContext is dereferenced without a null check —
    // presumably callers only reach here after successful construction;
    // confirm.
    int decode(AVPacket &packet, AVFrame *frame) {
        int ret = avcodec_send_packet(_codecContext, &packet);
        if (ret < 0) {
            return ret;
        }

        int bytesPerSample = av_get_bytes_per_sample(_codecContext->sample_fmt);
        if (bytesPerSample != 2 && bytesPerSample != 4) {
            return -1;
        }

        ret = avcodec_receive_frame(_codecContext, frame);
        return ret;
    }

public:
    // Public so maybeReset can compare the configuration this state was
    // built from against incoming packets' parameters.
    WrappedCodecParameters _codecParameters;
    AVRational _timeBase;
    AVCodecContext *_codecContext = nullptr;  // owned; null if setup failed
    int _channelCount = 0;
};
|
||||
|
||||
// State is created lazily on the first decode() call.
AudioStreamingPartPersistentDecoder::AudioStreamingPartPersistentDecoder() {
}
|
||||
|
||||
AudioStreamingPartPersistentDecoder::~AudioStreamingPartPersistentDecoder() {
    // Owned decoder state; delete on nullptr is a no-op.
    delete _state;
    _state = nullptr;
}
|
||||
|
||||
// Keeps the existing decoder when both the codec parameters and the packet
// time base are unchanged; otherwise destroys it and builds a fresh one.
void AudioStreamingPartPersistentDecoder::maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase) {
    if (_state != nullptr) {
        const bool sameParameters = _state->_codecParameters.isEqual(codecParameters);
        const bool sameTimeBase = _state->_timeBase.num == timeBase.num
            && _state->_timeBase.den == timeBase.den;
        if (sameParameters && sameTimeBase) {
            // Configuration unchanged - reuse the current decoder.
            return;
        }
        delete _state;
        _state = nullptr;
    }

    _state = new AudioStreamingPartPersistentDecoderState(codecParameters, timeBase);
}
|
||||
|
||||
// Decodes one packet, transparently rebuilding the underlying decoder when
// the stream configuration changes. Returns the FFmpeg result code, or -1
// when no decoder state is available.
int AudioStreamingPartPersistentDecoder::decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame) {
    maybeReset(codecParameters, timeBase);

    return _state != nullptr ? _state->decode(packet, frame) : -1;
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
#ifndef TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H
|
||||
#define TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <stdint.h>
|
||||
|
||||
// Fix build on Windows - this should appear before FFmpeg timestamp include.
|
||||
#define _USE_MATH_DEFINES
|
||||
#include <math.h>
|
||||
|
||||
extern "C" {
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
}
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class AudioStreamingPartPersistentDecoderState;
|
||||
|
||||
// Owning deep copy of AVCodecParameters, used to detect when the stream's
// decoding configuration changes between packets.
class WrappedCodecParameters {
public:
    WrappedCodecParameters(AVCodecParameters const *codecParameters);
    ~WrappedCodecParameters();

    // True when codec id, sample format, and channel count all match.
    bool isEqual(AVCodecParameters const *other);

private:
    AVCodecParameters *_value = nullptr;  // owned copy
};
|
||||
|
||||
// Decoder that persists across streaming parts: the underlying FFmpeg codec
// context is kept alive between decode() calls and only rebuilt when the
// codec parameters or time base change (see maybeReset).
class AudioStreamingPartPersistentDecoder {
public:
    AudioStreamingPartPersistentDecoder();
    ~AudioStreamingPartPersistentDecoder();

    // Returns the FFmpeg result code (0 on success, AVERROR otherwise).
    int decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame);

private:
    // Rebuilds _state when the stream configuration changed.
    void maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase);

private:
    AudioStreamingPartPersistentDecoderState *_state = nullptr;  // owned, lazy
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
4523
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp
Normal file
4523
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,59 @@
|
|||
#ifndef TGCALLS_GROUP_INSTANCE_CUSTOM_IMPL_H
|
||||
#define TGCALLS_GROUP_INSTANCE_CUSTOM_IMPL_H
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
|
||||
#include "../Instance.h"
|
||||
#include "GroupInstanceImpl.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class LogSinkImpl;
|
||||
class GroupInstanceCustomInternal;
|
||||
class Threads;
|
||||
|
||||
class GroupInstanceCustomImpl final : public GroupInstanceInterface {
|
||||
public:
|
||||
explicit GroupInstanceCustomImpl(GroupInstanceDescriptor &&descriptor);
|
||||
~GroupInstanceCustomImpl();
|
||||
|
||||
void stop(std::function<void()> completion);
|
||||
|
||||
void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast);
|
||||
|
||||
void emitJoinPayload(std::function<void(GroupJoinPayload const &)> completion);
|
||||
void setJoinResponsePayload(std::string const &payload);
|
||||
void removeSsrcs(std::vector<uint32_t> ssrcs);
|
||||
void removeIncomingVideoSource(uint32_t ssrc);
|
||||
|
||||
void setIsMuted(bool isMuted);
|
||||
void setIsNoiseSuppressionEnabled(bool isNoiseSuppressionEnabled);
|
||||
void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture);
|
||||
void setVideoSource(std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource);
|
||||
void setAudioOutputDevice(std::string id);
|
||||
void setAudioInputDevice(std::string id);
|
||||
void addExternalAudioSamples(std::vector<uint8_t> &&samples);
|
||||
|
||||
void addOutgoingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
|
||||
void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
|
||||
|
||||
void setVolume(uint32_t ssrc, double volume);
|
||||
void setRequestedVideoChannels(std::vector<VideoChannelDescription> &&requestedVideoChannels);
|
||||
|
||||
void getStats(std::function<void(GroupInstanceStats)> completion);
|
||||
void internal_addCustomNetworkEvent(bool isRemoteConnected);
|
||||
|
||||
private:
|
||||
std::shared_ptr<Threads> _threads;
|
||||
std::unique_ptr<ThreadLocalObject<GroupInstanceCustomInternal>> _internal;
|
||||
std::unique_ptr<LogSinkImpl> _logSink;
|
||||
|
||||
};
|
||||
|
||||
} // namespace tgcalls
|
||||
|
||||
#endif
|
||||
238
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h
Normal file
238
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h
Normal file
|
|
@ -0,0 +1,238 @@
|
|||
#ifndef TGCALLS_GROUP_INSTANCE_IMPL_H
|
||||
#define TGCALLS_GROUP_INSTANCE_IMPL_H
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <optional>
|
||||
|
||||
#include "../Instance.h"
|
||||
|
||||
#include "../StaticThreads.h"
|
||||
#include "GroupJoinPayload.h"
|
||||
|
||||
namespace webrtc {
|
||||
class AudioDeviceModule;
|
||||
class TaskQueueFactory;
|
||||
class VideoTrackSourceInterface;
|
||||
}
|
||||
|
||||
namespace webrtc {
|
||||
template <class T>
|
||||
class scoped_refptr;
|
||||
}
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class LogSinkImpl;
|
||||
class GroupInstanceManager;
|
||||
class WrappedAudioDeviceModule;
|
||||
struct AudioFrame;
|
||||
|
||||
struct GroupConfig {
|
||||
bool need_log{true};
|
||||
FilePath logPath;
|
||||
};
|
||||
|
||||
struct GroupLevelValue {
|
||||
float level = 0.;
|
||||
bool voice = false;
|
||||
bool isMuted = false;
|
||||
};
|
||||
|
||||
struct GroupLevelUpdate {
|
||||
uint32_t ssrc = 0;
|
||||
GroupLevelValue value;
|
||||
};
|
||||
|
||||
struct GroupLevelsUpdate {
|
||||
std::vector<GroupLevelUpdate> updates;
|
||||
};
|
||||
|
||||
struct GroupActivityUpdate {
|
||||
uint32_t ssrc = 0;
|
||||
};
|
||||
|
||||
struct GroupActivitiesUpdate {
|
||||
std::vector<GroupActivityUpdate> updates;
|
||||
};
|
||||
|
||||
|
||||
class BroadcastPartTask {
|
||||
public:
|
||||
virtual ~BroadcastPartTask() = default;
|
||||
|
||||
virtual void cancel() = 0;
|
||||
};
|
||||
|
||||
struct BroadcastPart {
|
||||
struct VideoParams {
|
||||
};
|
||||
|
||||
enum class Status {
|
||||
Success,
|
||||
NotReady,
|
||||
ResyncNeeded
|
||||
};
|
||||
|
||||
int64_t timestampMilliseconds = 0;
|
||||
double responseTimestamp = 0;
|
||||
Status status = Status::NotReady;
|
||||
std::vector<uint8_t> data;
|
||||
};
|
||||
|
||||
enum class GroupConnectionMode {
|
||||
GroupConnectionModeNone,
|
||||
GroupConnectionModeRtc,
|
||||
GroupConnectionModeBroadcast
|
||||
};
|
||||
|
||||
struct GroupNetworkState {
|
||||
bool isConnected = false;
|
||||
bool isTransitioningFromBroadcastToRtc = false;
|
||||
};
|
||||
|
||||
enum class VideoContentType {
|
||||
None,
|
||||
Screencast,
|
||||
Generic
|
||||
};
|
||||
|
||||
enum class VideoCodecName {
|
||||
VP8,
|
||||
VP9,
|
||||
H264
|
||||
};
|
||||
|
||||
class RequestMediaChannelDescriptionTask {
|
||||
public:
|
||||
virtual ~RequestMediaChannelDescriptionTask() = default;
|
||||
|
||||
virtual void cancel() = 0;
|
||||
};
|
||||
|
||||
struct MediaChannelDescription {
|
||||
enum class Type {
|
||||
Audio,
|
||||
Video
|
||||
};
|
||||
|
||||
Type type = Type::Audio;
|
||||
uint32_t audioSsrc = 0;
|
||||
int64_t userId = 0;
|
||||
std::string videoInformation;
|
||||
};
|
||||
|
||||
struct MediaSsrcGroup {
|
||||
std::string semantics;
|
||||
std::vector<uint32_t> ssrcs;
|
||||
};
|
||||
|
||||
struct VideoChannelDescription {
|
||||
enum class Quality {
|
||||
Thumbnail,
|
||||
Medium,
|
||||
Full
|
||||
};
|
||||
uint32_t audioSsrc = 0;
|
||||
int64_t userId = 0;
|
||||
std::string endpointId;
|
||||
std::vector<MediaSsrcGroup> ssrcGroups;
|
||||
Quality minQuality = Quality::Thumbnail;
|
||||
Quality maxQuality = Quality::Thumbnail;
|
||||
};
|
||||
|
||||
struct GroupInstanceStats {
|
||||
struct IncomingVideoStats {
|
||||
int receivingQuality = 0;
|
||||
int availableQuality = 0;
|
||||
};
|
||||
|
||||
std::vector<std::pair<std::string, IncomingVideoStats>> incomingVideoStats;
|
||||
};
|
||||
|
||||
struct GroupInstanceDescriptor {
|
||||
std::shared_ptr<Threads> threads;
|
||||
GroupConfig config;
|
||||
std::string statsLogPath;
|
||||
std::function<void(GroupNetworkState)> networkStateUpdated;
|
||||
std::function<void(int)> signalBarsUpdated;
|
||||
std::function<void(GroupLevelsUpdate const &)> audioLevelsUpdated;
|
||||
std::function<void(uint32_t, const AudioFrame &)> onAudioFrame;
|
||||
std::function<void(GroupActivitiesUpdate const &)> ssrcActivityUpdated;
|
||||
std::string initialInputDeviceId;
|
||||
std::string initialOutputDeviceId;
|
||||
bool useDummyChannel{true};
|
||||
bool disableIncomingChannels{false};
|
||||
std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> createAudioDeviceModule;
|
||||
std::function<webrtc::scoped_refptr<WrappedAudioDeviceModule>(webrtc::TaskQueueFactory*)> createWrappedAudioDeviceModule;
|
||||
std::shared_ptr<VideoCaptureInterface> videoCapture; // deprecated
|
||||
std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource;
|
||||
std::function<std::shared_ptr<BroadcastPartTask>(std::function<void(int64_t)>)> requestCurrentTime;
|
||||
std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, std::function<void(BroadcastPart &&)>)> requestAudioBroadcastPart;
|
||||
std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function<void(BroadcastPart &&)>)> requestVideoBroadcastPart;
|
||||
int outgoingAudioBitrateKbit{32};
|
||||
bool disableOutgoingAudioProcessing{false};
|
||||
bool disableAudioInput{false};
|
||||
bool ios_enableSystemMute{false};
|
||||
VideoContentType videoContentType{VideoContentType::None};
|
||||
bool initialEnableNoiseSuppression{false};
|
||||
std::vector<VideoCodecName> videoCodecPreferences;
|
||||
std::function<std::shared_ptr<RequestMediaChannelDescriptionTask>(std::vector<uint32_t> const &, std::function<void(std::vector<MediaChannelDescription> &&)>)> requestMediaChannelDescriptions;
|
||||
int minOutgoingVideoBitrateKbit{100};
|
||||
std::function<void(bool)> onMutedSpeechActivityDetected;
|
||||
std::function<std::vector<uint8_t>(std::vector<uint8_t> const &, int64_t, bool, int32_t)> e2eEncryptDecrypt;
|
||||
bool isConference{false};
|
||||
|
||||
std::shared_ptr<PlatformContext> platformContext;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class ThreadLocalObject;
|
||||
|
||||
class GroupInstanceInterface {
|
||||
protected:
|
||||
GroupInstanceInterface() = default;
|
||||
|
||||
public:
|
||||
virtual ~GroupInstanceInterface() = default;
|
||||
|
||||
virtual void stop(std::function<void()> completion) = 0;
|
||||
|
||||
virtual void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast) = 0;
|
||||
|
||||
virtual void emitJoinPayload(std::function<void(GroupJoinPayload const &)> completion) = 0;
|
||||
virtual void setJoinResponsePayload(std::string const &payload) = 0;
|
||||
virtual void removeSsrcs(std::vector<uint32_t> ssrcs) = 0;
|
||||
virtual void removeIncomingVideoSource(uint32_t ssrc) = 0;
|
||||
|
||||
virtual void setIsMuted(bool isMuted) = 0;
|
||||
virtual void setIsNoiseSuppressionEnabled(bool isNoiseSuppressionEnabled) = 0;
|
||||
virtual void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) = 0;
|
||||
virtual void setVideoSource(std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource) = 0;
|
||||
virtual void setAudioOutputDevice(std::string id) = 0;
|
||||
virtual void setAudioInputDevice(std::string id) = 0;
|
||||
virtual void addExternalAudioSamples(std::vector<uint8_t> &&samples) = 0;
|
||||
|
||||
virtual void addOutgoingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
|
||||
virtual void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
|
||||
|
||||
virtual void setVolume(uint32_t ssrc, double volume) = 0;
|
||||
virtual void setRequestedVideoChannels(std::vector<VideoChannelDescription> &&requestedVideoChannels) = 0;
|
||||
|
||||
virtual void getStats(std::function<void(GroupInstanceStats)> completion) = 0;
|
||||
virtual void internal_addCustomNetworkEvent(bool isRemoteConnected) = 0;
|
||||
|
||||
struct AudioDevice {
|
||||
enum class Type {Input, Output};
|
||||
std::string name;
|
||||
std::string guid;
|
||||
};
|
||||
static std::vector<GroupInstanceInterface::AudioDevice> getAudioDevices(AudioDevice::Type type);
|
||||
};
|
||||
|
||||
} // namespace tgcalls
|
||||
|
||||
#endif
|
||||
78
TMessagesProj/jni/voip/tgcalls/group/GroupJoinPayload.h
Normal file
78
TMessagesProj/jni/voip/tgcalls/group/GroupJoinPayload.h
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
#ifndef TGCALLS_GROUP_JOIN_PAYLOAD_H
|
||||
#define TGCALLS_GROUP_JOIN_PAYLOAD_H
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <stdint.h>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
struct GroupJoinPayloadVideoSourceGroup {
|
||||
std::vector<uint32_t> ssrcs;
|
||||
std::string semantics;
|
||||
};
|
||||
|
||||
struct GroupJoinPayloadVideoPayloadType {
|
||||
struct FeedbackType {
|
||||
std::string type;
|
||||
std::string subtype;
|
||||
};
|
||||
|
||||
uint32_t id = 0;
|
||||
std::string name;
|
||||
uint32_t clockrate = 0;
|
||||
uint32_t channels = 0;
|
||||
std::vector<FeedbackType> feedbackTypes;
|
||||
std::vector<std::pair<std::string, std::string>> parameters;
|
||||
};
|
||||
|
||||
struct GroupJoinTransportDescription {
|
||||
struct Fingerprint {
|
||||
std::string hash;
|
||||
std::string setup;
|
||||
std::string fingerprint;
|
||||
};
|
||||
|
||||
struct Candidate {
|
||||
std::string port;
|
||||
std::string protocol;
|
||||
std::string network;
|
||||
std::string generation;
|
||||
std::string id;
|
||||
std::string component;
|
||||
std::string foundation;
|
||||
std::string priority;
|
||||
std::string ip;
|
||||
std::string type;
|
||||
|
||||
std::string tcpType;
|
||||
std::string relAddr;
|
||||
std::string relPort;
|
||||
};
|
||||
|
||||
std::string ufrag;
|
||||
std::string pwd;
|
||||
std::vector<Fingerprint> fingerprints;
|
||||
std::vector<Candidate> candidates;
|
||||
};
|
||||
|
||||
struct GroupJoinVideoInformation {
|
||||
uint32_t serverVideoBandwidthProbingSsrc = 0;
|
||||
std::string endpointId;
|
||||
std::vector<GroupJoinPayloadVideoPayloadType> payloadTypes;
|
||||
std::vector<std::pair<uint32_t, std::string>> extensionMap;
|
||||
};
|
||||
|
||||
struct GroupParticipantVideoInformation {
|
||||
std::string endpointId;
|
||||
std::vector<GroupJoinPayloadVideoSourceGroup> ssrcGroups;
|
||||
};
|
||||
|
||||
struct GroupJoinPayload {
|
||||
uint32_t audioSsrc = 0;
|
||||
std::string json;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
@ -0,0 +1,373 @@
|
|||
#include "GroupJoinPayloadInternal.h"
|
||||
|
||||
#include "third-party/json11.hpp"
|
||||
#include <sstream>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
absl::optional<int32_t> parseInt(json11::Json::object const &object, std::string const &key) {
|
||||
const auto value = object.find(key);
|
||||
if (value == object.end() || !value->second.is_number()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
return value->second.int_value();
|
||||
}
|
||||
|
||||
absl::optional<std::string> parseString(json11::Json::object const &object, std::string const &key) {
|
||||
const auto value = object.find(key);
|
||||
if (value == object.end() || !value->second.is_string()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
return value->second.string_value();
|
||||
}
|
||||
|
||||
template <typename Out>
|
||||
void splitString(const std::string &s, char delim, Out result) {
|
||||
std::istringstream iss(s);
|
||||
std::string item;
|
||||
while (std::getline(iss, item, delim)) {
|
||||
*result++ = item;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> splitString(const std::string &s, char delim) {
|
||||
std::vector<std::string> elems;
|
||||
splitString(s, delim, std::back_inserter(elems));
|
||||
return elems;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinTransportDescription> parseTransportDescription(json11::Json::object const &object) {
|
||||
GroupJoinTransportDescription result;
|
||||
|
||||
if (const auto pwd = parseString(object, "pwd")) {
|
||||
result.pwd = pwd.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto ufrag = parseString(object, "ufrag")) {
|
||||
result.ufrag = ufrag.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
const auto fingerprints = object.find("fingerprints");
|
||||
if (fingerprints == object.end() || !fingerprints->second.is_array()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
for (const auto &fingerprint : fingerprints->second.array_items()) {
|
||||
if (!fingerprint.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
GroupJoinTransportDescription::Fingerprint parsedFingerprint;
|
||||
|
||||
if (const auto hash = parseString(fingerprint.object_items(), "hash")) {
|
||||
parsedFingerprint.hash = hash.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto fingerprintValue = parseString(fingerprint.object_items(), "fingerprint")) {
|
||||
parsedFingerprint.fingerprint = fingerprintValue.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto setup = parseString(fingerprint.object_items(), "setup")) {
|
||||
parsedFingerprint.setup = setup.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
result.fingerprints.push_back(std::move(parsedFingerprint));
|
||||
}
|
||||
|
||||
const auto candidates = object.find("candidates");
|
||||
if (candidates == object.end() || !candidates->second.is_array()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
for (const auto &candidate : candidates->second.array_items()) {
|
||||
if (!candidate.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
GroupJoinTransportDescription::Candidate parsedCandidate;
|
||||
|
||||
if (const auto port = parseString(candidate.object_items(), "port")) {
|
||||
parsedCandidate.port = port.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto protocol = parseString(candidate.object_items(), "protocol")) {
|
||||
parsedCandidate.protocol = protocol.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto network = parseString(candidate.object_items(), "network")) {
|
||||
parsedCandidate.network = network.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto generation = parseString(candidate.object_items(), "generation")) {
|
||||
parsedCandidate.generation = generation.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto id = parseString(candidate.object_items(), "id")) {
|
||||
parsedCandidate.id = id.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto component = parseString(candidate.object_items(), "component")) {
|
||||
parsedCandidate.component = component.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto foundation = parseString(candidate.object_items(), "foundation")) {
|
||||
parsedCandidate.foundation = foundation.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto priority = parseString(candidate.object_items(), "priority")) {
|
||||
parsedCandidate.priority = priority.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto ip = parseString(candidate.object_items(), "ip")) {
|
||||
parsedCandidate.ip = ip.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto type = parseString(candidate.object_items(), "type")) {
|
||||
parsedCandidate.type = type.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto tcpType = parseString(candidate.object_items(), "tcptype")) {
|
||||
parsedCandidate.tcpType = tcpType.value();
|
||||
}
|
||||
|
||||
if (const auto relAddr = parseString(candidate.object_items(), "rel-addr")) {
|
||||
parsedCandidate.relAddr = relAddr.value();
|
||||
}
|
||||
|
||||
if (const auto relPort = parseString(candidate.object_items(), "rel-port")) {
|
||||
parsedCandidate.relPort = relPort.value();
|
||||
}
|
||||
|
||||
result.candidates.push_back(std::move(parsedCandidate));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinPayloadVideoPayloadType> parsePayloadType(json11::Json::object const &object) {
|
||||
GroupJoinPayloadVideoPayloadType result;
|
||||
|
||||
if (const auto id = parseInt(object, "id")) {
|
||||
result.id = (uint32_t)id.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto name = parseString(object, "name")) {
|
||||
result.name = name.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto clockrate = parseInt(object, "clockrate")) {
|
||||
result.clockrate = (uint32_t)clockrate.value();
|
||||
} else {
|
||||
result.clockrate = 0;
|
||||
}
|
||||
|
||||
if (const auto channels = parseInt(object, "channels")) {
|
||||
result.channels = (uint32_t)channels.value();
|
||||
} else {
|
||||
result.channels = 1;
|
||||
}
|
||||
|
||||
const auto parameters = object.find("parameters");
|
||||
if (parameters != object.end() && parameters->second.is_object()) {
|
||||
for (const auto ¶meter : parameters->second.object_items()) {
|
||||
if (parameter.second.is_string()) {
|
||||
result.parameters.push_back(std::make_pair(parameter.first, parameter.second.string_value()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto rtcpFbs = object.find("rtcp-fbs");
|
||||
if (rtcpFbs != object.end() && rtcpFbs->second.is_array()) {
|
||||
for (const auto &item : rtcpFbs->second.array_items()) {
|
||||
if (item.is_object()) {
|
||||
const auto type = item.object_items().find("type");
|
||||
if (type != item.object_items().end() && type->second.is_string()) {
|
||||
GroupJoinPayloadVideoPayloadType::FeedbackType parsedFeedbackType;
|
||||
|
||||
const auto typeString = type->second.string_value();
|
||||
|
||||
const auto subtype = item.object_items().find("subtype");
|
||||
if (subtype != item.object_items().end() && subtype->second.is_string()) {
|
||||
parsedFeedbackType.type = typeString;
|
||||
parsedFeedbackType.subtype = subtype->second.string_value();
|
||||
} else {
|
||||
auto components = splitString(typeString, ' ');
|
||||
if (components.size() == 1) {
|
||||
parsedFeedbackType.type = components[0];
|
||||
} else if (components.size() == 2) {
|
||||
parsedFeedbackType.type = components[0];
|
||||
parsedFeedbackType.subtype = components[1];
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
result.feedbackTypes.push_back(std::move(parsedFeedbackType));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinVideoInformation> parseVideoInformation(json11::Json::object const &object) {
|
||||
GroupJoinVideoInformation result;
|
||||
|
||||
const auto serverSources = object.find("server_sources");
|
||||
if (serverSources != object.end() && serverSources->second.is_array()) {
|
||||
for (const auto &item : serverSources->second.array_items()) {
|
||||
if (item.is_number()) {
|
||||
int32_t value = item.int_value();
|
||||
uint32_t unsignedValue = *(uint32_t *)&value;
|
||||
result.serverVideoBandwidthProbingSsrc = unsignedValue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto payloadTypes = object.find("payload-types");
|
||||
if (payloadTypes != object.end() && payloadTypes->second.is_array()) {
|
||||
for (const auto &payloadType : payloadTypes->second.array_items()) {
|
||||
if (payloadType.is_object()) {
|
||||
if (const auto parsedPayloadType = parsePayloadType(payloadType.object_items())) {
|
||||
result.payloadTypes.push_back(parsedPayloadType.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto rtpHdrexts = object.find("rtp-hdrexts");
|
||||
if (rtpHdrexts != object.end() && rtpHdrexts->second.is_array()) {
|
||||
for (const auto &rtpHdrext : rtpHdrexts->second.array_items()) {
|
||||
if (rtpHdrext.is_object()) {
|
||||
const auto id = rtpHdrext.object_items().find("id");
|
||||
if (id == rtpHdrext.object_items().end() || !id->second.is_number()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto uri = rtpHdrext.object_items().find("uri");
|
||||
if (uri == rtpHdrext.object_items().end() || !uri->second.is_string()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
result.extensionMap.push_back(std::make_pair(id->second.int_value(), uri->second.string_value()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto endpointId = object.find("endpoint");
|
||||
if (endpointId != object.end() && endpointId->second.is_string()) {
|
||||
result.endpointId = endpointId->second.string_value();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::string GroupJoinInternalPayload::serialize() {
|
||||
json11::Json::object object;
|
||||
|
||||
int32_t signedSsrc = *(int32_t *)&audioSsrc;
|
||||
|
||||
object.insert(std::make_pair("ssrc", json11::Json(signedSsrc)));
|
||||
object.insert(std::make_pair("ufrag", json11::Json(transport.ufrag)));
|
||||
object.insert(std::make_pair("pwd", json11::Json(transport.pwd)));
|
||||
|
||||
json11::Json::array fingerprints;
|
||||
for (const auto &fingerprint : transport.fingerprints) {
|
||||
json11::Json::object fingerprintJson;
|
||||
|
||||
fingerprintJson.insert(std::make_pair("hash", json11::Json(fingerprint.hash)));
|
||||
fingerprintJson.insert(std::make_pair("fingerprint", json11::Json(fingerprint.fingerprint)));
|
||||
fingerprintJson.insert(std::make_pair("setup", json11::Json(fingerprint.setup)));
|
||||
|
||||
fingerprints.push_back(json11::Json(std::move(fingerprintJson)));
|
||||
}
|
||||
object.insert(std::make_pair("fingerprints", json11::Json(std::move(fingerprints))));
|
||||
|
||||
if (videoInformation) {
|
||||
json11::Json::array ssrcGroups;
|
||||
for (const auto &ssrcGroup : videoInformation->ssrcGroups) {
|
||||
json11::Json::object ssrcGroupJson;
|
||||
|
||||
json11::Json::array ssrcGroupSources;
|
||||
for (auto ssrc : ssrcGroup.ssrcs) {
|
||||
int32_t signedValue = *(int32_t *)&ssrc;
|
||||
ssrcGroupSources.push_back(json11::Json(signedValue));
|
||||
}
|
||||
|
||||
ssrcGroupJson.insert(std::make_pair("sources", json11::Json(std::move(ssrcGroupSources))));
|
||||
ssrcGroupJson.insert(std::make_pair("semantics", json11::Json(ssrcGroup.semantics)));
|
||||
|
||||
ssrcGroups.push_back(json11::Json(std::move(ssrcGroupJson)));
|
||||
}
|
||||
object.insert(std::make_pair("ssrc-groups", json11::Json(std::move(ssrcGroups))));
|
||||
}
|
||||
|
||||
auto json = json11::Json(std::move(object));
|
||||
return json.dump();
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinResponsePayload> GroupJoinResponsePayload::parse(std::string const &data) {
|
||||
std::string parsingError;
|
||||
auto json = json11::Json::parse(std::string(data.begin(), data.end()), parsingError);
|
||||
if (json.type() != json11::Json::OBJECT) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
tgcalls::GroupJoinResponsePayload result;
|
||||
|
||||
const auto transport = json.object_items().find("transport");
|
||||
if (transport == json.object_items().end() || !transport->second.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
if (const auto parsedTransport = parseTransportDescription(transport->second.object_items())) {
|
||||
result.transport = parsedTransport.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
const auto video = json.object_items().find("video");
|
||||
if (video != json.object_items().end() && video->second.is_object()) {
|
||||
result.videoInformation = parseVideoInformation(video->second.object_items());
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
#ifndef TGCALLS_GROUP_JOIN_PAYLOAD_INTERNAL_H
|
||||
#define TGCALLS_GROUP_JOIN_PAYLOAD_INTERNAL_H
|
||||
|
||||
#include "GroupJoinPayload.h"
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
struct GroupJoinResponsePayload {
|
||||
GroupJoinTransportDescription transport;
|
||||
absl::optional<GroupJoinVideoInformation> videoInformation;
|
||||
|
||||
static absl::optional<GroupJoinResponsePayload> parse(std::string const &data);
|
||||
};
|
||||
|
||||
struct GroupJoinInternalPayload {
|
||||
GroupJoinTransportDescription transport;
|
||||
|
||||
uint32_t audioSsrc = 0;
|
||||
absl::optional<GroupParticipantVideoInformation> videoInformation;
|
||||
|
||||
std::string serialize();
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
680
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp
Normal file
680
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp
Normal file
|
|
@ -0,0 +1,680 @@
|
|||
#include "group/GroupNetworkManager.h"
|
||||
|
||||
#include "p2p/base/basic_packet_socket_factory.h"
|
||||
#include "p2p/client/basic_port_allocator.h"
|
||||
#include "p2p/base/p2p_transport_channel.h"
|
||||
#include "p2p/base/basic_async_resolver_factory.h"
|
||||
#include "api/packet_socket_factory.h"
|
||||
#include "rtc_base/rtc_certificate_generator.h"
|
||||
#include "p2p/base/ice_credentials_iterator.h"
|
||||
#include "api/jsep_ice_candidate.h"
|
||||
#include "p2p/base/dtls_transport.h"
|
||||
#include "p2p/base/dtls_transport_factory.h"
|
||||
#include "pc/dtls_srtp_transport.h"
|
||||
#include "pc/dtls_transport.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_util.h"
|
||||
#include "modules/rtp_rtcp/source/byte_io.h"
|
||||
#include "platform/PlatformInterface.h"
|
||||
#include "TurnCustomizerImpl.h"
|
||||
#include "SctpDataChannelProviderInterfaceImpl.h"
|
||||
#include "StaticThreads.h"
|
||||
#include "call/rtp_packet_sink_interface.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
enum {
|
||||
kRtcpExpectedVersion = 2,
|
||||
kRtcpMinHeaderLength = 4,
|
||||
kRtcpMinParseLength = 8,
|
||||
|
||||
kRtpExpectedVersion = 2,
|
||||
kRtpMinParseLength = 12
|
||||
};
|
||||
|
||||
static void updateHeaderWithVoiceActivity(rtc::CopyOnWriteBuffer *packet, const uint8_t* ptrRTPDataExtensionEnd, const uint8_t* ptr, bool voiceActivity, bool zeroAudioLevel) {
|
||||
while (ptrRTPDataExtensionEnd - ptr > 0) {
|
||||
// 0
|
||||
// 0 1 2 3 4 5 6 7
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
// | ID | len |
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
|
||||
// Note that 'len' is the header extension element length, which is the
|
||||
// number of bytes - 1.
|
||||
const int id = (*ptr & 0xf0) >> 4;
|
||||
const int len = (*ptr & 0x0f);
|
||||
ptr++;
|
||||
|
||||
if (id == 0) {
|
||||
// Padding byte, skip ignoring len.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (id == 15) {
|
||||
RTC_LOG(LS_VERBOSE)
|
||||
<< "RTP extension header 15 encountered. Terminate parsing.";
|
||||
return;
|
||||
}
|
||||
|
||||
if (ptrRTPDataExtensionEnd - ptr < (len + 1)) {
|
||||
RTC_LOG(LS_WARNING) << "Incorrect one-byte extension len: " << (len + 1)
|
||||
<< ", bytes left in buffer: "
|
||||
<< (ptrRTPDataExtensionEnd - ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (id == 1) { // kAudioLevelUri
|
||||
uint8_t audioLevel = ptr[0] & 0x7f;
|
||||
if (zeroAudioLevel) {
|
||||
if (audioLevel < 47) {
|
||||
audioLevel = 0;
|
||||
} else if (audioLevel < 107) {
|
||||
audioLevel = 106;
|
||||
} else {
|
||||
audioLevel = 127;
|
||||
}
|
||||
}
|
||||
bool parsedVoiceActivity = (ptr[0] & 0x80) != 0;
|
||||
|
||||
if (parsedVoiceActivity != voiceActivity) {
|
||||
ptrdiff_t byteOffset = ptr - packet->data();
|
||||
uint8_t *mutableBytes = packet->MutableData();
|
||||
uint8_t audioActivityBit = voiceActivity ? 0x80 : 0;
|
||||
mutableBytes[byteOffset] = audioLevel | audioActivityBit;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
ptr += (len + 1);
|
||||
}
|
||||
}
|
||||
|
||||
#if 0 // Currently unused.
|
||||
static void readHeaderVoiceActivity(const uint8_t* ptrRTPDataExtensionEnd, const uint8_t* ptr, bool &didRead, uint8_t &audioLevel, bool &voiceActivity) {
|
||||
while (ptrRTPDataExtensionEnd - ptr > 0) {
|
||||
// 0
|
||||
// 0 1 2 3 4 5 6 7
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
// | ID | len |
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
|
||||
// Note that 'len' is the header extension element length, which is the
|
||||
// number of bytes - 1.
|
||||
const int id = (*ptr & 0xf0) >> 4;
|
||||
const int len = (*ptr & 0x0f);
|
||||
ptr++;
|
||||
|
||||
if (id == 0) {
|
||||
// Padding byte, skip ignoring len.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (id == 15) {
|
||||
RTC_LOG(LS_VERBOSE)
|
||||
<< "RTP extension header 15 encountered. Terminate parsing.";
|
||||
return;
|
||||
}
|
||||
|
||||
if (ptrRTPDataExtensionEnd - ptr < (len + 1)) {
|
||||
RTC_LOG(LS_WARNING) << "Incorrect one-byte extension len: " << (len + 1)
|
||||
<< ", bytes left in buffer: "
|
||||
<< (ptrRTPDataExtensionEnd - ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (id == 1) { // kAudioLevelUri
|
||||
didRead = true;
|
||||
audioLevel = ptr[0] & 0x7f;
|
||||
voiceActivity = (ptr[0] & 0x80) != 0;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
ptr += (len + 1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// Rewrites the voice-activity bit of the audio-level one-byte header extension
// in an outgoing Opus RTP packet (payload type 111) so that it matches
// `voiceActivity`. Any parse failure simply leaves the packet untouched.
// `zeroAudioLevel` is forwarded to updateHeaderWithVoiceActivity.
static void maybeUpdateRtpVoiceActivity(rtc::CopyOnWriteBuffer *packet, bool voiceActivity, bool zeroAudioLevel) {
    const uint8_t *_ptrRTPDataBegin = packet->data();
    const uint8_t *_ptrRTPDataEnd = packet->data() + packet->size();

    const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin;
    if (length < kRtpMinParseLength) {
        return;
    }

    // Version
    const uint8_t V = _ptrRTPDataBegin[0] >> 6;
    // eXtension
    const bool X = ((_ptrRTPDataBegin[0] & 0x10) == 0) ? false : true;
    const uint8_t CC = _ptrRTPDataBegin[0] & 0x0f;

    const uint8_t PT = _ptrRTPDataBegin[1] & 0x7f;

    // Skip the 12-byte fixed RTP header (first word + timestamp + SSRC).
    const uint8_t* ptr = &_ptrRTPDataBegin[4];

    ptr += 4;

    ptr += 4;

    if (V != kRtpExpectedVersion) {
        return;
    }

    const size_t CSRCocts = CC * 4;

    if ((ptr + CSRCocts) > _ptrRTPDataEnd) {
        return;
    }

    // Only audio packets (payload type 111) are rewritten.
    if (PT != 111) {
        return;
    }

    // Skip the CSRC list.
    for (uint8_t i = 0; i < CC; ++i) {
        ptr += 4;
    }

    if (X) {
        /* RTP header extension, RFC 3550.
         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |      defined by profile       |           length              |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |                        header extension                       |
        |                             ....                              |
        */
        const ptrdiff_t remain = _ptrRTPDataEnd - ptr;
        if (remain < 4) {
            return;
        }

        uint16_t definedByProfile = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
        ptr += 2;

        // in 32 bit words
        size_t XLen = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
        ptr += 2;
        XLen *= 4; // in bytes

        // The whole declared extension block must fit in the packet.
        if (static_cast<size_t>(remain) < (4 + XLen)) {
            return;
        }
        static constexpr uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
        if (definedByProfile == kRtpOneByteHeaderExtensionId) {
            const uint8_t* ptrRTPDataExtensionEnd = ptr + XLen;
            updateHeaderWithVoiceActivity(packet, ptrRTPDataExtensionEnd, ptr, voiceActivity, zeroAudioLevel);
        }
    }
}
|
||||
|
||||
#if 0 // Currently unused.
|
||||
static void maybeReadRtpVoiceActivity(rtc::CopyOnWriteBuffer *packet, bool &didRead, uint32_t &ssrc, uint8_t &audioLevel, bool &voiceActivity) {
|
||||
const uint8_t *_ptrRTPDataBegin = packet->data();
|
||||
const uint8_t *_ptrRTPDataEnd = packet->data() + packet->size();
|
||||
|
||||
const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin;
|
||||
if (length < kRtpMinParseLength) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Version
|
||||
const uint8_t V = _ptrRTPDataBegin[0] >> 6;
|
||||
// eXtension
|
||||
const bool X = ((_ptrRTPDataBegin[0] & 0x10) == 0) ? false : true;
|
||||
const uint8_t CC = _ptrRTPDataBegin[0] & 0x0f;
|
||||
|
||||
const uint8_t PT = _ptrRTPDataBegin[1] & 0x7f;
|
||||
|
||||
const uint8_t* ptr = &_ptrRTPDataBegin[4];
|
||||
|
||||
ptr += 4;
|
||||
|
||||
ssrc = webrtc::ByteReader<uint32_t>::ReadBigEndian(ptr);
|
||||
ptr += 4;
|
||||
|
||||
if (V != kRtpExpectedVersion) {
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t CSRCocts = CC * 4;
|
||||
|
||||
if ((ptr + CSRCocts) > _ptrRTPDataEnd) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (PT != 111) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (uint8_t i = 0; i < CC; ++i) {
|
||||
ptr += 4;
|
||||
}
|
||||
|
||||
if (X) {
|
||||
/* RTP header extension, RFC 3550.
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| defined by profile | length |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| header extension |
|
||||
| .... |
|
||||
*/
|
||||
const ptrdiff_t remain = _ptrRTPDataEnd - ptr;
|
||||
if (remain < 4) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint16_t definedByProfile = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
|
||||
// in 32 bit words
|
||||
size_t XLen = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
XLen *= 4; // in bytes
|
||||
|
||||
if (static_cast<size_t>(remain) < (4 + XLen)) {
|
||||
return;
|
||||
}
|
||||
static constexpr uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
|
||||
if (definedByProfile == kRtpOneByteHeaderExtensionId) {
|
||||
const uint8_t* ptrRTPDataExtensionEnd = ptr + XLen;
|
||||
readHeaderVoiceActivity(ptrRTPDataExtensionEnd, ptr, didRead, audioLevel, voiceActivity);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// DtlsSrtpTransport subclass that (a) stamps the current outgoing
// voice-activity flag into each sent RTP packet's audio-level extension and
// (b) forwards each received RTP packet to an externally supplied callback.
class WrappedDtlsSrtpTransport : public webrtc::DtlsSrtpTransport {
public:
    // Latest outgoing voice-activity flag; written via
    // GroupNetworkManager::setOutgoingVoiceActivity and read on every send.
    bool _voiceActivity = false;

public:
    // processRtpPacket: invoked for every received RTP packet; the bool marks
    // packets whose SSRC is not (yet) resolved to a known stream.
    // zeroAudioLevel: forwarded to maybeUpdateRtpVoiceActivity on send.
    WrappedDtlsSrtpTransport(bool rtcp_mux_enabled, const webrtc::FieldTrialsView& fieldTrials, std::function<void(webrtc::RtpPacketReceived const &, bool)> &&processRtpPacket, bool zeroAudioLevel) :
    webrtc::DtlsSrtpTransport(rtcp_mux_enabled, fieldTrials),
    _processRtpPacket(std::move(processRtpPacket)),
    _zeroAudioLevel(zeroAudioLevel) {
    }

    virtual ~WrappedDtlsSrtpTransport() {
    }

    // Patch the voice-activity bit into the packet before SRTP protection.
    bool SendRtpPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options, int flags) override {
        maybeUpdateRtpVoiceActivity(packet, _voiceActivity, _zeroAudioLevel);
        return webrtc::DtlsSrtpTransport::SendRtpPacket(packet, options, flags);
    }

    void ProcessRtpPacket(webrtc::RtpPacketReceived const &packet, bool isUnresolved) override {
        _processRtpPacket(packet, isUnresolved);
    }

private:
    std::function<void(webrtc::RtpPacketReceived const &, bool)> _processRtpPacket;
    bool _zeroAudioLevel;
};
|
||||
|
||||
// Default SRTP crypto policy for group calls: GCM suites only, with the
// legacy AES128-SHA1-80 cipher explicitly disabled.
webrtc::CryptoOptions GroupNetworkManager::getDefaulCryptoOptions() {
    webrtc::CryptoOptions cryptoOptions;
    cryptoOptions.srtp.enable_gcm_crypto_suites = true;
    cryptoOptions.srtp.enable_aes128_sha1_80_crypto_cipher = false;
    return cryptoOptions;
}
|
||||
|
||||
// Builds the network stack (must run on the network thread): generates local
// ICE credentials and a DTLS certificate, creates socket/network factories,
// and wires up the wrapped DTLS-SRTP transport so received RTP packets are
// routed to RtpPacketReceived_n. The actual ICE/DTLS transports are created
// by resetDtlsSrtpTransport() at the end.
GroupNetworkManager::GroupNetworkManager(
    const webrtc::FieldTrialsView &fieldTrials,
    std::function<void(const State &)> stateUpdated,
    std::function<void(uint32_t, int)> unknownSsrcPacketReceived,
    std::function<void(bool)> dataChannelStateUpdated,
    std::function<void(std::string const &)> dataChannelMessageReceived,
    std::function<void(uint32_t, uint8_t, bool)> audioActivityUpdated,
    bool zeroAudioLevel,
    std::function<void(uint32_t)> anyActivityUpdated,
    std::shared_ptr<Threads> threads) :
_threads(std::move(threads)),
_stateUpdated(std::move(stateUpdated)),
_unknownSsrcPacketReceived(std::move(unknownSsrcPacketReceived)),
_dataChannelStateUpdated(dataChannelStateUpdated),
_dataChannelMessageReceived(dataChannelMessageReceived),
_audioActivityUpdated(audioActivityUpdated),
_zeroAudioLevel(zeroAudioLevel),
_anyActivityUpdated(anyActivityUpdated) {
    assert(_threads->getNetworkThread()->IsCurrent());

    // Fresh random ICE ufrag/pwd and a self-signed ECDSA certificate for DTLS.
    _localIceParameters = PeerIceParameters(rtc::CreateRandomString(cricket::ICE_UFRAG_LENGTH), rtc::CreateRandomString(cricket::ICE_PWD_LENGTH), false);

    _localCertificate = rtc::RTCCertificateGenerator::GenerateCertificate(rtc::KeyParams(rtc::KT_ECDSA), absl::nullopt);

    _networkMonitorFactory = PlatformInterface::SharedInstance()->createNetworkMonitorFactory();

    _socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread()->socketserver()));
    _networkManager = std::make_unique<rtc::BasicNetworkManager>(_networkMonitorFactory.get(), _threads->getNetworkThread()->socketserver());
    _asyncResolverFactory = std::make_unique<webrtc::BasicAsyncDnsResolverFactory>();

    // rtcp_mux_enabled = true; incoming RTP is forwarded to RtpPacketReceived_n.
    _dtlsSrtpTransport = std::make_unique<WrappedDtlsSrtpTransport>(true, fieldTrials, [this](webrtc::RtpPacketReceived const &packet, bool isUnresolved) {
        this->RtpPacketReceived_n(packet, isUnresolved);
    }, _zeroAudioLevel);
    _dtlsSrtpTransport->SetDtlsTransports(nullptr, nullptr);
    _dtlsSrtpTransport->SetActiveResetSrtpParams(false);
    _dtlsSrtpTransport->SubscribeReadyToSend(this, [this](bool value) {
        this->DtlsReadyToSend(value);
    });

    resetDtlsSrtpTransport();
}
|
||||
|
||||
GroupNetworkManager::~GroupNetworkManager() {
    assert(_threads->getNetworkThread()->IsCurrent());

    RTC_LOG(LS_INFO) << "GroupNetworkManager::~GroupNetworkManager()";

    // Teardown order matters: the data channel sits on the DTLS transport,
    // DTLS-SRTP wraps the DTLS transport, which runs over the ICE transport
    // channel, which in turn uses the port allocator, network manager and
    // socket factory. Each layer is released before the layer it depends on.
    _dataChannelInterface.reset();
    _dtlsSrtpTransport.reset();
    _dtlsTransport.reset();
    _transportChannel.reset();
    _asyncResolverFactory.reset();
    _portAllocator.reset();
    _networkManager.reset();
    _socketFactory.reset();
}
|
||||
|
||||
// (Re)creates the ICE transport channel and DTLS transport from scratch and
// attaches them to the existing DTLS-SRTP transport. Called from the
// constructor and from stop() when credentials are regenerated.
void GroupNetworkManager::resetDtlsSrtpTransport() {
    std::unique_ptr<cricket::BasicPortAllocator> portAllocator = std::make_unique<cricket::BasicPortAllocator>(_networkManager.get(), _socketFactory.get(), _turnCustomizer.get(), nullptr);

    uint32_t flags = portAllocator->flags();

    flags |=
        cricket::PORTALLOCATOR_ENABLE_IPV6 |
        cricket::PORTALLOCATOR_ENABLE_IPV6_ON_WIFI;

    portAllocator->set_flags(flags);
    portAllocator->Initialize();

    // No STUN/TURN servers here; candidate pool size 2, no pruning.
    portAllocator->SetConfiguration({}, {}, 2, webrtc::NO_PRUNE, _turnCustomizer.get());

    webrtc::IceTransportInit iceTransportInit;
    iceTransportInit.set_port_allocator(portAllocator.get());
    iceTransportInit.set_async_dns_resolver_factory(_asyncResolverFactory.get());

    auto transportChannel = cricket::P2PTransportChannel::Create("transport", 0, std::move(iceTransportInit));

    cricket::IceConfig iceConfig;
    iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY;
    iceConfig.prioritize_most_likely_candidate_pairs = true;
    iceConfig.regather_on_failed_networks_interval = 2000;
    transportChannel->SetIceConfig(iceConfig);

    cricket::IceParameters localIceParameters(
        _localIceParameters.ufrag,
        _localIceParameters.pwd,
        false
    );

    transportChannel->SetIceParameters(localIceParameters);
    // The client always acts as the controlled side; the server is ICE-lite.
    const bool isOutgoing = false;
    transportChannel->SetIceRole(isOutgoing ? cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED);
    transportChannel->SetRemoteIceMode(cricket::ICEMODE_LITE);

    transportChannel->SignalIceTransportStateChanged.connect(this, &GroupNetworkManager::transportStateChanged);
    transportChannel->SignalReadPacket.connect(this, &GroupNetworkManager::transportPacketReceived);

    webrtc::CryptoOptions cryptoOptions = GroupNetworkManager::getDefaulCryptoOptions();

    auto dtlsTransport = std::make_unique<cricket::DtlsTransport>(transportChannel.get(), cryptoOptions, nullptr);

    dtlsTransport->SignalWritableState.connect(
        this, &GroupNetworkManager::OnTransportWritableState_n);
    dtlsTransport->SignalReceivingState.connect(
        this, &GroupNetworkManager::OnTransportReceivingState_n);

    // DTLS server role; the local certificate was generated in the ctor.
    dtlsTransport->SetDtlsRole(rtc::SSLRole::SSL_SERVER);
    dtlsTransport->SetLocalCertificate(_localCertificate);

    _dtlsSrtpTransport->SetDtlsTransports(dtlsTransport.get(), nullptr);

    // Keep members alive in dependency order (channel outlives DTLS usage).
    _dtlsTransport = std::move(dtlsTransport);
    _transportChannel = std::move(transportChannel);
    _portAllocator = std::move(portAllocator);
}
|
||||
|
||||
// Begins ICE candidate gathering and (re)creates the SCTP data channel.
void GroupNetworkManager::start() {
    _transportChannel->MaybeStartGathering();

    restartDataChannel();
}
|
||||
|
||||
// Tears down and recreates the SCTP data channel on top of the current DTLS
// transport. All callbacks capture a weak_ptr so a destroyed manager is never
// touched; they are expected to fire on the network thread.
void GroupNetworkManager::restartDataChannel() {
    // Report "closed" immediately; the new channel reports its own state later.
    _dataChannelStateUpdated(false);

    const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
    _dataChannelInterface.reset(new SctpDataChannelProviderInterfaceImpl(
        _dtlsTransport.get(),
        true,
        // State (open/closed) changes of the data channel.
        [weak, threads = _threads](bool state) {
            assert(threads->getNetworkThread()->IsCurrent());
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }
            strong->_dataChannelStateUpdated(state);
        },
        // Channel asked to be restarted (e.g. after failure).
        [weak, threads = _threads]() {
            assert(threads->getNetworkThread()->IsCurrent());
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }
            strong->restartDataChannel();
        },
        // Incoming data-channel message.
        [weak, threads = _threads](std::string const &message) {
            assert(threads->getNetworkThread()->IsCurrent());
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }
            strong->_dataChannelMessageReceived(message);
        },
        _threads
    ));

    // Seed the new channel with the current connectivity state.
    _dataChannelInterface->updateIsConnected(_isConnected);
}
|
||||
|
||||
// Disconnects all transport signals, drops the data channel, regenerates
// local ICE credentials and the DTLS certificate, and rebuilds the transports
// so the manager can be started again with a clean identity.
void GroupNetworkManager::stop() {
    _transportChannel->SignalIceTransportStateChanged.disconnect(this);
    _transportChannel->SignalReadPacket.disconnect(this);

    _dtlsTransport->SignalWritableState.disconnect(this);
    _dtlsTransport->SignalReceivingState.disconnect(this);

    _dataChannelInterface.reset();

    // New identity for the next session.
    _localIceParameters = PeerIceParameters(rtc::CreateRandomString(cricket::ICE_UFRAG_LENGTH), rtc::CreateRandomString(cricket::ICE_PWD_LENGTH), false);

    _localCertificate = rtc::RTCCertificateGenerator::GenerateCertificate(rtc::KeyParams(rtc::KT_ECDSA), absl::nullopt);

    resetDtlsSrtpTransport();
}
|
||||
|
||||
// Returns a copy of the locally generated ICE ufrag/pwd.
PeerIceParameters GroupNetworkManager::getLocalIceParameters() {
    return _localIceParameters;
}
|
||||
|
||||
// Computes the fingerprint of the local DTLS certificate, or returns nullptr
// when no certificate has been generated.
std::unique_ptr<rtc::SSLFingerprint> GroupNetworkManager::getLocalFingerprint() {
    const auto certificate = _localCertificate;
    if (certificate) {
        return rtc::SSLFingerprint::CreateFromCertificate(*certificate);
    }
    return nullptr;
}
|
||||
|
||||
// Applies the remote side's ICE credentials and candidates to the transport
// channel, and pins the remote DTLS certificate fingerprint when provided.
void GroupNetworkManager::setRemoteParams(PeerIceParameters const &remoteIceParameters, std::vector<cricket::Candidate> const &iceCandidates, rtc::SSLFingerprint *fingerprint) {
    _remoteIceParameters = remoteIceParameters;

    // renomination = true for the remote side.
    const cricket::IceParameters remoteParameters(
        remoteIceParameters.ufrag,
        remoteIceParameters.pwd,
        true
    );
    _transportChannel->SetRemoteIceParameters(remoteParameters);

    // Feed every remote candidate into the ICE transport.
    for (const auto &remoteCandidate : iceCandidates) {
        _transportChannel->AddRemoteCandidate(remoteCandidate);
    }

    if (fingerprint != nullptr) {
        _dtlsTransport->SetRemoteFingerprint(fingerprint->algorithm, fingerprint->digest.data(), fingerprint->digest.size());
    }
}
|
||||
|
||||
// Sends `message` over the SCTP data channel, if one currently exists;
// silently dropped otherwise.
void GroupNetworkManager::sendDataChannelMessage(std::string const &message) {
    if (_dataChannelInterface) {
        _dataChannelInterface->sendDataChannelMessage(message);
    }
}
|
||||
|
||||
void GroupNetworkManager::setOutgoingVoiceActivity(bool isSpeech) {
|
||||
if (_dtlsSrtpTransport) {
|
||||
((WrappedDtlsSrtpTransport *)_dtlsSrtpTransport.get())->_voiceActivity = isSpeech;
|
||||
}
|
||||
}
|
||||
|
||||
// Exposes the DTLS-SRTP transport as a generic RtpTransport (non-owning).
webrtc::RtpTransport *GroupNetworkManager::getRtpTransport() {
    return _dtlsSrtpTransport.get();
}
|
||||
|
||||
// Self-rescheduling watchdog: every second, checks whether any network
// activity was seen within the last 20 seconds; if not, reports a failed
// state. Uses a weak_ptr so a destroyed manager cancels the chain.
void GroupNetworkManager::checkConnectionTimeout() {
    const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
    _threads->getNetworkThread()->PostDelayedTask([weak]() {
        auto strong = weak.lock();
        if (!strong) {
            return;
        }

        int64_t currentTimestamp = rtc::TimeMillis();
        const int64_t maxTimeout = 20000;

        // _lastNetworkActivityMs is refreshed in transportPacketReceived.
        if (strong->_lastNetworkActivityMs + maxTimeout < currentTimestamp) {
            GroupNetworkManager::State emitState;
            emitState.isReadyToSendData = false;
            emitState.isFailed = true;
            strong->_stateUpdated(emitState);
        }

        // Re-arm the watchdog.
        strong->checkConnectionTimeout();
    }, webrtc::TimeDelta::Millis(1000));
}
|
||||
|
||||
// ICE candidate gathered. Intentionally a no-op beyond the thread check.
void GroupNetworkManager::candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) {
    assert(_threads->getNetworkThread()->IsCurrent());
}
|
||||
|
||||
// ICE gathering-state change. Intentionally a no-op beyond the thread check.
void GroupNetworkManager::candidateGatheringState(cricket::IceTransportInternal *transport) {
    assert(_threads->getNetworkThread()->IsCurrent());
}
|
||||
|
||||
// DTLS transport became (un)writable; recompute the aggregate connection state.
void GroupNetworkManager::OnTransportWritableState_n(rtc::PacketTransportInternal *transport) {
    assert(_threads->getNetworkThread()->IsCurrent());

    UpdateAggregateStates_n();
}
|
||||
// DTLS transport receiving state changed; recompute the aggregate state.
void GroupNetworkManager::OnTransportReceivingState_n(rtc::PacketTransportInternal *transport) {
    assert(_threads->getNetworkThread()->IsCurrent());

    UpdateAggregateStates_n();
}
|
||||
|
||||
// DTLS-SRTP ready-to-send changed. Recomputes the aggregate state now and,
// when becoming ready, once more asynchronously on the network thread (the
// transport's internal state may settle only after this callback returns).
void GroupNetworkManager::DtlsReadyToSend(bool isReadyToSend) {
    UpdateAggregateStates_n();

    if (isReadyToSend) {
        const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
        _threads->getNetworkThread()->PostTask([weak]() {
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }
            strong->UpdateAggregateStates_n();
        });
    }
}
|
||||
|
||||
// ICE transport state changed; recompute the aggregate connection state.
void GroupNetworkManager::transportStateChanged(cricket::IceTransportInternal *transport) {
    UpdateAggregateStates_n();
}
|
||||
|
||||
// ICE transport ready-to-send. Intentionally a no-op beyond the thread check.
void GroupNetworkManager::transportReadyToSend(cricket::IceTransportInternal *transport) {
    assert(_threads->getNetworkThread()->IsCurrent());
}
|
||||
|
||||
// Any packet arrived on the ICE transport: refresh the activity timestamp
// consumed by the checkConnectionTimeout watchdog.
void GroupNetworkManager::transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused) {
    assert(_threads->getNetworkThread()->IsCurrent());

    _lastNetworkActivityMs = rtc::TimeMillis();
}
|
||||
|
||||
// Handles every decrypted incoming RTP packet (on the network thread):
// reports the sender's audio level / speech flag when the audio-level
// extension is present, reports generic per-SSRC activity, and forwards
// packets with an unresolved SSRC to the unknown-SSRC callback.
void GroupNetworkManager::RtpPacketReceived_n(webrtc::RtpPacketReceived const &packet, bool isUnresolved) {
    if (packet.HasExtension(webrtc::kRtpExtensionAudioLevel)) {
        uint8_t audioLevel = 0;
        bool isSpeech = false;

        if (packet.GetExtension<webrtc::AudioLevel>(&isSpeech, &audioLevel)) {
            if (_audioActivityUpdated) {
                _audioActivityUpdated(packet.Ssrc(), audioLevel, isSpeech);
            }
        }
    }

    if (_anyActivityUpdated) {
        _anyActivityUpdated(packet.Ssrc());
    }

    if (isUnresolved && _unknownSsrcPacketReceived) {
        uint32_t ssrc = packet.Ssrc();
        int payloadType = packet.PayloadType();

        _unknownSsrcPacketReceived(ssrc, payloadType);
    }
}
|
||||
|
||||
void GroupNetworkManager::UpdateAggregateStates_n() {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
auto state = _transportChannel->GetIceTransportState();
|
||||
bool isConnected = false;
|
||||
switch (state) {
|
||||
case webrtc::IceTransportState::kConnected:
|
||||
case webrtc::IceTransportState::kCompleted:
|
||||
isConnected = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!_dtlsSrtpTransport->IsWritable(false)) {
|
||||
isConnected = false;
|
||||
}
|
||||
|
||||
if (_isConnected != isConnected) {
|
||||
_isConnected = isConnected;
|
||||
|
||||
GroupNetworkManager::State emitState;
|
||||
emitState.isReadyToSendData = isConnected;
|
||||
_stateUpdated(emitState);
|
||||
|
||||
if (_dataChannelInterface) {
|
||||
_dataChannelInterface->updateIsConnected(isConnected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SCTP ready-to-send hook. Intentionally empty.
void GroupNetworkManager::sctpReadyToSendData() {
}
|
||||
|
||||
} // namespace tgcalls
|
||||
133
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h
Normal file
133
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h
Normal file
|
|
@ -0,0 +1,133 @@
|
|||
#ifndef TGCALLS_GROUP_NETWORK_MANAGER_H
|
||||
#define TGCALLS_GROUP_NETWORK_MANAGER_H
|
||||
|
||||
#ifdef WEBRTC_WIN
|
||||
// Compiler errors in conflicting Windows headers if not included here.
|
||||
#include <winsock2.h>
|
||||
#endif // WEBRTC_WIN
|
||||
|
||||
#include "rtc_base/copy_on_write_buffer.h"
|
||||
#include "rtc_base/third_party/sigslot/sigslot.h"
|
||||
#include "api/candidate.h"
|
||||
#include "media/base/media_channel.h"
|
||||
#include "pc/sctp_transport.h"
|
||||
#include "pc/sctp_data_channel.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
|
||||
#include "Message.h"
|
||||
#include "ThreadLocalObject.h"
|
||||
|
||||
namespace rtc {
|
||||
class BasicPacketSocketFactory;
|
||||
class BasicNetworkManager;
|
||||
class PacketTransportInternal;
|
||||
struct NetworkRoute;
|
||||
} // namespace rtc
|
||||
|
||||
namespace cricket {
|
||||
class BasicPortAllocator;
|
||||
class P2PTransportChannel;
|
||||
class IceTransportInternal;
|
||||
class DtlsTransport;
|
||||
} // namespace cricket
|
||||
|
||||
namespace webrtc {
|
||||
class BasicAsyncResolverFactory;
|
||||
class TurnCustomizer;
|
||||
class DtlsSrtpTransport;
|
||||
class RtpTransport;
|
||||
} // namespace webrtc
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
struct Message;
|
||||
class SctpDataChannelProviderInterfaceImpl;
|
||||
class Threads;
|
||||
|
||||
// Owns the complete network stack for a group call: ICE transport, DTLS,
// SRTP and the SCTP data channel. All methods are expected to run on the
// network thread (see assertions in the implementation).
class GroupNetworkManager : public sigslot::has_slots<>, public std::enable_shared_from_this<GroupNetworkManager> {
public:
    // Aggregate connection state reported through the stateUpdated callback.
    struct State {
        bool isReadyToSendData = false;
        bool isFailed = false;
    };

    static webrtc::CryptoOptions getDefaulCryptoOptions();

    // Callbacks:
    //  stateUpdated              - aggregate connection state changed.
    //  unknownSsrcPacketReceived - RTP packet (ssrc, payload type) from an
    //                              unresolved SSRC arrived.
    //  dataChannelStateUpdated   - SCTP data channel opened/closed.
    //  dataChannelMessageReceived- incoming data-channel message.
    //  audioActivityUpdated      - (ssrc, audio level, voice activity) from
    //                              the RTP audio-level extension.
    //  anyActivityUpdated        - any RTP packet seen for the given ssrc.
    GroupNetworkManager(
        const webrtc::FieldTrialsView& fieldTrials,
        std::function<void(const State &)> stateUpdated,
        std::function<void(uint32_t, int)> unknownSsrcPacketReceived,
        std::function<void(bool)> dataChannelStateUpdated,
        std::function<void(std::string const &)> dataChannelMessageReceived,
        std::function<void(uint32_t, uint8_t, bool)> audioActivityUpdated,
        bool zeroAudioLevel,
        std::function<void(uint32_t)> anyActivityUpdated,
        std::shared_ptr<Threads> threads);
    ~GroupNetworkManager();

    void start();
    void stop();

    PeerIceParameters getLocalIceParameters();
    std::unique_ptr<rtc::SSLFingerprint> getLocalFingerprint();
    void setRemoteParams(PeerIceParameters const &remoteIceParameters, std::vector<cricket::Candidate> const &iceCandidates, rtc::SSLFingerprint *fingerprint);

    void sendDataChannelMessage(std::string const &message);

    // Sets the speech flag stamped into outgoing RTP audio-level extensions.
    void setOutgoingVoiceActivity(bool isSpeech);

    webrtc::RtpTransport *getRtpTransport();

private:
    void resetDtlsSrtpTransport();
    void restartDataChannel();
    void checkConnectionTimeout();
    void candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate);
    void candidateGatheringState(cricket::IceTransportInternal *transport);
    void OnTransportWritableState_n(rtc::PacketTransportInternal *transport);
    void OnTransportReceivingState_n(rtc::PacketTransportInternal *transport);
    void transportStateChanged(cricket::IceTransportInternal *transport);
    void transportReadyToSend(cricket::IceTransportInternal *transport);
    void transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused);
    void DtlsReadyToSend(bool DtlsReadyToSend);
    void UpdateAggregateStates_n();
    void RtpPacketReceived_n(webrtc::RtpPacketReceived const &packet, bool isUnresolved);
    void OnRtcpPacketReceived_n(rtc::CopyOnWriteBuffer *packet, int64_t packet_time_us);

    void sctpReadyToSendData();

    std::shared_ptr<Threads> _threads;
    std::function<void(const GroupNetworkManager::State &)> _stateUpdated;
    std::function<void(uint32_t, int)> _unknownSsrcPacketReceived;
    std::function<void(bool)> _dataChannelStateUpdated;
    std::function<void(std::string const &)> _dataChannelMessageReceived;
    std::function<void(uint32_t, uint8_t, bool)> _audioActivityUpdated;
    bool _zeroAudioLevel = false;
    std::function<void(uint32_t)> _anyActivityUpdated;

    // Network stack components, listed roughly bottom-up (sockets first).
    std::unique_ptr<rtc::NetworkMonitorFactory> _networkMonitorFactory;
    std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
    std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
    std::unique_ptr<webrtc::TurnCustomizer> _turnCustomizer;
    std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
    std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface> _asyncResolverFactory;
    std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;
    std::unique_ptr<cricket::DtlsTransport> _dtlsTransport;
    std::unique_ptr<webrtc::DtlsSrtpTransport> _dtlsSrtpTransport;

    std::unique_ptr<SctpDataChannelProviderInterfaceImpl> _dataChannelInterface;

    // Local identity: DTLS certificate and ICE credentials.
    webrtc::scoped_refptr<rtc::RTCCertificate> _localCertificate;
    PeerIceParameters _localIceParameters;
    absl::optional<PeerIceParameters> _remoteIceParameters;

    bool _isConnected = false;
    // Timestamp of the last received transport packet (watchdog input).
    int64_t _lastNetworkActivityMs = 0;
};
|
||||
|
||||
} // namespace tgcalls
|
||||
|
||||
#endif
|
||||
1109
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp
Normal file
1109
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp
Normal file
File diff suppressed because it is too large
Load diff
54
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h
Normal file
54
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
#ifndef TGCALLS_STREAMING_MEDIA_CONTEXT_H
|
||||
#define TGCALLS_STREAMING_MEDIA_CONTEXT_H
|
||||
|
||||
#include "GroupInstanceImpl.h"
|
||||
#include <stdint.h>
|
||||
#include "../StaticThreads.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class StreamingMediaContextPrivate;
|
||||
|
||||
// Public facade for broadcast (streaming) group calls: fetches audio/video
// broadcast parts via the supplied request callbacks and exposes decoded
// audio samples and per-endpoint video sinks. Implementation details live in
// StreamingMediaContextPrivate.
class StreamingMediaContext {
public:
    // A video stream to consume, identified by server endpoint id and the
    // desired quality level.
    struct VideoChannel {
        VideoChannelDescription::Quality quality = VideoChannelDescription::Quality::Thumbnail;
        std::string endpoint;

        VideoChannel(VideoChannelDescription::Quality quality_, std::string endpoint_) :
        quality(quality_),
        endpoint(endpoint_) {
        }
    };

public:
    // Construction parameters; all request callbacks return a cancellable task.
    struct StreamingMediaContextArguments {
        std::shared_ptr<Threads> threads;
        bool isUnifiedBroadcast = false;
        std::function<std::shared_ptr<BroadcastPartTask>(std::function<void(int64_t)>)> requestCurrentTime;
        std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, std::function<void(BroadcastPart &&)>)> requestAudioBroadcastPart;
        std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function<void(BroadcastPart &&)>)> requestVideoBroadcastPart;
        // (ssrc, level, voice activity) updates for remote participants.
        std::function<void(uint32_t, float, bool)> updateAudioLevel;
        std::shared_ptr<PlatformContext> platformContext;
    };

public:
    StreamingMediaContext(StreamingMediaContextArguments &&arguments);
    ~StreamingMediaContext();

    StreamingMediaContext& operator=(const StreamingMediaContext&) = delete;
    StreamingMediaContext& operator=(StreamingMediaContext&&) = delete;

    void setActiveVideoChannels(std::vector<VideoChannel> const &videoChannels);
    void setVolume(uint32_t ssrc, double volume);
    void addVideoSink(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

    // Pulls mixed PCM audio for playback (called from the audio pipeline).
    void getAudio(int16_t *audio_samples, const size_t num_samples, const size_t num_channels, const uint32_t samples_per_sec);

private:
    std::shared_ptr<StreamingMediaContextPrivate> _private;
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
923
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp
Normal file
923
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp
Normal file
|
|
@ -0,0 +1,923 @@
|
|||
#include "VideoStreamingPart.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
|
||||
#include "AVIOContextImpl.h"
|
||||
#include "platform/PlatformInterface.h"
|
||||
|
||||
#include <string>
|
||||
#include <set>
|
||||
#include <map>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
class MediaDataPacket {
|
||||
public:
|
||||
MediaDataPacket() : _packet(av_packet_alloc()) {
|
||||
}
|
||||
|
||||
MediaDataPacket(MediaDataPacket &&other) : _packet(other._packet) {
|
||||
other._packet = nullptr;
|
||||
}
|
||||
|
||||
~MediaDataPacket() {
|
||||
if (_packet) {
|
||||
av_packet_free(&_packet);
|
||||
}
|
||||
}
|
||||
|
||||
AVPacket *packet() {
|
||||
return _packet;
|
||||
}
|
||||
|
||||
private:
|
||||
AVPacket *_packet = nullptr;
|
||||
};
|
||||
|
||||
// A compressed media packet together with its presentation/decode timestamps,
// ready to be fed to a decoder. Owns the packet (moved in).
class DecodableFrame {
public:
    DecodableFrame(MediaDataPacket packet, int64_t pts, int64_t dts):
    _packet(std::move(packet)),
    _pts(pts),
    _dts(dts) {
    }

    ~DecodableFrame() {
    }

    MediaDataPacket &packet() {
        return _packet;
    }

    // Presentation timestamp (stream time-base units).
    int64_t pts() {
        return _pts;
    }

    // Decode timestamp (stream time-base units).
    int64_t dts() {
        return _dts;
    }

private:
    MediaDataPacket _packet;
    int64_t _pts = 0;
    int64_t _dts = 0;
};
|
||||
|
||||
class Frame {
|
||||
public:
|
||||
Frame() {
|
||||
_frame = av_frame_alloc();
|
||||
}
|
||||
|
||||
Frame(Frame &&other) {
|
||||
_frame = other._frame;
|
||||
other._frame = nullptr;
|
||||
}
|
||||
|
||||
~Frame() {
|
||||
if (_frame) {
|
||||
av_frame_free(&_frame);
|
||||
}
|
||||
}
|
||||
|
||||
AVFrame *frame() {
|
||||
return _frame;
|
||||
}
|
||||
|
||||
double pts(AVStream *stream, double &firstFramePts) {
|
||||
int64_t framePts = _frame->pts;
|
||||
double spf = av_q2d(stream->time_base);
|
||||
double value = ((double)framePts) * spf;
|
||||
|
||||
if (firstFramePts < 0.0) {
|
||||
firstFramePts = value;
|
||||
}
|
||||
|
||||
return value - firstFramePts;
|
||||
}
|
||||
|
||||
private:
|
||||
AVFrame *_frame = nullptr;
|
||||
};
|
||||
|
||||
// One event from the serialized video stream info header.
struct VideoStreamEvent {
    int32_t offset = 0;          // event position within the part (units per serializer; see readVideoStreamEvent)
    std::string endpointId;      // video endpoint the event refers to
    int32_t rotation = 0;        // NOTE(review): presumably degrees — confirm against the serializer
    int32_t extra = 0;           // opaque extra field from the serialized format
};
|
||||
|
||||
// Parsed header of a video broadcast part: container name, an active-streams
// bitmask, and the list of stream events.
struct VideoStreamInfo {
    std::string container;
    int32_t activeMask = 0;
    std::vector<VideoStreamEvent> events;
};
|
||||
|
||||
// Reads 4 bytes at `offset` from `data` as a host-endian int32 and advances
// `offset`. Returns nullopt (leaving `offset` unchanged) when fewer than 4
// bytes remain. The bounds check is done in unsigned arithmetic after an
// explicit negativity test, so no signed overflow can occur for any `offset`.
absl::optional<int32_t> readInt32(std::vector<uint8_t> const &data, int &offset) {
    if (offset < 0 || static_cast<size_t>(offset) + 4 > data.size()) {
        return absl::nullopt;
    }

    int32_t value = 0;
    memcpy(&value, data.data() + offset, 4);
    offset += 4;

    return value;
}
|
||||
|
||||
// Reads `count` (1..4) bytes at `offset` as a host-endian non-negative
// integer and advances `offset`. Returns nullopt for count == 0, count > 4,
// or an out-of-range read.
//
// Bug fix: the return type was absl::optional<uint8_t>, which truncated every
// multi-byte read to 8 bits — in particular the 3-byte long-string length in
// readSerializedString, breaking all strings of length >= 256. Widened to
// int32_t (backward compatible: every caller consumes the value as an int).
absl::optional<int32_t> readBytesAsInt32(std::vector<uint8_t> const &data, int &offset, int count) {
    if (offset < 0 || count < 0 || static_cast<size_t>(offset) + static_cast<size_t>(count) > data.size()) {
        return absl::nullopt;
    }

    if (count == 0) {
        return absl::nullopt;
    }

    if (count <= 4) {
        int32_t value = 0;
        memcpy(&value, data.data() + offset, count);
        offset += count;
        return value;
    } else {
        return absl::nullopt;
    }
}
|
||||
|
||||
// Rounds numToRound up to the nearest multiple of `multiple`.
// A multiple of 0 returns the value unchanged.
int32_t roundUp(int32_t numToRound, int32_t multiple) {
    if (multiple == 0) {
        return numToRound;
    }

    const int32_t remainder = numToRound % multiple;
    return remainder == 0 ? numToRound : numToRound + multiple - remainder;
}
|
||||
|
||||
// Reads a length-prefixed string from `data` at `offset`, advancing `offset`
// past the string and its alignment padding. Encoding: a first byte < 254 is
// the length itself (content padded so that 1 + length is a multiple of 4);
// the marker byte 254 means a 3-byte little-endian length follows (content
// padded to a 4-byte boundary). Returns nullopt on any out-of-range read.
absl::optional<std::string> readSerializedString(std::vector<uint8_t> const &data, int &offset) {
    if (const auto tmp = readBytesAsInt32(data, offset, 1)) {
        int paddingBytes = 0;
        int length = 0;
        if (tmp.value() == 254) {
            // Long form: 3-byte length, content padded to 4 bytes.
            if (const auto len = readBytesAsInt32(data, offset, 3)) {
                length = len.value();
                paddingBytes = roundUp(length, 4) - length;
            } else {
                return absl::nullopt;
            }
        }
        else {
            // Short form: the byte itself is the length; the prefix byte
            // counts toward the 4-byte alignment.
            length = tmp.value();
            paddingBytes = roundUp(length + 1, 4) - (length + 1);
        }

        if (offset + length > data.size()) {
            return absl::nullopt;
        }

        std::string result(data.data() + offset, data.data() + offset + length);

        offset += length;
        offset += paddingBytes;

        return result;
    } else {
        return absl::nullopt;
    }
}
|
||||
|
||||
// Parses one serialized VideoStreamEvent (offset, endpointId, rotation,
// extra — in that order) from `data`, advancing `offset`. Returns
// absl::nullopt on any truncated field.
absl::optional<VideoStreamEvent> readVideoStreamEvent(std::vector<uint8_t> const &data, int &offset) {
    const auto eventOffset = readInt32(data, offset);
    if (!eventOffset) {
        return absl::nullopt;
    }

    const auto endpointId = readSerializedString(data, offset);
    if (!endpointId) {
        return absl::nullopt;
    }

    const auto rotation = readInt32(data, offset);
    if (!rotation) {
        return absl::nullopt;
    }

    const auto extra = readInt32(data, offset);
    if (!extra) {
        return absl::nullopt;
    }

    VideoStreamEvent event;
    event.offset = eventOffset.value();
    event.endpointId = endpointId.value();
    event.rotation = rotation.value();
    event.extra = extra.value();
    return event;
}
|
||||
|
||||
// Parses the VideoStreamInfo header from the front of `data` and, on
// success, erases the consumed header bytes so `data` is left holding only
// the media payload. Returns absl::nullopt (leaving `data` untouched) on
// any parse failure.
absl::optional<VideoStreamInfo> consumeVideoStreamInfo(std::vector<uint8_t> &data) {
    int offset = 0;
    // Magic signature identifying a serialized video stream info header.
    if (const auto signature = readInt32(data, offset)) {
        if (signature.value() != 0xa12e810d) {
            return absl::nullopt;
        }
    } else {
        return absl::nullopt;
    }

    VideoStreamInfo info;

    if (const auto container = readSerializedString(data, offset)) {
        info.container = container.value();
    } else {
        return absl::nullopt;
    }

    if (const auto activeMask = readInt32(data, offset)) {
        info.activeMask = activeMask.value();
    } else {
        return absl::nullopt;
    }

    // NOTE(review): only a single event is consumed even when eventCount is
    // greater than 1; remaining events would be left in the payload —
    // confirm this matches the serializer (which may always write one).
    if (const auto eventCount = readInt32(data, offset)) {
        if (eventCount > 0) {
            if (const auto event = readVideoStreamEvent(data, offset)) {
                info.events.push_back(event.value());
            } else {
                return absl::nullopt;
            }
        } else {
            return absl::nullopt;
        }
    } else {
        return absl::nullopt;
    }

    // Strip the parsed header so event offsets index into the remainder.
    data.erase(data.begin(), data.begin() + offset);

    return info;
}
|
||||
|
||||
// Field-by-field equality of the codec parameters that matter for decoder
// reuse: codec id, extradata bytes, pixel format/profile/level, geometry,
// aspect ratio, field order, and all color description fields.
bool areCodecParametersEqual(AVCodecParameters const &lhs, AVCodecParameters const &rhs) {
    if (lhs.codec_id != rhs.codec_id) {
        return false;
    }
    if (lhs.extradata_size != rhs.extradata_size) {
        return false;
    }
    // Sizes match; only compare bytes when there is extradata at all.
    if (lhs.extradata_size != 0 && memcmp(lhs.extradata, rhs.extradata, lhs.extradata_size) != 0) {
        return false;
    }

    return lhs.format == rhs.format
        && lhs.profile == rhs.profile
        && lhs.level == rhs.level
        && lhs.width == rhs.width
        && lhs.height == rhs.height
        && lhs.sample_aspect_ratio.num == rhs.sample_aspect_ratio.num
        && lhs.sample_aspect_ratio.den == rhs.sample_aspect_ratio.den
        && lhs.field_order == rhs.field_order
        && lhs.color_range == rhs.color_range
        && lhs.color_primaries == rhs.color_primaries
        && lhs.color_trc == rhs.color_trc
        && lhs.color_space == rhs.color_space
        && lhs.chroma_location == rhs.chroma_location;
}
|
||||
|
||||
class VideoStreamingDecoderState {
|
||||
public:
|
||||
static std::unique_ptr<VideoStreamingDecoderState> create(
|
||||
AVCodecParameters const *codecParameters,
|
||||
AVRational pktTimebase
|
||||
) {
|
||||
AVCodec const *codec = nullptr;
|
||||
if (!codec) {
|
||||
codec = avcodec_find_decoder(codecParameters->codec_id);
|
||||
}
|
||||
if (!codec) {
|
||||
return nullptr;
|
||||
}
|
||||
AVCodecContext *codecContext = avcodec_alloc_context3(codec);
|
||||
int ret = avcodec_parameters_to_context(codecContext, codecParameters);
|
||||
if (ret < 0) {
|
||||
avcodec_free_context(&codecContext);
|
||||
return nullptr;
|
||||
} else {
|
||||
codecContext->pkt_timebase = pktTimebase;
|
||||
|
||||
PlatformInterface::SharedInstance()->setupVideoDecoding(codecContext);
|
||||
|
||||
ret = avcodec_open2(codecContext, codec, nullptr);
|
||||
if (ret < 0) {
|
||||
avcodec_free_context(&codecContext);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_unique<VideoStreamingDecoderState>(
|
||||
codecContext,
|
||||
codecParameters,
|
||||
pktTimebase
|
||||
);
|
||||
}
|
||||
|
||||
public:
|
||||
VideoStreamingDecoderState(
|
||||
AVCodecContext *codecContext,
|
||||
AVCodecParameters const *codecParameters,
|
||||
AVRational pktTimebase
|
||||
) {
|
||||
_codecContext = codecContext;
|
||||
_codecParameters = avcodec_parameters_alloc();
|
||||
avcodec_parameters_copy(_codecParameters, codecParameters);
|
||||
_pktTimebase = pktTimebase;
|
||||
}
|
||||
|
||||
~VideoStreamingDecoderState() {
|
||||
if (_codecContext) {
|
||||
avcodec_close(_codecContext);
|
||||
avcodec_free_context(&_codecContext);
|
||||
}
|
||||
if (_codecParameters) {
|
||||
avcodec_parameters_free(&_codecParameters);
|
||||
}
|
||||
}
|
||||
|
||||
bool supportsDecoding(
|
||||
AVCodecParameters const *codecParameters,
|
||||
AVRational pktTimebase
|
||||
) const {
|
||||
if (!areCodecParametersEqual(*_codecParameters, *codecParameters)) {
|
||||
return false;
|
||||
}
|
||||
if (_pktTimebase.num != pktTimebase.num) {
|
||||
return false;
|
||||
}
|
||||
if (_pktTimebase.den != pktTimebase.den) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int sendFrame(std::shared_ptr<DecodableFrame> frame) {
|
||||
if (frame) {
|
||||
int status = avcodec_send_packet(_codecContext, frame->packet().packet());
|
||||
return status;
|
||||
} else {
|
||||
int status = avcodec_send_packet(_codecContext, nullptr);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
int receiveFrame(Frame &frame) {
|
||||
int status = avcodec_receive_frame(_codecContext, frame.frame());
|
||||
return status;
|
||||
}
|
||||
|
||||
void reset() {
|
||||
avcodec_flush_buffers(_codecContext);
|
||||
}
|
||||
|
||||
private:
|
||||
AVCodecContext *_codecContext = nullptr;
|
||||
AVCodecParameters *_codecParameters = nullptr;
|
||||
AVRational _pktTimebase;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
class VideoStreamingSharedStateInternal {
|
||||
public:
|
||||
VideoStreamingSharedStateInternal() {
|
||||
}
|
||||
|
||||
~VideoStreamingSharedStateInternal() {
|
||||
}
|
||||
|
||||
void updateDecoderState(
|
||||
AVCodecParameters const *codecParameters,
|
||||
AVRational pktTimebase
|
||||
) {
|
||||
if (_decoderState && _decoderState->supportsDecoding(codecParameters, pktTimebase)) {
|
||||
return;
|
||||
}
|
||||
|
||||
_decoderState.reset();
|
||||
_decoderState = VideoStreamingDecoderState::create(codecParameters, pktTimebase);
|
||||
}
|
||||
|
||||
int sendFrame(std::shared_ptr<DecodableFrame> frame) {
|
||||
if (!_decoderState) {
|
||||
return AVERROR(EIO);
|
||||
}
|
||||
return _decoderState->sendFrame(frame);
|
||||
}
|
||||
|
||||
int receiveFrame(Frame &frame) {
|
||||
if (!_decoderState) {
|
||||
return AVERROR(EIO);
|
||||
}
|
||||
return _decoderState->receiveFrame(frame);
|
||||
}
|
||||
|
||||
void reset() {
|
||||
if (!_decoderState) {
|
||||
return;
|
||||
}
|
||||
_decoderState->reset();
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<VideoStreamingDecoderState> _decoderState;
|
||||
};
|
||||
|
||||
// Allocates the internal implementation; released in the destructor.
VideoStreamingSharedState::VideoStreamingSharedState() {
    _impl = new VideoStreamingSharedStateInternal();
}
|
||||
|
||||
// Destroys the internal implementation allocated in the constructor.
VideoStreamingSharedState::~VideoStreamingSharedState() {
    delete _impl;
}
|
||||
|
||||
// Demuxes and decodes one participant's slice of a broadcast part: owns the
// in-memory AVIO context, the demuxer (AVFormatContext), and a copy of the
// video stream's codec parameters. Actual decoding goes through the shared
// decoder owned by VideoStreamingSharedState, so consecutive parts with the
// same codec parameters reuse one decoder.
class VideoStreamingPartInternal {
public:
    // Opens the in-memory payload with the named FFmpeg demuxer (`container`,
    // e.g. "mp4") and locates the first video stream. On any failure the part
    // is marked fully read (_didReadToEnd) so getNextFrame() yields nothing.
    VideoStreamingPartInternal(std::string endpointId, webrtc::VideoRotation rotation, std::vector<uint8_t> &&fileData, std::string const &container) :
    _endpointId(endpointId),
    _rotation(rotation) {
        _avIoContext = std::make_unique<AVIOContextImpl>(std::move(fileData));

        int ret = 0;

        // FFmpeg 59+ made av_find_input_format return a const pointer.
#if LIBAVFORMAT_VERSION_MAJOR >= 59
        const
#endif
        AVInputFormat *inputFormat = av_find_input_format(container.c_str());
        if (!inputFormat) {
            _didReadToEnd = true;
            return;
        }

        _inputFormatContext = avformat_alloc_context();
        if (!_inputFormatContext) {
            _didReadToEnd = true;
            return;
        }

        // Route all demuxer I/O through the in-memory buffer.
        _inputFormatContext->pb = _avIoContext->getContext();

        if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) {
            _didReadToEnd = true;
            return;
        }

        if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) {
            _didReadToEnd = true;

            avformat_close_input(&_inputFormatContext);
            _inputFormatContext = nullptr;
            return;
        }

        // Pick the first video stream; all other streams are ignored.
        AVCodecParameters *videoCodecParameters = nullptr;
        AVStream *videoStream = nullptr;
        for (int i = 0; i < _inputFormatContext->nb_streams; i++) {
            AVStream *inStream = _inputFormatContext->streams[i];

            AVCodecParameters *inCodecpar = inStream->codecpar;
            if (inCodecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
                continue;
            }
            videoCodecParameters = inCodecpar;
            videoStream = inStream;

            break;
        }

        if (videoCodecParameters && videoStream) {
            // Keep our own copy of the parameters: the stream's copy dies
            // with the format context, but the shared decoder state compares
            // against these on every getNextFrame().
            _videoCodecParameters = avcodec_parameters_alloc();
            avcodec_parameters_copy(_videoCodecParameters, videoCodecParameters);
            _videoStream = videoStream;
        }
    }

    ~VideoStreamingPartInternal() {
        if (_videoCodecParameters) {
            avcodec_parameters_free(&_videoCodecParameters);
        }
        if (_inputFormatContext) {
            avformat_close_input(&_inputFormatContext);
        }
    }

    // Participant this slice belongs to.
    std::string endpointId() {
        return _endpointId;
    }

    // Reads the next demuxed packet from any stream. Returns absl::nullopt
    // at end of data, on read error, or when the part never opened.
    absl::optional<MediaDataPacket> readPacket() {
        if (_didReadToEnd) {
            return absl::nullopt;
        }
        if (!_inputFormatContext) {
            return absl::nullopt;
        }

        MediaDataPacket packet;
        int result = av_read_frame(_inputFormatContext, packet.packet());
        if (result < 0) {
            return absl::nullopt;
        }

        return packet;
    }

    // Skips non-video packets and wraps the next video packet as a
    // DecodableFrame; nullptr once the demuxer is exhausted.
    std::shared_ptr<DecodableFrame> readNextDecodableFrame() {
        while (true) {
            absl::optional<MediaDataPacket> packet = readPacket();
            if (packet) {
                if (_videoStream && packet->packet()->stream_index == _videoStream->index) {
                    // NOTE(review): argument evaluation order is unspecified,
                    // so reading packet->packet()->pts alongside
                    // std::move(packet.value()) relies on MediaDataPacket's
                    // move leaving the underlying AVPacket pointer usable —
                    // confirm its move semantics keep this safe.
                    return std::make_shared<DecodableFrame>(std::move(packet.value()), packet->packet()->pts, packet->packet()->dts);
                }
            } else {
                return nullptr;
            }
        }
    }

    // Wraps the decoder's current output (_frame) as a webrtc::VideoFrame.
    // Prefers a platform-specific frame buffer; falls back to copying the
    // planes into an I420 buffer. The pts is converted to seconds relative
    // to the part's first frame via Frame::pts().
    absl::optional<VideoStreamingPartFrame> convertCurrentFrame() {
        auto platformFrameBuffer = PlatformInterface::SharedInstance()->createPlatformFrameFromData(_frame.frame());
        if (platformFrameBuffer) {
            auto videoFrame = webrtc::VideoFrame::Builder()
                .set_video_frame_buffer(platformFrameBuffer)
                .set_rotation(_rotation)
                .build();

            return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream, _firstFramePts), _frameIndex);
        } else {
            // Fallback: copy the three planes (assumed Y/U/V layout of the
            // decoder output) into an owned I420 buffer.
            webrtc::scoped_refptr<webrtc::I420Buffer> i420Buffer = webrtc::I420Buffer::Copy(
                _frame.frame()->width,
                _frame.frame()->height,
                _frame.frame()->data[0],
                _frame.frame()->linesize[0],
                _frame.frame()->data[1],
                _frame.frame()->linesize[1],
                _frame.frame()->data[2],
                _frame.frame()->linesize[2]
            );
            if (i420Buffer) {
                auto videoFrame = webrtc::VideoFrame::Builder()
                    .set_video_frame_buffer(i420Buffer)
                    .set_rotation(_rotation)
                    .build();

                return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream, _firstFramePts), _frameIndex);
            } else {
                return absl::nullopt;
            }
        }
    }

    // Produces the next decoded frame, demuxing and feeding the shared
    // decoder as needed. At end of input the decoder is flushed and drained
    // into _finalFrames, which are then returned one at a time. Returns an
    // empty optional when the part is exhausted or on a decoder error.
    absl::optional<VideoStreamingPartFrame> getNextFrame(VideoStreamingSharedState const *sharedState) {
        if (!_videoStream) {
            return {};
        }
        if (!_videoCodecParameters) {
            return {};
        }

        // Make sure the shared decoder matches this part's parameters.
        sharedState->impl()->updateDecoderState(_videoCodecParameters, _videoStream->time_base);

        while (true) {
            if (_didReadToEnd) {
                // Input exhausted: hand out drained frames, if any remain.
                if (!_finalFrames.empty()) {
                    auto frame = _finalFrames[0];
                    _finalFrames.erase(_finalFrames.begin());
                    return frame;
                } else {
                    break;
                }
            } else {
                const auto frame = readNextDecodableFrame();
                if (frame) {
                    int sendStatus = sharedState->impl()->sendFrame(frame);
                    if (sendStatus == 0) {
                        int receiveStatus = sharedState->impl()->receiveFrame(_frame);
                        if (receiveStatus == 0) {
                            auto convertedFrame = convertCurrentFrame();
                            if (convertedFrame) {
                                _frameIndex++;
                                return convertedFrame;
                            }
                        } else if (receiveStatus == AVERROR(EAGAIN)) {
                            // more data needed
                        } else {
                            RTC_LOG(LS_ERROR) << "avcodec_receive_frame failed with result: " << receiveStatus;
                            _didReadToEnd = true;
                            break;
                        }
                    } else {
                        RTC_LOG(LS_ERROR) << "avcodec_send_packet failed with result: " << sendStatus;
                        _didReadToEnd = true;
                        return {};
                    }
                } else {
                    // End of container: flush the decoder (null packet) and
                    // drain every remaining frame into _finalFrames.
                    _didReadToEnd = true;
                    int sendStatus = sharedState->impl()->sendFrame(nullptr);
                    if (sendStatus == 0) {
                        while (true) {
                            int receiveStatus = sharedState->impl()->receiveFrame(_frame);
                            if (receiveStatus == 0) {
                                auto convertedFrame = convertCurrentFrame();
                                if (convertedFrame) {
                                    _frameIndex++;
                                    _finalFrames.push_back(convertedFrame.value());
                                }
                            } else {
                                if (receiveStatus != AVERROR_EOF) {
                                    RTC_LOG(LS_ERROR) << "avcodec_receive_frame (drain) failed with result: " << receiveStatus;
                                }
                                break;
                            }
                        }
                    } else {
                        RTC_LOG(LS_ERROR) << "avcodec_send_packet (drain) failed with result: " << sendStatus;
                    }
                    // Leave the shared decoder clean for the next part.
                    sharedState->impl()->reset();
                }
            }
        }

        return {};
    }

private:
    std::string _endpointId;
    webrtc::VideoRotation _rotation = webrtc::VideoRotation::kVideoRotation_0;

    // Owns the payload bytes; must outlive _inputFormatContext.
    std::unique_ptr<AVIOContextImpl> _avIoContext;

    AVFormatContext *_inputFormatContext = nullptr;
    // Borrowed from _inputFormatContext; valid until it is closed.
    AVStream *_videoStream = nullptr;
    // Reusable output slot for avcodec_receive_frame.
    Frame _frame;

    // Owned copy of the video stream's codec parameters.
    AVCodecParameters *_videoCodecParameters = nullptr;

    // Frames drained after EOF, returned one per getNextFrame() call.
    std::vector<VideoStreamingPartFrame> _finalFrames;

    int _frameIndex = 0;
    // < 0 means "first frame not yet seen" (see Frame::pts()).
    double _firstFramePts = -1.0;
    bool _didReadToEnd = false;
};
|
||||
|
||||
// Implementation behind VideoStreamingPart: parses the serialized header,
// slices the payload per participant event, and serves either decoded video
// frames or 10ms audio chunks depending on the requested content type.
class VideoStreamingPartState {
public:
    // Consumes the header from `data`, then splits the remaining payload at
    // the event offsets. Each slice becomes an AudioStreamingPart or a
    // VideoStreamingPartInternal according to `contentType`; slices with
    // invalid offsets (negative, non-increasing, or past the end) are skipped.
    VideoStreamingPartState(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType) {
        _videoStreamInfo = consumeVideoStreamInfo(data);
        if (!_videoStreamInfo) {
            return;
        }

        for (size_t i = 0; i < _videoStreamInfo->events.size(); i++) {
            if (_videoStreamInfo->events[i].offset < 0) {
                continue;
            }
            // The slice runs to the next event's offset, or to the end of
            // the payload for the last event.
            size_t endOffset = 0;
            if (i == _videoStreamInfo->events.size() - 1) {
                endOffset = data.size();
            } else {
                endOffset = _videoStreamInfo->events[i + 1].offset;
            }
            if (endOffset <= _videoStreamInfo->events[i].offset) {
                continue;
            }
            if (endOffset > data.size()) {
                continue;
            }
            std::vector<uint8_t> dataSlice(data.begin() + _videoStreamInfo->events[i].offset, data.begin() + endOffset);
            // Map the serialized rotation (degrees) onto the webrtc enum;
            // unknown values fall back to 0 degrees.
            webrtc::VideoRotation rotation = webrtc::VideoRotation::kVideoRotation_0;
            switch (_videoStreamInfo->events[i].rotation) {
                case 0: {
                    rotation = webrtc::VideoRotation::kVideoRotation_0;
                    break;
                }
                case 90: {
                    rotation = webrtc::VideoRotation::kVideoRotation_90;
                    break;
                }
                case 180: {
                    rotation = webrtc::VideoRotation::kVideoRotation_180;
                    break;
                }
                case 270: {
                    rotation = webrtc::VideoRotation::kVideoRotation_270;
                    break;
                }
                default: {
                    break;
                }
            }

            switch (contentType) {
                case VideoStreamingPart::ContentType::Audio: {
                    auto part = std::make_unique<AudioStreamingPart>(std::move(dataSlice), _videoStreamInfo->container, true);
                    _parsedAudioParts.push_back(std::move(part));

                    break;
                }
                case VideoStreamingPart::ContentType::Video: {
                    auto part = std::make_unique<VideoStreamingPartInternal>(_videoStreamInfo->events[i].endpointId, rotation, std::move(dataSlice), _videoStreamInfo->container);
                    _parsedVideoParts.push_back(std::move(part));

                    break;
                }
                default: {
                    break;
                }
            }
        }
    }

    ~VideoStreamingPartState() {
    }

    // Returns the frame whose [pts, next pts) window contains `timestamp`
    // (seconds, relative to the part start). Maintains a sliding buffer of
    // decoded frames: frames whose window has passed are dropped, and more
    // are decoded (draining parts in order) until at least two are buffered
    // or the parts are exhausted.
    absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp) {
        while (true) {
            // Drop frames that are already fully in the past.
            while (_availableFrames.size() >= 2) {
                if (timestamp >= _availableFrames[1].pts) {
                    _availableFrames.erase(_availableFrames.begin());
                } else {
                    break;
                }
            }

            // Need two frames to bound the current one; decode more if a
            // video part is still available (exhausted parts are removed).
            if (_availableFrames.size() < 2) {
                if (!_parsedVideoParts.empty()) {
                    auto result = _parsedVideoParts[0]->getNextFrame(sharedState);
                    if (result) {
                        _availableFrames.push_back(result.value());
                    } else {
                        _parsedVideoParts.erase(_parsedVideoParts.begin());
                    }
                    continue;
                }
            }

            if (!_availableFrames.empty()) {
                // Pick the last frame whose pts is <= timestamp; the final
                // frame is returned for any later timestamp.
                for (size_t i = 1; i < _availableFrames.size(); i++) {
                    if (timestamp < _availableFrames[i].pts) {
                        return _availableFrames[i - 1];
                    }
                }
                return _availableFrames[_availableFrames.size() - 1];
            } else {
                return absl::nullopt;
            }
        }
    }

    // Endpoint of the video part currently being decoded, if any.
    absl::optional<std::string> getActiveEndpointId() const {
        if (!_parsedVideoParts.empty()) {
            return _parsedVideoParts[0]->endpointId();
        } else {
            return absl::nullopt;
        }
    }

    // True while undecoded video parts or remaining audio exist.
    bool hasRemainingFrames() {
        return !_parsedVideoParts.empty() || getAudioRemainingMilliseconds() > 0;
    }

    // Remaining audio in the first non-exhausted audio part, in ms;
    // exhausted parts are discarded along the way. 0 when none remain.
    int getAudioRemainingMilliseconds() {
        while (!_parsedAudioParts.empty()) {
            auto firstPartResult = _parsedAudioParts[0]->getRemainingMilliseconds();
            if (firstPartResult <= 0) {
                _parsedAudioParts.erase(_parsedAudioParts.begin());
            } else {
                return firstPartResult;
            }
        }
        return 0;
    }

    // Next 10ms of audio per channel from the first non-exhausted audio
    // part; exhausted parts are discarded. Empty when no audio remains.
    std::vector<AudioStreamingPart::StreamingPartChannel> getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
        while (!_parsedAudioParts.empty()) {
            auto firstPartResult = _parsedAudioParts[0]->get10msPerChannel(persistentDecoder);
            if (firstPartResult.empty()) {
                _parsedAudioParts.erase(_parsedAudioParts.begin());
            } else {
                return firstPartResult;
            }
        }
        return {};
    }

private:
    absl::optional<VideoStreamInfo> _videoStreamInfo;
    // Video slices in playback order; the front one is actively decoded.
    std::vector<std::unique_ptr<VideoStreamingPartInternal>> _parsedVideoParts;
    // Sliding window of decoded frames used to answer timestamp queries.
    std::vector<VideoStreamingPartFrame> _availableFrames;

    // Audio slices in playback order; the front one is actively consumed.
    std::vector<std::unique_ptr<AudioStreamingPart>> _parsedAudioParts;
};
|
||||
|
||||
// Parses the serialized part immediately. Empty input leaves _state null,
// in which case every accessor returns its empty/default value.
VideoStreamingPart::VideoStreamingPart(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType) {
    if (!data.empty()) {
        _state = new VideoStreamingPartState(std::move(data), contentType);
    }
}
|
||||
|
||||
VideoStreamingPart::~VideoStreamingPart() {
    // delete on a null pointer is a no-op, so no guard is needed.
    delete _state;
}
|
||||
|
||||
// Forwards to the state; nullopt when the part was constructed empty.
absl::optional<VideoStreamingPartFrame> VideoStreamingPart::getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp) {
    if (!_state) {
        return absl::nullopt;
    }
    return _state->getFrameAtRelativeTimestamp(sharedState, timestamp);
}
|
||||
|
||||
// Forwards to the state; nullopt when the part was constructed empty.
absl::optional<std::string> VideoStreamingPart::getActiveEndpointId() const {
    if (!_state) {
        return absl::nullopt;
    }
    return _state->getActiveEndpointId();
}
|
||||
|
||||
bool VideoStreamingPart::hasRemainingFrames() const {
|
||||
return _state
|
||||
? _state->hasRemainingFrames()
|
||||
: false;
|
||||
}
|
||||
|
||||
int VideoStreamingPart::getAudioRemainingMilliseconds() {
|
||||
return _state
|
||||
? _state->getAudioRemainingMilliseconds()
|
||||
: 0;
|
||||
}
|
||||
// Forwards to the state; an empty vector when the part was constructed empty.
std::vector<AudioStreamingPart::StreamingPartChannel> VideoStreamingPart::getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    if (!_state) {
        return {};
    }
    return _state->getAudio10msPerChannel(persistentDecoder);
}
|
||||
|
||||
}
|
||||
78
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h
Normal file
78
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
#ifndef TGCALLS_VIDEO_STREAMING_PART_H
|
||||
#define TGCALLS_VIDEO_STREAMING_PART_H
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include <vector>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "api/video/video_frame.h"
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
#include "AudioStreamingPart.h"
|
||||
#include "AudioStreamingPartInternal.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class VideoStreamingPartState;
|
||||
class VideoStreamingSharedStateInternal;
|
||||
|
||||
// A decoded video frame plus the metadata needed to route and order it:
// the producing endpoint, its presentation time in seconds relative to the
// part's first frame, and its decode index within the part.
struct VideoStreamingPartFrame {
    std::string endpointId;
    webrtc::VideoFrame frame;
    // Seconds relative to the part's first frame.
    double pts = 0;
    // Monotonically increasing decode index within the part.
    int index = 0;

    VideoStreamingPartFrame(std::string endpointId_, webrtc::VideoFrame const &frame_, double pts_, int index_) :
    endpointId(endpointId_),
    frame(frame_),
    pts(pts_),
    index(index_) {
    }
};
|
||||
|
||||
// Decoder state shared across consecutive VideoStreamingParts so parts with
// identical codec parameters reuse one decoder. Pimpl wrapper; the owned
// implementation lives in the .cpp file.
class VideoStreamingSharedState {
public:
    VideoStreamingSharedState();
    ~VideoStreamingSharedState();

    // Non-owning access to the implementation for parts being decoded.
    VideoStreamingSharedStateInternal *impl() const {
        return _impl;
    }

private:
    // Owned; allocated in the constructor, deleted in the destructor.
    VideoStreamingSharedStateInternal *_impl = nullptr;
};
|
||||
|
||||
// One downloaded broadcast part. Parses the serialized payload on
// construction and then serves either decoded video frames (by relative
// timestamp) or 10ms audio chunks, depending on the requested content type.
// Movable but not copyable.
class VideoStreamingPart {
public:
    // Which media to extract from the part's payload.
    enum class ContentType {
        Audio,
        Video
    };

public:
    explicit VideoStreamingPart(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType);
    ~VideoStreamingPart();

    VideoStreamingPart(const VideoStreamingPart&) = delete;
    // Move transfers ownership of the parsed state and nulls the source.
    VideoStreamingPart(VideoStreamingPart&& other) {
        _state = other._state;
        other._state = nullptr;
    }
    VideoStreamingPart& operator=(const VideoStreamingPart&) = delete;
    VideoStreamingPart& operator=(VideoStreamingPart&&) = delete;

    // Frame covering `timestamp` (seconds from part start); decodes via the
    // shared state. nullopt when exhausted or constructed from empty data.
    absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp);
    // Endpoint of the video slice currently being decoded, if any.
    absl::optional<std::string> getActiveEndpointId() const;
    bool hasRemainingFrames() const;

    // Remaining audio in milliseconds; 0 when none.
    int getAudioRemainingMilliseconds();
    // Next 10ms of audio per channel; empty when no audio remains.
    std::vector<AudioStreamingPart::StreamingPartChannel> getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder);

private:
    // Owned; null when constructed from empty data (moved-from leaves null).
    VideoStreamingPartState *_state = nullptr;
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
Loading…
Add table
Add a link
Reference in a new issue