Commit 3a91a83a authored by Ken MacKay, committed by Commit Bot

[Chromecast] Reduce memory allocation/free on mixer thread

Bug: internal b/137439407
Change-Id: Ia316552cfd0d884f1c55ce32603b426778f08cae
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1715765
Reviewed-by: Sergey Volk <servolk@chromium.org>
Commit-Queue: Kenneth MacKay <kmackay@chromium.org>
Cr-Commit-Position: refs/heads/master@{#680584}
parent fbee8107
@@ -121,6 +121,7 @@ cast_source_set("audio_helpers") {
deps = [
"//base",
"//chromecast/public/media",
"//media",
]
}
@@ -240,6 +241,8 @@ cast_source_set("for_mixer_audio") {
"cast_media_shlib_mixer_audio.cc",
"direct_mixer_source.cc",
"direct_mixer_source.h",
"loopback_handler.cc",
"loopback_handler.h",
"media_pipeline_backend_for_mixer.cc",
"media_pipeline_backend_for_mixer.h",
"stream_mixer.cc",
@@ -5,9 +5,12 @@
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_FADER_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_FADER_H_
#include <cstdint>
#include <memory>
#include "base/macros.h"
#include "base/time/time.h"
#include "chromecast/public/media/media_pipeline_backend.h"
namespace media {
class AudioBus;
@@ -24,57 +27,67 @@ namespace media {
// constructor/destructor must be called on the same thread.
class AudioFader {
public:
using RenderingDelay = MediaPipelineBackend::AudioDecoder::RenderingDelay;
// The source of real audio data for the fader.
class Source {
public:
// Fills at most |num_frames| frames of audio into |buffer|, starting at
// |frame_offset|. Returns the actual number of frames of audio that were
// filled (may be less than |num_frames| if the source does not have
// enough data). This method is only called synchronously from within
// a call to FillFrames().
virtual int FillFaderFrames(::media::AudioBus* buffer,
int frame_offset,
int num_frames) = 0;
// Called to get more audio data for playback. The source must fill in
// the |channels| with up to |num_frames| of audio. Note that only planar
// float format is supported. The |rendering_delay| indicates when the
// first frame of the filled data will be played out.
// Note that this method is called on a high priority audio output thread
// and must not block.
// Returns the number of frames filled.
virtual int FillFaderFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channels) = 0;
protected:
virtual ~Source() = default;
};
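For context, a minimal implementation of the new planar-float Source contract might look like the sketch below (hypothetical: the stereo channel count is assumed, and silence stands in for real decoded audio; requires <algorithm> for std::fill_n).

class SilentSource : public AudioFader::Source {
 public:
  // Fill each planar channel with silence; a real source would copy decoded
  // audio here instead. This runs on the high-priority audio output thread,
  // so it must not block.
  int FillFaderFrames(int num_frames,
                      AudioFader::RenderingDelay rendering_delay,
                      float* const* channels) override {
    for (int c = 0; c < kNumChannels; ++c) {
      std::fill_n(channels[c], num_frames, 0.0f);
    }
    return num_frames;  // All requested frames were filled.
  }

 private:
  static constexpr int kNumChannels = 2;  // Assumed stereo for this sketch.
};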
// |fade_frames| is the number of frames over which a complete fade in/out
// will take place.
AudioFader(Source* source, int num_channels, int fade_frames);
AudioFader(Source* source,
base::TimeDelta fade_time,
int num_channels,
int sample_rate,
double playback_rate);
AudioFader(Source* source,
int fade_frames,
int num_channels,
int sample_rate,
double playback_rate);
~AudioFader();
int buffered_frames() const { return buffered_frames_; }
// Fills |buffer| with up to |num_frames| frames of data, starting at
// |write_offset| within |buffer|, and fading as appropriate to avoid
// pops/clicks. This will call through to the source to get more data. Returns
// the number of frames filled.
int FillFrames(int num_frames, ::media::AudioBus* buffer, int write_offset);
// Fills in |channel_data| with |num_frames| frames of properly faded audio.
// The |rendering_delay| should reflect when the first sample of the filled
// audio is expected to play out.
int FillFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channel_data);
// Returns the total number of frames that will be requested from the source
// (potentially over multiple calls to source_->FillFaderFrames()) if
// FillFrames() is called to fill |num_fill_frames| frames.
int FramesNeededFromSource(int num_fill_frames) const;
// Helper methods to fade in/out an AudioBus. |buffer| contains the data to
// fade; |filled_frames| is the amount of data actually in |buffer| (if the
// buffer was partially filled, this will not be equal to buffer->frames()).
// |write_offset| is the offset within |buffer| at which to start writing
// to. |fade_frames| is the number of frames over which a complete fade should
// Helper methods to fade in/out a buffer. |channel_data| contains the data to
// fade; |filled_frames| is the amount of data actually in |channel_data|.
// |fade_frames| is the number of frames over which a complete fade should
// happen (i.e., how many frames it takes to go from a 1.0 to 0.0 multiplier).
// |fade_frames_remaining| is the number of frames left in the current fade
// (which will be less than |fade_frames| if part of the fade has already
// been completed on a previous buffer).
static void FadeInHelper(::media::AudioBus* buffer,
static void FadeInHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining);
static void FadeOutHelper(::media::AudioBus* buffer,
static void FadeOutHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining);
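Illustratively, the fade described above is a linear gain ramp applied across the planar channel data. The following sketch matches the documented semantics for fade-in but is not the implementation from this change (requires <algorithm> for std::min):

// Illustrative only: gain ramps linearly from 0.0 to 1.0 over |fade_frames|
// total frames; |fade_frames_remaining| says how far into the ramp this
// buffer starts.
void FadeInSketch(float* const* channel_data,
                  int num_channels,
                  int filled_frames,
                  int fade_frames,
                  int fade_frames_remaining) {
  const int frames_already_faded = fade_frames - fade_frames_remaining;
  for (int f = 0; f < filled_frames; ++f) {
    const float gain = std::min(
        1.0f, static_cast<float>(frames_already_faded + f) / fade_frames);
    for (int c = 0; c < num_channels; ++c) {
      channel_data[c][f] *= gain;
    }
  }
}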
@@ -86,22 +99,23 @@ class AudioFader {
kFadingOut,
};
void CompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset);
void IncompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset);
void FadeIn(::media::AudioBus* buffer, int filled_frames, int write_offset);
void FadeOut(::media::AudioBus* buffer, int filled_frames, int write_offset);
int64_t FramesToMicroseconds(int64_t frames);
void CompleteFill(float* const* channel_data, int filled_frames);
void IncompleteFill(float* const* channel_data, int filled_frames);
void FadeIn(float* const* channel_data, int filled_frames);
void FadeOut(float* const* channel_data, int filled_frames);
Source* const source_;
const int fade_frames_;
const int num_channels_;
const int sample_rate_;
const double playback_rate_;
State state_;
State state_ = State::kSilent;
std::unique_ptr<::media::AudioBus> fade_buffer_;
int buffered_frames_;
int fade_frames_remaining_;
int buffered_frames_ = 0;
int fade_frames_remaining_ = 0;
DISALLOW_COPY_AND_ASSIGN(AudioFader);
};
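A hypothetical call site wiring the pieces together (the 5 ms fade, 48 kHz rate, and buffer size are illustrative, not taken from this change):

// Fade over 5 ms at 48 kHz, stereo, 1.0x playback rate.
SilentSource source;  // From the sketch above.
AudioFader fader(&source, base::TimeDelta::FromMilliseconds(5),
                 /*num_channels=*/2, /*sample_rate=*/48000,
                 /*playback_rate=*/1.0);

// In the output callback, with planar float pointers (one per channel):
float left[256];
float right[256];
float* channels[] = {left, right};
AudioFader::RenderingDelay delay;  // Default-constructed for this sketch.
int filled = fader.FillFrames(256, delay, channels);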
@@ -22,6 +22,10 @@
namespace chromecast {
namespace media {
namespace {
const int kDefaultBufferSize = 2048;
} // namespace
class AudioOutputRedirector::InputImpl : public AudioOutputRedirectorInput {
public:
using RenderingDelay = MediaPipelineBackend::AudioDecoder::RenderingDelay;
@@ -59,6 +63,10 @@ AudioOutputRedirector::InputImpl::InputImpl(
DCHECK(output_redirector_);
DCHECK(mixer_input_);
temp_buffer_ = ::media::AudioBus::Create(mixer_input_->num_channels(),
kDefaultBufferSize);
temp_buffer_->Zero();
mixer_input_->AddAudioOutputRedirector(this);
}
@@ -84,16 +92,21 @@ void AudioOutputRedirector::InputImpl::Redirect(::media::AudioBus* const buffer,
buffer->CopyPartialFramesTo(0, num_frames, 0, temp_buffer_.get());
}
const int num_channels = buffer->channels();
float* channels[num_channels];
for (int c = 0; c < num_channels; ++c) {
channels[c] = buffer->channel(c);
}
if (previous_ended_in_silence_) {
if (!redirected) {
// Smoothly fade in from previous silence.
AudioFader::FadeInHelper(temp_buffer_.get(), num_frames, 0, num_frames,
AudioFader::FadeInHelper(channels, num_channels, num_frames, num_frames,
num_frames);
}
} else if (redirected) {
// Smoothly fade out to silence, since output is now being redirected by a
// previous output splitter.
AudioFader::FadeOutHelper(temp_buffer_.get(), num_frames, 0, num_frames,
AudioFader::FadeOutHelper(channels, num_channels, num_frames, num_frames,
num_frames);
}
previous_ended_in_silence_ = redirected;
@@ -111,6 +124,13 @@ AudioOutputRedirector::AudioOutputRedirector(
channel_data_(config.num_output_channels) {
DCHECK(output_);
DCHECK_GT(config_.num_output_channels, 0);
mixed_ = ::media::AudioBus::Create(config_.num_output_channels,
kDefaultBufferSize);
mixed_->Zero();
for (int c = 0; c < config_.num_output_channels; ++c) {
channel_data_[c] = mixed_->channel(c);
}
}
AudioOutputRedirector::~AudioOutputRedirector() = default;
@@ -7,6 +7,7 @@
#include <string>
#include "base/callback.h"
#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -147,8 +148,13 @@ class BufferingMixerSource : public MixerInput::Source,
bool mixer_error_ = false;
scoped_refptr<DecoderBufferBase> pending_data_;
base::circular_deque<scoped_refptr<DecoderBufferBase>> queue_;
// We let the caller thread free audio buffers, since freeing memory can
// sometimes be expensive; we want to avoid potentially long-running
// operations on the mixer thread.
std::vector<scoped_refptr<DecoderBufferBase>> buffers_to_be_freed_;
int queued_frames_ = 0;
RenderingDelay mixer_rendering_delay_;
RenderingDelay last_buffer_delay_;
int extra_delay_frames_ = 0;
int current_buffer_offset_ = 0;
AudioFader fader_;
@@ -229,13 +235,13 @@ class BufferingMixerSource : public MixerInput::Source,
void FinalizeAudioPlayback() override;
// AudioFader::Source implementation:
int FillFaderFrames(::media::AudioBus* dest,
int frame_offset,
int num_frames) override;
int FillFaderFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channels) override;
RenderingDelay QueueData(scoped_refptr<DecoderBufferBase> data);
void PostPcmCompletion(RenderingDelay delay);
void PostPcmCompletion();
void PostEos();
void PostError(MixerError error);
void PostAudioReadyForPlayback();
@@ -257,10 +263,17 @@ class BufferingMixerSource : public MixerInput::Source,
const int start_threshold_frames_;
bool audio_ready_for_playback_fired_ = false;
// Only used on the caller thread.
std::vector<scoped_refptr<DecoderBufferBase>> old_buffers_to_be_freed_;
LockedMembers locked_members_;
int remaining_silence_frames_ = 0;
base::RepeatingClosure pcm_completion_task_;
base::RepeatingClosure eos_task_;
base::RepeatingClosure ready_for_playback_task_;
base::WeakPtr<BufferingMixerSource> weak_this_;
base::WeakPtrFactory<BufferingMixerSource> weak_factory_;
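The |buffers_to_be_freed_| / |old_buffers_to_be_freed_| pair above implements the commit's deferred-free hand-off. A minimal sketch of the pattern, with hypothetical helper names and assuming a lock shared between the mixer and caller threads:

// Mixer thread: retire a buffer without freeing it (O(1), no deallocation).
void RetireBuffer(scoped_refptr<DecoderBufferBase> buffer) {
  base::AutoLock lock(lock_);
  buffers_to_be_freed_.push_back(std::move(buffer));
}

// Caller thread: swap the shared vector into a local one, then drop it.
void FreeRetiredBuffers() {
  std::vector<scoped_refptr<DecoderBufferBase>> local;
  {
    base::AutoLock lock(lock_);
    local.swap(buffers_to_be_freed_);
  }
  // |local| is destroyed here, so the (potentially slow) deallocations run
  // on the caller thread instead of the real-time mixer thread.
}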
@@ -62,6 +62,7 @@ void FilterGroup::Initialize(const AudioPostProcessor2::Config& output_config) {
ResizeBuffers();
// Run a buffer of 0's to initialize rendering delay.
std::fill_n(interleaved_.data(), interleaved_.size(), 0.0f);
delay_seconds_ = post_processing_pipeline_->ProcessFrames(
interleaved_.data(), input_frames_per_write_, last_volume_,
true /* is_silence */);
@@ -203,6 +204,7 @@ int FilterGroup::GetOutputChannelCount() const {
void FilterGroup::ResizeBuffers() {
mixed_ = ::media::AudioBus::Create(num_channels_, input_frames_per_write_);
mixed_->Zero();
temp_buffers_.clear();
for (MixerInput* input : active_inputs_) {
AddTempBuffer(input->num_channels(), input_frames_per_write_);
@@ -217,6 +219,7 @@ void FilterGroup::AddTempBuffer(int num_channels, int num_frames) {
if (!temp_buffers_[num_channels]) {
temp_buffers_[num_channels] =
::media::AudioBus::Create(num_channels, num_frames);
temp_buffers_[num_channels]->Zero();
}
}
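With this change, |temp_buffers_| acts as a scratch cache keyed by channel count that is allocated and zeroed up front, so the mixer's write path never allocates. The lookup side might look like the following sketch (hypothetical accessor; the indexing by channel count is inferred from the code above):

// Hypothetical accessor: return the preallocated scratch AudioBus for a
// given channel count. Assumes ResizeBuffers()/AddTempBuffer() already ran.
::media::AudioBus* FilterGroup::GetTempBuffer(int num_channels) {
  DCHECK(temp_buffers_[num_channels]);  // Preallocated by AddTempBuffer().
  return temp_buffers_[num_channels].get();  // No allocation on this path.
}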
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/loopback_handler.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/containers/flat_set.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "base/threading/thread.h"
#include "chromecast/public/media/external_audio_pipeline_shlib.h"
namespace chromecast {
namespace media {
namespace {
const int kDefaultBufferSize = 1024 * 2 * sizeof(float);
const int kNumBuffers = 16;
const int kMaxTasks = kNumBuffers + 1;
class LoopbackHandlerImpl : public LoopbackHandler,
public CastMediaShlib::LoopbackAudioObserver {
public:
LoopbackHandlerImpl(scoped_refptr<base::SingleThreadTaskRunner> task_runner)
: external_audio_pipeline_supported_(
ExternalAudioPipelineShlib::IsSupported()),
task_signal_(&lock_) {
CreateBuffersIfNeeded(kDefaultBufferSize);
if (task_runner) {
task_runner_ = std::move(task_runner);
} else {
thread_ = std::make_unique<base::Thread>("CMA loopback");
base::Thread::Options options;
options.priority = base::ThreadPriority::REALTIME_AUDIO;
thread_->StartWithOptions(options);
task_runner_ = thread_->task_runner();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::LoopbackTaskLoop,
base::Unretained(this)));
}
if (external_audio_pipeline_supported_) {
ExternalAudioPipelineShlib::AddExternalLoopbackAudioObserver(this);
}
}
private:
struct Task {
Task(int64_t expected_playback_time,
SampleFormat format,
int sample_rate,
int channels,
uint32_t tag,
std::unique_ptr<uint8_t[]> data,
int length)
: expected_playback_time(expected_playback_time),
format(format),
sample_rate(sample_rate),
channels(channels),
tag(tag),
data(std::move(data)),
length(length) {}
const int64_t expected_playback_time;
const SampleFormat format;
const int sample_rate;
const int channels;
const uint32_t tag;
std::unique_ptr<uint8_t[]> data;
const int length;
};
~LoopbackHandlerImpl() override {
{
base::AutoLock lock(lock_);
stop_thread_ = true;
}
task_signal_.Signal();
if (thread_) {
thread_->Stop();
}
}
// LoopbackHandler implementation:
void Destroy() override {
if (external_audio_pipeline_supported_) {
ExternalAudioPipelineShlib::RemoveExternalLoopbackAudioObserver(this);
} else {
delete this;
}
}
void SetDataSize(int data_size_bytes) override {
CreateBuffersIfNeeded(data_size_bytes);
}
scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() override {
return task_runner_;
}
void AddObserver(CastMediaShlib::LoopbackAudioObserver* observer) override {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::AddObserverOnThread,
base::Unretained(this), observer));
task_signal_.Signal();
}
void RemoveObserver(
CastMediaShlib::LoopbackAudioObserver* observer) override {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::RemoveObserverOnThread,
base::Unretained(this), observer));
task_signal_.Signal();
}
void SendData(int64_t timestamp,
int sample_rate,
int num_channels,
float* data,
int frames) override {
if (external_audio_pipeline_supported_) {
return;
}
SendLoopbackData(timestamp, kSampleFormatF32, sample_rate, num_channels,
reinterpret_cast<uint8_t*>(data),
frames * num_channels * sizeof(float));
}
void SendInterrupt() override {
base::AutoLock lock(lock_);
SendInterruptedLocked();
}
// CastMediaShlib::LoopbackAudioObserver implementation:
void OnLoopbackAudio(int64_t timestamp,
SampleFormat format,
int sample_rate,
int num_channels,
uint8_t* data,
int length) override {
SendLoopbackData(timestamp, format, sample_rate, num_channels, data,
length);
}
void OnLoopbackInterrupted() override { SendInterrupt(); }
void OnRemoved() override {
// We expect that the external pipeline will not invoke any other callbacks
// after this one.
delete this;
// No need to pipe this; LoopbackHandlerImpl will let the other observers
// know when it's being removed.
}
void AddObserverOnThread(CastMediaShlib::LoopbackAudioObserver* observer) {
DCHECK(task_runner_->BelongsToCurrentThread());
LOG(INFO) << __func__;
DCHECK(observer);
observers_.insert(observer);
}
void RemoveObserverOnThread(CastMediaShlib::LoopbackAudioObserver* observer) {
DCHECK(task_runner_->BelongsToCurrentThread());
LOG(INFO) << __func__;
DCHECK(observer);
observers_.erase(observer);
observer->OnRemoved();
}
void CreateBuffersIfNeeded(int buffer_size_bytes) {
if (buffer_size_bytes <= buffer_size_) {
return;
}
LOG(INFO) << "Create new buffers, size = " << buffer_size_bytes;
base::AutoLock lock(lock_);
++buffer_tag_;
buffers_.clear();
for (int i = 0; i < kNumBuffers; ++i) {
auto buffer = std::make_unique<uint8_t[]>(buffer_size_bytes);
std::fill_n(buffer.get(), buffer_size_bytes, 0);
buffers_.push_back(std::move(buffer));
}
buffer_size_ = buffer_size_bytes;
tasks_.reserve(kMaxTasks);
new_tasks_.reserve(kMaxTasks);
}
void SendLoopbackData(int64_t timestamp,
SampleFormat format,
int sample_rate,
int num_channels,
uint8_t* data,
int length) {
CreateBuffersIfNeeded(length);
{
base::AutoLock lock(lock_);
if (buffers_.empty() || tasks_.size() >= kNumBuffers) {
LOG(ERROR) << "Can't send loopback data";
SendInterruptedLocked();
return;
}
std::unique_ptr<uint8_t[]> buffer = std::move(buffers_.back());
buffers_.pop_back();
std::copy(data, data + length, buffer.get());
tasks_.emplace_back(timestamp, format, sample_rate, num_channels,
buffer_tag_, std::move(buffer), length);
}
if (thread_) {
task_signal_.Signal();
} else {
HandleLoopbackTask(&tasks_.back());
tasks_.pop_back();
}
}
void SendInterruptedLocked() {
lock_.AssertAcquired();
if (tasks_.size() == kMaxTasks) {
return;
}
tasks_.emplace_back(0, kSampleFormatF32, 0, 0, 0, nullptr, 0);
if (thread_) {
task_signal_.Signal();
} else {
HandleLoopbackTask(&tasks_.back());
tasks_.pop_back();
}
}
void LoopbackTaskLoop() {
DCHECK(task_runner_->BelongsToCurrentThread());
{
base::AutoLock lock(lock_);
if (stop_thread_)
return;
if (tasks_.empty()) {
task_signal_.Wait();
}
new_tasks_.swap(tasks_);
}
for (auto& task : new_tasks_) {
HandleLoopbackTask(&task);
}
new_tasks_.clear();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::LoopbackTaskLoop,
base::Unretained(this)));
}
void HandleLoopbackTask(Task* task) {
if (!task->data) {
for (auto* observer : observers_) {
observer->OnLoopbackInterrupted();
}
return;
}
for (auto* observer : observers_) {
observer->OnLoopbackAudio(task->expected_playback_time, task->format,
task->sample_rate, task->channels,
task->data.get(), task->length);
}
base::AutoLock lock(lock_);
if (task->tag == buffer_tag_) {
// Only return the buffer if the tag matches. Otherwise, the buffer size
// may have changed (so we should just delete the buffer).
buffers_.push_back(std::move(task->data));
}
}
const bool external_audio_pipeline_supported_;
std::unique_ptr<base::Thread> thread_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
int buffer_size_ = 0;
base::Lock lock_;
uint32_t buffer_tag_ GUARDED_BY(lock_) = 0;
std::vector<std::unique_ptr<uint8_t[]>> buffers_ GUARDED_BY(lock_);
bool stop_thread_ GUARDED_BY(lock_) = false;
base::ConditionVariable task_signal_;
std::vector<Task> tasks_;
std::vector<Task> new_tasks_;
base::flat_set<CastMediaShlib::LoopbackAudioObserver*> observers_;
DISALLOW_COPY_AND_ASSIGN(LoopbackHandlerImpl);
};
} // namespace
// static
std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter>
LoopbackHandler::Create(
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
return std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter>(
new LoopbackHandlerImpl(std::move(task_runner)));
}
} // namespace media
} // namespace chromecast
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
#include <cstdint>
#include <memory>
#include "base/memory/scoped_refptr.h"
#include "chromecast/public/cast_media_shlib.h"
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace chromecast {
namespace media {
// Handles loopback audio from the mixer.
class LoopbackHandler {
public:
struct Deleter {
void operator()(LoopbackHandler* obj) { obj->Destroy(); }
};
static std::unique_ptr<LoopbackHandler, Deleter> Create(
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
virtual void Destroy() = 0;
virtual void SetDataSize(int data_size_bytes) = 0;
virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
virtual void AddObserver(CastMediaShlib::LoopbackAudioObserver* observer) = 0;
virtual void RemoveObserver(
CastMediaShlib::LoopbackAudioObserver* observer) = 0;
virtual void SendData(int64_t timestamp,
int sample_rate,
int num_channels,
float* data,
int frames) = 0;
virtual void SendInterrupt() = 0;
protected:
virtual ~LoopbackHandler() = default;
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
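A usage sketch for the new interface (hypothetical call site; passing a null task runner makes the handler create its own real-time "CMA loopback" thread, as the implementation above shows):

// Create a handler that owns its loopback thread.
auto loopback = LoopbackHandler::Create(nullptr);

// Size the pooled buffers for, e.g., 256-frame stereo float writes.
loopback->SetDataSize(256 * 2 * sizeof(float));

// On the mixer thread, after each write (arguments illustrative):
// loopback->SendData(timestamp, 48000, /*num_channels=*/2, data, 256);

// The custom Deleter calls Destroy(), which defers actual deletion until
// the external pipeline (if any) has removed its observer.
loopback.reset();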
@@ -161,6 +161,10 @@ int MixerInput::FillAudioData(int num_frames,
redirected = true;
}
float* channels[num_channels_];
for (int c = 0; c < num_channels_; ++c) {
channels[c] = dest->channel(c);
}
if (first_buffer_ && redirected) {
// If the first buffer is redirected, don't provide any data to the mixer
// (we want to avoid a 'blip' of sound from the first buffer if it is being
@@ -173,11 +177,11 @@
filled = 0;
} else {
// Smoothly fade in from previous silence.
AudioFader::FadeInHelper(dest, filled, 0, filled, filled);
AudioFader::FadeInHelper(channels, num_channels_, filled, filled, filled);
}
} else if (redirected) {
// Smoothly fade out to silence, since output is now being redirected.
AudioFader::FadeOutHelper(dest, filled, 0, filled, filled);
AudioFader::FadeOutHelper(channels, num_channels_, filled, filled, filled);
}
previous_ended_in_silence_ = redirected;
first_buffer_ = false;
@@ -12,6 +12,7 @@
#include <utility>
#include <vector>
#include "base/callback.h"
#include "base/containers/flat_map.h"
#include "base/containers/flat_set.h"
#include "base/macros.h"
@@ -20,6 +21,7 @@
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "chromecast/media/cma/backend/loopback_handler.h"
#include "chromecast/media/cma/backend/mixer_input.h"
#include "chromecast/media/cma/backend/mixer_pipeline.h"
#include "chromecast/public/cast_media_shlib.h"
@@ -124,7 +126,6 @@ class StreamMixer {
int num_output_channels() const { return num_output_channels_; }
private:
class ExternalLoopbackAudioObserver;
class BaseExternalMediaVolumeChangeRequestObserver
: public ExternalAudioPipelineShlib::
ExternalMediaVolumeChangeRequestObserver {
@@ -160,30 +161,15 @@ class StreamMixer {
void RemoveInputOnThread(MixerInput::Source* input_source);
void SetCloseTimeout();
void UpdatePlayoutChannel();
void UpdateLoopbackChannelCount();
void PlaybackLoop();
void WriteOneBuffer();
void WriteMixedPcm(int frames, int64_t expected_playback_time);
void MixToMono(float* data, int frames, int channels);
void RemoveLoopbackAudioObserverOnThread(
CastMediaShlib::LoopbackAudioObserver* observer);
void RemoveAudioOutputRedirectorOnThread(AudioOutputRedirector* redirector);
void PostLoopbackData(int64_t expected_playback_time,
SampleFormat sample_format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length);
void PostLoopbackInterrupted();
void SendLoopbackData(int64_t expected_playback_time,
SampleFormat sample_format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length);
int GetSampleRateForDeviceId(const std::string& device);
MediaPipelineBackend::AudioDecoder::RenderingDelay GetTotalRenderingDelay(
@@ -195,8 +181,7 @@
std::unique_ptr<MixerPipeline> mixer_pipeline_;
std::unique_ptr<base::Thread> mixer_thread_;
scoped_refptr<base::SingleThreadTaskRunner> mixer_task_runner_;
std::unique_ptr<base::Thread> loopback_thread_;
scoped_refptr<base::SingleThreadTaskRunner> loopback_task_runner_;
std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter> loopback_handler_;
std::unique_ptr<ThreadHealthChecker> health_checker_;
void OnHealthCheckFailed();
@@ -216,16 +201,16 @@ class StreamMixer {
int frames_per_write_ = 0;
int redirector_samples_per_second_ = 0;
int redirector_frames_per_write_ = 0;
int loopback_channel_count_ = 0;
State state_;
base::TimeTicks close_timestamp_;
base::RepeatingClosure playback_loop_task_;
base::flat_map<MixerInput::Source*, std::unique_ptr<MixerInput>> inputs_;
base::flat_map<MixerInput::Source*, std::unique_ptr<MixerInput>>
ignored_inputs_;
base::flat_set<CastMediaShlib::LoopbackAudioObserver*> loopback_observers_;
base::flat_map<AudioContentType, VolumeInfo> volume_info_;
base::flat_map<AudioOutputRedirector*, std::unique_ptr<AudioOutputRedirector>>
@@ -234,8 +219,6 @@
const bool external_audio_pipeline_supported_;
std::unique_ptr<BaseExternalMediaVolumeChangeRequestObserver>
external_volume_observer_;
std::unique_ptr<ExternalLoopbackAudioObserver>
external_loopback_audio_observer_;
base::WeakPtrFactory<StreamMixer> weak_factory_;
@@ -89,14 +89,9 @@ class ExternalAudioPipelineTest : public ::testing::Test {
}
// Run async operations in the stream mixer.
void RunLoopForMixer() {
// SendLoopbackData.
base::RunLoop run_loop1;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop1.QuitClosure());
run_loop1.Run();
// PlaybackLoop.
base::RunLoop run_loop2;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop2.QuitClosure());
run_loop2.Run();
base::RunLoop run_loop;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
run_loop.Run();
}
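The helper relies on FIFO task ordering: a QuitClosure posted behind the pending mixer tasks runs only after those tasks have drained. The idiom in isolation (a generic sketch, not tied to this test):

// Everything posted to |task_runner| before the QuitClosure is guaranteed
// to run before Run() returns.
base::RunLoop run_loop;
task_runner->PostTask(FROM_HERE, run_loop.QuitClosure());
run_loop.Run();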
protected:
@@ -201,11 +196,10 @@ TEST_F(ExternalAudioPipelineTest, ExternalAudioPipelineLoopbackData) {
mixer_->AddLoopbackAudioObserver(&mock_loopback_observer);
mixer_->AddInput(&input);
RunLoopForMixer();
// Send data to the stream mixer.
input.SetData(std::move(data));
RunLoopForMixer();
// Get actual data from our mocked loopback observer.