Commit 3a91a83a authored by Ken MacKay, committed by Commit Bot

[Chromecast] Reduce memory allocation/free on mixer thread

Bug: internal b/137439407
Change-Id: Ia316552cfd0d884f1c55ce32603b426778f08cae
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1715765
Reviewed-by: Sergey Volk <servolk@chromium.org>
Commit-Queue: Kenneth MacKay <kmackay@chromium.org>
Cr-Commit-Position: refs/heads/master@{#680584}
parent fbee8107
......@@ -121,6 +121,7 @@ cast_source_set("audio_helpers") {
deps = [
"//base",
"//chromecast/public/media",
"//media",
]
}
......@@ -240,6 +241,8 @@ cast_source_set("for_mixer_audio") {
"cast_media_shlib_mixer_audio.cc",
"direct_mixer_source.cc",
"direct_mixer_source.h",
"loopback_handler.cc",
"loopback_handler.h",
"media_pipeline_backend_for_mixer.cc",
"media_pipeline_backend_for_mixer.h",
"stream_mixer.cc",
......
......@@ -4,7 +4,7 @@
#include "chromecast/media/cma/backend/audio_fader.h"
#include <string.h>
#include <algorithm>
#include "base/logging.h"
#include "media/base/audio_bus.h"
......@@ -12,15 +12,35 @@
namespace chromecast {
namespace media {
AudioFader::AudioFader(Source* source, int num_channels, int fade_frames)
AudioFader::AudioFader(Source* source,
base::TimeDelta fade_time,
int num_channels,
int sample_rate,
double playback_rate)
: AudioFader(
source,
std::round(fade_time.InSecondsF() * sample_rate * playback_rate),
num_channels,
sample_rate,
playback_rate) {}
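For instance, with the 5 ms kFadeTime that BufferingMixerSource now passes (see the buffering_mixer_source.cc hunks below), a 48 kHz sample rate, and a 1.0x playback rate, this delegating constructor resolves to std::round(0.005 * 48000 * 1.0) = 240 fade frames.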
AudioFader::AudioFader(Source* source,
int fade_frames,
int num_channels,
int sample_rate,
double playback_rate)
: source_(source),
fade_frames_(fade_frames),
state_(State::kSilent),
fade_buffer_(::media::AudioBus::Create(num_channels, fade_frames)),
buffered_frames_(0),
fade_frames_remaining_(0) {
num_channels_(num_channels),
sample_rate_(sample_rate),
playback_rate_(playback_rate) {
DCHECK(source_);
DCHECK_GE(fade_frames_, 0);
DCHECK_GT(fade_frames_, 0);
DCHECK_GT(num_channels_, 0);
DCHECK_GT(sample_rate_, 0);
fade_buffer_ = ::media::AudioBus::Create(num_channels, fade_frames_);
fade_buffer_->Zero();
}
AudioFader::~AudioFader() = default;
......@@ -31,49 +51,65 @@ int AudioFader::FramesNeededFromSource(int num_fill_frames) const {
return num_fill_frames + fade_frames_ - buffered_frames_;
}
int64_t AudioFader::FramesToMicroseconds(int64_t frames) {
return frames * base::Time::kMicrosecondsPerSecond /
(sample_rate_ * playback_rate_);
}
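As a worked example of the conversion above: with sample_rate_ = 48000 and playback_rate_ = 1.0, FramesToMicroseconds(480) = 480 * 1000000 / (48000 * 1.0) = 10000 microseconds, i.e. 10 ms of audio.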
int AudioFader::FillFrames(int num_frames,
::media::AudioBus* buffer,
int write_offset) {
DCHECK(buffer);
DCHECK_EQ(buffer->channels(), fade_buffer_->channels());
DCHECK_LE(write_offset + num_frames, buffer->frames());
RenderingDelay rendering_delay,
float* const* channel_data) {
DCHECK(channel_data);
// First, copy data from buffered_frames_.
int filled_frames = std::min(buffered_frames_, num_frames);
fade_buffer_->CopyPartialFramesTo(0, filled_frames, write_offset, buffer);
if (filled_frames > 0) {
for (int c = 0; c < num_channels_; ++c) {
float* fade_channel = fade_buffer_->channel(c);
// First, copy data from buffered_frames_.
std::copy_n(fade_channel, filled_frames, channel_data[c]);
// Move data in fade_buffer_ to start.
std::copy(fade_channel + filled_frames, fade_channel + buffered_frames_,
fade_channel);
}
buffered_frames_ -= filled_frames;
num_frames -= filled_frames;
// Move data in fade_buffer_ to start.
for (int c = 0; c < fade_buffer_->channels(); ++c) {
float* channel_data = fade_buffer_->channel(c);
memmove(channel_data, channel_data + filled_frames,
buffered_frames_ * sizeof(float));
}
float* fill_channel_data[num_channels_];
if (num_frames > 0) {
// Still need more frames; ask source to fill.
int extra_fill = source_->FillFaderFrames(
buffer, filled_frames + write_offset, num_frames);
filled_frames += extra_fill;
num_frames -= extra_fill;
for (int c = 0; c < num_channels_; ++c) {
fill_channel_data[c] = channel_data[c] + filled_frames;
}
RenderingDelay delay = rendering_delay;
delay.delay_microseconds +=
FramesToMicroseconds(filled_frames + buffered_frames_);
int filled = source_->FillFaderFrames(num_frames, delay, fill_channel_data);
filled_frames += filled;
num_frames -= filled;
}
// Refill fade_buffer_ from source.
buffered_frames_ += source_->FillFaderFrames(
fade_buffer_.get(), buffered_frames_, fade_frames_ - buffered_frames_);
for (int c = 0; c < num_channels_; ++c) {
fill_channel_data[c] = fade_buffer_->channel(c) + buffered_frames_;
}
RenderingDelay delay = rendering_delay;
delay.delay_microseconds +=
FramesToMicroseconds(filled_frames + buffered_frames_);
buffered_frames_ += source_->FillFaderFrames(fade_frames_ - buffered_frames_,
delay, fill_channel_data);
const bool complete = (num_frames == 0 && buffered_frames_ == fade_frames_);
if (complete) {
CompleteFill(buffer, filled_frames, write_offset);
CompleteFill(channel_data, filled_frames);
} else {
IncompleteFill(buffer, filled_frames, write_offset);
IncompleteFill(channel_data, filled_frames);
}
return filled_frames;
}
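Note the rendering-delay bookkeeping in FillFrames() above: before each call into the source, the delay is advanced by the duration of every frame that will play out ahead of the newly requested data (frames already filled in this call plus frames still held in the fade buffer), so the source always learns when its first new frame will actually be heard.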
void AudioFader::CompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset) {
void AudioFader::CompleteFill(float* const* channel_data, int filled_frames) {
switch (state_) {
case State::kSilent:
// Fade in.
......@@ -93,17 +129,17 @@ void AudioFader::CompleteFill(::media::AudioBus* buffer,
std::max(0, fade_frames_ - fade_frames_remaining_ - 1);
break;
}
FadeIn(buffer, filled_frames, write_offset);
FadeIn(channel_data, filled_frames);
}
void AudioFader::IncompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset) {
void AudioFader::IncompleteFill(float* const* channel_data, int filled_frames) {
switch (state_) {
case State::kSilent:
// Remain silent.
buffered_frames_ = 0;
buffer->ZeroFramesPartial(write_offset, filled_frames);
for (int c = 0; c < num_channels_; ++c) {
std::fill_n(channel_data[c], filled_frames, 0);
}
return;
case State::kFadingIn:
// Fade back out.
......@@ -120,79 +156,75 @@ void AudioFader::IncompleteFill(::media::AudioBus* buffer,
// Continue fading out.
break;
}
FadeOut(buffer, filled_frames, write_offset);
FadeOut(channel_data, filled_frames);
}
// static
void AudioFader::FadeInHelper(::media::AudioBus* buffer,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining) {
const float inverse_fade_frames = 1.0f / static_cast<float>(fade_frames);
const int fade_limit = std::min(filled_frames, fade_frames_remaining + 1);
void AudioFader::FadeIn(float* const* channel_data, int filled_frames) {
DCHECK(state_ == State::kFadingIn);
DCHECK_LE(write_offset + fade_limit, buffer->frames());
for (int c = 0; c < buffer->channels(); ++c) {
float* channel_data = buffer->channel(c);
for (int f = write_offset; f < (write_offset + fade_limit); ++f) {
const float fade_multiplier =
1.0 - (fade_frames_remaining - f) * inverse_fade_frames;
channel_data[f] *= fade_multiplier;
}
FadeInHelper(channel_data, num_channels_, filled_frames, fade_frames_,
fade_frames_remaining_);
fade_frames_remaining_ = std::max(0, fade_frames_remaining_ - filled_frames);
if (fade_frames_remaining_ == 0) {
state_ = State::kPlaying;
}
}
// static
void AudioFader::FadeOutHelper(::media::AudioBus* buffer,
void AudioFader::FadeInHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining) {
const float inverse_fade_frames = 1.0f / static_cast<float>(fade_frames);
const int fade_limit = std::min(filled_frames, fade_frames_remaining + 1);
DCHECK_LE(write_offset + fade_limit, buffer->frames());
for (int c = 0; c < buffer->channels(); ++c) {
float* channel_data = buffer->channel(c);
for (int f = write_offset; f < (write_offset + fade_limit); ++f) {
for (int c = 0; c < num_channels; ++c) {
float* channel = channel_data[c];
for (int f = 0; f < fade_limit; ++f) {
const float fade_multiplier =
(fade_frames_remaining - f) * inverse_fade_frames;
channel_data[f] *= fade_multiplier;
}
1.0 - (fade_frames_remaining - f) * inverse_fade_frames;
channel[f] *= fade_multiplier;
}
if (filled_frames > fade_frames_remaining) {
buffer->ZeroFramesPartial(write_offset + fade_frames_remaining,
filled_frames - fade_frames_remaining);
}
}
void AudioFader::FadeIn(::media::AudioBus* buffer,
int filled_frames,
int write_offset) {
DCHECK(state_ == State::kFadingIn);
void AudioFader::FadeOut(float* const* channel_data, int filled_frames) {
DCHECK(state_ == State::kFadingOut);
FadeInHelper(buffer, filled_frames, write_offset, fade_frames_,
FadeOutHelper(channel_data, num_channels_, filled_frames, fade_frames_,
fade_frames_remaining_);
fade_frames_remaining_ = std::max(0, fade_frames_remaining_ - filled_frames);
if (fade_frames_remaining_ == 0) {
state_ = State::kPlaying;
state_ = State::kSilent;
buffered_frames_ = 0;
}
}
void AudioFader::FadeOut(::media::AudioBus* buffer,
// static
void AudioFader::FadeOutHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset) {
DCHECK(state_ == State::kFadingOut);
FadeOutHelper(buffer, filled_frames, write_offset, fade_frames_,
fade_frames_remaining_);
fade_frames_remaining_ = std::max(0, fade_frames_remaining_ - filled_frames);
int fade_frames,
int fade_frames_remaining) {
const float inverse_fade_frames = 1.0f / static_cast<float>(fade_frames);
const int fade_limit = std::min(filled_frames, fade_frames_remaining + 1);
if (fade_frames_remaining_ == 0) {
state_ = State::kSilent;
buffered_frames_ = 0;
for (int c = 0; c < num_channels; ++c) {
float* channel = channel_data[c];
for (int f = 0; f < fade_limit; ++f) {
const float fade_multiplier =
(fade_frames_remaining - f) * inverse_fade_frames;
channel[f] *= fade_multiplier;
}
}
if (filled_frames > fade_frames_remaining) {
for (int c = 0; c < num_channels; ++c) {
std::fill_n(channel_data[c] + fade_frames_remaining,
filled_frames - fade_frames_remaining, 0);
}
}
}
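As a concrete check of the fade math (assumed numbers, not taken from the CL): with fade_frames = 128, inverse_fade_frames = 1/128, and a fade-out starting at fade_frames_remaining = 128, FadeOutHelper scales frame 0 by 128/128 = 1.0 and frame 127 by 1/128 ≈ 0.0078; the fade-in multiplier is 1.0 minus the same ramp. Any filled frames beyond fade_frames_remaining are zero-filled.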
......
......@@ -5,9 +5,12 @@
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_FADER_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_FADER_H_
#include <cstdint>
#include <memory>
#include "base/macros.h"
#include "base/time/time.h"
#include "chromecast/public/media/media_pipeline_backend.h"
namespace media {
class AudioBus;
......@@ -24,57 +27,67 @@ namespace media {
// constructor/destructor must be called on the same thread.
class AudioFader {
public:
using RenderingDelay = MediaPipelineBackend::AudioDecoder::RenderingDelay;
// The source of real audio data for the fader.
class Source {
public:
// Fills at most |num_frames| frames of audio into |buffer|, starting at
// |frame_offset|. Returns the actual number of frames of audio that were
// filled (may be less than |num_frames| if the source does not have
// enough data). This method is only called synchronously from within
// a call to FillFrames().
virtual int FillFaderFrames(::media::AudioBus* buffer,
int frame_offset,
int num_frames) = 0;
// Called to get more audio data for playback. The source must fill in
// the |channels| with up to |num_frames| of audio. Note that only planar
// float format is supported. The |rendering_delay| indicates when the
// first frame of the filled data will be played out.
// Note that this method is called on a high-priority audio output thread
// and must not block.
// Returns the number of frames filled.
virtual int FillFaderFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channels) = 0;
protected:
virtual ~Source() = default;
};
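A minimal conforming Source, for illustration only (SilenceSource is a hypothetical name, not part of this change), fills each planar channel with silence:

  #include <algorithm>  // std::fill_n

  class SilenceSource : public AudioFader::Source {
   public:
    explicit SilenceSource(int num_channels) : num_channels_(num_channels) {}

    // AudioFader::Source implementation: writes |num_frames| of silence into
    // each planar channel and reports a full fill.
    int FillFaderFrames(int num_frames,
                        AudioFader::RenderingDelay rendering_delay,
                        float* const* channels) override {
      for (int c = 0; c < num_channels_; ++c) {
        std::fill_n(channels[c], num_frames, 0.0f);
      }
      return num_frames;
    }

   private:
    const int num_channels_;
  };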
// |fade_frames| is the number of frames over which a complete fade in/out
// will take place.
AudioFader(Source* source, int num_channels, int fade_frames);
AudioFader(Source* source,
base::TimeDelta fade_time,
int num_channels,
int sample_rate,
double playback_rate);
AudioFader(Source* source,
int fade_frames,
int num_channels,
int sample_rate,
double playback_rate);
~AudioFader();
int buffered_frames() const { return buffered_frames_; }
// Fills |buffer| with up to |num_frames| frames of data, starting at
// |write_offset| within |buffer|, and fading as appropriate to avoid
// pops/clicks. This will call through to the source to get more data. Returns
// the number of frames filled.
int FillFrames(int num_frames, ::media::AudioBus* buffer, int write_offset);
// Fills in |channel_data| with |num_frames| frames of properly faded audio.
// The |rendering_delay| should reflect when the first sample of the filled
// audio is expected to play out.
int FillFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channel_data);
// Returns the total number of frames that will be requested from the source
// (potentially over multiple calls to source_->FillFaderFrames()) if
// FillFrames() is called to fill |num_fill_frames| frames.
int FramesNeededFromSource(int num_fill_frames) const;
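For example (using the unit-test constants below): with fade_frames_ = 128 and an empty internal buffer, FramesNeededFromSource(85) returns 85 + 128 - 0 = 213; once the fade buffer is full, it returns just 85.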
// Helper methods to fade in/out an AudioBus. |buffer| contains the data to
// fade; |filled_frames| is the amount of data actually in |buffer| (if the
// buffer was partially filled, this will not be equal to buffer->frames()).
// |write_offset| is the offset within |buffer| to starting writing frames
// to. |fade_frames| is the number of frames over which a complete fade should
// Helper methods to fade in/out a buffer. |channel_data| contains the data to
// fade; |filled_frames| is the amount of data actually in |channel_data|.
// |fade_frames| is the number of frames over which a complete fade should
// happen (ie, how many frames it takes to go from a 1.0 to 0.0 multiplier).
// |fade_frames_remaining| is the number of frames left in the current fade
// (which will be less than |fade_frames| if part of the fade has already
// been completed on a previous buffer).
static void FadeInHelper(::media::AudioBus* buffer,
static void FadeInHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining);
static void FadeOutHelper(::media::AudioBus* buffer,
static void FadeOutHelper(float* const* channel_data,
int num_channels,
int filled_frames,
int write_offset,
int fade_frames,
int fade_frames_remaining);
......@@ -86,22 +99,23 @@ class AudioFader {
kFadingOut,
};
void CompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset);
void IncompleteFill(::media::AudioBus* buffer,
int filled_frames,
int write_offset);
void FadeIn(::media::AudioBus* buffer, int filled_frames, int write_offset);
void FadeOut(::media::AudioBus* buffer, int filled_frames, int write_offset);
int64_t FramesToMicroseconds(int64_t frames);
void CompleteFill(float* const* channel_data, int filled_frames);
void IncompleteFill(float* const* channel_data, int filled_frames);
void FadeIn(float* const* channel_data, int filled_frames);
void FadeOut(float* const* channel_data, int filled_frames);
Source* const source_;
const int fade_frames_;
const int num_channels_;
const int sample_rate_;
const double playback_rate_;
State state_;
State state_ = State::kSilent;
std::unique_ptr<::media::AudioBus> fade_buffer_;
int buffered_frames_;
int fade_frames_remaining_;
int buffered_frames_ = 0;
int fade_frames_remaining_ = 0;
DISALLOW_COPY_AND_ASSIGN(AudioFader);
};
......
......@@ -21,6 +21,7 @@ namespace {
const int kNumChannels = 2;
const int kFadeFrames = 128;
const int kSampleRate = 48000;
std::unique_ptr<::media::AudioBus> CreateAudioBus(int num_frames) {
auto buffer = ::media::AudioBus::Create(kNumChannels, num_frames);
......@@ -41,18 +42,17 @@ class TestFaderSource : public AudioFader::Source {
last_filled_frames_(0) {}
// AudioFader::Source implementation:
int FillFaderFrames(::media::AudioBus* buffer,
int frame_offset,
int num_frames) override {
int FillFaderFrames(int num_frames,
AudioFader::RenderingDelay rendering_delay,
float* const* channels) override {
last_requested_frames_ = num_frames;
total_requested_frames_ += num_frames;
int count = std::min(num_frames, max_fill_frames_);
last_filled_frames_ = count;
for (int c = 0; c < buffer->channels(); ++c) {
float* channel_data = buffer->channel(c) + frame_offset;
std::fill_n(channel_data, count, 1.0f);
for (int c = 0; c < kNumChannels; ++c) {
std::fill_n(channels[c], count, 1.0f);
}
return count;
......@@ -77,7 +77,7 @@ class TestFaderSource : public AudioFader::Source {
TEST(AudioFaderTest, Startup) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -88,9 +88,12 @@ TEST(AudioFaderTest, Startup) {
EXPECT_EQ(frames_needed, kFadeFrames + kFillSize);
auto dest = CreateAudioBus(kFillSize);
// TODO(almasrymina): need to add unittests for cases where write_offset is
// non-zero.
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Test that FramesNeededFromSource() works correctly.
EXPECT_EQ(source.total_requested_frames(), frames_needed);
......@@ -105,7 +108,7 @@ TEST(AudioFaderTest, Startup) {
TEST(AudioFaderTest, FadeInOver2Buffers) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -113,7 +116,12 @@ TEST(AudioFaderTest, FadeInOver2Buffers) {
const int kFillSize = kFadeFrames * 2 / 3;
int frames_needed = fader.FramesNeededFromSource(kFillSize);
auto dest = CreateAudioBus(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Fader's internal buffer should be full.
EXPECT_EQ(fader.buffered_frames(), kFadeFrames);
......@@ -124,7 +132,8 @@ TEST(AudioFaderTest, FadeInOver2Buffers) {
// Fill more data.
frames_needed += fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
EXPECT_EQ(fader.buffered_frames(), kFadeFrames);
// Test that FramesNeededFromSource() works correctly.
......@@ -139,7 +148,7 @@ TEST(AudioFaderTest, FadeInOver2Buffers) {
TEST(AudioFaderTest, ContinuePlaying) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -148,14 +157,20 @@ TEST(AudioFaderTest, ContinuePlaying) {
auto dest = CreateAudioBus(kFillSize);
int frames_needed = fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Data should be faded in.
EXPECT_EQ(dest->channel(0)[kFadeFrames], 1.0f);
// Now request more data. Data should remain fully faded in.
frames_needed += fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
EXPECT_EQ(dest->channel(0)[0], 1.0f);
// Test that FramesNeededFromSource() works correctly.
......@@ -167,7 +182,7 @@ TEST(AudioFaderTest, ContinuePlaying) {
TEST(AudioFaderTest, FadeOut) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -176,21 +191,28 @@ TEST(AudioFaderTest, FadeOut) {
auto dest = CreateAudioBus(kFillSize);
int frames_needed = fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Data should be faded in.
EXPECT_EQ(dest->channel(0)[kFadeFrames], 1.0f);
// Now request more data. Data should remain fully faded in.
frames_needed += fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
EXPECT_EQ(dest->channel(0)[0], 1.0f);
// Now make the source not provide enough data.
EXPECT_GT(fader.FramesNeededFromSource(kFillSize), 0);
source.set_max_fill_frames(0);
frames_needed += fader.FramesNeededFromSource(kFillSize);
int filled = fader.FillFrames(kFillSize, dest.get(), 0);
int filled =
fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels);
EXPECT_EQ(filled, kFadeFrames);
// Test that FramesNeededFromSource() works correctly.
......@@ -207,7 +229,7 @@ TEST(AudioFaderTest, FadeOut) {
TEST(AudioFaderTest, FadeOutPartially) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -216,21 +238,28 @@ TEST(AudioFaderTest, FadeOutPartially) {
auto dest = CreateAudioBus(kFillSize);
int frames_needed = fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Data should be faded in.
EXPECT_EQ(dest->channel(0)[kFadeFrames], 1.0f);
// Now request more data. Data should remain fully faded in.
frames_needed += fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
EXPECT_EQ(dest->channel(0)[0], 1.0f);
// Now make the source not provide enough data.
EXPECT_GT(fader.FramesNeededFromSource(kFillSize), 0);
source.set_max_fill_frames(0);
frames_needed += fader.FramesNeededFromSource(kFadeFrames / 3);
int filled = fader.FillFrames(kFadeFrames / 3, dest.get(), 0);
int filled =
fader.FillFrames(kFadeFrames / 3, AudioFader::RenderingDelay(), channels);
EXPECT_EQ(filled, kFadeFrames / 3);
// Data should be partially faded out.
......@@ -245,7 +274,8 @@ TEST(AudioFaderTest, FadeOutPartially) {
// Now let the source provide data again.
source.set_max_fill_frames(std::numeric_limits<int>::max());
frames_needed += fader.FramesNeededFromSource(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Data should fade back in from the point it faded out to.
EXPECT_GE(dest->channel(0)[0], fade_min);
EXPECT_EQ(dest->channel(0)[kFillSize - 1], 1.0f);
......@@ -259,7 +289,7 @@ TEST(AudioFaderTest, FadeOutPartially) {
TEST(AudioFaderTest, IncompleteFadeIn) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -271,7 +301,12 @@ TEST(AudioFaderTest, IncompleteFadeIn) {
// from silence, the fader should output silence.
auto dest = CreateAudioBus(kFillSize);
source.set_max_fill_frames(10);
int filled = fader.FillFrames(kFillSize, dest.get(), 0);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
int filled =
fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels);
// Test that FramesNeededFromSource() works correctly.
EXPECT_EQ(source.total_requested_frames(), frames_needed);
......@@ -287,7 +322,7 @@ TEST(AudioFaderTest, IncompleteFadeIn) {
TEST(AudioFaderTest, FadeInPartially) {
TestFaderSource source;
AudioFader fader(&source, kNumChannels, kFadeFrames);
AudioFader fader(&source, kFadeFrames, kNumChannels, kSampleRate, 1.0);
// Fader has no buffered frames initially.
EXPECT_EQ(fader.buffered_frames(), 0);
......@@ -296,7 +331,12 @@ TEST(AudioFaderTest, FadeInPartially) {
int frames_needed = fader.FramesNeededFromSource(kFillSize);
auto dest = CreateAudioBus(kFillSize);
EXPECT_EQ(fader.FillFrames(kFillSize, dest.get(), 0), kFillSize);
float* channels[kNumChannels];
for (int c = 0; c < kNumChannels; ++c) {
channels[c] = dest->channel(c);
}
EXPECT_EQ(fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels),
kFillSize);
// Fader's internal buffer should be full.
EXPECT_EQ(fader.buffered_frames(), kFadeFrames);
......@@ -311,7 +351,8 @@ TEST(AudioFaderTest, FadeInPartially) {
// back out to silence.
source.set_max_fill_frames(0);
frames_needed += fader.FramesNeededFromSource(kFillSize);
int filled = fader.FillFrames(kFillSize, dest.get(), 0);
int filled =
fader.FillFrames(kFillSize, AudioFader::RenderingDelay(), channels);
// Data should be faded out.
EXPECT_LE(dest->channel(0)[0], fade_max);
......
......@@ -22,6 +22,10 @@
namespace chromecast {
namespace media {
namespace {
const int kDefaultBufferSize = 2048;
} // namespace
class AudioOutputRedirector::InputImpl : public AudioOutputRedirectorInput {
public:
using RenderingDelay = MediaPipelineBackend::AudioDecoder::RenderingDelay;
......@@ -59,6 +63,10 @@ AudioOutputRedirector::InputImpl::InputImpl(
DCHECK(output_redirector_);
DCHECK(mixer_input_);
temp_buffer_ = ::media::AudioBus::Create(mixer_input_->num_channels(),
kDefaultBufferSize);
temp_buffer_->Zero();
mixer_input_->AddAudioOutputRedirector(this);
}
......@@ -84,16 +92,21 @@ void AudioOutputRedirector::InputImpl::Redirect(::media::AudioBus* const buffer,
buffer->CopyPartialFramesTo(0, num_frames, 0, temp_buffer_.get());
}
const int num_channels = buffer->channels();
float* channels[num_channels];
for (int c = 0; c < num_channels; ++c) {
channels[c] = buffer->channel(c);
}
if (previous_ended_in_silence_) {
if (!redirected) {
// Smoothly fade in from previous silence.
AudioFader::FadeInHelper(temp_buffer_.get(), num_frames, 0, num_frames,
AudioFader::FadeInHelper(channels, num_channels, num_frames, num_frames,
num_frames);
}
} else if (redirected) {
// Smoothly fade out to silence, since output is now being redirected by a
// previous output splitter.
AudioFader::FadeOutHelper(temp_buffer_.get(), num_frames, 0, num_frames,
AudioFader::FadeOutHelper(channels, num_channels, num_frames, num_frames,
num_frames);
}
previous_ended_in_silence_ = redirected;
......@@ -111,6 +124,13 @@ AudioOutputRedirector::AudioOutputRedirector(
channel_data_(config.num_output_channels) {
DCHECK(output_);
DCHECK_GT(config_.num_output_channels, 0);
mixed_ = ::media::AudioBus::Create(config_.num_output_channels,
kDefaultBufferSize);
mixed_->Zero();
for (int c = 0; c < config_.num_output_channels; ++c) {
channel_data_[c] = mixed_->channel(c);
}
}
AudioOutputRedirector::~AudioOutputRedirector() = default;
......
......@@ -26,11 +26,6 @@
#include "media/base/audio_timestamp_helper.h"
#include "media/base/decoder_buffer.h"
#define POST_TASK_TO_CALLER_THREAD(task, ...) \
caller_task_runner_->PostTask( \
FROM_HERE, \
base::BindOnce(&BufferingMixerSource::task, weak_this_, ##__VA_ARGS__));
namespace chromecast {
namespace media {
......@@ -38,7 +33,7 @@ namespace {
const int kNumOutputChannels = 2;
const int64_t kDefaultInputQueueMs = 90;
const int kFadeTimeMs = 5;
constexpr base::TimeDelta kFadeTime = base::TimeDelta::FromMilliseconds(5);
const int kDefaultAudioReadyForPlaybackThresholdMs = 70;
// Special queue size and start threshold for "communications" streams to avoid
......@@ -46,6 +41,8 @@ const int kDefaultAudioReadyForPlaybackThresholdMs = 70;
const int64_t kCommsInputQueueMs = 200;
const int64_t kCommsStartThresholdMs = 150;
const int kFreeBufferListSize = 64;
std::string AudioContentTypeToString(media::AudioContentType type) {
switch (type) {
case media::AudioContentType::kAlarm:
......@@ -110,10 +107,14 @@ BufferingMixerSource::LockedMembers::Members::Members(
int64_t playback_start_timestamp,
int64_t playback_start_pts)
: fader_(source,
kFadeTime,
num_channels,
MsToSamples(kFadeTimeMs, input_samples_per_second)),
input_samples_per_second,
1.0 /* playback_rate */),
playback_start_timestamp_(playback_start_timestamp),
playback_start_pts_(playback_start_pts) {}
playback_start_pts_(playback_start_pts) {
buffers_to_be_freed_.reserve(kFreeBufferListSize);
}
BufferingMixerSource::LockedMembers::Members::~Members() = default;
......@@ -191,7 +192,15 @@ BufferingMixerSource::BufferingMixerSource(Delegate* delegate,
DCHECK(delegate_);
DCHECK(mixer_);
DCHECK_LE(start_threshold_frames_, max_queued_frames_);
weak_this_ = weak_factory_.GetWeakPtr();
old_buffers_to_be_freed_.reserve(kFreeBufferListSize);
pcm_completion_task_ =
base::BindRepeating(&BufferingMixerSource::PostPcmCompletion, weak_this_);
eos_task_ = base::BindRepeating(&BufferingMixerSource::PostEos, weak_this_);
ready_for_playback_task_ = base::BindRepeating(
&BufferingMixerSource::PostAudioReadyForPlayback, weak_this_);
mixer_->AddInput(this);
}
......@@ -289,6 +298,8 @@ int BufferingMixerSource::playout_channel() {
void BufferingMixerSource::WritePcm(scoped_refptr<DecoderBufferBase> data) {
DCHECK(caller_task_runner_->BelongsToCurrentThread());
RenderingDelay delay;
{
auto locked = locked_members_.Lock();
if (locked->state_ == State::kUninitialized ||
locked->queued_frames_ + locked->fader_.buffered_frames() >=
......@@ -297,8 +308,11 @@ void BufferingMixerSource::WritePcm(scoped_refptr<DecoderBufferBase> data) {
locked->pending_data_ = std::move(data);
return;
}
RenderingDelay delay = QueueData(std::move(data));
PostPcmCompletion(delay);
old_buffers_to_be_freed_.swap(locked->buffers_to_be_freed_);
delay = QueueData(std::move(data));
}
old_buffers_to_be_freed_.clear();
delegate_->OnWritePcmCompletion(delay);
}
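Distilled, the deferred-free pattern in WritePcm() above looks like this (a sketch; the names match the members introduced in this change):

  std::vector<scoped_refptr<DecoderBufferBase>> old_buffers_to_be_freed_;  // caller thread only
  {
    auto locked = locked_members_.Lock();
    // O(1) under the lock: steal the list of buffers queued for freeing.
    old_buffers_to_be_freed_.swap(locked->buffers_to_be_freed_);
  }
  // Dropping the references (and any resulting frees) happens here, on the
  // caller thread, never on the real-time mixer thread.
  old_buffers_to_be_freed_.clear();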
BufferingMixerSource::RenderingDelay BufferingMixerSource::QueueData(
......@@ -308,7 +322,7 @@ BufferingMixerSource::RenderingDelay BufferingMixerSource::QueueData(
LOG(INFO) << "End of stream for " << device_id_ << " (" << this << ")";
locked->state_ = State::kGotEos;
if (!locked->started_ && locked->playback_start_timestamp_ != INT64_MIN) {
POST_TASK_TO_CALLER_THREAD(PostAudioReadyForPlayback);
caller_task_runner_->PostTask(FROM_HERE, ready_for_playback_task_);
}
} else if (locked->started_ ||
data->timestamp() +
......@@ -323,7 +337,7 @@ BufferingMixerSource::RenderingDelay BufferingMixerSource::QueueData(
if (!locked->started_ &&
locked->queued_frames_ >= start_threshold_frames_ &&
locked->playback_start_timestamp_ != INT64_MIN) {
POST_TASK_TO_CALLER_THREAD(PostAudioReadyForPlayback);
caller_task_runner_->PostTask(FROM_HERE, ready_for_playback_task_);
}
}
// Otherwise, drop |data| since it is before the start PTS.
......@@ -359,7 +373,6 @@ void BufferingMixerSource::InitializeAudioPlayback(
RenderingDelay initial_rendering_delay) {
// Start accepting buffers into the queue.
bool queued_data = false;
RenderingDelay pending_buffer_delay;
{
auto locked = locked_members_.Lock();
locked->mixer_rendering_delay_ = initial_rendering_delay;
......@@ -372,13 +385,13 @@ void BufferingMixerSource::InitializeAudioPlayback(
if (locked->pending_data_ &&
locked->queued_frames_ + locked->fader_.buffered_frames() <
max_queued_frames_) {
pending_buffer_delay = QueueData(std::move(locked->pending_data_));
locked->last_buffer_delay_ = QueueData(std::move(locked->pending_data_));
queued_data = true;
}
}
if (queued_data) {
POST_TASK_TO_CALLER_THREAD(PostPcmCompletion, pending_buffer_delay);
caller_task_runner_->PostTask(FROM_HERE, pcm_completion_task_);
}
}
......@@ -498,7 +511,6 @@ int BufferingMixerSource::FillAudioPlaybackFrames(
bool queued_more_data = false;
bool signal_eos = false;
bool remove_self = false;
RenderingDelay pending_buffer_delay;
{
auto locked = locked_members_.Lock();
......@@ -539,7 +551,11 @@ int BufferingMixerSource::FillAudioPlaybackFrames(
remaining_silence_frames_ = 0;
}
filled = locked->fader_.FillFrames(num_frames, buffer, write_offset);
float* channels[num_channels_];
for (int c = 0; c < num_channels_; ++c) {
channels[c] = buffer->channel(c) + write_offset;
}
filled = locked->fader_.FillFrames(num_frames, rendering_delay, channels);
locked->mixer_rendering_delay_ = rendering_delay;
locked->extra_delay_frames_ = num_frames + locked->fader_.buffered_frames();
......@@ -548,7 +564,7 @@ int BufferingMixerSource::FillAudioPlaybackFrames(
if (locked->pending_data_ &&
locked->queued_frames_ + locked->fader_.buffered_frames() <
max_queued_frames_) {
pending_buffer_delay = QueueData(std::move(locked->pending_data_));
locked->last_buffer_delay_ = QueueData(std::move(locked->pending_data_));
queued_more_data = true;
}
......@@ -567,10 +583,10 @@ int BufferingMixerSource::FillAudioPlaybackFrames(
}
if (queued_more_data) {
POST_TASK_TO_CALLER_THREAD(PostPcmCompletion, pending_buffer_delay);
caller_task_runner_->PostTask(FROM_HERE, pcm_completion_task_);
}
if (signal_eos) {
POST_TASK_TO_CALLER_THREAD(PostEos);
caller_task_runner_->PostTask(FROM_HERE, eos_task_);
}
if (remove_self) {
......@@ -583,11 +599,10 @@ int64_t BufferingMixerSource::DataToFrames(int64_t size_in_bytes) {
return size_in_bytes / (num_channels_ * sizeof(float));
}
int BufferingMixerSource::FillFaderFrames(::media::AudioBus* dest,
int frame_offset,
int num_frames) {
DCHECK(dest);
DCHECK_EQ(num_channels_, dest->channels());
int BufferingMixerSource::FillFaderFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channels) {
DCHECK(channels);
auto locked = locked_members_.AssertAcquired();
if (locked->zero_fader_frames_ || !locked->started_ || locked->paused_ ||
......@@ -607,31 +622,25 @@ int BufferingMixerSource::FillFaderFrames(::media::AudioBus* dest,
std::min(num_frames, buffer_frames - locked->current_buffer_offset_);
DCHECK(frames_to_copy >= 0 && frames_to_copy <= num_frames)
<< " frames_to_copy=" << frames_to_copy << " num_frames=" << num_frames
<< " buffer_frames=" << buffer_frames
<< " buffer_frames=" << buffer_frames << " num_filled=" << num_filled
<< " locked->current_buffer_offset_=" << locked->current_buffer_offset_
<< " buffer=" << buffer->data_size();
DCHECK_LE(frames_to_copy + frame_offset, dest->frames())
<< " frames_to_copy=" << frames_to_copy
<< " dest->frames()=" << dest->frames()
<< " frame_offset=" << frame_offset;
const float* buffer_samples =
reinterpret_cast<const float*>(buffer->data());
for (int c = 0; c < num_channels_; ++c) {
const float* buffer_channel = buffer_samples + (buffer_frames * c);
memcpy(dest->channel(c) + frame_offset,
buffer_channel + locked->current_buffer_offset_,
frames_to_copy * sizeof(float));
std::copy_n(buffer_channel + locked->current_buffer_offset_,
frames_to_copy, channels[c] + num_filled);
}
num_frames -= frames_to_copy;
locked->queued_frames_ -= frames_to_copy;
frame_offset += frames_to_copy;
num_filled += frames_to_copy;
locked->current_buffer_offset_ += frames_to_copy;
if (locked->current_buffer_offset_ == buffer_frames) {
locked->buffers_to_be_freed_.push_back(std::move(locked->queue_.front()));
locked->queue_.pop_front();
locked->current_buffer_offset_ = 0;
}
......@@ -640,8 +649,13 @@ int BufferingMixerSource::FillFaderFrames(::media::AudioBus* dest,
return num_filled;
}
void BufferingMixerSource::PostPcmCompletion(RenderingDelay delay) {
void BufferingMixerSource::PostPcmCompletion() {
DCHECK(caller_task_runner_->BelongsToCurrentThread());
RenderingDelay delay;
{
auto locked = locked_members_.Lock();
delay = locked->last_buffer_delay_;
}
delegate_->OnWritePcmCompletion(delay);
}
......@@ -665,7 +679,9 @@ void BufferingMixerSource::OnAudioPlaybackError(MixerError error) {
<< " now being ignored due to output sample rate change";
}
POST_TASK_TO_CALLER_THREAD(PostError, error);
caller_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&BufferingMixerSource::PostError, weak_this_, error));
auto locked = locked_members_.Lock();
locked->mixer_error_ = true;
......
......@@ -7,6 +7,7 @@
#include <string>
#include "base/callback.h"
#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
......@@ -147,8 +148,13 @@ class BufferingMixerSource : public MixerInput::Source,
bool mixer_error_ = false;
scoped_refptr<DecoderBufferBase> pending_data_;
base::circular_deque<scoped_refptr<DecoderBufferBase>> queue_;
// We let the caller thread free audio buffers, since freeing memory can
// be expensive; this avoids potentially long-running operations on the
// real-time mixer thread.
std::vector<scoped_refptr<DecoderBufferBase>> buffers_to_be_freed_;
int queued_frames_ = 0;
RenderingDelay mixer_rendering_delay_;
RenderingDelay last_buffer_delay_;
int extra_delay_frames_ = 0;
int current_buffer_offset_ = 0;
AudioFader fader_;
......@@ -229,13 +235,13 @@ class BufferingMixerSource : public MixerInput::Source,
void FinalizeAudioPlayback() override;
// AudioFader::Source implementation:
int FillFaderFrames(::media::AudioBus* dest,
int frame_offset,
int num_frames) override;
int FillFaderFrames(int num_frames,
RenderingDelay rendering_delay,
float* const* channels) override;
RenderingDelay QueueData(scoped_refptr<DecoderBufferBase> data);
void PostPcmCompletion(RenderingDelay delay);
void PostPcmCompletion();
void PostEos();
void PostError(MixerError error);
void PostAudioReadyForPlayback();
......@@ -257,10 +263,17 @@ class BufferingMixerSource : public MixerInput::Source,
const int start_threshold_frames_;
bool audio_ready_for_playback_fired_ = false;
// Only used on the caller thread.
std::vector<scoped_refptr<DecoderBufferBase>> old_buffers_to_be_freed_;
LockedMembers locked_members_;
int remaining_silence_frames_ = 0;
base::RepeatingClosure pcm_completion_task_;
base::RepeatingClosure eos_task_;
base::RepeatingClosure ready_for_playback_task_;
base::WeakPtr<BufferingMixerSource> weak_this_;
base::WeakPtrFactory<BufferingMixerSource> weak_factory_;
......
......@@ -62,6 +62,7 @@ void FilterGroup::Initialize(const AudioPostProcessor2::Config& output_config) {
ResizeBuffers();
// Run a buffer of 0's to initialize rendering delay.
std::fill_n(interleaved_.data(), interleaved_.size(), 0.0f);
delay_seconds_ = post_processing_pipeline_->ProcessFrames(
interleaved_.data(), input_frames_per_write_, last_volume_,
true /* is_silence */);
......@@ -203,6 +204,7 @@ int FilterGroup::GetOutputChannelCount() const {
void FilterGroup::ResizeBuffers() {
mixed_ = ::media::AudioBus::Create(num_channels_, input_frames_per_write_);
mixed_->Zero();
temp_buffers_.clear();
for (MixerInput* input : active_inputs_) {
AddTempBuffer(input->num_channels(), input_frames_per_write_);
......@@ -217,6 +219,7 @@ void FilterGroup::AddTempBuffer(int num_channels, int num_frames) {
if (!temp_buffers_[num_channels]) {
temp_buffers_[num_channels] =
::media::AudioBus::Create(num_channels, num_frames);
temp_buffers_[num_channels]->Zero();
}
}
......
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/loopback_handler.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/containers/flat_set.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "base/threading/thread.h"
#include "chromecast/public/media/external_audio_pipeline_shlib.h"
namespace chromecast {
namespace media {
namespace {
const int kDefaultBufferSize = 1024 * 2 * sizeof(float);
const int kNumBuffers = 16;
const int kMaxTasks = kNumBuffers + 1;
class LoopbackHandlerImpl : public LoopbackHandler,
public CastMediaShlib::LoopbackAudioObserver {
public:
LoopbackHandlerImpl(scoped_refptr<base::SingleThreadTaskRunner> task_runner)
: external_audio_pipeline_supported_(
ExternalAudioPipelineShlib::IsSupported()),
task_signal_(&lock_) {
CreateBuffersIfNeeded(kDefaultBufferSize);
if (task_runner) {
task_runner_ = std::move(task_runner);
} else {
thread_ = std::make_unique<base::Thread>("CMA loopback");
base::Thread::Options options;
options.priority = base::ThreadPriority::REALTIME_AUDIO;
thread_->StartWithOptions(options);
task_runner_ = thread_->task_runner();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::LoopbackTaskLoop,
base::Unretained(this)));
}
if (external_audio_pipeline_supported_) {
ExternalAudioPipelineShlib::AddExternalLoopbackAudioObserver(this);
}
}
private:
struct Task {
Task(int64_t expected_playback_time,
SampleFormat format,
int sample_rate,
int channels,
uint32_t tag,
std::unique_ptr<uint8_t[]> data,
int length)
: expected_playback_time(expected_playback_time),
format(format),
sample_rate(sample_rate),
channels(channels),
tag(tag),
data(std::move(data)),
length(length) {}
const int64_t expected_playback_time;
const SampleFormat format;
const int sample_rate;
const int channels;
const uint32_t tag;
std::unique_ptr<uint8_t[]> data;
const int length;
};
~LoopbackHandlerImpl() override {
{
base::AutoLock lock(lock_);
stop_thread_ = true;
}
task_signal_.Signal();
if (thread_) {
thread_->Stop();
}
}
// LoopbackHandler implementation:
void Destroy() override {
if (external_audio_pipeline_supported_) {
ExternalAudioPipelineShlib::RemoveExternalLoopbackAudioObserver(this);
} else {
delete this;
}
}
void SetDataSize(int data_size_bytes) override {
CreateBuffersIfNeeded(data_size_bytes);
}
scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() override {
return task_runner_;
}
void AddObserver(CastMediaShlib::LoopbackAudioObserver* observer) override {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::AddObserverOnThread,
base::Unretained(this), observer));
task_signal_.Signal();
}
void RemoveObserver(
CastMediaShlib::LoopbackAudioObserver* observer) override {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::RemoveObserverOnThread,
base::Unretained(this), observer));
task_signal_.Signal();
}
void SendData(int64_t timestamp,
int sample_rate,
int num_channels,
float* data,
int frames) override {
if (external_audio_pipeline_supported_) {
return;
}
SendLoopbackData(timestamp, kSampleFormatF32, sample_rate, num_channels,
reinterpret_cast<uint8_t*>(data),
frames * num_channels * sizeof(float));
}
void SendInterrupt() override {
base::AutoLock lock(lock_);
SendInterruptedLocked();
}
// CastMediaShlib::LoopbackAudioObserver implementation:
void OnLoopbackAudio(int64_t timestamp,
SampleFormat format,
int sample_rate,
int num_channels,
uint8_t* data,
int length) override {
SendLoopbackData(timestamp, format, sample_rate, num_channels, data,
length);
}
void OnLoopbackInterrupted() override { SendInterrupt(); }
void OnRemoved() override {
// We expect that the external pipeline will not invoke any other callbacks
// after this one.
delete this;
// No need to pipe this; LoopbackHandlerImpl will let the other observers
// know when it's being removed.
}
void AddObserverOnThread(CastMediaShlib::LoopbackAudioObserver* observer) {
DCHECK(task_runner_->BelongsToCurrentThread());
LOG(INFO) << __func__;
DCHECK(observer);
observers_.insert(observer);
}
void RemoveObserverOnThread(CastMediaShlib::LoopbackAudioObserver* observer) {
DCHECK(task_runner_->BelongsToCurrentThread());
LOG(INFO) << __func__;
DCHECK(observer);
observers_.erase(observer);
observer->OnRemoved();
}
void CreateBuffersIfNeeded(int buffer_size_bytes) {
if (buffer_size_bytes <= buffer_size_) {
return;
}
LOG(INFO) << "Create new buffers, size = " << buffer_size_bytes;
base::AutoLock lock(lock_);
++buffer_tag_;
buffers_.clear();
for (int i = 0; i < kNumBuffers; ++i) {
auto buffer = std::make_unique<uint8_t[]>(buffer_size_bytes);
std::fill_n(buffer.get(), buffer_size_bytes, 0);
buffers_.push_back(std::move(buffer));
}
buffer_size_ = buffer_size_bytes;
tasks_.reserve(kMaxTasks);
new_tasks_.reserve(kMaxTasks);
}
void SendLoopbackData(int64_t timestamp,
SampleFormat format,
int sample_rate,
int num_channels,
uint8_t* data,
int length) {
CreateBuffersIfNeeded(length);
{
base::AutoLock lock(lock_);
if (buffers_.empty() || tasks_.size() >= kNumBuffers) {
LOG(ERROR) << "Can't send loopback data";
SendInterruptedLocked();
return;
}
std::unique_ptr<uint8_t[]> buffer = std::move(buffers_.back());
buffers_.pop_back();
std::copy(data, data + length, buffer.get());
tasks_.emplace_back(timestamp, format, sample_rate, num_channels,
buffer_tag_, std::move(buffer), length);
}
if (thread_) {
task_signal_.Signal();
} else {
HandleLoopbackTask(&tasks_.back());
tasks_.pop_back();
}
}
void SendInterruptedLocked() {
lock_.AssertAcquired();
if (tasks_.size() == kMaxTasks) {
return;
}
tasks_.emplace_back(0, kSampleFormatF32, 0, 0, 0, nullptr, 0);
if (thread_) {
task_signal_.Signal();
} else {
HandleLoopbackTask(&tasks_.back());
tasks_.pop_back();
}
}
void LoopbackTaskLoop() {
DCHECK(task_runner_->BelongsToCurrentThread());
{
base::AutoLock lock(lock_);
if (stop_thread_)
return;
if (tasks_.empty()) {
task_signal_.Wait();
}
new_tasks_.swap(tasks_);
}
for (auto& task : new_tasks_) {
HandleLoopbackTask(&task);
}
new_tasks_.clear();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&LoopbackHandlerImpl::LoopbackTaskLoop,
base::Unretained(this)));
}
void HandleLoopbackTask(Task* task) {
if (!task->data) {
for (auto* observer : observers_) {
observer->OnLoopbackInterrupted();
}
return;
}
for (auto* observer : observers_) {
observer->OnLoopbackAudio(task->expected_playback_time, task->format,
task->sample_rate, task->channels,
task->data.get(), task->length);
}
base::AutoLock lock(lock_);
if (task->tag == buffer_tag_) {
// Only return the buffer if the tag matches. Otherwise, the buffer size
// may have changed (so we should just delete the buffer).
buffers_.push_back(std::move(task->data));
}
}
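The tag comparison above is a generation-counted buffer pool. The idea in isolation (an illustrative sketch, not code from this CL):

  #include <cstdint>
  #include <memory>
  #include <vector>

  struct TaggedBufferPool {
    uint32_t generation = 0;
    std::vector<std::unique_ptr<uint8_t[]>> free_list;

    // Called when the required buffer size grows: orphan the old buffers.
    void Invalidate() {
      ++generation;
      free_list.clear();  // buffers still in flight keep their old tag
    }

    // Called when a consumer is done with a buffer it was handed earlier.
    void Release(uint32_t tag, std::unique_ptr<uint8_t[]> buf) {
      if (tag == generation)
        free_list.push_back(std::move(buf));  // still current: reuse
      // else: stale size; |buf| frees itself when it goes out of scope
    }
  };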
const bool external_audio_pipeline_supported_;
std::unique_ptr<base::Thread> thread_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
int buffer_size_ = 0;
base::Lock lock_;
uint32_t buffer_tag_ GUARDED_BY(lock_) = 0;
std::vector<std::unique_ptr<uint8_t[]>> buffers_ GUARDED_BY(lock_);
bool stop_thread_ GUARDED_BY(lock_) = false;
base::ConditionVariable task_signal_;
std::vector<Task> tasks_;
std::vector<Task> new_tasks_;
base::flat_set<CastMediaShlib::LoopbackAudioObserver*> observers_;
DISALLOW_COPY_AND_ASSIGN(LoopbackHandlerImpl);
};
} // namespace
// static
std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter>
LoopbackHandler::Create(
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
return std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter>(
new LoopbackHandlerImpl(std::move(task_runner)));
}
} // namespace media
} // namespace chromecast
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
#include <cstdint>
#include <memory>
#include "base/memory/scoped_refptr.h"
#include "chromecast/public/cast_media_shlib.h"
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace chromecast {
namespace media {
// Handles loopback audio from the mixer.
class LoopbackHandler {
public:
struct Deleter {
void operator()(LoopbackHandler* obj) { obj->Destroy(); }
};
static std::unique_ptr<LoopbackHandler, Deleter> Create(
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
virtual void Destroy() = 0;
virtual void SetDataSize(int data_size_bytes) = 0;
virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
virtual void AddObserver(CastMediaShlib::LoopbackAudioObserver* observer) = 0;
virtual void RemoveObserver(
CastMediaShlib::LoopbackAudioObserver* observer) = 0;
virtual void SendData(int64_t timestamp,
int sample_rate,
int num_channels,
float* data,
int frames) = 0;
virtual void SendInterrupt() = 0;
protected:
virtual ~LoopbackHandler() = default;
};
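Typical client usage, as a sketch (mirroring the StreamMixer call sites in this change; variable names are assumptions):

  auto loopback = LoopbackHandler::Create(mixer_task_runner);
  loopback->SetDataSize(frames_per_write * num_channels * sizeof(float));
  loopback->AddObserver(observer);
  // On each mixed buffer:
  loopback->SendData(expected_playback_time, sample_rate, num_channels,
                     mixed_data, frames);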
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_LOOPBACK_HANDLER_H_
......@@ -161,6 +161,10 @@ int MixerInput::FillAudioData(int num_frames,
redirected = true;
}
float* channels[num_channels_];
for (int c = 0; c < num_channels_; ++c) {
channels[c] = dest->channel(c);
}
if (first_buffer_ && redirected) {
// If the first buffer is redirected, don't provide any data to the mixer
// (we want to avoid a 'blip' of sound from the first buffer if it is being
......@@ -173,11 +177,11 @@ int MixerInput::FillAudioData(int num_frames,
filled = 0;
} else {
// Smoothly fade in from previous silence.
AudioFader::FadeInHelper(dest, filled, 0, filled, filled);
AudioFader::FadeInHelper(channels, num_channels_, filled, filled, filled);
}
} else if (redirected) {
// Smoothly fade out to silence, since output is now being redirected.
AudioFader::FadeOutHelper(dest, filled, 0, filled, filled);
AudioFader::FadeOutHelper(channels, num_channels_, filled, filled, filled);
}
previous_ended_in_silence_ = redirected;
first_buffer_ = false;
......
......@@ -33,86 +33,21 @@
#include "media/audio/audio_device_description.h"
#define RUN_ON_MIXER_THREAD(method, ...) \
do { \
mixer_task_runner_->PostTask( \
FROM_HERE, base::BindOnce(&StreamMixer::method, base::Unretained(this), \
##__VA_ARGS__));
FROM_HERE, base::BindOnce(&StreamMixer::method, \
base::Unretained(this), ##__VA_ARGS__)); \
} while (0)
#define MAKE_SURE_MIXER_THREAD(method, ...) \
if (!mixer_task_runner_->RunsTasksInCurrentSequence()) { \
RUN_ON_MIXER_THREAD(method, ##__VA_ARGS__) \
return; \
}
#define RUN_ON_LOOPBACK_THREAD(method, ...) \
loopback_task_runner_->PostTask( \
FROM_HERE, base::BindOnce(&StreamMixer::method, base::Unretained(this), \
##__VA_ARGS__));
#define MAKE_SURE_LOOPBACK_THREAD(method, ...) \
if (!loopback_task_runner_->RunsTasksInCurrentSequence()) { \
RUN_ON_LOOPBACK_THREAD(method, ##__VA_ARGS__) \
RUN_ON_MIXER_THREAD(method, ##__VA_ARGS__); \
return; \
}
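These macros implement the standard hop-to-owner-thread pattern: if called on the wrong thread, re-post the same call to the mixer task runner and return. A sketch of a typical call site (the method body is elided):

  void StreamMixer::SetVolume(AudioContentType type, float level) {
    MAKE_SURE_MIXER_THREAD(SetVolume, type, level);
    // From here on, execution is guaranteed to be on the mixer thread.
    // ... actual volume bookkeeping elided ...
  }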
namespace chromecast {
namespace media {
constexpr base::TimeDelta kMixerThreadCheckTimeout =
base::TimeDelta::FromSeconds(10);
constexpr base::TimeDelta kHealthCheckInterval =
base::TimeDelta::FromSeconds(5);
class StreamMixer::ExternalLoopbackAudioObserver
: public CastMediaShlib::LoopbackAudioObserver {
public:
ExternalLoopbackAudioObserver(StreamMixer* mixer) : mixer_(mixer) {}
void OnLoopbackAudio(int64_t timestamp,
SampleFormat format,
int sample_rate,
int num_channels,
uint8_t* data,
int length) override {
auto loopback_data = std::make_unique<uint8_t[]>(length);
std::copy(data, data + length, loopback_data.get());
mixer_->PostLoopbackData(timestamp, format, sample_rate, num_channels,
std::move(loopback_data), length);
}
void OnLoopbackInterrupted() override { mixer_->PostLoopbackInterrupted(); }
void OnRemoved() override {
// We expect that the external pipeline will not invoke any other callbacks
// after this one.
delete this;
// No need to pipe this; StreamMixer will let the other observer know when
// it's being removed.
}
private:
StreamMixer* const mixer_;
};
class StreamMixer::ExternalMediaVolumeChangeRequestObserver
: public StreamMixer::BaseExternalMediaVolumeChangeRequestObserver {
public:
ExternalMediaVolumeChangeRequestObserver(StreamMixer* mixer) : mixer_(mixer) {
DCHECK(mixer_);
}
// ExternalAudioPipelineShlib::ExternalMediaVolumeChangeRequestObserver
// implementation:
void OnVolumeChangeRequest(float new_volume) override {
mixer_->SetVolume(AudioContentType::kMedia, new_volume);
}
void OnMuteChangeRequest(bool new_muted) override {
mixer_->SetMuted(AudioContentType::kMedia, new_muted);
}
private:
StreamMixer* const mixer_;
};
namespace {
const int kNumInputChannels = 2;
......@@ -132,6 +67,11 @@ const int kMediaDuckFadeMs = 150;
const int kMediaUnduckFadeMs = 700;
const int kDefaultFilterFrameAlignment = 64;
constexpr base::TimeDelta kMixerThreadCheckTimeout =
base::TimeDelta::FromSeconds(10);
constexpr base::TimeDelta kHealthCheckInterval =
base::TimeDelta::FromSeconds(5);
int GetFixedOutputSampleRate() {
int fixed_sample_rate = GetSwitchValueNonNegativeInt(
switches::kAudioOutputSampleRate, MixerOutputStream::kInvalidSampleRate);
......@@ -160,13 +100,41 @@ base::TimeDelta GetNoInputCloseTimeout() {
void UseHighPriority() {
#if (!defined(OS_FUCHSIA) && !defined(OS_ANDROID))
const struct sched_param kAudioPrio = {10};
pthread_setschedparam(pthread_self(), SCHED_RR, &kAudioPrio);
struct sched_param params;
params.sched_priority = sched_get_priority_max(SCHED_FIFO);
pthread_setschedparam(pthread_self(), SCHED_FIFO, &params);
int policy = 0;
struct sched_param actual_params;
pthread_getschedparam(pthread_self(), &policy, &actual_params);
LOG(INFO) << "Actual priority = " << actual_params.sched_priority
<< ", policy = " << policy;
#endif
}
} // namespace
class StreamMixer::ExternalMediaVolumeChangeRequestObserver
: public StreamMixer::BaseExternalMediaVolumeChangeRequestObserver {
public:
ExternalMediaVolumeChangeRequestObserver(StreamMixer* mixer) : mixer_(mixer) {
DCHECK(mixer_);
}
// ExternalAudioPipelineShlib::ExternalMediaVolumeChangeRequestObserver
// implementation:
void OnVolumeChangeRequest(float new_volume) override {
mixer_->SetVolume(AudioContentType::kMedia, new_volume);
}
void OnMuteChangeRequest(bool new_muted) override {
mixer_->SetMuted(AudioContentType::kMedia, new_muted);
}
private:
StreamMixer* const mixer_;
};
float StreamMixer::VolumeInfo::GetEffectiveVolume() {
return std::min(volume, limit);
}
......@@ -191,6 +159,7 @@ StreamMixer::StreamMixer(
std::make_unique<PostProcessingPipelineFactoryImpl>()),
mixer_thread_(std::move(mixer_thread)),
mixer_task_runner_(std::move(mixer_task_runner)),
loopback_handler_(LoopbackHandler::Create(mixer_task_runner_)),
num_output_channels_(
GetSwitchValueNonNegativeInt(switches::kAudioOutputChannels,
kNumInputChannels)),
......@@ -222,20 +191,12 @@ StreamMixer::StreamMixer(
mixer_task_runner_ = mixer_thread_->task_runner();
mixer_task_runner_->PostTask(FROM_HERE, base::BindOnce(&UseHighPriority));
loopback_thread_ = std::make_unique<base::Thread>("CMA mixer loopback");
base::Thread::Options loopback_options;
loopback_options.priority = base::ThreadPriority::REALTIME_AUDIO;
loopback_thread_->StartWithOptions(loopback_options);
loopback_task_runner_ = loopback_thread_->task_runner();
health_checker_ = std::make_unique<ThreadHealthChecker>(
mixer_task_runner_, loopback_task_runner_, kHealthCheckInterval,
kMixerThreadCheckTimeout,
mixer_task_runner_, loopback_handler_->GetTaskRunner(),
kHealthCheckInterval, kMixerThreadCheckTimeout,
base::BindRepeating(&StreamMixer::OnHealthCheckFailed,
base::Unretained(this)));
LOG(INFO) << "Mixer health checker started";
} else {
loopback_task_runner_ = mixer_task_runner_;
}
if (fixed_output_sample_rate_ != MixerOutputStream::kInvalidSampleRate) {
......@@ -255,10 +216,6 @@ StreamMixer::StreamMixer(
std::make_unique<ExternalMediaVolumeChangeRequestObserver>(this);
ExternalAudioPipelineShlib::AddExternalMediaVolumeChangeRequestObserver(
external_volume_observer_.get());
external_loopback_audio_observer_ =
std::make_unique<ExternalLoopbackAudioObserver>(this);
ExternalAudioPipelineShlib::AddExternalLoopbackAudioObserver(
external_loopback_audio_observer_.get());
}
}
......@@ -326,6 +283,7 @@ void StreamMixer::CreatePostProcessors(CastMediaShlib::ResultCallback callback,
}
CHECK(mixer_pipeline_) << "Unable to load post processor config!";
UpdateLoopbackChannelCount();
CHECK(PostProcessorsHaveCorrectNumOutputs());
if (state_ == kStateRunning) {
......@@ -337,6 +295,12 @@ void StreamMixer::CreatePostProcessors(CastMediaShlib::ResultCallback callback,
}
}
void StreamMixer::UpdateLoopbackChannelCount() {
loopback_channel_count_ = num_output_channels_ == 1
? 1
: mixer_pipeline_->GetLoopbackChannelCount();
}
void StreamMixer::ResetPostProcessorsForTest(
std::unique_ptr<PostProcessingPipelineFactory> pipeline_factory,
const std::string& pipeline_json) {
......@@ -350,13 +314,11 @@ void StreamMixer::ResetPostProcessorsForTest(
void StreamMixer::SetNumOutputChannelsForTest(int num_output_channels) {
DCHECK(mixer_task_runner_->BelongsToCurrentThread());
num_output_channels_ = num_output_channels;
UpdateLoopbackChannelCount();
}
StreamMixer::~StreamMixer() {
LOG(INFO) << __func__;
if (loopback_thread_) {
loopback_thread_->Stop();
}
mixer_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&StreamMixer::FinalizeOnMixerThread,
......@@ -366,9 +328,6 @@ StreamMixer::~StreamMixer() {
}
if (external_volume_observer_) {
ExternalAudioPipelineShlib::RemoveExternalLoopbackAudioObserver(
external_loopback_audio_observer_.get());
external_loopback_audio_observer_.release();
ExternalAudioPipelineShlib::RemoveExternalMediaVolumeChangeRequestObserver(
external_volume_observer_.get());
}
......@@ -421,6 +380,9 @@ void StreamMixer::Start() {
output_->OptimalWriteFramesCount() & ~(filter_frame_alignment_ - 1);
CHECK_GT(frames_per_write_, 0);
loopback_handler_->SetDataSize(frames_per_write_ * loopback_channel_count_ *
sizeof(float));
// Initialize filters.
mixer_pipeline_->Initialize(output_samples_per_second_, frames_per_write_);
......@@ -447,14 +409,14 @@ void StreamMixer::Start() {
}
state_ = kStateRunning;
playback_loop_task_ = base::BindRepeating(&StreamMixer::PlaybackLoop,
weak_factory_.GetWeakPtr());
// Write one buffer of silence to get correct rendering delay in the
// postprocessors.
WriteOneBuffer();
mixer_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&StreamMixer::PlaybackLoop, weak_factory_.GetWeakPtr()));
mixer_task_runner_->PostTask(FROM_HERE, playback_loop_task_);
}
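Caching the bound PlaybackLoop callback in playback_loop_task_ is one of the per-iteration allocations this commit removes: copies of a base::RepeatingClosure share one ref-counted bind state, while the old code constructed a fresh BindOnce callback for every trip through the loop. A standard-C++ sketch of the shape of the pattern (std::function stands in for the closure; unlike base::RepeatingClosure, copying a capturing std::function may itself allocate):

```cpp
#include <functional>
#include <iostream>
#include <queue>

int main() {
  std::queue<std::function<void()>> task_queue;  // Stand-in task runner.
  int iterations = 0;

  // Bound once, before playback starts; analogous to playback_loop_task_.
  std::function<void()> playback_loop;
  playback_loop = [&] {
    if (++iterations >= 3)
      return;                        // Demo stop condition.
    task_queue.push(playback_loop);  // Re-post the cached task; no rebinding.
  };

  task_queue.push(playback_loop);
  while (!task_queue.empty()) {
    auto task = std::move(task_queue.front());
    task_queue.pop();
    task();
  }
  std::cout << "ran " << iterations << " iterations\n";
}
```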
void StreamMixer::Stop() {
......@@ -462,8 +424,7 @@ void StreamMixer::Stop() {
DCHECK(mixer_task_runner_->BelongsToCurrentThread());
weak_factory_.InvalidateWeakPtrs();
PostLoopbackInterrupted();
loopback_handler_->SendInterrupt();
if (output_) {
output_->Stop();
......@@ -648,9 +609,7 @@ void StreamMixer::PlaybackLoop() {
WriteOneBuffer();
mixer_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&StreamMixer::PlaybackLoop, weak_factory_.GetWeakPtr()));
mixer_task_runner_->PostTask(FROM_HERE, playback_loop_task_);
}
void StreamMixer::WriteOneBuffer() {
......@@ -685,29 +644,20 @@ void StreamMixer::WriteMixedPcm(int frames, int64_t expected_playback_time) {
// Downmix reference signal to mono to reduce CPU load.
int mix_channel_count = mixer_pipeline_->GetLoopbackChannelCount();
int loopback_channel_count = mix_channel_count;
float* mixed_data = mixer_pipeline_->GetLoopbackOutput();
if (num_output_channels_ == 1 && mix_channel_count != 1) {
if (loopback_channel_count_ == 1 && mix_channel_count != 1) {
MixToMono(mixed_data, frames, mix_channel_count);
loopback_channel_count = 1;
}
// Hard limit to [-1.0, 1.0]
for (int i = 0; i < frames * loopback_channel_count; ++i) {
for (int i = 0; i < frames * loopback_channel_count_; ++i) {
// TODO(bshaya): Warn about clipping here.
mixed_data[i] = std::min(1.0f, std::max(-1.0f, mixed_data[i]));
}
if (!external_audio_pipeline_supported_) {
size_t length = frames * loopback_channel_count * sizeof(float);
auto loopback_data = std::make_unique<uint8_t[]>(length);
uint8_t* data = reinterpret_cast<uint8_t*>(mixed_data);
std::copy(data, data + length, loopback_data.get());
PostLoopbackData(expected_playback_time, kSampleFormatF32,
output_samples_per_second_, loopback_channel_count,
std::move(loopback_data), length);
}
loopback_handler_->SendData(expected_playback_time,
output_samples_per_second_,
loopback_channel_count_, mixed_data, frames);
// Drop extra channels from linearize filter if necessary.
float* linearized_data = mixer_pipeline_->GetOutput();
......@@ -726,7 +676,7 @@ void StreamMixer::WriteMixedPcm(int frames, int64_t expected_playback_time) {
&playback_interrupted);
if (playback_interrupted) {
PostLoopbackInterrupted();
loopback_handler_->SendInterrupt();
}
}
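WriteMixedPcm() now downmixes and hard-limits in place, then hands the buffer straight to loopback_handler_->SendData() with no intermediate copy. A hypothetical standalone version of the downmix-plus-clamp step, assuming MixToMono() averages the channels (its body is collapsed out of this diff):

```cpp
#include <algorithm>

// Averages interleaved |channels|-channel float audio down into the first
// |frames| floats of |data|, then clamps each sample to [-1.0, 1.0].
void MixToMonoAndClamp(float* data, int frames, int channels) {
  for (int f = 0; f < frames; ++f) {
    float sum = 0.0f;
    for (int c = 0; c < channels; ++c)
      sum += data[f * channels + c];
    data[f] = std::min(1.0f, std::max(-1.0f, sum / channels));
  }
}
```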
......@@ -749,26 +699,12 @@ void StreamMixer::MixToMono(float* data, int frames, int channels) {
void StreamMixer::AddLoopbackAudioObserver(
CastMediaShlib::LoopbackAudioObserver* observer) {
MAKE_SURE_LOOPBACK_THREAD(AddLoopbackAudioObserver, observer);
LOG(INFO) << __func__;
DCHECK(observer);
loopback_observers_.insert(observer);
loopback_handler_->AddObserver(observer);
}
void StreamMixer::RemoveLoopbackAudioObserver(
CastMediaShlib::LoopbackAudioObserver* observer) {
// Always post a task to avoid synchronous deletion.
RUN_ON_LOOPBACK_THREAD(RemoveLoopbackAudioObserverOnThread, observer);
}
void StreamMixer::RemoveLoopbackAudioObserverOnThread(
CastMediaShlib::LoopbackAudioObserver* observer) {
DCHECK(loopback_task_runner_->BelongsToCurrentThread());
LOG(INFO) << __func__;
loopback_observers_.erase(observer);
observer->OnRemoved();
loopback_handler_->RemoveObserver(observer);
}
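Observer bookkeeping (the flat_set, the cross-thread posting macros, and the OnRemoved() handshake) moves wholesale into LoopbackHandler; StreamMixer now just forwards. A minimal sketch of the list the handler is assumed to keep, built from the observer calls visible elsewhere in this diff (the real interface is CastMediaShlib::LoopbackAudioObserver in chromecast/public/cast_media_shlib.h):

```cpp
#include <cstdint>
#include <set>

class Observer {
 public:
  virtual void OnLoopbackAudio(int64_t expected_playback_time,
                               int sample_format,  // SampleFormat in the real API.
                               int sample_rate,
                               int num_channels,
                               uint8_t* data,
                               int length) = 0;
  virtual void OnLoopbackInterrupted() = 0;
  virtual void OnRemoved() = 0;

 protected:
  virtual ~Observer() = default;
};

// Assumed shape of the handler's list; every method must run on the
// handler's own task runner.
class LoopbackObserverList {
 public:
  void Add(Observer* o) { observers_.insert(o); }
  void Remove(Observer* o) {
    observers_.erase(o);
    o->OnRemoved();  // Notify after erasing, as the removed code above did.
  }
  void SendInterrupt() {
    for (Observer* o : observers_)
      o->OnLoopbackInterrupted();
  }

 private:
  std::set<Observer*> observers_;
};
```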
void StreamMixer::AddAudioOutputRedirector(
......@@ -815,37 +751,6 @@ void StreamMixer::ModifyAudioOutputRedirection(
}
}
void StreamMixer::PostLoopbackData(int64_t expected_playback_time,
SampleFormat format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length) {
RUN_ON_LOOPBACK_THREAD(SendLoopbackData, expected_playback_time, format,
sample_rate, channels, std::move(data), length);
}
void StreamMixer::SendLoopbackData(int64_t expected_playback_time,
SampleFormat format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length) {
DCHECK(loopback_task_runner_->BelongsToCurrentThread());
for (CastMediaShlib::LoopbackAudioObserver* observer : loopback_observers_) {
observer->OnLoopbackAudio(expected_playback_time, format, sample_rate,
channels, data.get(), length);
}
}
void StreamMixer::PostLoopbackInterrupted() {
MAKE_SURE_LOOPBACK_THREAD(PostLoopbackInterrupted);
for (auto* observer : loopback_observers_) {
observer->OnLoopbackInterrupted();
}
}
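The three deleted functions above were the hot-path cost named in the commit title: every buffer write allocated a fresh uint8_t[] and copied the loopback samples into it before posting to the loopback thread. With LoopbackHandler, Start() sizes a buffer once via SetDataSize(frames_per_write_ * loopback_channel_count_ * sizeof(float)) and each WriteMixedPcm() refills the same storage through SendData(). A hypothetical sketch of that reuse, not the real LoopbackHandler implementation:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

class LoopbackBuffer {
 public:
  // Called once from Start(); see SetDataSize() above.
  void SetDataSize(size_t bytes) { buffer_.resize(bytes); }

  // Called on every write; reuses the same storage instead of allocating.
  uint8_t* Fill(const float* data, size_t frames, size_t channels) {
    size_t bytes = frames * channels * sizeof(float);
    // Assumes SetDataSize() reserved at least |bytes|.
    std::memcpy(buffer_.data(), data, bytes);
    return buffer_.data();
  }

 private:
  std::vector<uint8_t> buffer_;
};
```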
void StreamMixer::SetVolume(AudioContentType type, float level) {
MAKE_SURE_MIXER_THREAD(SetVolume, type, level);
DCHECK(type != AudioContentType::kOther);
......@@ -935,17 +840,14 @@ bool StreamMixer::PostProcessorsHaveCorrectNumOutputs() {
num_output_channels_ == 1 ||
num_output_channels_ == mixer_pipeline_->GetOutputChannelCount();
if (!correct_num_outputs) {
LOG(WARNING) << "PostProcessor configuration channel count does not match "
LOG(ERROR) << "PostProcessor configuration channel count does not match "
<< "command line flag: "
<< mixer_pipeline_->GetOutputChannelCount() << " vs "
<< num_output_channels_;
return false;
}
int loopback_channel_count = num_output_channels_ == 1
? 1
: mixer_pipeline_->GetLoopbackChannelCount();
if (loopback_channel_count > 2) {
LOG(WARNING) << "PostProcessor configuration has " << loopback_channel_count
if (loopback_channel_count_ > 2) {
LOG(ERROR) << "PostProcessor configuration has " << loopback_channel_count_
<< " channels after 'mix' group, but only 1 or 2 are allowed.";
return false;
}
......
......@@ -12,6 +12,7 @@
#include <utility>
#include <vector>
#include "base/callback.h"
#include "base/containers/flat_map.h"
#include "base/containers/flat_set.h"
#include "base/macros.h"
......@@ -20,6 +21,7 @@
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "chromecast/media/cma/backend/loopback_handler.h"
#include "chromecast/media/cma/backend/mixer_input.h"
#include "chromecast/media/cma/backend/mixer_pipeline.h"
#include "chromecast/public/cast_media_shlib.h"
......@@ -124,7 +126,6 @@ class StreamMixer {
int num_output_channels() const { return num_output_channels_; }
private:
class ExternalLoopbackAudioObserver;
class BaseExternalMediaVolumeChangeRequestObserver
: public ExternalAudioPipelineShlib::
ExternalMediaVolumeChangeRequestObserver {
......@@ -160,30 +161,15 @@ class StreamMixer {
void RemoveInputOnThread(MixerInput::Source* input_source);
void SetCloseTimeout();
void UpdatePlayoutChannel();
void UpdateLoopbackChannelCount();
void PlaybackLoop();
void WriteOneBuffer();
void WriteMixedPcm(int frames, int64_t expected_playback_time);
void MixToMono(float* data, int frames, int channels);
void RemoveLoopbackAudioObserverOnThread(
CastMediaShlib::LoopbackAudioObserver* observer);
void RemoveAudioOutputRedirectorOnThread(AudioOutputRedirector* redirector);
void PostLoopbackData(int64_t expected_playback_time,
SampleFormat sample_format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length);
void PostLoopbackInterrupted();
void SendLoopbackData(int64_t expected_playback_time,
SampleFormat sample_format,
int sample_rate,
int channels,
std::unique_ptr<uint8_t[]> data,
int length);
int GetSampleRateForDeviceId(const std::string& device);
MediaPipelineBackend::AudioDecoder::RenderingDelay GetTotalRenderingDelay(
......@@ -195,8 +181,7 @@ class StreamMixer {
std::unique_ptr<MixerPipeline> mixer_pipeline_;
std::unique_ptr<base::Thread> mixer_thread_;
scoped_refptr<base::SingleThreadTaskRunner> mixer_task_runner_;
std::unique_ptr<base::Thread> loopback_thread_;
scoped_refptr<base::SingleThreadTaskRunner> loopback_task_runner_;
std::unique_ptr<LoopbackHandler, LoopbackHandler::Deleter> loopback_handler_;
std::unique_ptr<ThreadHealthChecker> health_checker_;
void OnHealthCheckFailed();
......@@ -216,16 +201,16 @@ class StreamMixer {
int frames_per_write_ = 0;
int redirector_samples_per_second_ = 0;
int redirector_frames_per_write_ = 0;
int loopback_channel_count_ = 0;
State state_;
base::TimeTicks close_timestamp_;
base::RepeatingClosure playback_loop_task_;
base::flat_map<MixerInput::Source*, std::unique_ptr<MixerInput>> inputs_;
base::flat_map<MixerInput::Source*, std::unique_ptr<MixerInput>>
ignored_inputs_;
base::flat_set<CastMediaShlib::LoopbackAudioObserver*> loopback_observers_;
base::flat_map<AudioContentType, VolumeInfo> volume_info_;
base::flat_map<AudioOutputRedirector*, std::unique_ptr<AudioOutputRedirector>>
......@@ -234,8 +219,6 @@ class StreamMixer {
const bool external_audio_pipeline_supported_;
std::unique_ptr<BaseExternalMediaVolumeChangeRequestObserver>
external_volume_observer_;
std::unique_ptr<ExternalLoopbackAudioObserver>
external_loopback_audio_observer_;
base::WeakPtrFactory<StreamMixer> weak_factory_;
......
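The new loopback_handler_ member in the header diff above is held through a unique_ptr with a custom LoopbackHandler::Deleter, a common pattern for an object that must be destroyed on its own task runner rather than on whichever thread drops the pointer; presumably the real Deleter posts to LoopbackHandler::GetTaskRunner(). A standalone sketch of the pattern, with a std::function standing in for the task runner:

```cpp
#include <functional>
#include <memory>

class Handler {
 public:
  using PostTask = std::function<void(std::function<void()>)>;

  explicit Handler(PostTask post) : post_(std::move(post)) {}

  struct Deleter {
    void operator()(Handler* h) const {
      // Hop to the owning sequence before deleting, so tasks already queued
      // there never touch a destroyed object.
      PostTask post = h->post_;  // Copy first; |h| dies inside the task.
      post([h] { delete h; });
    }
  };

 private:
  PostTask post_;
};

// Matches the member declaration style used in the header above.
using HandlerPtr = std::unique_ptr<Handler, Handler::Deleter>;
```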
......@@ -89,14 +89,9 @@ class ExternalAudioPipelineTest : public ::testing::Test {
}
// Run async operations in the stream mixer.
void RunLoopForMixer() {
// SendLoopbackData.
base::RunLoop run_loop1;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop1.QuitClosure());
run_loop1.Run();
// Playbackloop.
base::RunLoop run_loop2;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop2.QuitClosure());
run_loop2.Run();
base::RunLoop run_loop;
message_loop_->task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
run_loop.Run();
}
protected:
......@@ -201,11 +196,10 @@ TEST_F(ExternalAudioPipelineTest, ExternalAudioPipelineLoopbackData) {
mixer_->AddLoopbackAudioObserver(&mock_loopback_observer);
mixer_->AddInput(&input);
RunLoopForMixer();
// Send data to the stream mixer.
input.SetData(std::move(data));
RunLoopForMixer();
// Get actual data from our mocked loopback observer.
......