Commit 2f20fd8e authored by Ken MacKay, committed by Commit Bot

Modify postprocessor volume metadata

Merge-With: eureka-internal/441076
Merge-With: eureka-internal/441060

Bug: internal b/165807391
Test: cast_audio_backend_unittests
Change-Id: I2ef33d20f5aa3b5cda8db238d575d318c02092d0
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2369581
Reviewed-by: Yuchen Liu <yucliu@chromium.org>
Commit-Queue: Kenneth MacKay <kmackay@chromium.org>
Cr-Commit-Position: refs/heads/master@{#801147}
parent d927d209
...@@ -112,7 +112,7 @@ void FilterGroup::Initialize(const AudioPostProcessor2::Config& output_config) { ...@@ -112,7 +112,7 @@ void FilterGroup::Initialize(const AudioPostProcessor2::Config& output_config) {
// Run a buffer of 0's to initialize rendering delay. // Run a buffer of 0's to initialize rendering delay.
std::fill_n(interleaved_.data(), interleaved_.size(), 0.0f); std::fill_n(interleaved_.data(), interleaved_.size(), 0.0f);
delay_seconds_ = post_processing_pipeline_->ProcessFrames( delay_seconds_ = post_processing_pipeline_->ProcessFrames(
interleaved_.data(), input_frames_per_write_, last_volume_, interleaved_.data(), input_frames_per_write_, last_volume_, last_volume_,
true /* is_silence */); true /* is_silence */);
} }
...@@ -163,6 +163,7 @@ float FilterGroup::MixAndFilter( ...@@ -163,6 +163,7 @@ float FilterGroup::MixAndFilter(
DCHECK_EQ(num_output_frames, output_config_.output_frames_per_write); DCHECK_EQ(num_output_frames, output_config_.output_frames_per_write);
float volume = 0.0f; float volume = 0.0f;
float target_volume = 0.0f;
AudioContentType content_type = static_cast<AudioContentType>(-1); AudioContentType content_type = static_cast<AudioContentType>(-1);
rendering_delay.delay_microseconds += GetRenderingDelayMicroseconds(); rendering_delay.delay_microseconds += GetRenderingDelayMicroseconds();
...@@ -172,6 +173,8 @@ float FilterGroup::MixAndFilter( ...@@ -172,6 +173,8 @@ float FilterGroup::MixAndFilter(
for (const auto& filter_group : mixed_inputs_) { for (const auto& filter_group : mixed_inputs_) {
volume = std::max(volume, filter_group.group->MixAndFilter( volume = std::max(volume, filter_group.group->MixAndFilter(
input_frames_per_write_, rendering_delay)); input_frames_per_write_, rendering_delay));
target_volume =
std::max(target_volume, filter_group.group->target_volume());
content_type = std::max(content_type, filter_group.group->content_type()); content_type = std::max(content_type, filter_group.group->content_type());
} }
...@@ -203,6 +206,7 @@ float FilterGroup::MixAndFilter( ...@@ -203,6 +206,7 @@ float FilterGroup::MixAndFilter(
} }
volume = std::max(volume, input->InstantaneousVolume()); volume = std::max(volume, input->InstantaneousVolume());
target_volume = std::max(volume, input->TargetVolume());
content_type = std::max(content_type, input->content_type()); content_type = std::max(content_type, input->content_type());
} }
} }
...@@ -233,6 +237,7 @@ float FilterGroup::MixAndFilter( ...@@ -233,6 +237,7 @@ float FilterGroup::MixAndFilter(
bool is_silence = (volume == 0.0f); bool is_silence = (volume == 0.0f);
if (!is_silence) { if (!is_silence) {
last_volume_ = volume; last_volume_ = volume;
target_volume_ = target_volume;
DCHECK_NE(-1, static_cast<int>(content_type)) DCHECK_NE(-1, static_cast<int>(content_type))
<< "Got frames without content type."; << "Got frames without content type.";
if (content_type != content_type_) { if (content_type != content_type_) {
...@@ -242,7 +247,8 @@ float FilterGroup::MixAndFilter( ...@@ -242,7 +247,8 @@ float FilterGroup::MixAndFilter(
} }
delay_seconds_ = post_processing_pipeline_->ProcessFrames( delay_seconds_ = post_processing_pipeline_->ProcessFrames(
interleaved_.data(), input_frames_per_write_, last_volume_, is_silence); interleaved_.data(), input_frames_per_write_, last_volume_,
target_volume_, is_silence);
return last_volume_; return last_volume_;
} }
......
...@@ -52,6 +52,7 @@ class FilterGroup { ...@@ -52,6 +52,7 @@ class FilterGroup {
int num_channels() const { return num_channels_; } int num_channels() const { return num_channels_; }
float last_volume() const { return last_volume_; } float last_volume() const { return last_volume_; }
float target_volume() const { return target_volume_; }
std::string name() const { return name_; } std::string name() const { return name_; }
AudioContentType content_type() const { return content_type_; } AudioContentType content_type() const { return content_type_; }
int input_frames_per_write() const { return input_frames_per_write_; } int input_frames_per_write() const { return input_frames_per_write_; }
...@@ -152,7 +153,8 @@ class FilterGroup { ...@@ -152,7 +153,8 @@ class FilterGroup {
int input_samples_per_second_ = 0; int input_samples_per_second_ = 0;
int input_frames_per_write_ = 0; int input_frames_per_write_ = 0;
int frames_zeroed_ = 0; int frames_zeroed_ = 0;
float last_volume_ = 0.0; float last_volume_ = 0.0f;
float target_volume_ = 0.0f;
double delay_seconds_ = 0; double delay_seconds_ = 0;
MediaPipelineBackend::AudioDecoder::RenderingDelay rendering_delay_to_output_; MediaPipelineBackend::AudioDecoder::RenderingDelay rendering_delay_to_output_;
AudioContentType content_type_ = AudioContentType::kMedia; AudioContentType content_type_ = AudioContentType::kMedia;
......
...@@ -41,16 +41,17 @@ class MockPostProcessingPipeline : public PostProcessingPipeline { ...@@ -41,16 +41,17 @@ class MockPostProcessingPipeline : public PostProcessingPipeline {
explicit MockPostProcessingPipeline(int num_output_channels) explicit MockPostProcessingPipeline(int num_output_channels)
: num_output_channels_(num_output_channels) { : num_output_channels_(num_output_channels) {
ON_CALL(*this, ProcessFrames(_, _, _, _)) ON_CALL(*this, ProcessFrames(_, _, _, _, _))
.WillByDefault( .WillByDefault(
testing::Invoke(this, &MockPostProcessingPipeline::StorePtr)); testing::Invoke(this, &MockPostProcessingPipeline::StorePtr));
} }
~MockPostProcessingPipeline() override {} ~MockPostProcessingPipeline() override {}
MOCK_METHOD4(ProcessFrames, MOCK_METHOD5(ProcessFrames,
double(float* data, double(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence)); bool is_silence));
MOCK_METHOD2(SetPostProcessorConfig, MOCK_METHOD2(SetPostProcessorConfig,
void(const std::string& name, const std::string& config)); void(const std::string& name, const std::string& config));
...@@ -71,6 +72,7 @@ class MockPostProcessingPipeline : public PostProcessingPipeline { ...@@ -71,6 +72,7 @@ class MockPostProcessingPipeline : public PostProcessingPipeline {
double StorePtr(float* data, double StorePtr(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence) { bool is_silence) {
output_buffer_ = data; output_buffer_ = data;
return 0; return 0;
...@@ -89,17 +91,18 @@ class InvertChannelPostProcessor : public MockPostProcessingPipeline { ...@@ -89,17 +91,18 @@ class InvertChannelPostProcessor : public MockPostProcessingPipeline {
explicit InvertChannelPostProcessor(int channels, int channel_to_invert) explicit InvertChannelPostProcessor(int channels, int channel_to_invert)
: MockPostProcessingPipeline(channels), : MockPostProcessingPipeline(channels),
channel_to_invert_(channel_to_invert) { channel_to_invert_(channel_to_invert) {
ON_CALL(*this, ProcessFrames(_, _, _, _)) ON_CALL(*this, ProcessFrames(_, _, _, _, _))
.WillByDefault(testing::Invoke( .WillByDefault(testing::Invoke(
this, &InvertChannelPostProcessor::DoInvertChannel)); this, &InvertChannelPostProcessor::DoInvertChannel));
} }
~InvertChannelPostProcessor() override {} ~InvertChannelPostProcessor() override {}
MOCK_METHOD4(ProcessFrames, MOCK_METHOD5(ProcessFrames,
double(float* data, double(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence)); bool is_silence));
MOCK_METHOD2(SetPostProcessorConfig, MOCK_METHOD2(SetPostProcessorConfig,
void(const std::string& name, const std::string& config)); void(const std::string& name, const std::string& config));
...@@ -108,6 +111,7 @@ class InvertChannelPostProcessor : public MockPostProcessingPipeline { ...@@ -108,6 +111,7 @@ class InvertChannelPostProcessor : public MockPostProcessingPipeline {
int DoInvertChannel(float* data, int DoInvertChannel(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence) { bool is_silence) {
output_buffer_ = data; output_buffer_ = data;
for (int fr = 0; fr < num_frames; ++fr) { for (int fr = 0; fr < num_frames; ++fr) {
...@@ -214,7 +218,7 @@ class FilterGroupTest : public testing::Test { ...@@ -214,7 +218,7 @@ class FilterGroupTest : public testing::Test {
TEST_F(FilterGroupTest, Passthrough) { TEST_F(FilterGroupTest, Passthrough) {
MakeFilterGroup(std::make_unique<NiceMock<MockPostProcessingPipeline>>()); MakeFilterGroup(std::make_unique<NiceMock<MockPostProcessingPipeline>>());
EXPECT_CALL(*post_processor_, ProcessFrames(_, kInputFrames, _, false)); EXPECT_CALL(*post_processor_, ProcessFrames(_, kInputFrames, _, _, false));
filter_group_->MixAndFilter(kInputFrames, RenderingDelay()); filter_group_->MixAndFilter(kInputFrames, RenderingDelay());
AssertPassthrough(); AssertPassthrough();
......
...@@ -30,7 +30,7 @@ namespace media { ...@@ -30,7 +30,7 @@ namespace media {
namespace { namespace {
const int64_t kMicrosecondsPerSecond = 1000 * 1000; const int64_t kMicrosecondsPerSecond = 1000 * 1000;
const int kDefaultSlewTimeMs = 15; const int kDefaultSlewTimeMs = 50;
const int kDefaultFillBufferFrames = 2048; const int kDefaultFillBufferFrames = 2048;
int RoundUpMultiple(int value, int multiple) { int RoundUpMultiple(int value, int multiple) {
......
...@@ -21,7 +21,7 @@ MockPostProcessor::MockPostProcessor(MockPostProcessorFactory* factory, ...@@ -21,7 +21,7 @@ MockPostProcessor::MockPostProcessor(MockPostProcessorFactory* factory,
DCHECK(factory_); DCHECK(factory_);
CHECK(factory_->instances.insert({name_, this}).second); CHECK(factory_->instances.insert({name_, this}).second);
ON_CALL(*this, ProcessFrames(_, _, _, _)) ON_CALL(*this, ProcessFrames(_, _, _, _, _))
.WillByDefault( .WillByDefault(
testing::Invoke(this, &MockPostProcessor::DoProcessFrames)); testing::Invoke(this, &MockPostProcessor::DoProcessFrames));
......
...@@ -29,10 +29,11 @@ class MockPostProcessor : public PostProcessingPipeline { ...@@ -29,10 +29,11 @@ class MockPostProcessor : public PostProcessingPipeline {
const base::Value* filter_description_list, const base::Value* filter_description_list,
int channels); int channels);
~MockPostProcessor() override; ~MockPostProcessor() override;
MOCK_METHOD4(ProcessFrames, MOCK_METHOD5(ProcessFrames,
double(float* data, double(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence)); bool is_silence));
MOCK_METHOD1(SetContentType, void(AudioContentType)); MOCK_METHOD1(SetContentType, void(AudioContentType));
bool SetOutputConfig(const AudioPostProcessor2::Config& config) override { bool SetOutputConfig(const AudioPostProcessor2::Config& config) override {
...@@ -54,6 +55,7 @@ class MockPostProcessor : public PostProcessingPipeline { ...@@ -54,6 +55,7 @@ class MockPostProcessor : public PostProcessingPipeline {
double DoProcessFrames(float* data, double DoProcessFrames(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence) { bool is_silence) {
output_buffer_ = data; output_buffer_ = data;
return static_cast<double>(rendering_delay_frames_) / sample_rate_; return static_cast<double>(rendering_delay_frames_) / sample_rate_;
......
...@@ -27,6 +27,7 @@ class PostProcessingPipeline { ...@@ -27,6 +27,7 @@ class PostProcessingPipeline {
virtual double ProcessFrames(float* data, virtual double ProcessFrames(float* data,
int num_frames, int num_frames,
float current_multiplier, float current_multiplier,
float target_volume,
bool is_silence) = 0; bool is_silence) = 0;
virtual float* GetOutputBuffer() = 0; virtual float* GetOutputBuffer() = 0;
virtual int NumOutputChannels() const = 0; virtual int NumOutputChannels() const = 0;
......
...@@ -119,6 +119,7 @@ PostProcessingPipelineImpl::~PostProcessingPipelineImpl() = default; ...@@ -119,6 +119,7 @@ PostProcessingPipelineImpl::~PostProcessingPipelineImpl() = default;
double PostProcessingPipelineImpl::ProcessFrames(float* data, double PostProcessingPipelineImpl::ProcessFrames(float* data,
int num_input_frames, int num_input_frames,
float current_multiplier, float current_multiplier,
float target_multiplier,
bool is_silence) { bool is_silence) {
DCHECK_GT(input_sample_rate_, 0); DCHECK_GT(input_sample_rate_, 0);
DCHECK(data); DCHECK(data);
...@@ -146,13 +147,14 @@ double PostProcessingPipelineImpl::ProcessFrames(float* data, ...@@ -146,13 +147,14 @@ double PostProcessingPipelineImpl::ProcessFrames(float* data,
silence_frames_processed_ = 0; silence_frames_processed_ = 0;
} }
UpdateCastVolume(current_multiplier); UpdateCastVolume(current_multiplier, target_multiplier);
AudioPostProcessor2::Metadata metadata = {current_dbfs_, target_dbfs_,
cast_volume_};
delay_s_ = 0; delay_s_ = 0;
for (auto& processor : processors_) { for (auto& processor : processors_) {
processor.ptr->ProcessFrames(output_buffer_, processor.ptr->ProcessFrames(output_buffer_,
processor.input_frames_per_write, cast_volume_, processor.input_frames_per_write, &metadata);
current_dbfs_);
const auto& status = processor.ptr->GetStatus(); const auto& status = processor.ptr->GetStatus();
delay_s_ += static_cast<double>(status.rendering_delay_frames) / delay_s_ += static_cast<double>(status.rendering_delay_frames) /
status.input_sample_rate; status.input_sample_rate;
...@@ -227,16 +229,23 @@ int PostProcessingPipelineImpl::GetRingingTimeInFrames() { ...@@ -227,16 +229,23 @@ int PostProcessingPipelineImpl::GetRingingTimeInFrames() {
return memory_frames; return memory_frames;
} }
void PostProcessingPipelineImpl::UpdateCastVolume(float multiplier) { void PostProcessingPipelineImpl::UpdateCastVolume(float multiplier,
float target) {
DCHECK_GE(multiplier, 0.0); DCHECK_GE(multiplier, 0.0);
if (multiplier == current_multiplier_) { if (multiplier != current_multiplier_) {
return; current_multiplier_ = multiplier;
current_dbfs_ =
(multiplier == 0.0f ? -200.0f : std::log10(multiplier) * 20);
DCHECK(chromecast::media::VolumeControl::DbFSToVolume);
cast_volume_ =
chromecast::media::VolumeControl::DbFSToVolume(current_dbfs_);
}
if (target != target_multiplier_) {
target_multiplier_ = target;
target_dbfs_ = (target == 0.0f ? -200.0f : std::log10(target) * 20);
} }
current_multiplier_ = multiplier;
current_dbfs_ = (multiplier == 0.0f ? -200.0f : std::log10(multiplier) * 20);
DCHECK(chromecast::media::VolumeControl::DbFSToVolume);
cast_volume_ = chromecast::media::VolumeControl::DbFSToVolume(current_dbfs_);
} }
// Send string |config| to postprocessor |name|. // Send string |config| to postprocessor |name|.
......
...@@ -36,6 +36,7 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline { ...@@ -36,6 +36,7 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline {
double ProcessFrames(float* data, double ProcessFrames(float* data,
int num_frames, int num_frames,
float current_volume, float current_volume,
float target_volume,
bool is_silence) override; bool is_silence) override;
float* GetOutputBuffer() override; float* GetOutputBuffer() override;
...@@ -61,7 +62,7 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline { ...@@ -61,7 +62,7 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline {
} PostProcessorInfo; } PostProcessorInfo;
int GetRingingTimeInFrames(); int GetRingingTimeInFrames();
void UpdateCastVolume(float multiplier); void UpdateCastVolume(float multiplier, float target);
std::string name_; std::string name_;
int input_sample_rate_ = 0; int input_sample_rate_ = 0;
...@@ -72,6 +73,8 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline { ...@@ -72,6 +73,8 @@ class PostProcessingPipelineImpl : public PostProcessingPipeline {
float current_multiplier_ = 0.0; float current_multiplier_ = 0.0;
float cast_volume_ = 0.0; float cast_volume_ = 0.0;
float current_dbfs_ = 0.0; float current_dbfs_ = 0.0;
float target_multiplier_ = 0.0;
float target_dbfs_ = 0.0;
int num_output_channels_ = 0; int num_output_channels_ = 0;
float* output_buffer_ = nullptr; float* output_buffer_ = nullptr;
AlignedBuffer<float> silence_buffer_; AlignedBuffer<float> silence_buffer_;
......
...@@ -57,16 +57,13 @@ const AudioPostProcessor2::Status& Governor::GetStatus() { ...@@ -57,16 +57,13 @@ const AudioPostProcessor2::Status& Governor::GetStatus() {
return status_; return status_;
} }
void Governor::ProcessFrames(float* data, void Governor::ProcessFrames(float* data, int frames, Metadata* metadata) {
int frames,
float volume,
float volume_dbfs) {
DCHECK(data); DCHECK(data);
status_.output_buffer = data; status_.output_buffer = data;
// If the volume has changed. // If the volume has changed.
if (!base::IsApproximatelyEqual(volume, volume_, kEpsilon)) { if (!base::IsApproximatelyEqual(metadata->system_volume, volume_, kEpsilon)) {
volume_ = volume; volume_ = metadata->system_volume;
slew_volume_.SetVolume(GetGovernorMultiplier()); slew_volume_.SetVolume(GetGovernorMultiplier());
} }
......
...@@ -33,10 +33,7 @@ class Governor : public AudioPostProcessor2 { ...@@ -33,10 +33,7 @@ class Governor : public AudioPostProcessor2 {
// AudioPostProcessor2 implementation: // AudioPostProcessor2 implementation:
bool SetConfig(const Config& config) override; bool SetConfig(const Config& config) override;
const Status& GetStatus() override; const Status& GetStatus() override;
void ProcessFrames(float* data, void ProcessFrames(float* data, int frames, Metadata* metadata) override;
int frames,
float cast_volume,
float volume_dbfs) override;
bool UpdateParameters(const std::string& message) override; bool UpdateParameters(const std::string& message) override;
void SetSlewTimeMsForTest(int slew_time_ms); void SetSlewTimeMsForTest(int slew_time_ms);
......
...@@ -62,7 +62,8 @@ class GovernorTest : public ::testing::TestWithParam<float> { ...@@ -62,7 +62,8 @@ class GovernorTest : public ::testing::TestWithParam<float> {
} }
void ProcessFrames(float volume) { void ProcessFrames(float volume) {
governor_->ProcessFrames(data_.data(), kNumFrames, volume, 0); AudioPostProcessor2::Metadata metadata = {0, 0, volume};
governor_->ProcessFrames(data_.data(), kNumFrames, &metadata);
} }
void CompareBuffers() { void CompareBuffers() {
......
...@@ -96,9 +96,10 @@ void TestDelay(AudioPostProcessor2* pp, ...@@ -96,9 +96,10 @@ void TestDelay(AudioPostProcessor2* pp,
AlignedBuffer<float> data_out(data_in.size() * resample_factor); AlignedBuffer<float> data_out(data_in.size() * resample_factor);
const int output_buf_size = kBufSizeFrames * resample_factor * const int output_buf_size = kBufSizeFrames * resample_factor *
status.output_channels * sizeof(data_out[0]); status.output_channels * sizeof(data_out[0]);
AudioPostProcessor2::Metadata metadata = {0, 0, 1.0};
for (int i = 0; i < input_size_frames; i += kBufSizeFrames) { for (int i = 0; i < input_size_frames; i += kBufSizeFrames) {
pp->ProcessFrames(&data_in[i * num_input_channels], kBufSizeFrames, 1.0, pp->ProcessFrames(&data_in[i * num_input_channels], kBufSizeFrames,
0.0); &metadata);
std::memcpy(&data_out[i * status.output_channels * resample_factor], std::memcpy(&data_out[i * status.output_channels * resample_factor],
status.output_buffer, output_buf_size); status.output_buffer, output_buf_size);
} }
...@@ -143,14 +144,15 @@ void TestRingingTime(AudioPostProcessor2* pp, ...@@ -143,14 +144,15 @@ void TestRingingTime(AudioPostProcessor2* pp,
const int kSinFreq = 2000; const int kSinFreq = 2000;
// Send a second of data to excite the filter. // Send a second of data to excite the filter.
AudioPostProcessor2::Metadata metadata = {0, 0, 1.0};
for (int i = 0; i < sample_rate; i += kNumFrames) { for (int i = 0; i < sample_rate; i += kNumFrames) {
AlignedBuffer<float> data = AlignedBuffer<float> data =
GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels); GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels);
pp->ProcessFrames(data.data(), kNumFrames, 1.0, 0.0); pp->ProcessFrames(data.data(), kNumFrames, &metadata);
} }
AlignedBuffer<float> data = AlignedBuffer<float> data =
GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels); GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels);
pp->ProcessFrames(data.data(), kNumFrames, 1.0, 0.0); pp->ProcessFrames(data.data(), kNumFrames, &metadata);
// Compute the amplitude of the last buffer // Compute the amplitude of the last buffer
ASSERT_NE(status.output_buffer, nullptr); ASSERT_NE(status.output_buffer, nullptr);
...@@ -166,13 +168,13 @@ void TestRingingTime(AudioPostProcessor2* pp, ...@@ -166,13 +168,13 @@ void TestRingingTime(AudioPostProcessor2* pp,
while (frames_remaining > 0) { while (frames_remaining > 0) {
frames_to_process = std::min(frames_to_process, frames_remaining); frames_to_process = std::min(frames_to_process, frames_remaining);
data.assign(frames_to_process * num_input_channels, 0); data.assign(frames_to_process * num_input_channels, 0);
pp->ProcessFrames(data.data(), frames_to_process, 1.0, 0.0); pp->ProcessFrames(data.data(), frames_to_process, &metadata);
frames_remaining -= frames_to_process; frames_remaining -= frames_to_process;
} }
// Send a little more data and ensure the amplitude is < 1% the original. // Send a little more data and ensure the amplitude is < 1% the original.
data.assign(kNumFrames * num_input_channels, 0); data.assign(kNumFrames * num_input_channels, 0);
pp->ProcessFrames(data.data(), kNumFrames, 1.0, 0.0); pp->ProcessFrames(data.data(), kNumFrames, &metadata);
// Only look at the amplitude of the first few frames. // Only look at the amplitude of the first few frames.
EXPECT_LE(SineAmplitude(status.output_buffer, 10 * status.output_channels) / EXPECT_LE(SineAmplitude(status.output_buffer, 10 * status.output_channels) /
...@@ -205,7 +207,8 @@ void TestPassthrough(AudioPostProcessor2* pp, ...@@ -205,7 +207,8 @@ void TestPassthrough(AudioPostProcessor2* pp,
GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels); GetSineData(kNumFrames, kSinFreq, sample_rate, num_input_channels);
AlignedBuffer<float> expected(data); AlignedBuffer<float> expected(data);
pp->ProcessFrames(data.data(), kNumFrames, 1.0, 0.0); AudioPostProcessor2::Metadata metadata = {0, 0, 1.0};
pp->ProcessFrames(data.data(), kNumFrames, &metadata);
int delayed_frames = 0; int delayed_frames = 0;
while (status.rendering_delay_frames >= delayed_frames + kNumFrames) { while (status.rendering_delay_frames >= delayed_frames + kNumFrames) {
...@@ -214,7 +217,7 @@ void TestPassthrough(AudioPostProcessor2* pp, ...@@ -214,7 +217,7 @@ void TestPassthrough(AudioPostProcessor2* pp,
EXPECT_EQ(0.0f, data[i]) << i; EXPECT_EQ(0.0f, data[i]) << i;
} }
data = expected; data = expected;
pp->ProcessFrames(data.data(), kNumFrames, 1.0, 0.0); pp->ProcessFrames(data.data(), kNumFrames, &metadata);
ASSERT_GE(status.rendering_delay_frames, delayed_frames); ASSERT_GE(status.rendering_delay_frames, delayed_frames);
} }
...@@ -241,8 +244,9 @@ void AudioProcessorBenchmark(AudioPostProcessor2* pp, ...@@ -241,8 +244,9 @@ void AudioProcessorBenchmark(AudioPostProcessor2* pp,
test_size_frames, std::vector<double>(num_input_channels, 0.0), test_size_frames, std::vector<double>(num_input_channels, 0.0),
std::vector<double>(num_input_channels, 1.0)); std::vector<double>(num_input_channels, 1.0));
clock_t start_clock = clock(); clock_t start_clock = clock();
AudioPostProcessor2::Metadata metadata = {0, 0, 1.0};
for (int i = 0; i < test_size_frames; i += kBufSizeFrames * kNumChannels) { for (int i = 0; i < test_size_frames; i += kBufSizeFrames * kNumChannels) {
pp->ProcessFrames(&data_in[i], kBufSizeFrames, 1.0, 0.0); pp->ProcessFrames(&data_in[i], kBufSizeFrames, &metadata);
} }
clock_t stop_clock = clock(); clock_t stop_clock = clock();
const ::testing::TestInfo* const test_info = const ::testing::TestInfo* const test_info =
......
...@@ -43,11 +43,10 @@ const AudioPostProcessor2::Status& AudioPostProcessorWrapper::GetStatus() { ...@@ -43,11 +43,10 @@ const AudioPostProcessor2::Status& AudioPostProcessorWrapper::GetStatus() {
void AudioPostProcessorWrapper::ProcessFrames(float* data, void AudioPostProcessorWrapper::ProcessFrames(float* data,
int frames, int frames,
float system_volume, Metadata* metadata) {
float volume_dbfs) {
status_.output_buffer = data; status_.output_buffer = data;
status_.rendering_delay_frames = status_.rendering_delay_frames = pp_->ProcessFrames(
pp_->ProcessFrames(data, frames, system_volume, volume_dbfs); data, frames, metadata->system_volume, metadata->volume_dbfs);
} }
bool AudioPostProcessorWrapper::UpdateParameters(const std::string& message) { bool AudioPostProcessorWrapper::UpdateParameters(const std::string& message) {
......
...@@ -40,10 +40,7 @@ class AudioPostProcessorWrapper : public AudioPostProcessor2 { ...@@ -40,10 +40,7 @@ class AudioPostProcessorWrapper : public AudioPostProcessor2 {
// AudioPostProcessor2 implementation: // AudioPostProcessor2 implementation:
bool SetConfig(const Config& config) override; bool SetConfig(const Config& config) override;
const Status& GetStatus() override; const Status& GetStatus() override;
void ProcessFrames(float* data, void ProcessFrames(float* data, int frames, Metadata* metadata) override;
int frames,
float system_volume,
float volume_dbfs) override;
bool UpdateParameters(const std::string& message) override; bool UpdateParameters(const std::string& message) override;
void SetContentType(AudioContentType content_type) override; void SetContentType(AudioContentType content_type) override;
void SetPlayoutChannel(int channel) override; void SetPlayoutChannel(int channel) override;
......
...@@ -50,15 +50,12 @@ const AudioPostProcessor2::Status& SaturatedGain::GetStatus() { ...@@ -50,15 +50,12 @@ const AudioPostProcessor2::Status& SaturatedGain::GetStatus() {
return status_; return status_;
} }
void SaturatedGain::ProcessFrames(float* data, void SaturatedGain::ProcessFrames(float* data, int frames, Metadata* metadata) {
int frames,
float volume,
float volume_dbfs) {
DCHECK(data); DCHECK(data);
status_.output_buffer = data; status_.output_buffer = data;
if (volume_dbfs != last_volume_dbfs_) { if (metadata->volume_dbfs != last_volume_dbfs_) {
last_volume_dbfs_ = volume_dbfs; last_volume_dbfs_ = metadata->volume_dbfs;
// Don't apply more gain than attenuation. // Don't apply more gain than attenuation.
float effective_gain = std::min(DbFsToScale(-last_volume_dbfs_), gain_); float effective_gain = std::min(DbFsToScale(-last_volume_dbfs_), gain_);
slew_volume_.SetVolume(effective_gain); slew_volume_.SetVolume(effective_gain);
......
...@@ -23,10 +23,7 @@ class SaturatedGain : public AudioPostProcessor2 { ...@@ -23,10 +23,7 @@ class SaturatedGain : public AudioPostProcessor2 {
// AudioPostProcessor implementation: // AudioPostProcessor implementation:
bool SetConfig(const Config& config) override; bool SetConfig(const Config& config) override;
const Status& GetStatus() override; const Status& GetStatus() override;
void ProcessFrames(float* data, void ProcessFrames(float* data, int frames, Metadata* metadata) override;
int frames,
float volume,
float volume_dbfs) override;
bool UpdateParameters(const std::string& message) override; bool UpdateParameters(const std::string& message) override;
private: private:
......
...@@ -55,7 +55,8 @@ TEST_P(PostProcessorTest, Gain) { ...@@ -55,7 +55,8 @@ TEST_P(PostProcessorTest, Gain) {
} }
float original_amplitude = float original_amplitude =
SineAmplitude(data.data(), kNumChannels * kNumFrames); SineAmplitude(data.data(), kNumChannels * kNumFrames);
pp->ProcessFrames(data.data(), kNumFrames, 1.0 /* doesn't matter */, -20.0); AudioPostProcessor2::Metadata metadata = {-20.0, -20.0, 1.0};
pp->ProcessFrames(data.data(), kNumFrames, &metadata);
EXPECT_FLOAT_EQ(original_amplitude * 10.0, EXPECT_FLOAT_EQ(original_amplitude * 10.0,
SineAmplitude(data.data(), kNumChannels * kNumFrames)) SineAmplitude(data.data(), kNumChannels * kNumFrames))
......
...@@ -1052,7 +1052,7 @@ TEST_F(StreamMixerTest, TwoUnscaledStreamsMixProperlyWithEdgeCases) { ...@@ -1052,7 +1052,7 @@ TEST_F(StreamMixerTest, TwoUnscaledStreamsMixProperlyWithEdgeCases) {
do { \ do { \
auto itr = map->find(name); \ auto itr = map->find(name); \
CHECK(itr != map->end()) << "Could not find processor for " << name; \ CHECK(itr != map->end()) << "Could not find processor for " << name; \
EXPECT_CALL(*(itr->second), ProcessFrames(_, frames, _, silence)) \ EXPECT_CALL(*(itr->second), ProcessFrames(_, frames, _, _, silence)) \
.Times(times); \ .Times(times); \
} while (0); } while (0);
......
...@@ -41,6 +41,18 @@ class AudioPostProcessor2 { ...@@ -41,6 +41,18 @@ class AudioPostProcessor2 {
int output_frames_per_write; int output_frames_per_write;
}; };
struct Metadata {
// The maximum volume multiplier applied to the current buffer, in dBFS.
float volume_dbfs;
// The (max) current target volume multiplier that we are slewing towards.
float target_volume_dbfs;
// The system volume applied to the stream (normalized to 0-1). Equivalent
// to DbFSToVolume(volume_dbfs).
float system_volume;
};
struct Status { struct Status {
int input_sample_rate = -1; int input_sample_rate = -1;
int output_channels = -1; int output_channels = -1;
...@@ -94,15 +106,7 @@ class AudioPostProcessor2 { ...@@ -94,15 +106,7 @@ class AudioPostProcessor2 {
// will take up equal or less space than the input data, ProcessFrames() // will take up equal or less space than the input data, ProcessFrames()
// should overwrite the input data and store a pointer to |data| in |Status|. // should overwrite the input data and store a pointer to |data| in |Status|.
// Otherwise, the Processor should allocate and own its own output buffer. // Otherwise, the Processor should allocate and own its own output buffer.
// |system_volume| is the Cast Volume applied to the stream virtual void ProcessFrames(float* data, int frames, Metadata* metadata) = 0;
// (normalized to 0-1). It is the same as the cast volume set via alsa.
// |volume_dbfs| is the actual attenuation in dBFS (-inf to 0), equivalent to
// VolumeMap::VolumeToDbFS(|volume|).
// AudioPostProcessor should assume that volume has already been applied.
virtual void ProcessFrames(float* data,
int frames,
float system_volume,
float volume_dbfs) = 0;
// Sends a message to the PostProcessor. Implementations are responsible // Sends a message to the PostProcessor. Implementations are responsible
// for the format and parsing of messages. // for the format and parsing of messages.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment