Commit 4d46c8d9 authored by bshaya's avatar bshaya Committed by Commit bot

[Chromecast] Complete PostProcessingPipeline changes

- Add mix & linearize AudioPostProcessor hooks.
- Use delay from AudioPostProcessor::ProcessFrames to update timing estimates.
- Allow configuring the mixing of multiple device_ids in a single AudioPostProcessor.
- Pass Cast Volume to PostProcessors (rather than raw multiplier).
- Add unittest for assignment of PostProcessors + delay accounting.

BUG=internal/36299959
TEST=cast_alsa_cma_backend_unittests

Change-Id: I5503f7de39d0ac502b8e861322162fee9aade8dd
Review-Url: https://codereview.chromium.org/2847673002
Cr-Commit-Position: refs/heads/master@{#468399}
parent 27ed7650
......@@ -21,6 +21,8 @@ shared_library("libcast_media_1.0_audio") {
sources = [
"cast_media_shlib.cc",
"media_codec_support_cast_audio.cc",
"post_processing_pipeline_impl.cc",
"post_processing_pipeline_impl.h",
]
deps = [
......@@ -28,6 +30,7 @@ shared_library("libcast_media_1.0_audio") {
"//base",
"//chromecast/base",
"//chromecast/public",
"//chromecast/public/media",
"//media",
]
}
......@@ -44,7 +47,6 @@ source_set("alsa_cma_backend") {
"filter_group.h",
"media_pipeline_backend_alsa.cc",
"media_pipeline_backend_alsa.h",
"post_processing_pipeline.cc",
"post_processing_pipeline.h",
"post_processing_pipeline_parser.cc",
"post_processing_pipeline_parser.h",
......
......@@ -7,24 +7,27 @@
#include <algorithm>
#include "base/memory/ptr_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
#include "media/base/audio_bus.h"
#include "media/base/vector_math.h"
namespace chromecast {
namespace media {
FilterGroup::FilterGroup(const std::unordered_set<std::string>& input_types,
AudioContentType content_type,
int num_channels,
const base::ListValue* filter_list)
: input_types_(input_types),
content_type_(content_type),
num_channels_(num_channels),
FilterGroup::FilterGroup(int num_channels,
const std::string& name,
const base::ListValue* filter_list,
const std::unordered_set<std::string>& device_ids,
const std::vector<FilterGroup*>& mixed_inputs)
: num_channels_(num_channels),
name_(name),
device_ids_(device_ids),
mixed_inputs_(mixed_inputs),
output_samples_per_second_(0),
channels_(num_channels_),
post_processing_pipeline_(
base::MakeUnique<PostProcessingPipeline>(filter_list,
num_channels_)) {}
PostProcessingPipeline::Create(name_, filter_list, num_channels_)) {}
FilterGroup::~FilterGroup() = default;
......@@ -34,27 +37,46 @@ void FilterGroup::Initialize(int output_samples_per_second) {
}
bool FilterGroup::CanProcessInput(StreamMixerAlsa::InputQueue* input) {
return !(input_types_.find(input->device_id()) == input_types_.end());
return !(device_ids_.find(input->device_id()) == device_ids_.end());
}
// Registers |input| to be mixed on the next MixAndFilter() call.
// The active-input list is rebuilt every mixing iteration: callers clear it
// with ClearActiveInputs() and re-add inputs each time data is queried.
void FilterGroup::AddActiveInput(StreamMixerAlsa::InputQueue* input) {
  active_inputs_.push_back(input);
}
// Returns a non-owning pointer to the interleaved output buffer.
// NOTE(review): contents are only meaningful after MixAndFilter() (or
// ClearInterleaved()) has filled/resized the buffer — confirm call order
// at the call sites.
std::vector<uint8_t>* FilterGroup::GetInterleaved() {
  return &interleaved_;
}
bool FilterGroup::MixAndFilter(int chunk_size) {
float FilterGroup::MixAndFilter(int chunk_size) {
DCHECK_NE(output_samples_per_second_, 0);
if (active_inputs_.empty() && !post_processing_pipeline_->IsRinging()) {
return false; // Output will be silence, no need to mix.
}
ResizeBuffersIfNecessary(chunk_size);
mixed_->ZeroFramesPartial(0, chunk_size);
float volume = 0.0f;
// Recursively mix inputs.
for (auto* filter_group : mixed_inputs_) {
volume = std::max(volume, filter_group->MixAndFilter(chunk_size));
}
// |volume| can only be 0 if no |mixed_inputs_| have data.
// This is true because FilterGroup can only return 0 if:
// a) It has no data and its PostProcessorPipeline is not ringing.
// (early return, below) or
// b) The output volume is 0 and has NEVER been non-zero,
// since FilterGroup will use last_volume_ if volume is 0.
// In this case, there was never any data in the pipeline.
if (active_inputs_.empty() && volume == 0.0f &&
!post_processing_pipeline_->IsRinging()) {
if (frames_zeroed_ < chunk_size) {
// Ensure mixed_ is zeros. This is necessary if |mixed_| is read later.
mixed_->ZeroFramesPartial(0, chunk_size);
frames_zeroed_ = chunk_size;
}
return 0.0f; // Output will be silence, no need to mix.
}
frames_zeroed_ = 0;
// Mix InputQueues
mixed_->ZeroFramesPartial(0, chunk_size);
for (StreamMixerAlsa::InputQueue* input : active_inputs_) {
input->GetResampledData(temp_.get(), chunk_size);
for (int c = 0; c < num_channels_; ++c) {
......@@ -65,18 +87,37 @@ bool FilterGroup::MixAndFilter(int chunk_size) {
volume = std::max(volume, input->EffectiveVolume());
}
post_processing_pipeline_->ProcessFrames(channels_, chunk_size, volume,
active_inputs_.empty());
mixed_->ToInterleaved(chunk_size, BytesPerOutputFormatSample(),
interleaved_.data());
return true;
// Mix FilterGroups
for (FilterGroup* group : mixed_inputs_) {
if (group->last_volume() > 0.0f) {
for (int c = 0; c < num_channels_; ++c) {
::media::vector_math::FMAC(group->data()->channel(c), 1.0f, chunk_size,
channels_[c]);
}
}
}
bool is_silence = (volume == 0.0f);
// Allow paused streams to "ring out" at the last valid volume.
// If the stream volume is actually 0, this doesn't matter, since the
// data is 0's anyway.
if (!is_silence) {
last_volume_ = volume;
}
delay_frames_ = post_processing_pipeline_->ProcessFrames(
channels_, chunk_size, last_volume_, is_silence);
return last_volume_;
}
void FilterGroup::ClearInterleaved(int chunk_size) {
ResizeBuffersIfNecessary(chunk_size);
memset(interleaved_.data(), 0,
static_cast<size_t>(chunk_size) * num_channels_ *
BytesPerOutputFormatSample());
// Converts the delay reported by this group's AudioPostProcessors
// (|delay_frames_|, updated by MixAndFilter()) from frames to microseconds
// using the configured output sample rate.
// Precondition: Initialize() has set |output_samples_per_second_| to a
// non-zero value; otherwise this divides by zero.
int64_t FilterGroup::GetRenderingDelayMicroseconds() {
  return delay_frames_ * base::Time::kMicrosecondsPerSecond /
         output_samples_per_second_;
}
// Drops all registered inputs. Per the class contract, this is called
// before AddActiveInput() on each mixing iteration.
void FilterGroup::ClearActiveInputs() {
  active_inputs_.clear();
}
void FilterGroup::ResizeBuffersIfNecessary(int chunk_size) {
......@@ -89,26 +130,6 @@ void FilterGroup::ResizeBuffersIfNecessary(int chunk_size) {
if (!temp_ || temp_->frames() < chunk_size) {
temp_ = ::media::AudioBus::Create(num_channels_, chunk_size);
}
size_t interleaved_size = static_cast<size_t>(chunk_size) * num_channels_ *
BytesPerOutputFormatSample();
if (interleaved_.size() < interleaved_size) {
interleaved_.resize(interleaved_size);
}
}
// Size in bytes of one output sample. The interleaved output format is
// 32-bit samples (see ToInterleaved() usage), hence sizeof(int32_t) == 4.
int FilterGroup::BytesPerOutputFormatSample() {
  return sizeof(int32_t);
}
// Drops all registered inputs; must be called before AddActiveInput()
// on each mixing iteration so stale inputs are not re-mixed.
void FilterGroup::ClearActiveInputs() {
  active_inputs_.clear();
}
// Test-only: swaps in an empty (passthrough) pipeline by constructing one
// with a null filter list, discarding all configured AudioPostProcessors.
// NOTE(review): this directly constructs PostProcessingPipeline, which the
// rest of this change turns into an abstract interface created via
// PostProcessingPipeline::Create() — this appears to be the pre-refactor
// code path being removed; confirm against the final diff.
void FilterGroup::DisablePostProcessingForTest() {
  post_processing_pipeline_ =
      base::MakeUnique<PostProcessingPipeline>(nullptr, num_channels_);
}
} // namespace media
......
......@@ -26,25 +26,32 @@ namespace media {
class PostProcessingPipeline;
// FilterGroup contains state for an AudioFilter.
// It takes multiple StreamMixerAlsa::InputQueues,
// mixes them, and processes them.
// FilterGroup mixes StreamMixerAlsa::InputQueues and/or FilterGroups,
// mixes their outputs, and applies DSP to them.
// ActiveInputs are added with AddActiveInput(), then cleared when
// FilterGroups are added at construction. These cannot be removed.
// InputQueues are added with AddActiveInput(), then cleared when
// MixAndFilter() is called (they must be added each time data is queried).
class FilterGroup {
public:
// |input_types| is a set of strings that is used as a filter to determine
// if an input belongs to this group (InputQueue->name() must exactly match an
// entry in |input_types| to be processed by this group.
// |filter_type| is passed to AudioFilterFactory to create an AudioFilter.
FilterGroup(const std::unordered_set<std::string>& input_types,
AudioContentType content_type,
int channels,
const base::ListValue* filter_list);
~FilterGroup();
// |name| is used for debug printing
// |filter_list| is a list of {"processor": LIBRARY_NAME, "configs": CONFIG}
// that is used to create PostProcessingPipeline.
// |device_ids| is a set of strings that is used as a filter to determine
// if an InputQueue belongs to this group (InputQueue->name() must exactly
// match an entry in |device_ids| to be processed by this group).
// |mixed_inputs| are FilterGroups that will be mixed into this FilterGroup.
// ex: the final mix ("mix") FilterGroup mixes all other filter groups.
// FilterGroups currently use either InputQueues OR FilterGroups as inputs,
// but there is no technical limitation preventing mixing input classes.
FilterGroup(int num_channels,
const std::string& name,
const base::ListValue* filter_list,
const std::unordered_set<std::string>& device_ids,
const std::vector<FilterGroup*>& mixed_inputs);
AudioContentType content_type() const { return content_type_; }
~FilterGroup();
// Sets the sample rate of the post-processors.
void Initialize(int output_samples_per_second);
......@@ -55,40 +62,47 @@ class FilterGroup {
// Adds |input| to |active_inputs_|.
void AddActiveInput(StreamMixerAlsa::InputQueue* input);
// Retrieves a pointer to the output buffer |interleaved_|.
std::vector<uint8_t>* GetInterleaved();
// Mixes all active inputs and passes them through the audio filter.
bool MixAndFilter(int chunk_size);
// Returns the largest volume of all streams with data.
// return value will be zero IFF there is no data and
// the PostProcessingPipeline is not ringing.
float MixAndFilter(int chunk_size);
// Overwrites |interleaved_| with 0's, ensuring at least
// |chunk_size| bytes.
void ClearInterleaved(int chunk_size);
// Gets the current delay of this filter group's AudioPostProcessors.
// (Not recursive).
int64_t GetRenderingDelayMicroseconds();
// Clear all |active_inputs_|. This should be called before AddActiveInputs
// on each mixing iteration.
void ClearActiveInputs();
// Resets the PostProcessingPipeline, removing all AudioPostProcessors.
void DisablePostProcessingForTest();
// Retrieves a pointer to the output buffer.
::media::AudioBus* data() { return mixed_.get(); }
// Get the last used volume.
float last_volume() const { return last_volume_; }
std::string name() const { return name_; }
private:
void ResizeBuffersIfNecessary(int chunk_size);
int BytesPerOutputFormatSample();
const std::unordered_set<std::string> input_types_;
const AudioContentType content_type_;
const int num_channels_;
const std::string name_;
const std::unordered_set<std::string> device_ids_;
std::vector<FilterGroup*> mixed_inputs_;
std::vector<StreamMixerAlsa::InputQueue*> active_inputs_;
int output_samples_per_second_;
int frames_zeroed_ = 0;
float last_volume_ = 0.0f;
int64_t delay_frames_ = 0;
// Buffers that hold audio data while it is mixed.
// These are kept as members of this class to minimize copies and
// allocations.
std::unique_ptr<::media::AudioBus> temp_;
std::unique_ptr<::media::AudioBus> mixed_;
std::vector<uint8_t> interleaved_;
std::vector<float*> channels_;
std::unique_ptr<PostProcessingPipeline> post_processing_pipeline_;
......
......@@ -5,54 +5,30 @@
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_H_
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
namespace base {
class ListValue;
class ScopedNativeLibrary;
} // namespace base
namespace chromecast {
namespace media {
class AudioPostProcessor;
// Creates and contains multiple AudioPostProcessors, as specified in ctor.
// Provides convenience methods to access and use the AudioPostProcessors.
class PostProcessingPipeline {
public:
PostProcessingPipeline(const base::ListValue* filter_description_list,
int channels);
~PostProcessingPipeline();
int ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence);
bool SetSampleRate(int sample_rate);
bool IsRinging();
private:
int GetRingingTimeInFrames();
int sample_rate_;
int ringing_time_in_frames_ = 0;
int silence_frames_processed_ = 0;
int total_delay_frames_ = 0;
// Contains all libraries in use;
// Functions in shared objects cannot be used once library is closed.
std::vector<std::unique_ptr<base::ScopedNativeLibrary>> libraries_;
// Must be after libraries_
std::vector<std::unique_ptr<AudioPostProcessor>> processors_;
DISALLOW_COPY_AND_ASSIGN(PostProcessingPipeline);
virtual ~PostProcessingPipeline() = default;
virtual int ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence) = 0;
virtual bool SetSampleRate(int sample_rate) = 0;
virtual bool IsRinging() = 0;
static std::unique_ptr<PostProcessingPipeline> Create(
const std::string& name,
const base::ListValue* filter_description_list,
int num_channels);
};
} // namespace media
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline_impl.h"
#include <string>
......@@ -12,6 +12,7 @@
#include "base/values.h"
#include "chromecast/base/serializers.h"
#include "chromecast/public/media/audio_post_processor_shlib.h"
#include "chromecast/public/volume_control.h"
namespace chromecast {
namespace media {
......@@ -20,15 +21,25 @@ namespace {
const int kNoSampleRate = -1;
const char kSoCreateFunction[] = "AudioPostProcessorShlib_Create";
const char kProcessorKey[] = "processor";
} // namespace
using CreatePostProcessor = AudioPostProcessor* (*)(const std::string&, int);
PostProcessingPipeline::PostProcessingPipeline(
// Factory declared on the PostProcessingPipeline interface.
// Returns the concrete implementation (PostProcessingPipelineImpl).
// |name| is used for identification/logging; |filter_description_list| may
// be null, in which case the pipeline is a passthrough (warning logged by
// the ctor's caller).
std::unique_ptr<PostProcessingPipeline> PostProcessingPipeline::Create(
    const std::string& name,
    const base::ListValue* filter_description_list,
    int num_channels) {
  return base::MakeUnique<PostProcessingPipelineImpl>(
      name, filter_description_list, num_channels);
}
PostProcessingPipelineImpl::PostProcessingPipelineImpl(
const std::string& name,
const base::ListValue* filter_description_list,
int channels)
: sample_rate_(kNoSampleRate) {
: name_(name), sample_rate_(kNoSampleRate) {
if (!filter_description_list) {
return; // Warning logged.
}
......@@ -37,7 +48,7 @@ PostProcessingPipeline::PostProcessingPipeline(
CHECK(
filter_description_list->GetDictionary(i, &processor_description_dict));
std::string library_path;
CHECK(processor_description_dict->GetString("processor", &library_path));
CHECK(processor_description_dict->GetString(kProcessorKey, &library_path));
if (library_path == "null" || library_path == "none") {
continue;
}
......@@ -62,12 +73,12 @@ PostProcessingPipeline::PostProcessingPipeline(
}
}
PostProcessingPipeline::~PostProcessingPipeline() = default;
PostProcessingPipelineImpl::~PostProcessingPipelineImpl() = default;
int PostProcessingPipeline::ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence) {
int PostProcessingPipelineImpl::ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_multiplier,
bool is_silence) {
DCHECK_NE(sample_rate_, kNoSampleRate);
if (is_silence) {
if (!IsRinging()) {
......@@ -78,15 +89,17 @@ int PostProcessingPipeline::ProcessFrames(const std::vector<float*>& data,
silence_frames_processed_ = 0;
}
UpdateCastVolume(current_multiplier);
total_delay_frames_ = 0;
for (auto& processor : processors_) {
total_delay_frames_ +=
processor->ProcessFrames(data, num_frames, current_volume);
processor->ProcessFrames(data, num_frames, cast_volume_);
}
return total_delay_frames_;
}
bool PostProcessingPipeline::SetSampleRate(int sample_rate) {
bool PostProcessingPipelineImpl::SetSampleRate(int sample_rate) {
sample_rate_ = sample_rate;
bool result = true;
for (auto& processor : processors_) {
......@@ -97,11 +110,11 @@ bool PostProcessingPipeline::SetSampleRate(int sample_rate) {
return result;
}
bool PostProcessingPipeline::IsRinging() {
// The pipeline is "ringing" while its processors may still emit a tail
// (reverb/echo decay): i.e. the total ringing time reported by the
// processors has not yet been covered by frames of silence pushed through.
bool PostProcessingPipelineImpl::IsRinging() {
  return ringing_time_in_frames_ > silence_frames_processed_;
}
int PostProcessingPipeline::GetRingingTimeInFrames() {
int PostProcessingPipelineImpl::GetRingingTimeInFrames() {
int memory_frames = 0;
for (auto& processor : processors_) {
memory_frames += processor->GetRingingTimeInFrames();
......@@ -109,5 +122,17 @@ int PostProcessingPipeline::GetRingingTimeInFrames() {
return memory_frames;
}
// Converts the linear volume |multiplier| to a cast volume (via the
// platform's DbFSToVolume) and caches the result in |cast_volume_|.
// No-op when |multiplier| is unchanged from the previous call, avoiding a
// log10 + shlib call per audio buffer.
void PostProcessingPipelineImpl::UpdateCastVolume(float multiplier) {
  DCHECK_GE(multiplier, 0.0);
  if (multiplier == current_multiplier_) {
    return;  // Cached |cast_volume_| is still valid.
  }
  current_multiplier_ = multiplier;
  // A multiplier of exactly 0 corresponds to -inf dBFS; std::log10(0)
  // returns -infinity, which would be passed to DbFSToVolume. Map it
  // directly to the minimum cast volume instead.
  if (multiplier == 0.0f) {
    cast_volume_ = 0.0f;
    return;
  }
  float dbfs = std::log10(multiplier) * 20;
  DCHECK(chromecast::media::VolumeControl::DbFSToVolume);
  cast_volume_ = chromecast::media::VolumeControl::DbFSToVolume(dbfs);
}
} // namespace media
} // namespace chromecast
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
namespace base {
class ListValue;
class ScopedNativeLibrary;
} // namespace base
namespace chromecast {
namespace media {
class AudioPostProcessor;
// Creates and contains multiple AudioPostProcessors, as specified in ctor.
// Provides convenience methods to access and use the AudioPostProcessors.
// Concrete PostProcessingPipeline.
// Creates and contains multiple AudioPostProcessors, as specified in ctor.
// Provides convenience methods to access and use the AudioPostProcessors.
class PostProcessingPipelineImpl : public PostProcessingPipeline {
 public:
  // |name| identifies this pipeline (used for logging).
  // |filter_description_list| lists {"processor": PATH, ...} entries used to
  // load AudioPostProcessor shared libraries; may be null (passthrough).
  PostProcessingPipelineImpl(const std::string& name,
                             const base::ListValue* filter_description_list,
                             int channels);
  ~PostProcessingPipelineImpl() override;

  // PostProcessingPipeline implementation:
  int ProcessFrames(const std::vector<float*>& data,
                    int num_frames,
                    float current_volume,
                    bool is_silence) override;
  bool SetSampleRate(int sample_rate) override;
  bool IsRinging() override;

 private:
  // Sum of the ringing time (in frames) reported by all processors.
  int GetRingingTimeInFrames();
  // Converts |multiplier| to a cast volume and caches it in |cast_volume_|;
  // no-op when |multiplier| equals |current_multiplier_|.
  void UpdateCastVolume(float multiplier);

  std::string name_;
  int sample_rate_;
  int ringing_time_in_frames_ = 0;
  int silence_frames_processed_ = 0;
  int total_delay_frames_ = 0;
  // Default-initialized so UpdateCastVolume()'s first comparison against
  // |current_multiplier_| does not read an indeterminate value (these were
  // previously left uninitialized).
  float current_multiplier_ = 0.0f;
  float cast_volume_ = 0.0f;

  // Contains all libraries in use;
  // Functions in shared objects cannot be used once library is closed.
  std::vector<std::unique_ptr<base::ScopedNativeLibrary>> libraries_;
  // Must be after libraries_
  std::vector<std::unique_ptr<AudioPostProcessor>> processors_;

  DISALLOW_COPY_AND_ASSIGN(PostProcessingPipelineImpl);
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
......@@ -20,54 +20,103 @@ namespace {
const base::FilePath::CharType kPostProcessingPipelineFilePath[] =
FILE_PATH_LITERAL("/etc/cast_audio.json");
const char kMediaPipelineKey[] = "media";
const char kTtsPipelineKey[] = "tts";
// TODO(bshaya): Use AudioContentType instead.
std::string GetPipelineKey(const std::string& device_id) {
if (device_id == kTtsAudioDeviceId) {
return kTtsPipelineKey;
}
if (device_id == ::media::AudioDeviceDescription::kDefaultDeviceId) {
return kMediaPipelineKey;
}
NOTREACHED() << "Invalid device_id: " << device_id;
return "";
}
const char kOutputStreamsKey[] = "output_streams";
const char kMixPipelineKey[] = "mix";
const char kLinearizePipelineKey[] = "linearize";
const char kProcessorsKey[] = "processors";
const char kStreamsKey[] = "streams";
} // namespace
PostProcessingPipelineParser::PostProcessingPipelineParser() = default;
PostProcessingPipelineParser::~PostProcessingPipelineParser() = default;
// |pipeline_in| is a raw, non-owning pointer into the parser's config
// dictionary; the parser must outlive this descriptor.
StreamPipelineDescriptor::StreamPipelineDescriptor(
    base::ListValue* pipeline_in,
    const std::unordered_set<std::string>& stream_types_in)
    : pipeline(pipeline_in), stream_types(stream_types_in) {}
StreamPipelineDescriptor::~StreamPipelineDescriptor() = default;
// Copy constructor: delegates to the main constructor. The pipeline pointer
// is copied shallowly (both descriptors alias the same base::ListValue).
StreamPipelineDescriptor::StreamPipelineDescriptor(
    const StreamPipelineDescriptor& other)
    : StreamPipelineDescriptor(other.pipeline, other.stream_types) {}
void PostProcessingPipelineParser::Initialize() {
if (!base::PathExists(base::FilePath(kPostProcessingPipelineFilePath))) {
LOG(WARNING) << "No post-processing config found in "
PostProcessingPipelineParser::PostProcessingPipelineParser(
const std::string& json) {
if (json.empty() &&
!base::PathExists(base::FilePath(kPostProcessingPipelineFilePath))) {
LOG(WARNING) << "Could not open post-processing config in "
<< kPostProcessingPipelineFilePath << ".";
return;
}
config_dict_ = base::DictionaryValue::From(
DeserializeJsonFromFile(base::FilePath(kPostProcessingPipelineFilePath)));
CHECK(config_dict_->GetDictionary("post_processing", &pipeline_dict_))
<< "No \"post_processing\" object found in "
<< kPostProcessingPipelineFilePath;
if (json.empty()) {
config_dict_ = base::DictionaryValue::From(DeserializeJsonFromFile(
base::FilePath(kPostProcessingPipelineFilePath)));
} else {
config_dict_ = base::DictionaryValue::From(DeserializeFromJson(json));
}
CHECK(config_dict_) << "Invalid JSON in " << kPostProcessingPipelineFilePath;
}
base::ListValue* PostProcessingPipelineParser::GetPipelineByDeviceId(
const std::string& device_id) {
if (!pipeline_dict_) {
return nullptr;
PostProcessingPipelineParser::~PostProcessingPipelineParser() = default;
std::vector<StreamPipelineDescriptor>
PostProcessingPipelineParser::GetStreamPipelines() {
base::ListValue* pipelines_list;
std::vector<StreamPipelineDescriptor> descriptors;
if (!config_dict_ ||
!config_dict_->GetList(kOutputStreamsKey, &pipelines_list)) {
LOG(WARNING) << "No post-processors found for streams (key = "
<< kOutputStreamsKey
<< ").\n No stream-specific processing will occur.";
return descriptors;
}
for (size_t i = 0; i < pipelines_list->GetSize(); ++i) {
base::DictionaryValue* pipeline_description_dict;
CHECK(pipelines_list->GetDictionary(i, &pipeline_description_dict));
base::ListValue* out_list;
std::string key = GetPipelineKey(device_id);
if (!pipeline_dict_->GetList(key, &out_list)) {
base::ListValue* processors_list;
CHECK(pipeline_description_dict->GetList(kProcessorsKey, &processors_list));
base::ListValue* streams_list;
CHECK(pipeline_description_dict->GetList(kStreamsKey, &streams_list));
std::unordered_set<std::string> streams_set;
for (size_t stream = 0; stream < streams_list->GetSize(); ++stream) {
std::string stream_name;
CHECK(streams_list->GetString(stream, &stream_name));
CHECK(streams_set.insert(stream_name).second)
<< "Duplicate stream type: " << stream_name;
}
descriptors.emplace_back(processors_list, std::move(streams_set));
}
return descriptors;
}
// Returns the hard-coded on-disk config path (/etc/cast_audio.json).
// Used by callers for diagnostic messages.
std::string PostProcessingPipelineParser::GetFilePath() {
  return kPostProcessingPipelineFilePath;
}
// Returns the processor list for the "mix" stage, or nullptr (passthrough)
// if the config has no such entry. Pointer is owned by |config_dict_|.
base::ListValue* PostProcessingPipelineParser::GetMixPipeline() {
  return GetPipelineByKey(kMixPipelineKey);
}
// Returns the processor list for the "linearize" stage, or nullptr
// (passthrough) if the config has no such entry. Owned by |config_dict_|.
base::ListValue* PostProcessingPipelineParser::GetLinearizePipeline() {
  return GetPipelineByKey(kLinearizePipelineKey);
}
// Looks up the dictionary for |key| in the parsed config and returns its
// "processors" list. Returns nullptr (with a warning) when the config is
// missing or has no entry for |key|, which callers treat as passthrough.
// The returned pointer is owned by |config_dict_|.
base::ListValue* PostProcessingPipelineParser::GetPipelineByKey(
    const std::string& key) {
  base::DictionaryValue* stream_dict;
  if (config_dict_ && config_dict_->GetDictionary(key, &stream_dict)) {
    base::ListValue* processors;
    CHECK(stream_dict->GetList(kProcessorsKey, &processors));
    return processors;
  }
  LOG(WARNING) << "No post-processor description found for \"" << key
               << "\" in " << kPostProcessingPipelineFilePath
               << ". Using passthrough.";
  return nullptr;
}
......
......@@ -7,6 +7,8 @@
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "base/macros.h"
......@@ -18,30 +20,47 @@ class ListValue;
namespace chromecast {
namespace media {
// Helper class to hold information about a stream pipeline.
struct StreamPipelineDescriptor {
// The format for pipeline is:
// [ {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// ... ]
base::ListValue* pipeline;
std::unordered_set<std::string> stream_types;
StreamPipelineDescriptor(
base::ListValue* pipeline_in,
const std::unordered_set<std::string>& stream_types_in);
~StreamPipelineDescriptor();
StreamPipelineDescriptor(const StreamPipelineDescriptor& other);
StreamPipelineDescriptor operator=(const StreamPipelineDescriptor& other) =
delete;
};
// Helper class to parse post-processing pipeline descriptor file.
class PostProcessingPipelineParser {
public:
PostProcessingPipelineParser();
// |json|, if provided, is used instead of reading from file.
// |json| should be provided in tests only.
explicit PostProcessingPipelineParser(const std::string& json = "");
~PostProcessingPipelineParser();
// Reads the pipeline descriptor file and does preliminary parsing.
// Crashes with fatal log if parsing fails.
void Initialize();
std::vector<StreamPipelineDescriptor> GetStreamPipelines();
// Gets the list of processors for a given stream type.
// The format will be:
// [
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// ...
// ]
base::ListValue* GetPipelineByDeviceId(const std::string& device_id);
// Gets the list of processors for the mix/linearize stages.
// Same format as StreamPipelineDescriptor.pipeline
base::ListValue* GetMixPipeline();
base::ListValue* GetLinearizePipeline();
static std::string GetFilePath();
private:
base::ListValue* GetPipelineByKey(const std::string& key);
std::unique_ptr<base::DictionaryValue> config_dict_;
base::DictionaryValue* pipeline_dict_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(PostProcessingPipelineParser);
};
......
......@@ -19,6 +19,7 @@
#include "base/single_thread_task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "chromecast/base/chromecast_switches.h"
#include "chromecast/media/base/audio_device_ids.h"
#include "chromecast/media/cma/backend/alsa/alsa_wrapper.h"
......@@ -129,10 +130,11 @@ int64_t TimespecToMicroseconds(struct timespec time) {
time.tv_nsec / 1000;
}
void VectorAccumulate(const int32_t* source, size_t size, int32_t* dest) {
for (size_t i = 0; i < size; ++i) {
dest[i] = base::SaturatedAddition(source[i], dest[i]);
}
// True iff |device| names one of the known output stream types: the default
// or communications device, or the Cast local / alarm / TTS devices.
bool IsOutputDeviceId(const std::string& device) {
  if (device == ::media::AudioDeviceDescription::kDefaultDeviceId)
    return true;
  if (device == ::media::AudioDeviceDescription::kCommunicationsDeviceId)
    return true;
  if (device == kLocalAudioDeviceId)
    return true;
  if (device == kAlarmAudioDeviceId)
    return true;
  return device == kTtsAudioDeviceId;
}
class StreamMixerAlsaInstance : public StreamMixerAlsa {
......@@ -217,33 +219,78 @@ StreamMixerAlsa::StreamMixerAlsa()
// Read post-processing configuration file
PostProcessingPipelineParser pipeline_parser;
pipeline_parser.Initialize();
// Media filter group:
filter_groups_.push_back(base::MakeUnique<FilterGroup>(
std::unordered_set<std::string>(
{::media::AudioDeviceDescription::kDefaultDeviceId,
kLocalAudioDeviceId, "", kAlarmAudioDeviceId}),
AudioContentType::kMedia, kNumOutputChannels,
pipeline_parser.GetPipelineByDeviceId(
::media::AudioDeviceDescription::kDefaultDeviceId)));
// Voice filter group:
filter_groups_.push_back(base::MakeUnique<FilterGroup>(
std::unordered_set<std::string>(
{kTtsAudioDeviceId,
::media::AudioDeviceDescription::kCommunicationsDeviceId}),
AudioContentType::kCommunication, kNumOutputChannels,
pipeline_parser.GetPipelineByDeviceId(kTtsAudioDeviceId)));
CreatePostProcessors(&pipeline_parser);
// TODO(bshaya): Add support for final mix AudioPostProcessor.
DefineAlsaParameters();
}
// Builds the FilterGroup graph from the parsed config:
//   - one leaf FilterGroup per "output_streams" entry,
//   - a "mix" FilterGroup whose inputs are all leaf groups,
//   - a "linearize" FilterGroup fed by the mix group.
// Also selects |default_filter_| (the group claiming the default device id),
// creating a passthrough default group when the config provides none.
void StreamMixerAlsa::CreatePostProcessors(
    PostProcessingPipelineParser* pipeline_parser) {
  // Stream types already claimed by some group; duplicates are fatal.
  std::unordered_set<std::string> used_streams;
  for (auto& stream_pipeline : pipeline_parser->GetStreamPipelines()) {
    const auto& device_ids = stream_pipeline.stream_types;
    for (const std::string& stream_type : device_ids) {
      CHECK(IsOutputDeviceId(stream_type))
          << stream_type << " is not a stream type. Stream types are listed "
          << "in chromecast/media/base/audio_device_ids.cc and "
          << "media/audio/audio_device_description.cc";
      CHECK(used_streams.insert(stream_type).second)
          << "Multiple instances of stream type '" << stream_type << "' in "
          << pipeline_parser->GetFilePath() << ".";
    }
    // The first device id doubles as the group's debug name.
    // NOTE(review): assumes |device_ids| is non-empty — confirm the parser
    // guarantees at least one stream per pipeline entry.
    filter_groups_.push_back(base::MakeUnique<FilterGroup>(
        kNumOutputChannels, *device_ids.begin() /* name */,
        stream_pipeline.pipeline, device_ids,
        std::vector<FilterGroup*>() /* mixed_inputs */));
    if (device_ids.find(::media::AudioDeviceDescription::kDefaultDeviceId) !=
        device_ids.end()) {
      default_filter_ = filter_groups_.back().get();
    }
  }

  // Always provide a default filter; OEM may only specify mix filter.
  if (!default_filter_) {
    std::string kDefaultDeviceId =
        ::media::AudioDeviceDescription::kDefaultDeviceId;
    filter_groups_.push_back(base::MakeUnique<FilterGroup>(
        kNumOutputChannels, kDefaultDeviceId /* name */, nullptr,
        std::unordered_set<std::string>({kDefaultDeviceId}),
        std::vector<FilterGroup*>() /* mixed_inputs */));
    default_filter_ = filter_groups_.back().get();
  }

  // Snapshot raw pointers to every group created so far (all leaf groups)
  // so they can be fed to the "mix" group as inputs.
  std::vector<FilterGroup*> filter_group_ptrs(filter_groups_.size());
  std::transform(
      filter_groups_.begin(), filter_groups_.end(), filter_group_ptrs.begin(),
      [](const std::unique_ptr<FilterGroup>& group) { return group.get(); });

  // "mix" has no device ids of its own; it only mixes other groups.
  filter_groups_.push_back(base::MakeUnique<FilterGroup>(
      kNumOutputChannels, "mix", pipeline_parser->GetMixPipeline(),
      std::unordered_set<std::string>() /* device_ids */, filter_group_ptrs));
  mix_filter_ = filter_groups_.back().get();

  // "linearize" takes only the mix group as input; it is the last group in
  // the processing chain.
  filter_groups_.push_back(base::MakeUnique<FilterGroup>(
      kNumOutputChannels, "linearize", pipeline_parser->GetLinearizePipeline(),
      std::unordered_set<std::string>() /* device_ids */,
      std::vector<FilterGroup*>({mix_filter_})));
  linearize_filter_ = filter_groups_.back().get();
}
// Test-only: rebinds the mixer task runner to the current thread's runner
// so tests can drive the mixer without its dedicated mixer thread.
void StreamMixerAlsa::ResetTaskRunnerForTest() {
  mixer_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
// Test-only: destroys all FilterGroups and rebuilds them from
// |pipeline_json| (instead of the on-disk config file).
// NOTE(review): any live InputQueue still holds a raw pointer to a
// destroyed FilterGroup (via set_filter_group) between clear() and
// CreatePostProcessors(); confirm tests call this with no active inputs.
void StreamMixerAlsa::ResetPostProcessorsForTest(
    const std::string& pipeline_json) {
  // Loading the test pipeline logs warnings about the real config; tell the
  // log reader to ignore them.
  LOG(INFO) << __FUNCTION__ << " disregard previous PostProcessor messages.";
  filter_groups_.clear();
  default_filter_ = nullptr;
  PostProcessingPipelineParser parser(pipeline_json);
  CreatePostProcessors(&parser);
}
void StreamMixerAlsa::DefineAlsaParameters() {
// Get the ALSA output configuration from the command line.
alsa_buffer_size_ = GetSwitchValueNonNegativeInt(
......@@ -508,8 +555,8 @@ void StreamMixerAlsa::Start() {
RETURN_REPORT_ERROR(PcmPrepare, pcm_);
RETURN_REPORT_ERROR(PcmStatusMalloc, &pcm_status_);
rendering_delay_.timestamp_microseconds = kNoTimestamp;
rendering_delay_.delay_microseconds = 0;
alsa_rendering_delay_.timestamp_microseconds = kNoTimestamp;
alsa_rendering_delay_.delay_microseconds = 0;
state_ = kStateNormalPlayback;
}
......@@ -583,12 +630,6 @@ void StreamMixerAlsa::SetAlsaWrapperForTest(
alsa_ = std::move(alsa_wrapper);
}
// Test-only (pre-refactor path being removed by this change): strips the
// AudioPostProcessors from every FilterGroup so tests observe passthrough
// audio. Superseded by ResetPostProcessorsForTest().
void StreamMixerAlsa::DisablePostProcessingForTest() {
  for (auto& filter : filter_groups_) {
    filter->DisablePostProcessingForTest();
  }
}
void StreamMixerAlsa::WriteFramesForTest() {
RUN_ON_MIXER_THREAD(&StreamMixerAlsa::WriteFramesForTest);
WriteFrames();
......@@ -633,16 +674,28 @@ void StreamMixerAlsa::AddInput(std::unique_ptr<InputQueue> input) {
// Fallthrough intended
case kStateNormalPlayback: {
bool found_filter_group = false;
input->Initialize(rendering_delay_);
input->Initialize(alsa_rendering_delay_);
for (auto&& filter_group : filter_groups_) {
if (filter_group->CanProcessInput(input.get())) {
found_filter_group = true;
input->set_filter_group(filter_group.get());
LOG(INFO) << "Added input of type " << input->device_id() << " to "
<< filter_group->name();
break;
}
}
DCHECK(found_filter_group) << "Could not find a filter group for "
<< input->device_id();
// Fallback to default_filter_ if provided
if (!found_filter_group && default_filter_) {
found_filter_group = true;
input->set_filter_group(default_filter_);
LOG(INFO) << "Added input of type " << input->device_id() << " to "
<< default_filter_->name();
}
CHECK(found_filter_group)
<< "Could not find a filter group for " << input->device_id() << "\n"
<< "(consider adding a 'default' processor)";
inputs_.push_back(std::move(input));
} break;
case kStateError:
......@@ -809,29 +862,10 @@ bool StreamMixerAlsa::TryWriteFrames() {
chunk_size = kPreventUnderrunChunkSize;
}
// Mix and filter each group.
std::vector<uint8_t>* interleaved = nullptr;
for (auto&& filter_group : filter_groups_) {
if (filter_group->MixAndFilter(chunk_size)) {
if (!interleaved) {
interleaved = filter_group->GetInterleaved();
} else {
DCHECK_EQ(4, BytesPerOutputFormatSample());
VectorAccumulate(
reinterpret_cast<int32_t*>(filter_group->GetInterleaved()->data()),
chunk_size * kNumOutputChannels,
reinterpret_cast<int32_t*>(interleaved->data()));
}
}
}
if (!interleaved) {
// No group has any data, write empty buffer.
filter_groups_[0]->ClearInterleaved(chunk_size);
interleaved = filter_groups_[0]->GetInterleaved();
}
// Recursively mix and filter each group.
linearize_filter_->MixAndFilter(chunk_size);
WriteMixedPcm(interleaved, chunk_size);
WriteMixedPcm(chunk_size);
return true;
}
......@@ -844,27 +878,40 @@ ssize_t StreamMixerAlsa::BytesPerOutputFormatSample() {
return alsa_->PcmFormatSize(pcm_format_, 1);
}
void StreamMixerAlsa::WriteMixedPcm(std::vector<uint8_t>* interleaved,
int frames) {
void StreamMixerAlsa::WriteMixedPcm(int frames) {
DCHECK(mixer_task_runner_->BelongsToCurrentThread());
CHECK_PCM_INITIALIZED();
DCHECK(interleaved);
DCHECK_GE(interleaved->size(), InterleavedSize(frames));
// Resize interleaved if necessary.
size_t interleaved_size = static_cast<size_t>(frames) * kNumOutputChannels *
BytesPerOutputFormatSample();
if (interleaved_.size() < interleaved_size) {
interleaved_.resize(interleaved_size);
}
// Get data for loopback.
mix_filter_->data()->ToInterleaved(frames, BytesPerOutputFormatSample(),
interleaved_.data());
int64_t expected_playback_time;
if (rendering_delay_.timestamp_microseconds == kNoTimestamp) {
if (alsa_rendering_delay_.timestamp_microseconds == kNoTimestamp) {
expected_playback_time = kNoTimestamp;
} else {
expected_playback_time = rendering_delay_.timestamp_microseconds +
rendering_delay_.delay_microseconds;
expected_playback_time = alsa_rendering_delay_.timestamp_microseconds +
alsa_rendering_delay_.delay_microseconds +
linearize_filter_->GetRenderingDelayMicroseconds();
}
for (CastMediaShlib::LoopbackAudioObserver* observer : loopback_observers_) {
observer->OnLoopbackAudio(expected_playback_time, kSampleFormatS32,
output_samples_per_second_, kNumOutputChannels,
interleaved->data(), InterleavedSize(frames));
interleaved_.data(), InterleavedSize(frames));
}
// Get data for playout.
linearize_filter_->data()->ToInterleaved(frames, BytesPerOutputFormatSample(),
interleaved_.data());
// If the PCM has been drained it will be in SND_PCM_STATE_SETUP and need
// to be prepared in order for playback to work.
if (alsa_->PcmState(pcm_) == SND_PCM_STATE_SETUP) {
......@@ -872,7 +919,7 @@ void StreamMixerAlsa::WriteMixedPcm(std::vector<uint8_t>* interleaved,
}
int frames_left = frames;
uint8_t* data = interleaved->data();
uint8_t* data = interleaved_.data();
while (frames_left) {
int frames_or_error;
while ((frames_or_error = alsa_->PcmWritei(pcm_, data, frames_left)) < 0) {
......@@ -887,8 +934,18 @@ void StreamMixerAlsa::WriteMixedPcm(std::vector<uint8_t>* interleaved,
data += frames_or_error * kNumOutputChannels * BytesPerOutputFormatSample();
}
UpdateRenderingDelay(frames);
for (auto&& input : inputs_)
input->AfterWriteFrames(rendering_delay_);
MediaPipelineBackendAlsa::RenderingDelay common_rendering_delay =
alsa_rendering_delay_;
common_rendering_delay.delay_microseconds +=
linearize_filter_->GetRenderingDelayMicroseconds() +
mix_filter_->GetRenderingDelayMicroseconds();
for (auto&& input : inputs_) {
MediaPipelineBackendAlsa::RenderingDelay stream_rendering_delay =
common_rendering_delay;
stream_rendering_delay.delay_microseconds +=
input->filter_group()->GetRenderingDelayMicroseconds();
input->AfterWriteFrames(stream_rendering_delay);
}
}
void StreamMixerAlsa::UpdateRenderingDelay(int newly_pushed_frames) {
......@@ -898,19 +955,19 @@ void StreamMixerAlsa::UpdateRenderingDelay(int newly_pushed_frames) {
// TODO(bshaya): Add rendering delay from post-processors.
if (alsa_->PcmStatus(pcm_, pcm_status_) != 0 ||
alsa_->PcmStatusGetState(pcm_status_) != SND_PCM_STATE_RUNNING) {
rendering_delay_.timestamp_microseconds = kNoTimestamp;
rendering_delay_.delay_microseconds = 0;
alsa_rendering_delay_.timestamp_microseconds = kNoTimestamp;
alsa_rendering_delay_.delay_microseconds = 0;
return;
}
snd_htimestamp_t status_timestamp = {};
alsa_->PcmStatusGetHtstamp(pcm_status_, &status_timestamp);
rendering_delay_.timestamp_microseconds =
alsa_rendering_delay_.timestamp_microseconds =
TimespecToMicroseconds(status_timestamp);
snd_pcm_sframes_t delay_frames = alsa_->PcmStatusGetDelay(pcm_status_);
rendering_delay_.delay_microseconds = static_cast<int64_t>(delay_frames) *
base::Time::kMicrosecondsPerSecond /
output_samples_per_second_;
alsa_rendering_delay_.delay_microseconds =
static_cast<int64_t>(delay_frames) * base::Time::kMicrosecondsPerSecond /
output_samples_per_second_;
}
void StreamMixerAlsa::AddLoopbackAudioObserver(
......
......@@ -30,6 +30,7 @@ namespace chromecast {
namespace media {
class AlsaWrapper;
class FilterGroup;
class PostProcessingPipelineParser;
// Mixer implementation. The mixer has one or more input queues; these can be
// added/removed at any time. When an input source pushes frames to an input
......@@ -175,8 +176,8 @@ class StreamMixerAlsa {
// mixer thread.
void OnFramesQueued();
void ResetPostProcessorsForTest(const std::string& pipeline_json);
void SetAlsaWrapperForTest(std::unique_ptr<AlsaWrapper> alsa_wrapper);
void DisablePostProcessingForTest();
void WriteFramesForTest(); // Can be called on any thread.
void ClearInputsForTest(); // Removes all inputs.
......@@ -213,6 +214,7 @@ class StreamMixerAlsa {
void FinalizeOnMixerThread();
void FinishFinalize();
void CreatePostProcessors(PostProcessingPipelineParser* pipeline_parser);
// Reads the buffer size, period size, start threshold, and avail min value
// from the provided command line flags or uses default values if no flags are
// provided.
......@@ -242,7 +244,7 @@ class StreamMixerAlsa {
void WriteFrames();
bool TryWriteFrames();
void WriteMixedPcm(std::vector<uint8_t>* interleaved, int frames);
void WriteMixedPcm(int frames);
void UpdateRenderingDelay(int newly_pushed_frames);
size_t InterleavedSize(int frames);
ssize_t BytesPerOutputFormatSample();
......@@ -275,7 +277,7 @@ class StreamMixerAlsa {
std::vector<std::unique_ptr<InputQueue>> inputs_;
std::vector<std::unique_ptr<InputQueue>> ignored_inputs_;
MediaPipelineBackendAlsa::RenderingDelay rendering_delay_;
MediaPipelineBackendAlsa::RenderingDelay alsa_rendering_delay_;
std::unique_ptr<base::Timer> retry_write_frames_timer_;
......@@ -283,6 +285,11 @@ class StreamMixerAlsa {
std::unique_ptr<base::Timer> check_close_timer_;
std::vector<std::unique_ptr<FilterGroup>> filter_groups_;
FilterGroup* default_filter_;
FilterGroup* mix_filter_;
FilterGroup* linearize_filter_;
std::vector<uint8_t> interleaved_;
std::vector<CastMediaShlib::LoopbackAudioObserver*> loopback_observers_;
std::map<AudioContentType, VolumeInfo> volume_info_;
......
......@@ -7,14 +7,18 @@
#include <algorithm>
#include <cmath>
#include <limits>
#include <unordered_map>
#include <utility>
#include "base/memory/ptr_util.h"
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/values.h"
#include "chromecast/media/cma/backend/alsa/mock_alsa_wrapper.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
#include "media/audio/audio_device_description.h"
#include "media/base/audio_bus.h"
#include "media/base/vector_math.h"
......@@ -120,6 +124,51 @@ const int32_t kTestData[NUM_DATA_SETS][NUM_SAMPLES] = {
}
};
// Compensate for integer arithmetic errors.
const int kMaxDelayErrorUs = 2;
// Name of the fake AudioPostProcessor shared library referenced by the test
// JSON; MockPostProcessor only honors "processor" entries with this name.
const char kDelayModuleSolib[] = "delay.so";
// Should match # of "processors" blocks below.
// ("default", "assistant-tts", "communications", "mix", "linearize")
const int kNumPostProcessors = 5;
// Pipeline description instantiated by the test fixture; each (%s, %d) pair
// is filled with a solib name and a delay in frames via base::StringPrintf.
const char kTestPipelineJsonTemplate[] = R"json(
{
  "output_streams": [{
    "streams": [ "default" ],
    "processors": [{
      "processor": "%s",
      "config": { "delay": %d }
    }]
  }, {
    "streams": [ "assistant-tts" ],
    "processors": [{
      "processor": "%s",
      "config": { "delay": %d }
    }]
  }, {
    "streams": [ "communications" ],
    "processors": []
  }],
  "mix": {
    "processors": [{
      "processor": "%s",
      "config": { "delay": %d }
    }]
  },
  "linearize": {
    "processors": [{
      "processor": "%s",
      "config": { "delay": %d }
    }]
  }
}
)json";
// Per-pipeline delays (in frames) plugged into the template above.
const int kDefaultProcessorDelay = 10;
const int kTtsProcessorDelay = 100;
const int kMixProcessorDelay = 1000;
const int kLinearizeProcessorDelay = 10000;
// Return a scoped pointer filled with the data laid out at |index| above.
std::unique_ptr<::media::AudioBus> GetTestData(size_t index) {
CHECK_LT(index, NUM_DATA_SETS);
......@@ -131,9 +180,9 @@ std::unique_ptr<::media::AudioBus> GetTestData(size_t index) {
class MockInputQueue : public StreamMixerAlsa::InputQueue {
public:
explicit MockInputQueue(int samples_per_second,
const std::string& device_id =
::media::AudioDeviceDescription::kDefaultDeviceId)
MockInputQueue(int samples_per_second,
const std::string& device_id =
::media::AudioDeviceDescription::kDefaultDeviceId)
: paused_(true),
samples_per_second_(samples_per_second),
max_read_size_(kTestMaxReadSize),
......@@ -241,6 +290,75 @@ class MockInputQueue : public StreamMixerAlsa::InputQueue {
DISALLOW_COPY_AND_ASSIGN(MockInputQueue);
};
// Mock PostProcessingPipeline.  Each instance registers itself in a global
// name -> instance map so tests can look up the pipelines the mixer creates
// internally and set expectations on them.  The config list is parsed like
// the real pipeline's: every entry whose "processor" names kDelayModuleSolib
// contributes its "delay" (in frames) to |rendering_delay_| and may set
// "ringing".
class MockPostProcessor : public PostProcessingPipeline {
 public:
  MockPostProcessor(const std::string& name,
                    const base::ListValue* filter_description_list,
                    int channels)
      : name_(name) {
    // A pipeline name must be unique among live instances.
    CHECK(instances_.insert({name_, this}).second);

    // Install the default ProcessFrames action up front so the mock reports
    // its rendering delay even for a pipeline with no post-processors
    // (previously skipped by the early return below).
    ON_CALL(*this, ProcessFrames(_, _, _, _))
        .WillByDefault(
            testing::Invoke(this, &MockPostProcessor::DoProcessFrames));

    if (!filter_description_list) {
      // This happens for PostProcessingPipeline with no post-processors.
      return;
    }

    // Parse |filter_description_list| for parameters.
    for (size_t i = 0; i < filter_description_list->GetSize(); ++i) {
      const base::DictionaryValue* description_dict;
      CHECK(filter_description_list->GetDictionary(i, &description_dict));
      std::string solib;
      CHECK(description_dict->GetString("processor", &solib));
      // This will initially be called with the actual pipeline on creation.
      // Ignore and wait for the call to ResetPostProcessorsForTest.
      if (solib == kDelayModuleSolib) {
        const base::DictionaryValue* processor_config_dict;
        CHECK(
            description_dict->GetDictionary("config", &processor_config_dict));
        int module_delay;
        CHECK(processor_config_dict->GetInteger("delay", &module_delay));
        // Delays of multiple processors in one pipeline accumulate.
        rendering_delay_ += module_delay;
        // "ringing" is optional; |ringing_| is left untouched if absent.
        processor_config_dict->GetBoolean("ringing", &ringing_);
      }
    }
  }

  ~MockPostProcessor() override { instances_.erase(name_); }

  MOCK_METHOD4(ProcessFrames,
               int(const std::vector<float*>& data,
                   int num_frames,
                   float current_volume,
                   bool is_silence));

  bool SetSampleRate(int sample_rate) override { return false; }
  bool IsRinging() override { return ringing_; }

  // Total configured delay, in frames.
  int delay() const { return rendering_delay_; }
  std::string name() const { return name_; }

  // Global registry of live instances, keyed by pipeline name.
  static std::unordered_map<std::string, MockPostProcessor*>* instances() {
    return &instances_;
  }

 private:
  // Default action for ProcessFrames: leave the audio untouched and report
  // the configured rendering delay.  (Parameter was misspelled "is_sience".)
  int DoProcessFrames(const std::vector<float*>& data,
                      int num_frames,
                      float current_volume,
                      bool is_silence) {
    return rendering_delay_;
  }

  static std::unordered_map<std::string, MockPostProcessor*> instances_;
  std::string name_;
  int rendering_delay_ = 0;
  bool ringing_ = false;

  DISALLOW_COPY_AND_ASSIGN(MockPostProcessor);
};

std::unordered_map<std::string, MockPostProcessor*>
    MockPostProcessor::instances_;
// Given |inputs|, returns mixed audio data according to the mixing method used
// by the mixer.
std::unique_ptr<::media::AudioBus> GetMixedAudioData(
......@@ -297,15 +415,45 @@ void CompareAudioData(const ::media::AudioBus& expected,
}
}
// gMock matcher: passes when the argument's
// MediaPipelineBackendAlsa::RenderingDelay.delay_microseconds is within
// kMaxDelayErrorUs of |delay|.  |id| is only used to label failures.
MATCHER_P2(MatchDelay, delay, id, "") {
  const auto error = std::abs(arg.delay_microseconds - delay);
  if (error >= kMaxDelayErrorUs) {
    LOG(ERROR) << "Expected delay_microseconds for " << id << " to be " << delay
               << " but got " << arg.delay_microseconds;
    return false;
  }
  return true;
}
// Converts |frames| sampled at kTestSamplesPerSecond into microseconds.
int64_t FramesToDelayUs(int64_t frames) {
  constexpr int64_t kMicrosPerSecond = base::Time::kMicrosecondsPerSecond;
  return frames * kMicrosPerSecond / kTestSamplesPerSecond;
}
} // namespace
// Link-time test seam: this definition replaces the production factory, so
// every pipeline the mixer builds is a NiceMock that tests can reach through
// MockPostProcessor::instances().
std::unique_ptr<PostProcessingPipeline> PostProcessingPipeline::Create(
    const std::string& name,
    const base::ListValue* filter_description_list,
    int channels) {
  std::unique_ptr<testing::NiceMock<MockPostProcessor>> mock =
      base::MakeUnique<testing::NiceMock<MockPostProcessor>>(
          name, filter_description_list, channels);
  return std::move(mock);
}
class StreamMixerAlsaTest : public testing::Test {
protected:
StreamMixerAlsaTest()
: message_loop_(new base::MessageLoop()),
mock_alsa_(new testing::NiceMock<MockAlsaWrapper>()) {
StreamMixerAlsa::MakeSingleThreadedForTest();
StreamMixerAlsa::Get()->DisablePostProcessingForTest();
std::string test_pipeline_json = base::StringPrintf(
kTestPipelineJsonTemplate, kDelayModuleSolib, kDefaultProcessorDelay,
kDelayModuleSolib, kTtsProcessorDelay, kDelayModuleSolib,
kMixProcessorDelay, kDelayModuleSolib, kLinearizeProcessorDelay);
StreamMixerAlsa::Get()->ResetPostProcessorsForTest(test_pipeline_json);
CHECK_EQ(MockPostProcessor::instances()->size(),
static_cast<size_t>(kNumPostProcessors));
StreamMixerAlsa::Get()->SetAlsaWrapperForTest(base::WrapUnique(mock_alsa_));
}
......@@ -803,5 +951,232 @@ TEST_F(StreamMixerAlsaTest, StuckStreamWithLowBuffer) {
mixer->WriteFramesForTest();
}
// Looks up the MockPostProcessor registered under |name| in |map| and expects
// ProcessFrames to be called |times| times with |frames| frames and the given
// |silence| flag.  Wrapped in do/while(0) WITHOUT a trailing semicolon so the
// macro expands to exactly one statement at the call site (the original's
// trailing ';' produced a stray empty statement and broke the idiom inside
// unbraced if/else).
#define EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(map, name, times, frames, \
                                                silence)                  \
  do {                                                                    \
    auto itr = map->find(name);                                           \
    CHECK(itr != map->end()) << "Could not find processor for " << name;  \
    EXPECT_CALL(*(itr->second), ProcessFrames(_, frames, _, silence))     \
        .Times(times);                                                    \
  } while (0)
TEST_F(StreamMixerAlsaTest, PostProcessorDelayListedDeviceId) {
  // Each input whose device_id matches a configured stream pipeline should
  // report a rendering delay of: shared mix + linearize delay, plus its own
  // pipeline's delay.
  int common_delay = kMixProcessorDelay + kLinearizeProcessorDelay;
  std::vector<testing::StrictMock<MockInputQueue>*> inputs;
  std::vector<int64_t> delays;
  // "default" stream: common delay + the default pipeline's delay.
  inputs.push_back(new testing::StrictMock<MockInputQueue>(
      kTestSamplesPerSecond, "default"));
  delays.push_back(common_delay + kDefaultProcessorDelay);
  // "communications" has an empty processor list: common delay only.
  inputs.push_back(new testing::StrictMock<MockInputQueue>(
      kTestSamplesPerSecond, "communications"));
  delays.push_back(common_delay);
  // "assistant-tts" stream: common delay + the TTS pipeline's delay.
  inputs.push_back(new testing::StrictMock<MockInputQueue>(
      kTestSamplesPerSecond, "assistant-tts"));
  delays.push_back(common_delay + kTtsProcessorDelay);
  // Convert delay from frames to microseconds.
  std::transform(delays.begin(), delays.end(), delays.begin(),
                 &FramesToDelayUs);
  const int kNumFrames = 10;
  for (auto* input : inputs) {
    input->SetMaxReadSize(kNumFrames);
    input->SetPaused(false);
  }
  StreamMixerAlsa* mixer = StreamMixerAlsa::Get();
  for (size_t i = 0; i < inputs.size(); ++i) {
    EXPECT_CALL(*inputs[i], Initialize(_)).Times(1);
    mixer->AddInput(base::WrapUnique(inputs[i]));
  }
  mock_alsa()->set_avail(4086);
  // Every configured pipeline (including the shared "mix" and "linearize"
  // stages) should process exactly one chunk of non-silent audio.
  auto* post_processors = MockPostProcessor::instances();
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "default", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "mix", 1, kNumFrames,
                                          false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "linearize", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "communications", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "assistant-tts", 1,
                                          kNumFrames, false);
  // Poll the inputs for data. Each input will get a different
  // rendering delay based on their device type.
  for (size_t i = 0; i < inputs.size(); ++i) {
    EXPECT_CALL(*inputs[i], GetResampledData(_, kNumFrames));
    EXPECT_CALL(*inputs[i], VolumeScaleAccumulate(_, _, kNumFrames, _))
        .Times(kNumChannels);
    EXPECT_CALL(*inputs[i], AfterWriteFrames(
        MatchDelay(delays[i], inputs[i]->device_id())));
  }
  mixer->WriteFramesForTest();
}
TEST_F(StreamMixerAlsaTest, PostProcessorDelayUnlistedDevice) {
  // A device id that no pipeline lists should fall back to the "default"
  // pipeline; its delay is default + mix + linearize, and pipelines for
  // other streams must not run at all.
  const std::string device_id = "not-a-device-id";
  testing::StrictMock<MockInputQueue>* input =
      new testing::StrictMock<MockInputQueue>(kTestSamplesPerSecond, device_id);
  // Delay should be based on default processor
  int64_t delay = FramesToDelayUs(
      kDefaultProcessorDelay + kLinearizeProcessorDelay + kMixProcessorDelay);
  const int kNumFrames = 10;
  input->SetMaxReadSize(kNumFrames);
  input->SetPaused(false);
  auto* post_processors = MockPostProcessor::instances();
  // The fallback path exercises "default" plus the shared "mix" and
  // "linearize" stages, exactly once each.
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "default", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "mix", 1, kNumFrames,
                                          false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "linearize", 1,
                                          kNumFrames, false);
  // Pipelines for streams this input doesn't belong to stay idle.
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "communications", 0,
                                          _, _);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "assistant-tts", 0,
                                          _, _);
  StreamMixerAlsa* mixer = StreamMixerAlsa::Get();
  EXPECT_CALL(*input, Initialize(_));
  mixer->AddInput(base::WrapUnique(input));
  EXPECT_CALL(*input, GetResampledData(_, kNumFrames));
  EXPECT_CALL(*input, VolumeScaleAccumulate(_, _, kNumFrames, _))
      .Times(kNumChannels);
  // The delay reported back after the write must reflect the fallback
  // pipeline's total delay.
  EXPECT_CALL(*input, AfterWriteFrames(MatchDelay(delay, device_id)));
  mixer->WriteFramesForTest();
}
TEST_F(StreamMixerAlsaTest, PostProcessorRingingWithoutInput) {
  // A pipeline that reports IsRinging() == true must keep receiving
  // ProcessFrames calls (flagged as silence) even when no input stream is
  // attached to it.
  const char kTestPipelineJson[] = R"json(
{
  "output_streams": [{
    "streams": [ "default" ],
    "processors": [{
      "processor": "%s",
      "config": { "delay": 0, "ringing": true}
    }]
  }, {
    "streams": [ "assistant-tts" ],
    "processors": [{
      "processor": "%s",
      "config": { "delay": 0, "ringing": true}
    }]
  }]
}
)json";
  const int kNumFrames = 32;
  testing::NiceMock<MockInputQueue>* input =
      new testing::NiceMock<MockInputQueue>(kTestSamplesPerSecond, "default");
  input->SetMaxReadSize(kNumFrames);
  input->SetPaused(false);
  StreamMixerAlsa* mixer = StreamMixerAlsa::Get();
  std::string test_pipeline_json = base::StringPrintf(
      kTestPipelineJson, kDelayModuleSolib, kDelayModuleSolib);
  mixer->ResetPostProcessorsForTest(test_pipeline_json);
  mixer->AddInput(base::WrapUnique(input));
  // "mix" + "linearize" should be automatic
  CHECK_EQ(MockPostProcessor::instances()->size(), 4u);
  mock_alsa()->set_avail(4086);
  auto* post_processors = MockPostProcessor::instances();
  // "default" gets real audio from |input|; "assistant-tts" has no input but
  // is ringing, so it is still processed with is_silence == true.
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "default", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "mix", 1, kNumFrames,
                                          false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "linearize", 1,
                                          kNumFrames, false);
  EXPECT_POSTPROCESSOR_CALL_PROCESSFRAMES(post_processors, "assistant-tts", 1,
                                          kNumFrames, true);
  mixer->WriteFramesForTest();
}
TEST_F(StreamMixerAlsaTest, PostProcessorProvidesDefaultPipeline) {
  StreamMixerAlsa* mixer = StreamMixerAlsa::Get();
  mixer->ResetPostProcessorsForTest("{}");

  // Even an empty config yields the implicit "default", "mix", and
  // "linearize" pipelines -- and nothing else.
  auto* instances = MockPostProcessor::instances();
  for (const char* pipeline_name : {"default", "mix", "linearize"}) {
    CHECK(instances->find(pipeline_name) != instances->end());
  }
  CHECK_EQ(instances->size(), 3u);
}
TEST_F(StreamMixerAlsaTest, InvalidStreamTypeCrashes) {
  // A stream name outside the known set of stream types must be rejected
  // with a fatal error when the pipeline config is parsed.
  const char json[] = R"json(
{
  "output_streams": [{
    "streams": [ "foobar" ],
    "processors": [{
      "processor": "dont_care.so",
      "config": { "delay": 0 }
    }]
  }]
}
)json";
  EXPECT_DEATH(StreamMixerAlsa::Get()->ResetPostProcessorsForTest(json),
               "foobar is not a stream type");
}
TEST_F(StreamMixerAlsaTest, BadJsonCrashes) {
  // Malformed JSON in the pipeline config must be rejected loudly.
  EXPECT_DEATH(StreamMixerAlsa::Get()->ResetPostProcessorsForTest("{{"),
               "Invalid JSON");
}
TEST_F(StreamMixerAlsaTest, MultiplePostProcessorsInOneStream) {
  // Several processors listed under one stream (or under "mix") share a
  // single pipeline; their delays must accumulate.
  const char kJsonTemplate[] = R"json(
{
  "output_streams": [{
    "streams": [ "default" ],
    "processors": [{
      "processor": "%s",
      "config": { "delay": 10 }
    }, {
      "processor": "%s",
      "config": { "delay": 100 }
    }]
  }],
  "mix": {
    "processors": [{
      "processor": "%s",
      "config": { "delay": 1000 }
    }, {
      "processor": "%s",
      "config": { "delay": 10000 }
    }]
  }
}
)json";
  std::string json =
      base::StringPrintf(kJsonTemplate, kDelayModuleSolib, kDelayModuleSolib,
                         kDelayModuleSolib, kDelayModuleSolib);
  StreamMixerAlsa* mixer = StreamMixerAlsa::Get();
  mixer->ResetPostProcessorsForTest(json);
  // "mix" + "linearize" + "default"
  CHECK_EQ(MockPostProcessor::instances()->size(), 3u);
  auto* post_processors = MockPostProcessor::instances();
  // Summed delays: default = 10 + 100; mix = 1000 + 10000.  "linearize" was
  // not configured, so its delay stays 0.
  CHECK_EQ(post_processors->find("default")->second->delay(), 110);
  CHECK_EQ(post_processors->find("mix")->second->delay(), 11000);
  CHECK_EQ(post_processors->find("linearize")->second->delay(), 0);
  mixer->WriteFramesForTest();
}
} // namespace media
} // namespace chromecast
......@@ -45,10 +45,8 @@ class AudioPostProcessor {
// always non-zero and less than or equal to 20ms of audio.
// AudioPostProcessor must always provide |frames| frames of data back
// (may output 0’s)
// |volume| is the attenuation level (multiplier) of the stream.
// |volume| is between 0 and 1 inclusive.
// |volume| is the Cast Volume applied to the stream (normalized to 0-1)
// AudioPostProcessor should assume that it has already been applied.
// TODO(bshaya): Change |volume| to Cast System Volume.
// Returns the current rendering delay of the filter in frames,
// or negative if an error occurred during processing.
// If an error occurred during processing, |data| should be unchanged.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.