Commit 4d46c8d9 authored by bshaya's avatar bshaya Committed by Commit bot

[Chromecast] Complete PostProcessingPipeline changes

- Add mix & linearize AudioPostProcessor hooks.
- Use delay from AudioPostProcessor::ProcessFrames to update timing estimates.
- Allow configuring mixing multiple device_id's in a single AudioPostProcessor.
- Pass Cast Volume to PostProcessors (rather than raw multiplier).
- Add unittest for assignment of PostProcessors + delay accounting.

BUG=internal/36299959
TEST=cast_alsa_cma_backend_unittests

Change-Id: I5503f7de39d0ac502b8e861322162fee9aade8dd
Review-Url: https://codereview.chromium.org/2847673002
Cr-Commit-Position: refs/heads/master@{#468399}
parent 27ed7650
......@@ -21,6 +21,8 @@ shared_library("libcast_media_1.0_audio") {
sources = [
"cast_media_shlib.cc",
"media_codec_support_cast_audio.cc",
"post_processing_pipeline_impl.cc",
"post_processing_pipeline_impl.h",
]
deps = [
......@@ -28,6 +30,7 @@ shared_library("libcast_media_1.0_audio") {
"//base",
"//chromecast/base",
"//chromecast/public",
"//chromecast/public/media",
"//media",
]
}
......@@ -44,7 +47,6 @@ source_set("alsa_cma_backend") {
"filter_group.h",
"media_pipeline_backend_alsa.cc",
"media_pipeline_backend_alsa.h",
"post_processing_pipeline.cc",
"post_processing_pipeline.h",
"post_processing_pipeline_parser.cc",
"post_processing_pipeline_parser.h",
......
......@@ -7,24 +7,27 @@
#include <algorithm>
#include "base/memory/ptr_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
#include "media/base/audio_bus.h"
#include "media/base/vector_math.h"
namespace chromecast {
namespace media {
FilterGroup::FilterGroup(const std::unordered_set<std::string>& input_types,
AudioContentType content_type,
int num_channels,
const base::ListValue* filter_list)
: input_types_(input_types),
content_type_(content_type),
num_channels_(num_channels),
FilterGroup::FilterGroup(int num_channels,
const std::string& name,
const base::ListValue* filter_list,
const std::unordered_set<std::string>& device_ids,
const std::vector<FilterGroup*>& mixed_inputs)
: num_channels_(num_channels),
name_(name),
device_ids_(device_ids),
mixed_inputs_(mixed_inputs),
output_samples_per_second_(0),
channels_(num_channels_),
post_processing_pipeline_(
base::MakeUnique<PostProcessingPipeline>(filter_list,
num_channels_)) {}
PostProcessingPipeline::Create(name_, filter_list, num_channels_)) {}
FilterGroup::~FilterGroup() = default;
......@@ -34,27 +37,46 @@ void FilterGroup::Initialize(int output_samples_per_second) {
}
bool FilterGroup::CanProcessInput(StreamMixerAlsa::InputQueue* input) {
return !(input_types_.find(input->device_id()) == input_types_.end());
return !(device_ids_.find(input->device_id()) == device_ids_.end());
}
void FilterGroup::AddActiveInput(StreamMixerAlsa::InputQueue* input) {
active_inputs_.push_back(input);
}
std::vector<uint8_t>* FilterGroup::GetInterleaved() {
return &interleaved_;
}
bool FilterGroup::MixAndFilter(int chunk_size) {
float FilterGroup::MixAndFilter(int chunk_size) {
DCHECK_NE(output_samples_per_second_, 0);
if (active_inputs_.empty() && !post_processing_pipeline_->IsRinging()) {
return false; // Output will be silence, no need to mix.
}
ResizeBuffersIfNecessary(chunk_size);
mixed_->ZeroFramesPartial(0, chunk_size);
float volume = 0.0f;
// Recursively mix inputs.
for (auto* filter_group : mixed_inputs_) {
volume = std::max(volume, filter_group->MixAndFilter(chunk_size));
}
// |volume| can only be 0 if no |mixed_inputs_| have data.
// This is true because FilterGroup can only return 0 if:
// a) It has no data and its PostProcessorPipeline is not ringing.
// (early return, below) or
// b) The output volume is 0 and has NEVER been non-zero,
// since FilterGroup will use last_volume_ if volume is 0.
// In this case, there was never any data in the pipeline.
if (active_inputs_.empty() && volume == 0.0f &&
!post_processing_pipeline_->IsRinging()) {
if (frames_zeroed_ < chunk_size) {
// Ensure mixed_ is zeros. This is necessary if |mixed_| is read later.
mixed_->ZeroFramesPartial(0, chunk_size);
frames_zeroed_ = chunk_size;
}
return 0.0f; // Output will be silence, no need to mix.
}
frames_zeroed_ = 0;
// Mix InputQueues
mixed_->ZeroFramesPartial(0, chunk_size);
for (StreamMixerAlsa::InputQueue* input : active_inputs_) {
input->GetResampledData(temp_.get(), chunk_size);
for (int c = 0; c < num_channels_; ++c) {
......@@ -65,18 +87,37 @@ bool FilterGroup::MixAndFilter(int chunk_size) {
volume = std::max(volume, input->EffectiveVolume());
}
post_processing_pipeline_->ProcessFrames(channels_, chunk_size, volume,
active_inputs_.empty());
mixed_->ToInterleaved(chunk_size, BytesPerOutputFormatSample(),
interleaved_.data());
return true;
// Mix FilterGroups
for (FilterGroup* group : mixed_inputs_) {
if (group->last_volume() > 0.0f) {
for (int c = 0; c < num_channels_; ++c) {
::media::vector_math::FMAC(group->data()->channel(c), 1.0f, chunk_size,
channels_[c]);
}
}
}
bool is_silence = (volume == 0.0f);
// Allow paused streams to "ring out" at the last valid volume.
// If the stream volume is actually 0, this doesn't matter, since the
// data is 0's anyway.
if (!is_silence) {
last_volume_ = volume;
}
delay_frames_ = post_processing_pipeline_->ProcessFrames(
channels_, chunk_size, last_volume_, is_silence);
return last_volume_;
}
void FilterGroup::ClearInterleaved(int chunk_size) {
ResizeBuffersIfNecessary(chunk_size);
memset(interleaved_.data(), 0,
static_cast<size_t>(chunk_size) * num_channels_ *
BytesPerOutputFormatSample());
int64_t FilterGroup::GetRenderingDelayMicroseconds() {
return delay_frames_ * base::Time::kMicrosecondsPerSecond /
output_samples_per_second_;
}
// Drops all inputs registered via AddActiveInput(). The mixer calls this at
// the start of each mixing iteration, before re-adding the inputs that have
// data for the next chunk.
void FilterGroup::ClearActiveInputs() {
  active_inputs_.clear();
}
void FilterGroup::ResizeBuffersIfNecessary(int chunk_size) {
......@@ -89,26 +130,6 @@ void FilterGroup::ResizeBuffersIfNecessary(int chunk_size) {
if (!temp_ || temp_->frames() < chunk_size) {
temp_ = ::media::AudioBus::Create(num_channels_, chunk_size);
}
size_t interleaved_size = static_cast<size_t>(chunk_size) * num_channels_ *
BytesPerOutputFormatSample();
if (interleaved_.size() < interleaved_size) {
interleaved_.resize(interleaved_size);
}
}
int FilterGroup::BytesPerOutputFormatSample() {
return sizeof(int32_t);
}
void FilterGroup::ClearActiveInputs() {
active_inputs_.clear();
}
void FilterGroup::DisablePostProcessingForTest() {
post_processing_pipeline_ =
base::MakeUnique<PostProcessingPipeline>(nullptr, num_channels_);
}
} // namespace media
......
......@@ -26,25 +26,32 @@ namespace media {
class PostProcessingPipeline;
// FilterGroup contains state for an AudioFilter.
// It takes multiple StreamMixerAlsa::InputQueues,
// mixes them, and processes them.
// FilterGroup mixes StreamMixerAlsa::InputQueues and/or FilterGroups,
// mixes their outputs, and applies DSP to them.
// ActiveInputs are added with AddActiveInput(), then cleared when
// FilterGroups are added at construction. These cannot be removed.
// InputQueues are added with AddActiveInput(), then cleared when
// MixAndFilter() is called (they must be added each time data is queried).
class FilterGroup {
public:
// |input_types| is a set of strings that is used as a filter to determine
// if an input belongs to this group (InputQueue->name() must exactly match an
// entry in |input_types| to be processed by this group.
// |filter_type| is passed to AudioFilterFactory to create an AudioFilter.
FilterGroup(const std::unordered_set<std::string>& input_types,
AudioContentType content_type,
int channels,
const base::ListValue* filter_list);
~FilterGroup();
// |name| is used for debug printing
// |filter_list| is a list of {"processor": LIBRARY_NAME, "configs": CONFIG}
// that is used to create PostProcessingPipeline.
// |device_ids| is a set of strings that is used as a filter to determine
// if an InputQueue belongs to this group (InputQueue->name() must exactly
// match an entry in |device_ids| to be processed by this group).
// |mixed_inputs| are FilterGroups that will be mixed into this FilterGroup.
// ex: the final mix ("mix") FilterGroup mixes all other filter groups.
// FilterGroups currently use either InputQueues OR FilterGroups as inputs,
// but there is no technical limitation preventing mixing input classes.
FilterGroup(int num_channels,
const std::string& name,
const base::ListValue* filter_list,
const std::unordered_set<std::string>& device_ids,
const std::vector<FilterGroup*>& mixed_inputs);
AudioContentType content_type() const { return content_type_; }
~FilterGroup();
// Sets the sample rate of the post-processors.
void Initialize(int output_samples_per_second);
......@@ -55,40 +62,47 @@ class FilterGroup {
// Adds |input| to |active_inputs_|.
void AddActiveInput(StreamMixerAlsa::InputQueue* input);
// Retrieves a pointer to the output buffer |interleaved_|.
std::vector<uint8_t>* GetInterleaved();
// Mixes all active inputs and passes them through the audio filter.
bool MixAndFilter(int chunk_size);
// Returns the largest volume of all streams with data.
// return value will be zero IFF there is no data and
// the PostProcessingPipeline is not ringing.
float MixAndFilter(int chunk_size);
// Overwrites |interleaved_| with 0's, ensuring at least
// |chunk_size| bytes.
void ClearInterleaved(int chunk_size);
// Gets the current delay of this filter group's AudioPostProcessors.
// (Not recursive).
int64_t GetRenderingDelayMicroseconds();
// Clear all |active_inputs_|. This should be called before AddActiveInputs
// on each mixing iteration.
void ClearActiveInputs();
// Resets the PostProcessingPipeline, removing all AudioPostProcessors.
void DisablePostProcessingForTest();
// Retrieves a pointer to the output buffer.
::media::AudioBus* data() { return mixed_.get(); }
// Get the last used volume.
float last_volume() const { return last_volume_; }
std::string name() const { return name_; }
private:
void ResizeBuffersIfNecessary(int chunk_size);
int BytesPerOutputFormatSample();
const std::unordered_set<std::string> input_types_;
const AudioContentType content_type_;
const int num_channels_;
const std::string name_;
const std::unordered_set<std::string> device_ids_;
std::vector<FilterGroup*> mixed_inputs_;
std::vector<StreamMixerAlsa::InputQueue*> active_inputs_;
int output_samples_per_second_;
int frames_zeroed_ = 0;
float last_volume_ = 0.0f;
int64_t delay_frames_ = 0;
// Buffers that hold audio data while it is mixed.
// These are kept as members of this class to minimize copies and
// allocations.
std::unique_ptr<::media::AudioBus> temp_;
std::unique_ptr<::media::AudioBus> mixed_;
std::vector<uint8_t> interleaved_;
std::vector<float*> channels_;
std::unique_ptr<PostProcessingPipeline> post_processing_pipeline_;
......
......@@ -5,54 +5,30 @@
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_H_
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
namespace base {
class ListValue;
class ScopedNativeLibrary;
} // namespace base
namespace chromecast {
namespace media {
class AudioPostProcessor;
// Creates and contains multiple AudioPostProcessors, as specified in ctor.
// Provides convenience methods to access and use the AudioPostProcessors.
class PostProcessingPipeline {
public:
PostProcessingPipeline(const base::ListValue* filter_description_list,
int channels);
~PostProcessingPipeline();
int ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence);
bool SetSampleRate(int sample_rate);
bool IsRinging();
private:
int GetRingingTimeInFrames();
int sample_rate_;
int ringing_time_in_frames_ = 0;
int silence_frames_processed_ = 0;
int total_delay_frames_ = 0;
// Contains all libraries in use;
// Functions in shared objects cannot be used once library is closed.
std::vector<std::unique_ptr<base::ScopedNativeLibrary>> libraries_;
// Must be after libraries_
std::vector<std::unique_ptr<AudioPostProcessor>> processors_;
DISALLOW_COPY_AND_ASSIGN(PostProcessingPipeline);
virtual ~PostProcessingPipeline() = default;
virtual int ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence) = 0;
virtual bool SetSampleRate(int sample_rate) = 0;
virtual bool IsRinging() = 0;
static std::unique_ptr<PostProcessingPipeline> Create(
const std::string& name,
const base::ListValue* filter_description_list,
int num_channels);
};
} // namespace media
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline_impl.h"
#include <string>
......@@ -12,6 +12,7 @@
#include "base/values.h"
#include "chromecast/base/serializers.h"
#include "chromecast/public/media/audio_post_processor_shlib.h"
#include "chromecast/public/volume_control.h"
namespace chromecast {
namespace media {
......@@ -20,15 +21,25 @@ namespace {
const int kNoSampleRate = -1;
const char kSoCreateFunction[] = "AudioPostProcessorShlib_Create";
const char kProcessorKey[] = "processor";
} // namespace
using CreatePostProcessor = AudioPostProcessor* (*)(const std::string&, int);
PostProcessingPipeline::PostProcessingPipeline(
// Factory for the default PostProcessingPipeline implementation.
// |name| is used for debug logging; |filter_description_list| may be null
// (the resulting pipeline is then a passthrough).
std::unique_ptr<PostProcessingPipeline> PostProcessingPipeline::Create(
    const std::string& name,
    const base::ListValue* filter_description_list,
    int num_channels) {
  auto pipeline = base::MakeUnique<PostProcessingPipelineImpl>(
      name, filter_description_list, num_channels);
  return std::move(pipeline);  // Moves the derived ptr into the base ptr.
}
PostProcessingPipelineImpl::PostProcessingPipelineImpl(
const std::string& name,
const base::ListValue* filter_description_list,
int channels)
: sample_rate_(kNoSampleRate) {
: name_(name), sample_rate_(kNoSampleRate) {
if (!filter_description_list) {
return; // Warning logged.
}
......@@ -37,7 +48,7 @@ PostProcessingPipeline::PostProcessingPipeline(
CHECK(
filter_description_list->GetDictionary(i, &processor_description_dict));
std::string library_path;
CHECK(processor_description_dict->GetString("processor", &library_path));
CHECK(processor_description_dict->GetString(kProcessorKey, &library_path));
if (library_path == "null" || library_path == "none") {
continue;
}
......@@ -62,12 +73,12 @@ PostProcessingPipeline::PostProcessingPipeline(
}
}
PostProcessingPipeline::~PostProcessingPipeline() = default;
PostProcessingPipelineImpl::~PostProcessingPipelineImpl() = default;
int PostProcessingPipeline::ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_volume,
bool is_silence) {
int PostProcessingPipelineImpl::ProcessFrames(const std::vector<float*>& data,
int num_frames,
float current_multiplier,
bool is_silence) {
DCHECK_NE(sample_rate_, kNoSampleRate);
if (is_silence) {
if (!IsRinging()) {
......@@ -78,15 +89,17 @@ int PostProcessingPipeline::ProcessFrames(const std::vector<float*>& data,
silence_frames_processed_ = 0;
}
UpdateCastVolume(current_multiplier);
total_delay_frames_ = 0;
for (auto& processor : processors_) {
total_delay_frames_ +=
processor->ProcessFrames(data, num_frames, current_volume);
processor->ProcessFrames(data, num_frames, cast_volume_);
}
return total_delay_frames_;
}
bool PostProcessingPipeline::SetSampleRate(int sample_rate) {
bool PostProcessingPipelineImpl::SetSampleRate(int sample_rate) {
sample_rate_ = sample_rate;
bool result = true;
for (auto& processor : processors_) {
......@@ -97,11 +110,11 @@ bool PostProcessingPipeline::SetSampleRate(int sample_rate) {
return result;
}
bool PostProcessingPipeline::IsRinging() {
// True while the pipeline still has audible output after its inputs went
// silent, i.e. until |ringing_time_in_frames_| frames of silence (summed from
// the processors in GetRingingTimeInFrames()) have been processed.
bool PostProcessingPipelineImpl::IsRinging() {
  return silence_frames_processed_ < ringing_time_in_frames_;
}
int PostProcessingPipeline::GetRingingTimeInFrames() {
int PostProcessingPipelineImpl::GetRingingTimeInFrames() {
int memory_frames = 0;
for (auto& processor : processors_) {
memory_frames += processor->GetRingingTimeInFrames();
......@@ -109,5 +122,17 @@ int PostProcessingPipeline::GetRingingTimeInFrames() {
return memory_frames;
}
// Converts the stream multiplier to a Cast Volume (via the dBFS volume curve)
// and caches it in |cast_volume_|. No-op when |multiplier| is unchanged, so
// the curve lookup only runs on actual volume changes.
void PostProcessingPipelineImpl::UpdateCastVolume(float multiplier) {
  DCHECK_GE(multiplier, 0.0);

  if (multiplier == current_multiplier_) {
    return;
  }
  current_multiplier_ = multiplier;
  if (multiplier == 0.0f) {
    // std::log10(0) is -infinity; don't feed that to DbFSToVolume().
    // A zero multiplier is full attenuation, i.e. Cast Volume 0.
    cast_volume_ = 0.0f;
    return;
  }
  float dbfs = std::log10(multiplier) * 20;  // Amplitude ratio -> dBFS.
  DCHECK(chromecast::media::VolumeControl::DbFSToVolume);
  cast_volume_ = chromecast::media::VolumeControl::DbFSToVolume(dbfs);
}
} // namespace media
} // namespace chromecast
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "chromecast/media/cma/backend/alsa/post_processing_pipeline.h"
namespace base {
class ListValue;
class ScopedNativeLibrary;
} // namespace base
namespace chromecast {
namespace media {
class AudioPostProcessor;
// Creates and contains multiple AudioPostProcessors, as specified in ctor.
// Provides convenience methods to access and use the AudioPostProcessors.
class PostProcessingPipelineImpl : public PostProcessingPipeline {
 public:
  // |name| is used for debug printing.
  // |filter_description_list| lists the processors to load
  //   ({"processor": LIBRARY_PATH, ...} dictionaries); may be null, in which
  //   case the pipeline is a passthrough.
  PostProcessingPipelineImpl(const std::string& name,
                             const base::ListValue* filter_description_list,
                             int channels);
  ~PostProcessingPipelineImpl() override;

  // PostProcessingPipeline implementation:
  // Returns the summed rendering delay of all processors, in frames.
  int ProcessFrames(const std::vector<float*>& data,
                    int num_frames,
                    float current_volume,
                    bool is_silence) override;
  bool SetSampleRate(int sample_rate) override;
  bool IsRinging() override;

 private:
  int GetRingingTimeInFrames();
  void UpdateCastVolume(float multiplier);

  std::string name_;
  int sample_rate_;
  int ringing_time_in_frames_ = 0;
  int silence_frames_processed_ = 0;
  int total_delay_frames_ = 0;
  // Initialized so the first UpdateCastVolume() call never reads an
  // indeterminate value when comparing against the cached multiplier.
  float current_multiplier_ = 0.0f;
  float cast_volume_ = 0.0f;

  // Contains all libraries in use.
  // Functions in shared objects cannot be used once library is closed.
  std::vector<std::unique_ptr<base::ScopedNativeLibrary>> libraries_;
  // Must be after |libraries_| so processors are destroyed before the
  // libraries that provide their code are unloaded.
  std::vector<std::unique_ptr<AudioPostProcessor>> processors_;

  DISALLOW_COPY_AND_ASSIGN(PostProcessingPipelineImpl);
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_ALSA_POST_PROCESSING_PIPELINE_IMPL_H_
......@@ -20,54 +20,103 @@ namespace {
const base::FilePath::CharType kPostProcessingPipelineFilePath[] =
FILE_PATH_LITERAL("/etc/cast_audio.json");
const char kMediaPipelineKey[] = "media";
const char kTtsPipelineKey[] = "tts";
// TODO(bshaya): Use AudioContentType instead.
std::string GetPipelineKey(const std::string& device_id) {
if (device_id == kTtsAudioDeviceId) {
return kTtsPipelineKey;
}
if (device_id == ::media::AudioDeviceDescription::kDefaultDeviceId) {
return kMediaPipelineKey;
}
NOTREACHED() << "Invalid device_id: " << device_id;
return "";
}
const char kOutputStreamsKey[] = "output_streams";
const char kMixPipelineKey[] = "mix";
const char kLinearizePipelineKey[] = "linearize";
const char kProcessorsKey[] = "processors";
const char kStreamsKey[] = "streams";
} // namespace
PostProcessingPipelineParser::PostProcessingPipelineParser() = default;
PostProcessingPipelineParser::~PostProcessingPipelineParser() = default;
// |pipeline_in| is a raw pointer into the parser's |config_dict_|
// (see GetStreamPipelines()), so a descriptor must not outlive the
// PostProcessingPipelineParser that produced it.
StreamPipelineDescriptor::StreamPipelineDescriptor(
    base::ListValue* pipeline_in,
    const std::unordered_set<std::string>& stream_types_in)
    : pipeline(pipeline_in), stream_types(stream_types_in) {}

StreamPipelineDescriptor::~StreamPipelineDescriptor() = default;

// Copy constructor delegates to the main constructor; assignment is deleted
// in the header.
StreamPipelineDescriptor::StreamPipelineDescriptor(
    const StreamPipelineDescriptor& other)
    : StreamPipelineDescriptor(other.pipeline, other.stream_types) {}
void PostProcessingPipelineParser::Initialize() {
if (!base::PathExists(base::FilePath(kPostProcessingPipelineFilePath))) {
LOG(WARNING) << "No post-processing config found in "
PostProcessingPipelineParser::PostProcessingPipelineParser(
const std::string& json) {
if (json.empty() &&
!base::PathExists(base::FilePath(kPostProcessingPipelineFilePath))) {
LOG(WARNING) << "Could not open post-processing config in "
<< kPostProcessingPipelineFilePath << ".";
return;
}
config_dict_ = base::DictionaryValue::From(
DeserializeJsonFromFile(base::FilePath(kPostProcessingPipelineFilePath)));
CHECK(config_dict_->GetDictionary("post_processing", &pipeline_dict_))
<< "No \"post_processing\" object found in "
<< kPostProcessingPipelineFilePath;
if (json.empty()) {
config_dict_ = base::DictionaryValue::From(DeserializeJsonFromFile(
base::FilePath(kPostProcessingPipelineFilePath)));
} else {
config_dict_ = base::DictionaryValue::From(DeserializeFromJson(json));
}
CHECK(config_dict_) << "Invalid JSON in " << kPostProcessingPipelineFilePath;
}
base::ListValue* PostProcessingPipelineParser::GetPipelineByDeviceId(
const std::string& device_id) {
if (!pipeline_dict_) {
return nullptr;
PostProcessingPipelineParser::~PostProcessingPipelineParser() = default;
std::vector<StreamPipelineDescriptor>
PostProcessingPipelineParser::GetStreamPipelines() {
base::ListValue* pipelines_list;
std::vector<StreamPipelineDescriptor> descriptors;
if (!config_dict_ ||
!config_dict_->GetList(kOutputStreamsKey, &pipelines_list)) {
LOG(WARNING) << "No post-processors found for streams (key = "
<< kOutputStreamsKey
<< ").\n No stream-specific processing will occur.";
return descriptors;
}
for (size_t i = 0; i < pipelines_list->GetSize(); ++i) {
base::DictionaryValue* pipeline_description_dict;
CHECK(pipelines_list->GetDictionary(i, &pipeline_description_dict));
base::ListValue* out_list;
std::string key = GetPipelineKey(device_id);
if (!pipeline_dict_->GetList(key, &out_list)) {
base::ListValue* processors_list;
CHECK(pipeline_description_dict->GetList(kProcessorsKey, &processors_list));
base::ListValue* streams_list;
CHECK(pipeline_description_dict->GetList(kStreamsKey, &streams_list));
std::unordered_set<std::string> streams_set;
for (size_t stream = 0; stream < streams_list->GetSize(); ++stream) {
std::string stream_name;
CHECK(streams_list->GetString(stream, &stream_name));
CHECK(streams_set.insert(stream_name).second)
<< "Duplicate stream type: " << stream_name;
}
descriptors.emplace_back(processors_list, std::move(streams_set));
}
return descriptors;
}
// Returns the on-device path of the pipeline descriptor file
// (/etc/cast_audio.json) that the constructor reads when no JSON string is
// supplied.
std::string PostProcessingPipelineParser::GetFilePath() {
  return kPostProcessingPipelineFilePath;
}
// Returns the processor list for the final mix stage ("mix" key), or nullptr
// if absent (passthrough). Pointer is owned by |config_dict_|.
base::ListValue* PostProcessingPipelineParser::GetMixPipeline() {
  return GetPipelineByKey(kMixPipelineKey);
}
// Returns the processor list for the linearize stage ("linearize" key), or
// nullptr if absent (passthrough). Pointer is owned by |config_dict_|.
base::ListValue* PostProcessingPipelineParser::GetLinearizePipeline() {
  return GetPipelineByKey(kLinearizePipelineKey);
}
// Looks up the "processors" list under |key| in the config dictionary.
// Returns nullptr (passthrough, with a warning) when |key| is missing;
// crashes if the entry exists but has no "processors" list.
base::ListValue* PostProcessingPipelineParser::GetPipelineByKey(
    const std::string& key) {
  base::DictionaryValue* pipeline_dict = nullptr;
  const bool found =
      config_dict_ && config_dict_->GetDictionary(key, &pipeline_dict);
  if (found) {
    base::ListValue* processors = nullptr;
    CHECK(pipeline_dict->GetList(kProcessorsKey, &processors));
    return processors;
  }
  LOG(WARNING) << "No post-processor description found for \"" << key
               << "\" in " << kPostProcessingPipelineFilePath
               << ". Using passthrough.";
  return nullptr;
}
......
......@@ -7,6 +7,8 @@
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "base/macros.h"
......@@ -18,30 +20,47 @@ class ListValue;
namespace chromecast {
namespace media {
// Helper class to hold information about a stream pipeline.
struct StreamPipelineDescriptor {
// The format for pipeline is:
// [ {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// ... ]
base::ListValue* pipeline;
std::unordered_set<std::string> stream_types;
StreamPipelineDescriptor(
base::ListValue* pipeline_in,
const std::unordered_set<std::string>& stream_types_in);
~StreamPipelineDescriptor();
StreamPipelineDescriptor(const StreamPipelineDescriptor& other);
StreamPipelineDescriptor operator=(const StreamPipelineDescriptor& other) =
delete;
};
// Helper class to parse post-processing pipeline descriptor file.
class PostProcessingPipelineParser {
public:
PostProcessingPipelineParser();
// |json|, if provided, is used instead of reading from file.
// |json| should be provided in tests only.
explicit PostProcessingPipelineParser(const std::string& json = "");
~PostProcessingPipelineParser();
// Reads the pipeline descriptor file and does preliminary parsing.
// Crashes with fatal log if parsing fails.
void Initialize();
std::vector<StreamPipelineDescriptor> GetStreamPipelines();
// Gets the list of processors for a given stream type.
// The format will be:
// [
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// {"processor": "PATH_TO_SHARED_OBJECT",
// "config": "CONFIGURATION_STRING"},
// ...
// ]
base::ListValue* GetPipelineByDeviceId(const std::string& device_id);
// Gets the list of processors for the mix/linearize stages.
// Same format as StreamPipelineDescriptor.pipeline
base::ListValue* GetMixPipeline();
base::ListValue* GetLinearizePipeline();
static std::string GetFilePath();
private:
base::ListValue* GetPipelineByKey(const std::string& key);
std::unique_ptr<base::DictionaryValue> config_dict_;
base::DictionaryValue* pipeline_dict_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(PostProcessingPipelineParser);
};
......
......@@ -30,6 +30,7 @@ namespace chromecast {
namespace media {
class AlsaWrapper;
class FilterGroup;
class PostProcessingPipelineParser;
// Mixer implementation. The mixer has one or more input queues; these can be
// added/removed at any time. When an input source pushes frames to an input
......@@ -175,8 +176,8 @@ class StreamMixerAlsa {
// mixer thread.
void OnFramesQueued();
void ResetPostProcessorsForTest(const std::string& pipeline_json);
void SetAlsaWrapperForTest(std::unique_ptr<AlsaWrapper> alsa_wrapper);
void DisablePostProcessingForTest();
void WriteFramesForTest(); // Can be called on any thread.
void ClearInputsForTest(); // Removes all inputs.
......@@ -213,6 +214,7 @@ class StreamMixerAlsa {
void FinalizeOnMixerThread();
void FinishFinalize();
void CreatePostProcessors(PostProcessingPipelineParser* pipeline_parser);
// Reads the buffer size, period size, start threshold, and avail min value
// from the provided command line flags or uses default values if no flags are
// provided.
......@@ -242,7 +244,7 @@ class StreamMixerAlsa {
void WriteFrames();
bool TryWriteFrames();
void WriteMixedPcm(std::vector<uint8_t>* interleaved, int frames);
void WriteMixedPcm(int frames);
void UpdateRenderingDelay(int newly_pushed_frames);
size_t InterleavedSize(int frames);
ssize_t BytesPerOutputFormatSample();
......@@ -275,7 +277,7 @@ class StreamMixerAlsa {
std::vector<std::unique_ptr<InputQueue>> inputs_;
std::vector<std::unique_ptr<InputQueue>> ignored_inputs_;
MediaPipelineBackendAlsa::RenderingDelay rendering_delay_;
MediaPipelineBackendAlsa::RenderingDelay alsa_rendering_delay_;
std::unique_ptr<base::Timer> retry_write_frames_timer_;
......@@ -283,6 +285,11 @@ class StreamMixerAlsa {
std::unique_ptr<base::Timer> check_close_timer_;
std::vector<std::unique_ptr<FilterGroup>> filter_groups_;
FilterGroup* default_filter_;
FilterGroup* mix_filter_;
FilterGroup* linearize_filter_;
std::vector<uint8_t> interleaved_;
std::vector<CastMediaShlib::LoopbackAudioObserver*> loopback_observers_;
std::map<AudioContentType, VolumeInfo> volume_info_;
......
......@@ -45,10 +45,8 @@ class AudioPostProcessor {
// always non-zero and less than or equal to 20ms of audio.
// AudioPostProcessor must always provide |frames| frames of data back
// (may output 0’s)
// |volume| is the attenuation level (multiplier) of the stream.
// |volume| is between 0 and 1 inclusive.
// |volume| is the Cast Volume applied to the stream (normalized to 0-1).
// AudioPostProcessor should assume that it has already been applied.
// TODO(bshaya): Change |volume| to Cast System Volume.
// Returns the current rendering delay of the filter in frames,
// or negative if an error occurred during processing.
// If an error occurred during processing, |data| should be unchanged.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment