Commit 2c9ce61a authored by ziyangch's avatar ziyangch Committed by Commit Bot

[Chromecast] Allow arbitrary channel count for Android audio streams

Bug: internal b/140189221

Test: Cast from YouTube to Android TV.
      TTS on Android Things.

Change-Id: I1cc4e355a0644ff35193a62c47f7fa748de17fb2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1900151
Commit-Queue: Ziyang Cheng <ziyangch@chromium.org>
Reviewed-by: default avatarYuchen Liu <yucliu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712797}
parent c2d2c1b4
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "chromecast/media/cma/backend/android/media_pipeline_backend_android.h" #include "chromecast/media/cma/backend/android/media_pipeline_backend_android.h"
#include "chromecast/media/cma/base/decoder_buffer_adapter.h" #include "chromecast/media/cma/base/decoder_buffer_adapter.h"
#include "chromecast/media/cma/base/decoder_buffer_base.h" #include "chromecast/media/cma/base/decoder_buffer_base.h"
#include "chromecast/media/cma/base/decoder_config_adapter.h"
#include "chromecast/public/media/cast_decoder_buffer.h" #include "chromecast/public/media/cast_decoder_buffer.h"
#include "media/base/audio_bus.h" #include "media/base/audio_bus.h"
#include "media/base/channel_layout.h" #include "media/base/channel_layout.h"
...@@ -38,7 +39,6 @@ namespace media { ...@@ -38,7 +39,6 @@ namespace media {
namespace { namespace {
const int kNumChannels = 2;
const int kDefaultFramesPerBuffer = 1024; const int kDefaultFramesPerBuffer = 1024;
const int kSilenceBufferFrames = 2048; const int kSilenceBufferFrames = 2048;
const int kMaxOutputMs = 20; const int kMaxOutputMs = 20;
...@@ -73,8 +73,6 @@ AudioDecoderAndroid::AudioDecoderAndroid(MediaPipelineBackendAndroid* backend) ...@@ -73,8 +73,6 @@ AudioDecoderAndroid::AudioDecoderAndroid(MediaPipelineBackendAndroid* backend)
got_eos_(false), got_eos_(false),
pushed_eos_(false), pushed_eos_(false),
sink_error_(false), sink_error_(false),
rate_shifter_output_(
::media::AudioBus::Create(kNumChannels, kDefaultFramesPerBuffer)),
current_pts_(kInvalidTimestamp), current_pts_(kInvalidTimestamp),
sink_(AudioSinkManager::GetDefaultSinkType()), sink_(AudioSinkManager::GetDefaultSinkType()),
pending_output_frames_(kNoPendingOutput), pending_output_frames_(kNoPendingOutput),
...@@ -121,8 +119,9 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) { ...@@ -121,8 +119,9 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) {
TRACE_FUNCTION_ENTRY0(); TRACE_FUNCTION_ENTRY0();
current_pts_ = start_pts; current_pts_ = start_pts;
DCHECK(IsValidConfig(config_)); DCHECK(IsValidConfig(config_));
sink_.Reset(this, config_.samples_per_second, backend_->Primary(), sink_.Reset(this, config_.channel_number, config_.samples_per_second,
backend_->DeviceId(), backend_->ContentType()); backend_->Primary(), backend_->DeviceId(),
backend_->ContentType());
sink_->SetStreamVolumeMultiplier(volume_multiplier_); sink_->SetStreamVolumeMultiplier(volume_multiplier_);
// Create decoder_ if necessary. This can happen if Stop() was called, and // Create decoder_ if necessary. This can happen if Stop() was called, and
// SetConfig() was not called since then. // SetConfig() was not called since then.
...@@ -130,7 +129,7 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) { ...@@ -130,7 +129,7 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) {
CreateDecoder(); CreateDecoder();
} }
if (!rate_shifter_) { if (!rate_shifter_) {
CreateRateShifter(config_.samples_per_second); CreateRateShifter(config_);
} }
return true; return true;
} }
...@@ -259,22 +258,23 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) { ...@@ -259,22 +258,23 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) {
return false; return false;
} }
bool changed_sample_rate = bool changed_config =
(config.samples_per_second != config_.samples_per_second); (config.samples_per_second != config_.samples_per_second ||
config.channel_number != config_.channel_number);
if (!rate_shifter_ || changed_sample_rate) { if (!rate_shifter_ || changed_config) {
CreateRateShifter(config.samples_per_second); CreateRateShifter(config);
} }
if (sink_ && changed_sample_rate) { if (sink_ && changed_config) {
ResetSinkForNewSampleRate(config.samples_per_second); ResetSinkForNewConfig(config);
} }
config_ = config; config_ = config;
decoder_.reset(); decoder_.reset();
CreateDecoder(); CreateDecoder();
if (pending_buffer_complete_ && changed_sample_rate) { if (pending_buffer_complete_ && changed_config) {
pending_buffer_complete_ = false; pending_buffer_complete_ = false;
delegate_->OnPushBufferComplete( delegate_->OnPushBufferComplete(
MediaPipelineBackendAndroid::kBufferSuccess); MediaPipelineBackendAndroid::kBufferSuccess);
...@@ -282,8 +282,9 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) { ...@@ -282,8 +282,9 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) {
return true; return true;
} }
void AudioDecoderAndroid::ResetSinkForNewSampleRate(int sample_rate) { void AudioDecoderAndroid::ResetSinkForNewConfig(const AudioConfig& config) {
sink_.Reset(this, sample_rate, backend_->Primary(), backend_->DeviceId(), sink_.Reset(this, config.channel_number, config.samples_per_second,
backend_->Primary(), backend_->DeviceId(),
backend_->ContentType()); backend_->ContentType());
sink_->SetStreamVolumeMultiplier(volume_multiplier_); sink_->SetStreamVolumeMultiplier(volume_multiplier_);
pending_output_frames_ = kNoPendingOutput; pending_output_frames_ = kNoPendingOutput;
...@@ -309,18 +310,21 @@ void AudioDecoderAndroid::CreateDecoder() { ...@@ -309,18 +310,21 @@ void AudioDecoderAndroid::CreateDecoder() {
base::Unretained(this))); base::Unretained(this)));
} }
void AudioDecoderAndroid::CreateRateShifter(int samples_per_second) { void AudioDecoderAndroid::CreateRateShifter(const AudioConfig& config) {
LOG(INFO) << __func__ << ": samples_per_second=" << samples_per_second; LOG(INFO) << __func__ << ": channel_number=" << config.channel_number
<< " samples_per_second=" << config.samples_per_second;
rate_shifter_info_.clear(); rate_shifter_info_.clear();
rate_shifter_info_.push_back(RateShifterInfo(1.0f)); rate_shifter_info_.push_back(RateShifterInfo(1.0f));
rate_shifter_output_.reset();
rate_shifter_.reset(new ::media::AudioRendererAlgorithm()); rate_shifter_.reset(new ::media::AudioRendererAlgorithm());
bool is_encrypted = false; bool is_encrypted = false;
rate_shifter_->Initialize( rate_shifter_->Initialize(
::media::AudioParameters(::media::AudioParameters::AUDIO_PCM_LINEAR, ::media::AudioParameters(
::media::CHANNEL_LAYOUT_STEREO, ::media::AudioParameters::AUDIO_PCM_LINEAR,
samples_per_second, kDefaultFramesPerBuffer), DecoderConfigAdapter::ToMediaChannelLayout(config.channel_layout),
config.samples_per_second, kDefaultFramesPerBuffer),
is_encrypted); is_encrypted);
} }
...@@ -402,14 +406,26 @@ void AudioDecoderAndroid::OnBufferDecoded( ...@@ -402,14 +406,26 @@ void AudioDecoderAndroid::OnBufferDecoded(
delta.decoded_bytes = input_bytes; delta.decoded_bytes = input_bytes;
UpdateStatistics(delta); UpdateStatistics(delta);
bool changed_config = false;
if (config.samples_per_second != config_.samples_per_second) { if (config.samples_per_second != config_.samples_per_second) {
// Sample rate from actual stream doesn't match supposed sample rate from LOG(INFO) << "Input sample rate changed from " << config_.samples_per_second
// the container. Update the sink and rate shifter. Note that for now we << " to " << config.samples_per_second;
config_.samples_per_second = config.samples_per_second;
changed_config = true;
}
if (config.channel_number != config_.channel_number) {
LOG(INFO) << "Input channel count changed from " << config_.channel_number
<< " to " << config.channel_number;
config_.channel_number = config.channel_number;
changed_config = true;
}
if (changed_config) {
// Config from actual stream doesn't match supposed config from the
// container. Update the sink and rate shifter. Note that for now we
// assume that this can only happen at start of stream (ie, on the first // assume that this can only happen at start of stream (ie, on the first
// decoded buffer). // decoded buffer).
config_.samples_per_second = config.samples_per_second; CreateRateShifter(config_);
CreateRateShifter(config.samples_per_second); ResetSinkForNewConfig(config_);
ResetSinkForNewSampleRate(config.samples_per_second);
} }
pending_buffer_complete_ = true; pending_buffer_complete_ = true;
...@@ -417,7 +433,8 @@ void AudioDecoderAndroid::OnBufferDecoded( ...@@ -417,7 +433,8 @@ void AudioDecoderAndroid::OnBufferDecoded(
got_eos_ = true; got_eos_ = true;
LOG(INFO) << __func__ << ": decoded buffer marked EOS"; LOG(INFO) << __func__ << ": decoded buffer marked EOS";
} else { } else {
int input_frames = decoded->data_size() / (kNumChannels * sizeof(float)); int input_frames =
decoded->data_size() / (config_.channel_number * sizeof(float));
DCHECK(!rate_shifter_info_.empty()); DCHECK(!rate_shifter_info_.empty());
...@@ -429,9 +446,10 @@ void AudioDecoderAndroid::OnBufferDecoded( ...@@ -429,9 +446,10 @@ void AudioDecoderAndroid::OnBufferDecoded(
backend_->AudioChannel() == AudioChannel::kRight); backend_->AudioChannel() == AudioChannel::kRight);
const int playout_channel = const int playout_channel =
backend_->AudioChannel() == AudioChannel::kLeft ? 0 : 1; backend_->AudioChannel() == AudioChannel::kLeft ? 0 : 1;
for (int c = 0; c < kNumChannels; ++c) { for (int c = 0; c < config_.channel_number; ++c) {
if (c != playout_channel) { if (c != playout_channel) {
const size_t channel_size = decoded->data_size() / kNumChannels; const size_t channel_size =
decoded->data_size() / config_.channel_number;
std::memcpy(decoded->writable_data() + c * channel_size, std::memcpy(decoded->writable_data() + c * channel_size,
decoded->writable_data() + playout_channel * channel_size, decoded->writable_data() + playout_channel * channel_size,
channel_size); channel_size);
...@@ -457,12 +475,19 @@ void AudioDecoderAndroid::OnBufferDecoded( ...@@ -457,12 +475,19 @@ void AudioDecoderAndroid::OnBufferDecoded(
// Otherwise, queue data into the rate shifter, and then try to push the // Otherwise, queue data into the rate shifter, and then try to push the
// rate-shifted data. // rate-shifted data.
const uint8_t* channels[kNumChannels] = { scoped_refptr<::media::AudioBuffer> buffer =
decoded->data(), decoded->data() + input_frames * sizeof(float)}; ::media::AudioBuffer::CreateBuffer(
scoped_refptr<::media::AudioBuffer> buffer = ::media::AudioBuffer::CopyFrom( ::media::kSampleFormatPlanarF32,
::media::kSampleFormatPlanarF32, ::media::CHANNEL_LAYOUT_STEREO, DecoderConfigAdapter::ToMediaChannelLayout(config_.channel_layout),
kNumChannels, config_.samples_per_second, input_frames, channels, config_.channel_number, config_.samples_per_second, input_frames,
base::TimeDelta(), pool_); pool_);
buffer->set_timestamp(base::TimeDelta());
const int channel_data_size = input_frames * sizeof(float);
for (int c = 0; c < config_.channel_number; ++c) {
memcpy(buffer->channel_data()[c], decoded->data() + c * channel_data_size,
channel_data_size);
}
rate_shifter_->EnqueueBuffer(buffer); rate_shifter_->EnqueueBuffer(buffer);
rate_shifter_info_.back().input_frames += input_frames; rate_shifter_info_.back().input_frames += input_frames;
} }
...@@ -513,8 +538,9 @@ void AudioDecoderAndroid::PushRateShifted() { ...@@ -513,8 +538,9 @@ void AudioDecoderAndroid::PushRateShifted() {
// Push some silence into the rate shifter so we can get out any remaining // Push some silence into the rate shifter so we can get out any remaining
// rate-shifted data. // rate-shifted data.
rate_shifter_->EnqueueBuffer(::media::AudioBuffer::CreateEmptyBuffer( rate_shifter_->EnqueueBuffer(::media::AudioBuffer::CreateEmptyBuffer(
::media::CHANNEL_LAYOUT_STEREO, kNumChannels, DecoderConfigAdapter::ToMediaChannelLayout(config_.channel_layout),
config_.samples_per_second, kSilenceBufferFrames, base::TimeDelta())); config_.channel_number, config_.samples_per_second,
kSilenceBufferFrames, base::TimeDelta()));
} }
DCHECK(!rate_shifter_info_.empty()); DCHECK(!rate_shifter_info_.empty());
...@@ -541,9 +567,10 @@ void AudioDecoderAndroid::PushRateShifted() { ...@@ -541,9 +567,10 @@ void AudioDecoderAndroid::PushRateShifted() {
desired_output_frames, desired_output_frames,
config_.samples_per_second * kMaxOutputMs / kMillisecondsPerSecond); config_.samples_per_second * kMaxOutputMs / kMillisecondsPerSecond);
if (desired_output_frames > rate_shifter_output_->frames()) { if (!rate_shifter_output_ ||
rate_shifter_output_ = desired_output_frames > rate_shifter_output_->frames()) {
::media::AudioBus::Create(kNumChannels, desired_output_frames); rate_shifter_output_ = ::media::AudioBus::Create(config_.channel_number,
desired_output_frames);
} }
int out_frames = rate_shifter_->FillBuffer( int out_frames = rate_shifter_->FillBuffer(
...@@ -557,8 +584,8 @@ void AudioDecoderAndroid::PushRateShifted() { ...@@ -557,8 +584,8 @@ void AudioDecoderAndroid::PushRateShifted() {
int channel_data_size = out_frames * sizeof(float); int channel_data_size = out_frames * sizeof(float);
scoped_refptr<DecoderBufferBase> output_buffer(new DecoderBufferAdapter( scoped_refptr<DecoderBufferBase> output_buffer(new DecoderBufferAdapter(
new ::media::DecoderBuffer(channel_data_size * kNumChannels))); new ::media::DecoderBuffer(channel_data_size * config_.channel_number)));
for (int c = 0; c < kNumChannels; ++c) { for (int c = 0; c < config_.channel_number; ++c) {
memcpy(output_buffer->writable_data() + c * channel_data_size, memcpy(output_buffer->writable_data() + c * channel_data_size,
rate_shifter_output_->channel(c), channel_data_size); rate_shifter_output_->channel(c), channel_data_size);
} }
...@@ -586,7 +613,7 @@ void AudioDecoderAndroid::PushRateShifted() { ...@@ -586,7 +613,7 @@ void AudioDecoderAndroid::PushRateShifted() {
if (extra_frames > 0) { if (extra_frames > 0) {
// Clear out extra buffered data. // Clear out extra buffered data.
std::unique_ptr<::media::AudioBus> dropped = std::unique_ptr<::media::AudioBus> dropped =
::media::AudioBus::Create(kNumChannels, extra_frames); ::media::AudioBus::Create(config_.channel_number, extra_frames);
int cleared_frames = int cleared_frames =
rate_shifter_->FillBuffer(dropped.get(), 0, extra_frames, 1.0f); rate_shifter_->FillBuffer(dropped.get(), 0, extra_frames, 1.0f);
DCHECK_EQ(extra_frames, cleared_frames); DCHECK_EQ(extra_frames, cleared_frames);
......
...@@ -75,9 +75,9 @@ class AudioDecoderAndroid : public MediaPipelineBackend::AudioDecoder, ...@@ -75,9 +75,9 @@ class AudioDecoderAndroid : public MediaPipelineBackend::AudioDecoder,
void OnSinkError(SinkError error) override; void OnSinkError(SinkError error) override;
void CleanUpPcm(); void CleanUpPcm();
void ResetSinkForNewSampleRate(int sample_rate); void ResetSinkForNewConfig(const AudioConfig& config);
void CreateDecoder(); void CreateDecoder();
void CreateRateShifter(int samples_per_second); void CreateRateShifter(const AudioConfig& config);
void OnDecoderInitialized(bool success); void OnDecoderInitialized(bool success);
void OnBufferDecoded(uint64_t input_bytes, void OnBufferDecoded(uint64_t input_bytes,
CastAudioDecoder::Status status, CastAudioDecoder::Status status,
......
...@@ -53,7 +53,7 @@ int64_t AudioSinkAndroid::GetMinimumBufferedTime(SinkType sink_type, ...@@ -53,7 +53,7 @@ int64_t AudioSinkAndroid::GetMinimumBufferedTime(SinkType sink_type,
break; break;
case AudioSinkAndroid::kSinkTypeJavaBased: case AudioSinkAndroid::kSinkTypeJavaBased:
return AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime( return AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
config.samples_per_second); config.channel_number, config.samples_per_second);
} }
return kDefaultMinBufferTimeUs; return kDefaultMinBufferTimeUs;
} }
...@@ -70,6 +70,7 @@ void ManagedAudioSink::Reset() { ...@@ -70,6 +70,7 @@ void ManagedAudioSink::Reset() {
} }
void ManagedAudioSink::Reset(Delegate* delegate, void ManagedAudioSink::Reset(Delegate* delegate,
int num_channels,
int samples_per_second, int samples_per_second,
bool primary, bool primary,
const std::string& device_id, const std::string& device_id,
...@@ -83,8 +84,9 @@ void ManagedAudioSink::Reset(Delegate* delegate, ...@@ -83,8 +84,9 @@ void ManagedAudioSink::Reset(Delegate* delegate,
NOTREACHED() << "Native-based audio sink is not implemented yet!"; NOTREACHED() << "Native-based audio sink is not implemented yet!";
break; break;
case AudioSinkAndroid::kSinkTypeJavaBased: case AudioSinkAndroid::kSinkTypeJavaBased:
sink_ = new AudioSinkAndroidAudioTrackImpl( sink_ = new AudioSinkAndroidAudioTrackImpl(delegate, num_channels,
delegate, samples_per_second, primary, device_id, content_type); samples_per_second, primary,
device_id, content_type);
} }
AudioSinkManager::Get()->Add(sink_); AudioSinkManager::Get()->Add(sink_);
} }
......
...@@ -122,6 +122,7 @@ class ManagedAudioSink { ...@@ -122,6 +122,7 @@ class ManagedAudioSink {
// the manager. If a valid instance existed on entry it is removed from the // the manager. If a valid instance existed on entry it is removed from the
// manager and deleted before creating the new one. // manager and deleted before creating the new one.
void Reset(Delegate* delegate, void Reset(Delegate* delegate,
int num_channels,
int samples_per_second, int samples_per_second,
bool primary, bool primary,
const std::string& device_id, const std::string& device_id,
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <algorithm> #include <algorithm>
#include <string> #include <string>
#include <vector>
#include "base/bind.h" #include "base/bind.h"
#include "base/bind_helpers.h" #include "base/bind_helpers.h"
...@@ -67,18 +68,21 @@ bool AudioSinkAndroidAudioTrackImpl::GetSessionIds(int* media_id, ...@@ -67,18 +68,21 @@ bool AudioSinkAndroidAudioTrackImpl::GetSessionIds(int* media_id,
// static // static
int64_t AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime( int64_t AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
int num_channels,
int samples_per_second) { int samples_per_second) {
return Java_AudioSinkAudioTrackImpl_getMinimumBufferedTime( return Java_AudioSinkAudioTrackImpl_getMinimumBufferedTime(
base::android::AttachCurrentThread(), samples_per_second); base::android::AttachCurrentThread(), num_channels, samples_per_second);
} }
AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl( AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
AudioSinkAndroid::Delegate* delegate, AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second, int input_samples_per_second,
bool primary, bool primary,
const std::string& device_id, const std::string& device_id,
AudioContentType content_type) AudioContentType content_type)
: delegate_(delegate), : delegate_(delegate),
num_channels_(num_channels),
input_samples_per_second_(input_samples_per_second), input_samples_per_second_(input_samples_per_second),
primary_(primary), primary_(primary),
device_id_(device_id), device_id_(device_id),
...@@ -93,10 +97,12 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl( ...@@ -93,10 +97,12 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
state_(kStateUninitialized), state_(kStateUninitialized),
weak_factory_(this) { weak_factory_(this) {
LOG(INFO) << __func__ << "(" << this << "):" LOG(INFO) << __func__ << "(" << this << "):"
<< " num_channels_=" << num_channels_
<< " input_samples_per_second_=" << input_samples_per_second_ << " input_samples_per_second_=" << input_samples_per_second_
<< " primary_=" << primary_ << " device_id_=" << device_id_ << " primary_=" << primary_ << " device_id_=" << device_id_
<< " content_type__=" << GetContentTypeName(); << " content_type__=" << GetContentTypeName();
DCHECK(delegate_); DCHECK(delegate_);
DCHECK_GT(num_channels_, 0);
// Create Java part and initialize. // Create Java part and initialize.
DCHECK(j_audio_sink_audiotrack_impl_.is_null()); DCHECK(j_audio_sink_audiotrack_impl_.is_null());
...@@ -106,7 +112,7 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl( ...@@ -106,7 +112,7 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
reinterpret_cast<intptr_t>(this))); reinterpret_cast<intptr_t>(this)));
Java_AudioSinkAudioTrackImpl_init( Java_AudioSinkAudioTrackImpl_init(
base::android::AttachCurrentThread(), j_audio_sink_audiotrack_impl_, base::android::AttachCurrentThread(), j_audio_sink_audiotrack_impl_,
static_cast<int>(content_type_), input_samples_per_second_, static_cast<int>(content_type_), num_channels_, input_samples_per_second_,
kDirectBufferSize); kDirectBufferSize);
// Should be set now. // Should be set now.
DCHECK(direct_pcm_buffer_address_); DCHECK(direct_pcm_buffer_address_);
...@@ -266,20 +272,24 @@ void AudioSinkAndroidAudioTrackImpl::OnPlayoutDone() { ...@@ -266,20 +272,24 @@ void AudioSinkAndroidAudioTrackImpl::OnPlayoutDone() {
} }
void AudioSinkAndroidAudioTrackImpl::ReformatData() { void AudioSinkAndroidAudioTrackImpl::ReformatData() {
// Data is in planar float format, i.e. all left samples first, then all // Data is in planar float format, i.e., planar audio data for stereo is all
// right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR"). // left samples first, then all right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR").
// AudioTrack needs interleaved format -> "LRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLR"). // AudioTrack needs interleaved format -> "LRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLR").
DCHECK(direct_pcm_buffer_address_); DCHECK(direct_pcm_buffer_address_);
DCHECK_EQ(0, static_cast<int>(pending_data_->data_size() % sizeof(float))); DCHECK_EQ(0, static_cast<int>(pending_data_->data_size() % sizeof(float)));
CHECK(pending_data_->data_size() < kDirectBufferSize); CHECK_LT(static_cast<int>(pending_data_->data_size()), kDirectBufferSize);
int num_of_samples = pending_data_->data_size() / sizeof(float); int num_of_samples = pending_data_->data_size() / sizeof(float);
int num_of_frames = num_of_samples / 2; int num_of_frames = num_of_samples / num_channels_;
const float* src_left = reinterpret_cast<const float*>(pending_data_->data()); std::vector<const float*> src(num_channels_);
const float* src_right = src_left + num_of_samples / 2; for (int c = 0; c < num_channels_; c++) {
src[c] = reinterpret_cast<const float*>(pending_data_->data()) +
c * num_of_frames;
}
float* dst = reinterpret_cast<float*>(direct_pcm_buffer_address_); float* dst = reinterpret_cast<float*>(direct_pcm_buffer_address_);
for (int f = 0; f < num_of_frames; f++) { for (int f = 0; f < num_of_frames; f++) {
*dst++ = *src_left++; for (int c = 0; c < num_channels_; c++) {
*dst++ = *src_right++; *dst++ = *src[c]++;
}
} }
} }
......
...@@ -47,7 +47,8 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid { ...@@ -47,7 +47,8 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Returns true if the ids populated are valid. // Returns true if the ids populated are valid.
static bool GetSessionIds(int* media_id, int* communication_id); static bool GetSessionIds(int* media_id, int* communication_id);
static int64_t GetMinimumBufferedTime(int samples_per_second); static int64_t GetMinimumBufferedTime(int num_channels,
int samples_per_second);
// Called from Java so that we can cache the addresses of the Java-managed // Called from Java so that we can cache the addresses of the Java-managed
// byte_buffers. // byte_buffers.
...@@ -81,6 +82,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid { ...@@ -81,6 +82,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
friend class ManagedAudioSink; friend class ManagedAudioSink;
AudioSinkAndroidAudioTrackImpl(AudioSinkAndroid::Delegate* delegate, AudioSinkAndroidAudioTrackImpl(AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second, int input_samples_per_second,
bool primary, bool primary,
const std::string& device_id, const std::string& device_id,
...@@ -113,6 +115,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid { ...@@ -113,6 +115,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Config parameters provided into c'tor. // Config parameters provided into c'tor.
Delegate* const delegate_; Delegate* const delegate_;
const int num_channels_;
const int input_samples_per_second_; const int input_samples_per_second_;
const bool primary_; const bool primary_;
const std::string device_id_; const std::string device_id_;
......
...@@ -32,11 +32,11 @@ import java.nio.ByteOrder; ...@@ -32,11 +32,11 @@ import java.nio.ByteOrder;
* Implements an audio sink object using Android's AudioTrack module to * Implements an audio sink object using Android's AudioTrack module to
* playback audio samples. * playback audio samples.
* It assumes the following fixed configuration parameters: * It assumes the following fixed configuration parameters:
* - 2-channel audio
* - PCM audio format (i.e., no encoded data like mp3) * - PCM audio format (i.e., no encoded data like mp3)
* - samples are 4-byte floats, interleaved channels ("LRLRLRLRLR"). * - samples are 4-byte floats, interleaved channels (i.e., interleaved audio
* The only configurable audio parameter is the sample rate (typically 44.1 or * data for stereo is "LRLRLRLRLR").
* 48 KHz). * The configurable audio parameters are the sample rate (typically 44.1 or
* 48 KHz) and the channel number.
* *
* PCM data is shared through the JNI using memory-mapped ByteBuffer objects. * PCM data is shared through the JNI using memory-mapped ByteBuffer objects.
* The AudioTrack.write() function is called in BLOCKING mode. That means when * The AudioTrack.write() function is called in BLOCKING mode. That means when
...@@ -88,10 +88,9 @@ class AudioSinkAudioTrackImpl { ...@@ -88,10 +88,9 @@ class AudioSinkAudioTrackImpl {
}; };
// Hardcoded AudioTrack config parameters. // Hardcoded AudioTrack config parameters.
private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_OUT_STEREO;
private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_FLOAT; private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
private static final int AUDIO_MODE = AudioTrack.MODE_STREAM; private static final int AUDIO_MODE = AudioTrack.MODE_STREAM;
private static final int BYTES_PER_FRAME = 2 * 4; // 2 channels, float (4-bytes) private static final int BYTES_PER_SAMPLE = 4; // float (4-bytes)
// Parameter to determine the proper internal buffer size of the AudioTrack instance. In order // Parameter to determine the proper internal buffer size of the AudioTrack instance. In order
// to minimize latency we want a buffer as small as possible. However, to avoid underruns we // to minimize latency we want a buffer as small as possible. However, to avoid underruns we
...@@ -175,6 +174,7 @@ class AudioSinkAudioTrackImpl { ...@@ -175,6 +174,7 @@ class AudioSinkAudioTrackImpl {
// Dynamic AudioTrack config parameter. // Dynamic AudioTrack config parameter.
private int mSampleRateInHz; private int mSampleRateInHz;
private int mChannelCount;
private AudioTrack mAudioTrack; private AudioTrack mAudioTrack;
...@@ -229,10 +229,33 @@ class AudioSinkAudioTrackImpl { ...@@ -229,10 +229,33 @@ class AudioSinkAudioTrackImpl {
return sAudioManager; return sAudioManager;
} }
private static int getChannelConfig(int channelCount) {
switch (channelCount) {
case 1:
return AudioFormat.CHANNEL_OUT_MONO;
case 2:
return AudioFormat.CHANNEL_OUT_STEREO;
case 4:
return AudioFormat.CHANNEL_OUT_QUAD;
case 6:
return AudioFormat.CHANNEL_OUT_5POINT1;
case 8:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
return AudioFormat.CHANNEL_OUT_7POINT1_SURROUND;
} else {
return AudioFormat.CHANNEL_OUT_7POINT1;
}
default:
return AudioFormat.CHANNEL_OUT_DEFAULT;
}
}
@CalledByNative @CalledByNative
public static long getMinimumBufferedTime(int sampleRateInHz) { public static long getMinimumBufferedTime(int channelCount, int sampleRateInHz) {
int sizeBytes = AudioTrack.getMinBufferSize(sampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT); int sizeBytes = AudioTrack.getMinBufferSize(
long sizeUs = SEC_IN_USEC * (long) sizeBytes / (BYTES_PER_FRAME * (long) sampleRateInHz); sampleRateInHz, getChannelConfig(channelCount), AUDIO_FORMAT);
long sizeUs = SEC_IN_USEC * (long) sizeBytes
/ (BYTES_PER_SAMPLE * channelCount * (long) sampleRateInHz);
return sizeUs + MIN_BUFFERED_TIME_PADDING_US; return sizeUs + MIN_BUFFERED_TIME_PADDING_US;
} }
...@@ -301,8 +324,8 @@ class AudioSinkAudioTrackImpl { ...@@ -301,8 +324,8 @@ class AudioSinkAudioTrackImpl {
* the shared memory buffers. * the shared memory buffers.
*/ */
@CalledByNative @CalledByNative
private void init( private void init(@AudioContentType int castContentType, int channelCount, int sampleRateInHz,
@AudioContentType int castContentType, int sampleRateInHz, int bytesPerBuffer) { int bytesPerBuffer) {
mTag = TAG + "(" + castContentType + ":" + (sInstanceCounter++) + ")"; mTag = TAG + "(" + castContentType + ":" + (sInstanceCounter++) + ")";
// Setup throttled logs: pass the first 5, then every 1sec, reset after 5. // Setup throttled logs: pass the first 5, then every 1sec, reset after 5.
...@@ -312,7 +335,7 @@ class AudioSinkAudioTrackImpl { ...@@ -312,7 +335,7 @@ class AudioSinkAudioTrackImpl {
Log.i(mTag, Log.i(mTag,
"Init:" "Init:"
+ " sampleRateInHz=" + sampleRateInHz + " channelCount=" + channelCount + " sampleRateInHz=" + sampleRateInHz
+ " bytesPerBuffer=" + bytesPerBuffer); + " bytesPerBuffer=" + bytesPerBuffer);
if (mIsInitialized) { if (mIsInitialized) {
...@@ -325,6 +348,7 @@ class AudioSinkAudioTrackImpl { ...@@ -325,6 +348,7 @@ class AudioSinkAudioTrackImpl {
return; return;
} }
mSampleRateInHz = sampleRateInHz; mSampleRateInHz = sampleRateInHz;
mChannelCount = channelCount;
int usageType = CAST_TYPE_TO_ANDROID_USAGE_TYPE_MAP.get(castContentType); int usageType = CAST_TYPE_TO_ANDROID_USAGE_TYPE_MAP.get(castContentType);
int contentType = CAST_TYPE_TO_ANDROID_CONTENT_TYPE_MAP.get(castContentType); int contentType = CAST_TYPE_TO_ANDROID_CONTENT_TYPE_MAP.get(castContentType);
...@@ -337,9 +361,11 @@ class AudioSinkAudioTrackImpl { ...@@ -337,9 +361,11 @@ class AudioSinkAudioTrackImpl {
} }
// AudioContentType.ALARM doesn't get a sessionId. // AudioContentType.ALARM doesn't get a sessionId.
int channelConfig = getChannelConfig(mChannelCount);
int bufferSizeInBytes = MIN_BUFFER_SIZE_MULTIPLIER int bufferSizeInBytes = MIN_BUFFER_SIZE_MULTIPLIER
* AudioTrack.getMinBufferSize(mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT); * AudioTrack.getMinBufferSize(mSampleRateInHz, channelConfig, AUDIO_FORMAT);
int bufferSizeInMs = 1000 * bufferSizeInBytes / (BYTES_PER_FRAME * mSampleRateInHz); int bufferSizeInMs =
1000 * bufferSizeInBytes / (BYTES_PER_SAMPLE * mChannelCount * mSampleRateInHz);
Log.i(mTag, Log.i(mTag,
"Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + bufferSizeInMs "Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + bufferSizeInMs
+ "ms) usageType=" + usageType + " contentType=" + contentType + "ms) usageType=" + usageType + " contentType=" + contentType
...@@ -359,7 +385,7 @@ class AudioSinkAudioTrackImpl { ...@@ -359,7 +385,7 @@ class AudioSinkAudioTrackImpl {
.setAudioFormat(new AudioFormat.Builder() .setAudioFormat(new AudioFormat.Builder()
.setEncoding(AUDIO_FORMAT) .setEncoding(AUDIO_FORMAT)
.setSampleRate(mSampleRateInHz) .setSampleRate(mSampleRateInHz)
.setChannelMask(CHANNEL_CONFIG) .setChannelMask(channelConfig)
.build()); .build());
if (sessionId != AudioManager.ERROR) builder.setSessionId(sessionId); if (sessionId != AudioManager.ERROR) builder.setSessionId(sessionId);
mAudioTrack = builder.build(); mAudioTrack = builder.build();
...@@ -368,11 +394,11 @@ class AudioSinkAudioTrackImpl { ...@@ -368,11 +394,11 @@ class AudioSinkAudioTrackImpl {
// Using pre-M API. // Using pre-M API.
if (sessionId == AudioManager.ERROR) { if (sessionId == AudioManager.ERROR) {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType), mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes, mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM); AudioTrack.MODE_STREAM);
} else { } else {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType), mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes, mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM, sessionId); AudioTrack.MODE_STREAM, sessionId);
} }
} }
...@@ -548,13 +574,13 @@ class AudioSinkAudioTrackImpl { ...@@ -548,13 +574,13 @@ class AudioSinkAudioTrackImpl {
} }
} }
int framesWritten = bytesWritten / BYTES_PER_FRAME; int framesWritten = bytesWritten / (BYTES_PER_SAMPLE * mChannelCount);
mTotalFramesWritten += framesWritten; mTotalFramesWritten += framesWritten;
if (DEBUG_LEVEL >= 3) { if (DEBUG_LEVEL >= 3) {
Log.i(mTag, Log.i(mTag,
" wrote " + bytesWritten + "/" + sizeInBytes " wrote " + bytesWritten + "/" + sizeInBytes + " total_bytes_written="
+ " total_bytes_written=" + (mTotalFramesWritten * BYTES_PER_FRAME) + (mTotalFramesWritten * BYTES_PER_SAMPLE * mChannelCount)
+ " took:" + (SystemClock.elapsedRealtime() - beforeMsecs) + "ms"); + " took:" + (SystemClock.elapsedRealtime() - beforeMsecs) + "ms");
} }
......
...@@ -203,6 +203,8 @@ class CastAudioDecoderImpl : public CastAudioDecoder { ...@@ -203,6 +203,8 @@ class CastAudioDecoderImpl : public CastAudioDecoder {
} }
if (decoded->channel_count() != config_.channel_number) { if (decoded->channel_count() != config_.channel_number) {
LOG(WARNING) << "channel_count changed to " << decoded->channel_count()
<< " from " << config_.channel_number;
config_.channel_number = decoded->channel_count(); config_.channel_number = decoded->channel_count();
decoded_bus_.reset(); decoded_bus_.reset();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment