Commit 2c9ce61a authored by ziyangch, committed by Commit Bot

[Chromecast] Allow arbitrary channel count for Android audio streams

Bug: internal b/140189221

Test: Cast from YouTube to Android TV.
      TTS on Android Things.

Change-Id: I1cc4e355a0644ff35193a62c47f7fa748de17fb2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1900151
Commit-Queue: Ziyang Cheng <ziyangch@chromium.org>
Reviewed-by: Yuchen Liu <yucliu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712797}
parent c2d2c1b4
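
For orientation before the diff: this change threads the stream's channel count from the C++ AudioConfig down to the Java AudioTrack. The sketch below is not part of the CL (class and method names are illustrative, and it assumes API level 23+ and float PCM); it only outlines how an arbitrary channel count ends up shaping the AudioTrack configuration, mirroring the getChannelConfig() and buffer-sizing changes to AudioSinkAudioTrackImpl.java further down.

import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioTrack;
import android.os.Build;

// Illustrative only -- not part of this CL.
final class MultiChannelTrackSketch {
    private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;

    // Maps a raw channel count to an Android output channel mask.
    static int channelMaskFor(int channelCount) {
        switch (channelCount) {
            case 1: return AudioFormat.CHANNEL_OUT_MONO;
            case 2: return AudioFormat.CHANNEL_OUT_STEREO;
            case 4: return AudioFormat.CHANNEL_OUT_QUAD;
            case 6: return AudioFormat.CHANNEL_OUT_5POINT1;
            case 8:
                return Build.VERSION.SDK_INT >= Build.VERSION_CODES.M
                        ? AudioFormat.CHANNEL_OUT_7POINT1_SURROUND
                        : AudioFormat.CHANNEL_OUT_7POINT1;
            default: return AudioFormat.CHANNEL_OUT_DEFAULT;
        }
    }

    // Builds a streaming float-PCM AudioTrack sized from the platform minimum.
    static AudioTrack buildTrack(int channelCount, int sampleRateInHz) {
        int channelMask = channelMaskFor(channelCount);
        int minBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelMask, AUDIO_FORMAT);
        return new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build())
                .setAudioFormat(new AudioFormat.Builder()
                        .setEncoding(AUDIO_FORMAT)
                        .setSampleRate(sampleRateInHz)
                        .setChannelMask(channelMask)
                        .build())
                .setBufferSizeInBytes(2 * minBytes) // extra headroom; multiplier is illustrative
                .setTransferMode(AudioTrack.MODE_STREAM)
                .build();
    }
}
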
......@@ -18,6 +18,7 @@
#include "chromecast/media/cma/backend/android/media_pipeline_backend_android.h"
#include "chromecast/media/cma/base/decoder_buffer_adapter.h"
#include "chromecast/media/cma/base/decoder_buffer_base.h"
#include "chromecast/media/cma/base/decoder_config_adapter.h"
#include "chromecast/public/media/cast_decoder_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/channel_layout.h"
......@@ -38,7 +39,6 @@ namespace media {
namespace {
const int kNumChannels = 2;
const int kDefaultFramesPerBuffer = 1024;
const int kSilenceBufferFrames = 2048;
const int kMaxOutputMs = 20;
......@@ -73,8 +73,6 @@ AudioDecoderAndroid::AudioDecoderAndroid(MediaPipelineBackendAndroid* backend)
got_eos_(false),
pushed_eos_(false),
sink_error_(false),
rate_shifter_output_(
::media::AudioBus::Create(kNumChannels, kDefaultFramesPerBuffer)),
current_pts_(kInvalidTimestamp),
sink_(AudioSinkManager::GetDefaultSinkType()),
pending_output_frames_(kNoPendingOutput),
......@@ -121,8 +119,9 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) {
TRACE_FUNCTION_ENTRY0();
current_pts_ = start_pts;
DCHECK(IsValidConfig(config_));
sink_.Reset(this, config_.samples_per_second, backend_->Primary(),
backend_->DeviceId(), backend_->ContentType());
sink_.Reset(this, config_.channel_number, config_.samples_per_second,
backend_->Primary(), backend_->DeviceId(),
backend_->ContentType());
sink_->SetStreamVolumeMultiplier(volume_multiplier_);
// Create decoder_ if necessary. This can happen if Stop() was called, and
// SetConfig() was not called since then.
......@@ -130,7 +129,7 @@ bool AudioDecoderAndroid::Start(int64_t start_pts) {
CreateDecoder();
}
if (!rate_shifter_) {
CreateRateShifter(config_.samples_per_second);
CreateRateShifter(config_);
}
return true;
}
......@@ -259,22 +258,23 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) {
return false;
}
bool changed_sample_rate =
(config.samples_per_second != config_.samples_per_second);
bool changed_config =
(config.samples_per_second != config_.samples_per_second ||
config.channel_number != config_.channel_number);
if (!rate_shifter_ || changed_sample_rate) {
CreateRateShifter(config.samples_per_second);
if (!rate_shifter_ || changed_config) {
CreateRateShifter(config);
}
if (sink_ && changed_sample_rate) {
ResetSinkForNewSampleRate(config.samples_per_second);
if (sink_ && changed_config) {
ResetSinkForNewConfig(config);
}
config_ = config;
decoder_.reset();
CreateDecoder();
if (pending_buffer_complete_ && changed_sample_rate) {
if (pending_buffer_complete_ && changed_config) {
pending_buffer_complete_ = false;
delegate_->OnPushBufferComplete(
MediaPipelineBackendAndroid::kBufferSuccess);
......@@ -282,8 +282,9 @@ bool AudioDecoderAndroid::SetConfig(const AudioConfig& config) {
return true;
}
void AudioDecoderAndroid::ResetSinkForNewSampleRate(int sample_rate) {
sink_.Reset(this, sample_rate, backend_->Primary(), backend_->DeviceId(),
void AudioDecoderAndroid::ResetSinkForNewConfig(const AudioConfig& config) {
sink_.Reset(this, config.channel_number, config.samples_per_second,
backend_->Primary(), backend_->DeviceId(),
backend_->ContentType());
sink_->SetStreamVolumeMultiplier(volume_multiplier_);
pending_output_frames_ = kNoPendingOutput;
......@@ -309,18 +310,21 @@ void AudioDecoderAndroid::CreateDecoder() {
base::Unretained(this)));
}
void AudioDecoderAndroid::CreateRateShifter(int samples_per_second) {
LOG(INFO) << __func__ << ": samples_per_second=" << samples_per_second;
void AudioDecoderAndroid::CreateRateShifter(const AudioConfig& config) {
LOG(INFO) << __func__ << ": channel_number=" << config.channel_number
<< " samples_per_second=" << config.samples_per_second;
rate_shifter_info_.clear();
rate_shifter_info_.push_back(RateShifterInfo(1.0f));
rate_shifter_output_.reset();
rate_shifter_.reset(new ::media::AudioRendererAlgorithm());
bool is_encrypted = false;
rate_shifter_->Initialize(
::media::AudioParameters(::media::AudioParameters::AUDIO_PCM_LINEAR,
::media::CHANNEL_LAYOUT_STEREO,
samples_per_second, kDefaultFramesPerBuffer),
::media::AudioParameters(
::media::AudioParameters::AUDIO_PCM_LINEAR,
DecoderConfigAdapter::ToMediaChannelLayout(config.channel_layout),
config.samples_per_second, kDefaultFramesPerBuffer),
is_encrypted);
}
......@@ -402,14 +406,26 @@ void AudioDecoderAndroid::OnBufferDecoded(
delta.decoded_bytes = input_bytes;
UpdateStatistics(delta);
bool changed_config = false;
if (config.samples_per_second != config_.samples_per_second) {
// Sample rate from actual stream doesn't match supposed sample rate from
// the container. Update the sink and rate shifter. Note that for now we
LOG(INFO) << "Input sample rate changed from " << config_.samples_per_second
<< " to " << config.samples_per_second;
config_.samples_per_second = config.samples_per_second;
changed_config = true;
}
if (config.channel_number != config_.channel_number) {
LOG(INFO) << "Input channel count changed from " << config_.channel_number
<< " to " << config.channel_number;
config_.channel_number = config.channel_number;
changed_config = true;
}
if (changed_config) {
// Config from actual stream doesn't match supposed config from the
// container. Update the sink and rate shifter. Note that for now we
// assume that this can only happen at start of stream (ie, on the first
// decoded buffer).
config_.samples_per_second = config.samples_per_second;
CreateRateShifter(config.samples_per_second);
ResetSinkForNewSampleRate(config.samples_per_second);
CreateRateShifter(config_);
ResetSinkForNewConfig(config_);
}
pending_buffer_complete_ = true;
......@@ -417,7 +433,8 @@ void AudioDecoderAndroid::OnBufferDecoded(
got_eos_ = true;
LOG(INFO) << __func__ << ": decoded buffer marked EOS";
} else {
int input_frames = decoded->data_size() / (kNumChannels * sizeof(float));
int input_frames =
decoded->data_size() / (config_.channel_number * sizeof(float));
DCHECK(!rate_shifter_info_.empty());
......@@ -429,9 +446,10 @@ void AudioDecoderAndroid::OnBufferDecoded(
backend_->AudioChannel() == AudioChannel::kRight);
const int playout_channel =
backend_->AudioChannel() == AudioChannel::kLeft ? 0 : 1;
for (int c = 0; c < kNumChannels; ++c) {
for (int c = 0; c < config_.channel_number; ++c) {
if (c != playout_channel) {
const size_t channel_size = decoded->data_size() / kNumChannels;
const size_t channel_size =
decoded->data_size() / config_.channel_number;
std::memcpy(decoded->writable_data() + c * channel_size,
decoded->writable_data() + playout_channel * channel_size,
channel_size);
......@@ -457,12 +475,19 @@ void AudioDecoderAndroid::OnBufferDecoded(
// Otherwise, queue data into the rate shifter, and then try to push the
// rate-shifted data.
const uint8_t* channels[kNumChannels] = {
decoded->data(), decoded->data() + input_frames * sizeof(float)};
scoped_refptr<::media::AudioBuffer> buffer = ::media::AudioBuffer::CopyFrom(
::media::kSampleFormatPlanarF32, ::media::CHANNEL_LAYOUT_STEREO,
kNumChannels, config_.samples_per_second, input_frames, channels,
base::TimeDelta(), pool_);
scoped_refptr<::media::AudioBuffer> buffer =
::media::AudioBuffer::CreateBuffer(
::media::kSampleFormatPlanarF32,
DecoderConfigAdapter::ToMediaChannelLayout(config_.channel_layout),
config_.channel_number, config_.samples_per_second, input_frames,
pool_);
buffer->set_timestamp(base::TimeDelta());
const int channel_data_size = input_frames * sizeof(float);
for (int c = 0; c < config_.channel_number; ++c) {
memcpy(buffer->channel_data()[c], decoded->data() + c * channel_data_size,
channel_data_size);
}
rate_shifter_->EnqueueBuffer(buffer);
rate_shifter_info_.back().input_frames += input_frames;
}
......@@ -513,8 +538,9 @@ void AudioDecoderAndroid::PushRateShifted() {
// Push some silence into the rate shifter so we can get out any remaining
// rate-shifted data.
rate_shifter_->EnqueueBuffer(::media::AudioBuffer::CreateEmptyBuffer(
::media::CHANNEL_LAYOUT_STEREO, kNumChannels,
config_.samples_per_second, kSilenceBufferFrames, base::TimeDelta()));
DecoderConfigAdapter::ToMediaChannelLayout(config_.channel_layout),
config_.channel_number, config_.samples_per_second,
kSilenceBufferFrames, base::TimeDelta()));
}
DCHECK(!rate_shifter_info_.empty());
......@@ -541,9 +567,10 @@ void AudioDecoderAndroid::PushRateShifted() {
desired_output_frames,
config_.samples_per_second * kMaxOutputMs / kMillisecondsPerSecond);
if (desired_output_frames > rate_shifter_output_->frames()) {
rate_shifter_output_ =
::media::AudioBus::Create(kNumChannels, desired_output_frames);
if (!rate_shifter_output_ ||
desired_output_frames > rate_shifter_output_->frames()) {
rate_shifter_output_ = ::media::AudioBus::Create(config_.channel_number,
desired_output_frames);
}
int out_frames = rate_shifter_->FillBuffer(
......@@ -557,8 +584,8 @@ void AudioDecoderAndroid::PushRateShifted() {
int channel_data_size = out_frames * sizeof(float);
scoped_refptr<DecoderBufferBase> output_buffer(new DecoderBufferAdapter(
new ::media::DecoderBuffer(channel_data_size * kNumChannels)));
for (int c = 0; c < kNumChannels; ++c) {
new ::media::DecoderBuffer(channel_data_size * config_.channel_number)));
for (int c = 0; c < config_.channel_number; ++c) {
memcpy(output_buffer->writable_data() + c * channel_data_size,
rate_shifter_output_->channel(c), channel_data_size);
}
......@@ -586,7 +613,7 @@ void AudioDecoderAndroid::PushRateShifted() {
if (extra_frames > 0) {
// Clear out extra buffered data.
std::unique_ptr<::media::AudioBus> dropped =
::media::AudioBus::Create(kNumChannels, extra_frames);
::media::AudioBus::Create(config_.channel_number, extra_frames);
int cleared_frames =
rate_shifter_->FillBuffer(dropped.get(), 0, extra_frames, 1.0f);
DCHECK_EQ(extra_frames, cleared_frames);
......
......@@ -75,9 +75,9 @@ class AudioDecoderAndroid : public MediaPipelineBackend::AudioDecoder,
void OnSinkError(SinkError error) override;
void CleanUpPcm();
void ResetSinkForNewSampleRate(int sample_rate);
void ResetSinkForNewConfig(const AudioConfig& config);
void CreateDecoder();
void CreateRateShifter(int samples_per_second);
void CreateRateShifter(const AudioConfig& config);
void OnDecoderInitialized(bool success);
void OnBufferDecoded(uint64_t input_bytes,
CastAudioDecoder::Status status,
......
......@@ -53,7 +53,7 @@ int64_t AudioSinkAndroid::GetMinimumBufferedTime(SinkType sink_type,
break;
case AudioSinkAndroid::kSinkTypeJavaBased:
return AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
config.samples_per_second);
config.channel_number, config.samples_per_second);
}
return kDefaultMinBufferTimeUs;
}
......@@ -70,6 +70,7 @@ void ManagedAudioSink::Reset() {
}
void ManagedAudioSink::Reset(Delegate* delegate,
int num_channels,
int samples_per_second,
bool primary,
const std::string& device_id,
......@@ -83,8 +84,9 @@ void ManagedAudioSink::Reset(Delegate* delegate,
NOTREACHED() << "Native-based audio sink is not implemented yet!";
break;
case AudioSinkAndroid::kSinkTypeJavaBased:
sink_ = new AudioSinkAndroidAudioTrackImpl(
delegate, samples_per_second, primary, device_id, content_type);
sink_ = new AudioSinkAndroidAudioTrackImpl(delegate, num_channels,
samples_per_second, primary,
device_id, content_type);
}
AudioSinkManager::Get()->Add(sink_);
}
......
......@@ -122,6 +122,7 @@ class ManagedAudioSink {
// the manager. If a valid instance existed on entry it is removed from the
// manager and deleted before creating the new one.
void Reset(Delegate* delegate,
int num_channels,
int samples_per_second,
bool primary,
const std::string& device_id,
......
......@@ -6,6 +6,7 @@
#include <algorithm>
#include <string>
#include <vector>
#include "base/bind.h"
#include "base/bind_helpers.h"
......@@ -67,18 +68,21 @@ bool AudioSinkAndroidAudioTrackImpl::GetSessionIds(int* media_id,
// static
int64_t AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
int num_channels,
int samples_per_second) {
return Java_AudioSinkAudioTrackImpl_getMinimumBufferedTime(
base::android::AttachCurrentThread(), samples_per_second);
base::android::AttachCurrentThread(), num_channels, samples_per_second);
}
AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second,
bool primary,
const std::string& device_id,
AudioContentType content_type)
: delegate_(delegate),
num_channels_(num_channels),
input_samples_per_second_(input_samples_per_second),
primary_(primary),
device_id_(device_id),
......@@ -93,10 +97,12 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
state_(kStateUninitialized),
weak_factory_(this) {
LOG(INFO) << __func__ << "(" << this << "):"
<< " num_channels_=" << num_channels_
<< " input_samples_per_second_=" << input_samples_per_second_
<< " primary_=" << primary_ << " device_id_=" << device_id_
<< " content_type__=" << GetContentTypeName();
DCHECK(delegate_);
DCHECK_GT(num_channels_, 0);
// Create Java part and initialize.
DCHECK(j_audio_sink_audiotrack_impl_.is_null());
......@@ -106,7 +112,7 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
reinterpret_cast<intptr_t>(this)));
Java_AudioSinkAudioTrackImpl_init(
base::android::AttachCurrentThread(), j_audio_sink_audiotrack_impl_,
static_cast<int>(content_type_), input_samples_per_second_,
static_cast<int>(content_type_), num_channels_, input_samples_per_second_,
kDirectBufferSize);
// Should be set now.
DCHECK(direct_pcm_buffer_address_);
......@@ -266,20 +272,24 @@ void AudioSinkAndroidAudioTrackImpl::OnPlayoutDone() {
}
void AudioSinkAndroidAudioTrackImpl::ReformatData() {
// Data is in planar float format, i.e. all left samples first, then all
// right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR").
// Data is in planar float format, i.e., planar audio data for stereo is all
// left samples first, then all right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR").
// AudioTrack needs interleaved format -> "LRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLR").
DCHECK(direct_pcm_buffer_address_);
DCHECK_EQ(0, static_cast<int>(pending_data_->data_size() % sizeof(float)));
CHECK(pending_data_->data_size() < kDirectBufferSize);
CHECK_LT(static_cast<int>(pending_data_->data_size()), kDirectBufferSize);
int num_of_samples = pending_data_->data_size() / sizeof(float);
int num_of_frames = num_of_samples / 2;
const float* src_left = reinterpret_cast<const float*>(pending_data_->data());
const float* src_right = src_left + num_of_samples / 2;
int num_of_frames = num_of_samples / num_channels_;
std::vector<const float*> src(num_channels_);
for (int c = 0; c < num_channels_; c++) {
src[c] = reinterpret_cast<const float*>(pending_data_->data()) +
c * num_of_frames;
}
float* dst = reinterpret_cast<float*>(direct_pcm_buffer_address_);
for (int f = 0; f < num_of_frames; f++) {
*dst++ = *src_left++;
*dst++ = *src_right++;
for (int c = 0; c < num_channels_; c++) {
*dst++ = *src[c]++;
}
}
}
......
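
The ReformatData() change above drops the hard-coded stereo de-planarization in favor of a loop over num_channels_. A minimal self-contained Java sketch of the same planar-to-interleaved transform (names are illustrative, not taken from the CL):

final class InterleaveSketch {
    // Interleaves planar float audio (all of channel 0, then all of channel 1, ...)
    // into the frame-interleaved layout AudioTrack expects.
    static float[] interleave(float[] planar, int channelCount) {
        int frames = planar.length / channelCount;
        float[] out = new float[planar.length];
        for (int f = 0; f < frames; f++) {
            for (int c = 0; c < channelCount; c++) {
                // Channel c's samples start at offset c * frames in the planar buffer.
                out[f * channelCount + c] = planar[c * frames + f];
            }
        }
        return out;
    }
}
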
......@@ -47,7 +47,8 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Returns true if the ids populated are valid.
static bool GetSessionIds(int* media_id, int* communication_id);
static int64_t GetMinimumBufferedTime(int samples_per_second);
static int64_t GetMinimumBufferedTime(int num_channels,
int samples_per_second);
// Called from Java so that we can cache the addresses of the Java-managed
// byte_buffers.
......@@ -81,6 +82,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
friend class ManagedAudioSink;
AudioSinkAndroidAudioTrackImpl(AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second,
bool primary,
const std::string& device_id,
......@@ -113,6 +115,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Config parameters provided into c'tor.
Delegate* const delegate_;
const int num_channels_;
const int input_samples_per_second_;
const bool primary_;
const std::string device_id_;
......
......@@ -32,11 +32,11 @@ import java.nio.ByteOrder;
* Implements an audio sink object using Android's AudioTrack module to
* playback audio samples.
* It assumes the following fixed configuration parameters:
* - 2-channel audio
* - PCM audio format (i.e., no encoded data like mp3)
* - samples are 4-byte floats, interleaved channels ("LRLRLRLRLR").
* The only configurable audio parameter is the sample rate (typically 44.1 or
* 48 KHz).
* - samples are 4-byte floats, interleaved channels (i.e., interleaved audio
* data for stereo is "LRLRLRLRLR").
* The configurable audio parameters are the sample rate (typically 44.1 or
* 48 KHz) and the channel number.
*
* PCM data is shared through the JNI using memory-mapped ByteBuffer objects.
* The AudioTrack.write() function is called in BLOCKING mode. That means when
......@@ -88,10 +88,9 @@ class AudioSinkAudioTrackImpl {
};
// Hardcoded AudioTrack config parameters.
private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_OUT_STEREO;
private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
private static final int AUDIO_MODE = AudioTrack.MODE_STREAM;
private static final int BYTES_PER_FRAME = 2 * 4; // 2 channels, float (4-bytes)
private static final int BYTES_PER_SAMPLE = 4; // float (4-bytes)
// Parameter to determine the proper internal buffer size of the AudioTrack instance. In order
// to minimize latency we want a buffer as small as possible. However, to avoid underruns we
......@@ -175,6 +174,7 @@ class AudioSinkAudioTrackImpl {
// Dynamic AudioTrack config parameter.
private int mSampleRateInHz;
private int mChannelCount;
private AudioTrack mAudioTrack;
......@@ -229,10 +229,33 @@ class AudioSinkAudioTrackImpl {
return sAudioManager;
}
private static int getChannelConfig(int channelCount) {
switch (channelCount) {
case 1:
return AudioFormat.CHANNEL_OUT_MONO;
case 2:
return AudioFormat.CHANNEL_OUT_STEREO;
case 4:
return AudioFormat.CHANNEL_OUT_QUAD;
case 6:
return AudioFormat.CHANNEL_OUT_5POINT1;
case 8:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
return AudioFormat.CHANNEL_OUT_7POINT1_SURROUND;
} else {
return AudioFormat.CHANNEL_OUT_7POINT1;
}
default:
return AudioFormat.CHANNEL_OUT_DEFAULT;
}
}
@CalledByNative
public static long getMinimumBufferedTime(int sampleRateInHz) {
int sizeBytes = AudioTrack.getMinBufferSize(sampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
long sizeUs = SEC_IN_USEC * (long) sizeBytes / (BYTES_PER_FRAME * (long) sampleRateInHz);
public static long getMinimumBufferedTime(int channelCount, int sampleRateInHz) {
int sizeBytes = AudioTrack.getMinBufferSize(
sampleRateInHz, getChannelConfig(channelCount), AUDIO_FORMAT);
long sizeUs = SEC_IN_USEC * (long) sizeBytes
/ (BYTES_PER_SAMPLE * channelCount * (long) sampleRateInHz);
return sizeUs + MIN_BUFFERED_TIME_PADDING_US;
}
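
As a rough worked example (the buffer size here is hypothetical, not measured): a stereo 48 kHz float stream has 2 * 4 = 8 bytes per frame, so a reported minimum buffer of 16384 bytes corresponds to 1,000,000 * 16384 / (8 * 48000) ≈ 42,700 us, i.e. about 43 ms before MIN_BUFFERED_TIME_PADDING_US is added. A 6-channel stream with the same byte budget covers only a third of that time, which is why the channel count now enters the formula.
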
......@@ -301,8 +324,8 @@ class AudioSinkAudioTrackImpl {
* the shared memory buffers.
*/
@CalledByNative
private void init(
@AudioContentType int castContentType, int sampleRateInHz, int bytesPerBuffer) {
private void init(@AudioContentType int castContentType, int channelCount, int sampleRateInHz,
int bytesPerBuffer) {
mTag = TAG + "(" + castContentType + ":" + (sInstanceCounter++) + ")";
// Setup throttled logs: pass the first 5, then every 1sec, reset after 5.
......@@ -312,7 +335,7 @@ class AudioSinkAudioTrackImpl {
Log.i(mTag,
"Init:"
+ " sampleRateInHz=" + sampleRateInHz
+ " channelCount=" + channelCount + " sampleRateInHz=" + sampleRateInHz
+ " bytesPerBuffer=" + bytesPerBuffer);
if (mIsInitialized) {
......@@ -325,6 +348,7 @@ class AudioSinkAudioTrackImpl {
return;
}
mSampleRateInHz = sampleRateInHz;
mChannelCount = channelCount;
int usageType = CAST_TYPE_TO_ANDROID_USAGE_TYPE_MAP.get(castContentType);
int contentType = CAST_TYPE_TO_ANDROID_CONTENT_TYPE_MAP.get(castContentType);
......@@ -337,9 +361,11 @@ class AudioSinkAudioTrackImpl {
}
// AudioContentType.ALARM doesn't get a sessionId.
int channelConfig = getChannelConfig(mChannelCount);
int bufferSizeInBytes = MIN_BUFFER_SIZE_MULTIPLIER
* AudioTrack.getMinBufferSize(mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
int bufferSizeInMs = 1000 * bufferSizeInBytes / (BYTES_PER_FRAME * mSampleRateInHz);
* AudioTrack.getMinBufferSize(mSampleRateInHz, channelConfig, AUDIO_FORMAT);
int bufferSizeInMs =
1000 * bufferSizeInBytes / (BYTES_PER_SAMPLE * mChannelCount * mSampleRateInHz);
Log.i(mTag,
"Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + bufferSizeInMs
+ "ms) usageType=" + usageType + " contentType=" + contentType
......@@ -359,7 +385,7 @@ class AudioSinkAudioTrackImpl {
.setAudioFormat(new AudioFormat.Builder()
.setEncoding(AUDIO_FORMAT)
.setSampleRate(mSampleRateInHz)
.setChannelMask(CHANNEL_CONFIG)
.setChannelMask(channelConfig)
.build());
if (sessionId != AudioManager.ERROR) builder.setSessionId(sessionId);
mAudioTrack = builder.build();
......@@ -368,11 +394,11 @@ class AudioSinkAudioTrackImpl {
// Using pre-M API.
if (sessionId == AudioManager.ERROR) {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes,
mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM);
} else {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes,
mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM, sessionId);
}
}
......@@ -548,13 +574,13 @@ class AudioSinkAudioTrackImpl {
}
}
int framesWritten = bytesWritten / BYTES_PER_FRAME;
int framesWritten = bytesWritten / (BYTES_PER_SAMPLE * mChannelCount);
mTotalFramesWritten += framesWritten;
if (DEBUG_LEVEL >= 3) {
Log.i(mTag,
" wrote " + bytesWritten + "/" + sizeInBytes
+ " total_bytes_written=" + (mTotalFramesWritten * BYTES_PER_FRAME)
" wrote " + bytesWritten + "/" + sizeInBytes + " total_bytes_written="
+ (mTotalFramesWritten * BYTES_PER_SAMPLE * mChannelCount)
+ " took:" + (SystemClock.elapsedRealtime() - beforeMsecs) + "ms");
}
......
......@@ -203,6 +203,8 @@ class CastAudioDecoderImpl : public CastAudioDecoder {
}
if (decoded->channel_count() != config_.channel_number) {
LOG(WARNING) << "channel_count changed to " << decoded->channel_count()
<< " from " << config_.channel_number;
config_.channel_number = decoded->channel_count();
decoded_bus_.reset();
}
......