Commit 2c9ce61a authored by ziyangch's avatar ziyangch Committed by Commit Bot

[Chromecast] Allow arbitrary channel count for Android audio streams

Bug: internal b/140189221

Test: Cast from Youtube to Android TV.
      TTS on Android Things.

Change-Id: I1cc4e355a0644ff35193a62c47f7fa748de17fb2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1900151
Commit-Queue: Ziyang Cheng <ziyangch@chromium.org>
Reviewed-by: Yuchen Liu <yucliu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712797}
parent c2d2c1b4
......@@ -75,9 +75,9 @@ class AudioDecoderAndroid : public MediaPipelineBackend::AudioDecoder,
void OnSinkError(SinkError error) override;
void CleanUpPcm();
void ResetSinkForNewSampleRate(int sample_rate);
void ResetSinkForNewConfig(const AudioConfig& config);
void CreateDecoder();
void CreateRateShifter(int samples_per_second);
void CreateRateShifter(const AudioConfig& config);
void OnDecoderInitialized(bool success);
void OnBufferDecoded(uint64_t input_bytes,
CastAudioDecoder::Status status,
......
......@@ -53,7 +53,7 @@ int64_t AudioSinkAndroid::GetMinimumBufferedTime(SinkType sink_type,
break;
case AudioSinkAndroid::kSinkTypeJavaBased:
return AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
config.samples_per_second);
config.channel_number, config.samples_per_second);
}
return kDefaultMinBufferTimeUs;
}
......@@ -70,6 +70,7 @@ void ManagedAudioSink::Reset() {
}
void ManagedAudioSink::Reset(Delegate* delegate,
int num_channels,
int samples_per_second,
bool primary,
const std::string& device_id,
......@@ -83,8 +84,9 @@ void ManagedAudioSink::Reset(Delegate* delegate,
NOTREACHED() << "Native-based audio sink is not implemented yet!";
break;
case AudioSinkAndroid::kSinkTypeJavaBased:
sink_ = new AudioSinkAndroidAudioTrackImpl(
delegate, samples_per_second, primary, device_id, content_type);
sink_ = new AudioSinkAndroidAudioTrackImpl(delegate, num_channels,
samples_per_second, primary,
device_id, content_type);
}
AudioSinkManager::Get()->Add(sink_);
}
......
......@@ -122,6 +122,7 @@ class ManagedAudioSink {
// the manager. If a valid instance existed on entry it is removed from the
// manager and deleted before creating the new one.
void Reset(Delegate* delegate,
int num_channels,
int samples_per_second,
bool primary,
const std::string& device_id,
......
......@@ -6,6 +6,7 @@
#include <algorithm>
#include <string>
#include <vector>
#include "base/bind.h"
#include "base/bind_helpers.h"
......@@ -67,18 +68,21 @@ bool AudioSinkAndroidAudioTrackImpl::GetSessionIds(int* media_id,
// static
int64_t AudioSinkAndroidAudioTrackImpl::GetMinimumBufferedTime(
int num_channels,
int samples_per_second) {
return Java_AudioSinkAudioTrackImpl_getMinimumBufferedTime(
base::android::AttachCurrentThread(), samples_per_second);
base::android::AttachCurrentThread(), num_channels, samples_per_second);
}
AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second,
bool primary,
const std::string& device_id,
AudioContentType content_type)
: delegate_(delegate),
num_channels_(num_channels),
input_samples_per_second_(input_samples_per_second),
primary_(primary),
device_id_(device_id),
......@@ -93,10 +97,12 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
state_(kStateUninitialized),
weak_factory_(this) {
LOG(INFO) << __func__ << "(" << this << "):"
<< " num_channels_=" << num_channels_
<< " input_samples_per_second_=" << input_samples_per_second_
<< " primary_=" << primary_ << " device_id_=" << device_id_
<< " content_type__=" << GetContentTypeName();
DCHECK(delegate_);
DCHECK_GT(num_channels_, 0);
// Create Java part and initialize.
DCHECK(j_audio_sink_audiotrack_impl_.is_null());
......@@ -106,7 +112,7 @@ AudioSinkAndroidAudioTrackImpl::AudioSinkAndroidAudioTrackImpl(
reinterpret_cast<intptr_t>(this)));
Java_AudioSinkAudioTrackImpl_init(
base::android::AttachCurrentThread(), j_audio_sink_audiotrack_impl_,
static_cast<int>(content_type_), input_samples_per_second_,
static_cast<int>(content_type_), num_channels_, input_samples_per_second_,
kDirectBufferSize);
// Should be set now.
DCHECK(direct_pcm_buffer_address_);
......@@ -266,20 +272,24 @@ void AudioSinkAndroidAudioTrackImpl::OnPlayoutDone() {
}
void AudioSinkAndroidAudioTrackImpl::ReformatData() {
// Data is in planar float format, i.e. all left samples first, then all
// right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR").
// Data is in planar float format, i.e., planar audio data for stereo is all
// left samples first, then all right -> "LLLLLLLLLLLLLLLLRRRRRRRRRRRRRRRR").
// AudioTrack needs interleaved format -> "LRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLR").
DCHECK(direct_pcm_buffer_address_);
DCHECK_EQ(0, static_cast<int>(pending_data_->data_size() % sizeof(float)));
CHECK(pending_data_->data_size() < kDirectBufferSize);
CHECK_LT(static_cast<int>(pending_data_->data_size()), kDirectBufferSize);
int num_of_samples = pending_data_->data_size() / sizeof(float);
int num_of_frames = num_of_samples / 2;
const float* src_left = reinterpret_cast<const float*>(pending_data_->data());
const float* src_right = src_left + num_of_samples / 2;
int num_of_frames = num_of_samples / num_channels_;
std::vector<const float*> src(num_channels_);
for (int c = 0; c < num_channels_; c++) {
src[c] = reinterpret_cast<const float*>(pending_data_->data()) +
c * num_of_frames;
}
float* dst = reinterpret_cast<float*>(direct_pcm_buffer_address_);
for (int f = 0; f < num_of_frames; f++) {
*dst++ = *src_left++;
*dst++ = *src_right++;
for (int c = 0; c < num_channels_; c++) {
*dst++ = *src[c]++;
}
}
}
......
......@@ -47,7 +47,8 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Returns true if the ids populated are valid.
static bool GetSessionIds(int* media_id, int* communication_id);
static int64_t GetMinimumBufferedTime(int samples_per_second);
static int64_t GetMinimumBufferedTime(int num_channels,
int samples_per_second);
// Called from Java so that we can cache the addresses of the Java-managed
// byte_buffers.
......@@ -81,6 +82,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
friend class ManagedAudioSink;
AudioSinkAndroidAudioTrackImpl(AudioSinkAndroid::Delegate* delegate,
int num_channels,
int input_samples_per_second,
bool primary,
const std::string& device_id,
......@@ -113,6 +115,7 @@ class AudioSinkAndroidAudioTrackImpl : public AudioSinkAndroid {
// Config parameters provided into c'tor.
Delegate* const delegate_;
const int num_channels_;
const int input_samples_per_second_;
const bool primary_;
const std::string device_id_;
......
......@@ -32,11 +32,11 @@ import java.nio.ByteOrder;
* Implements an audio sink object using Android's AudioTrack module to
* playback audio samples.
* It assumes the following fixed configuration parameters:
* - 2-channel audio
* - PCM audio format (i.e., no encoded data like mp3)
* - samples are 4-byte floats, interleaved channels ("LRLRLRLRLR").
* The only configurable audio parameter is the sample rate (typically 44.1 or
* 48 KHz).
* - samples are 4-byte floats, interleaved channels (i.e., interleaved audio
* data for stereo is "LRLRLRLRLR").
* The configurable audio parameters are the sample rate (typically 44.1 or
* 48 KHz) and the channel number.
*
* PCM data is shared through the JNI using memory-mapped ByteBuffer objects.
* The AudioTrack.write() function is called in BLOCKING mode. That means when
......@@ -88,10 +88,9 @@ class AudioSinkAudioTrackImpl {
};
// Hardcoded AudioTrack config parameters.
private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_OUT_STEREO;
private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
private static final int AUDIO_MODE = AudioTrack.MODE_STREAM;
private static final int BYTES_PER_FRAME = 2 * 4; // 2 channels, float (4-bytes)
private static final int BYTES_PER_SAMPLE = 4; // float (4-bytes)
// Parameter to determine the proper internal buffer size of the AudioTrack instance. In order
// to minimize latency we want a buffer as small as possible. However, to avoid underruns we
......@@ -175,6 +174,7 @@ class AudioSinkAudioTrackImpl {
// Dynamic AudioTrack config parameter.
private int mSampleRateInHz;
private int mChannelCount;
private AudioTrack mAudioTrack;
......@@ -229,10 +229,33 @@ class AudioSinkAudioTrackImpl {
return sAudioManager;
}
// Maps a raw channel count to the corresponding AudioFormat output channel
// mask used to configure the AudioTrack. Counts with no dedicated mask fall
// back to CHANNEL_OUT_DEFAULT.
private static int getChannelConfig(int channelCount) {
switch (channelCount) {
case 1:
return AudioFormat.CHANNEL_OUT_MONO;
case 2:
return AudioFormat.CHANNEL_OUT_STEREO;
case 4:
return AudioFormat.CHANNEL_OUT_QUAD;
case 6:
return AudioFormat.CHANNEL_OUT_5POINT1;
case 8:
// CHANNEL_OUT_7POINT1_SURROUND requires API level M (23); older
// releases only have the deprecated CHANNEL_OUT_7POINT1 mask.
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
return AudioFormat.CHANNEL_OUT_7POINT1_SURROUND;
} else {
return AudioFormat.CHANNEL_OUT_7POINT1;
}
default:
// NOTE(review): CHANNEL_OUT_DEFAULT lets the platform pick a layout;
// counts like 3, 5, or 7 are presumably not expected here — confirm
// against callers.
return AudioFormat.CHANNEL_OUT_DEFAULT;
}
}
@CalledByNative
public static long getMinimumBufferedTime(int sampleRateInHz) {
int sizeBytes = AudioTrack.getMinBufferSize(sampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
long sizeUs = SEC_IN_USEC * (long) sizeBytes / (BYTES_PER_FRAME * (long) sampleRateInHz);
public static long getMinimumBufferedTime(int channelCount, int sampleRateInHz) {
int sizeBytes = AudioTrack.getMinBufferSize(
sampleRateInHz, getChannelConfig(channelCount), AUDIO_FORMAT);
long sizeUs = SEC_IN_USEC * (long) sizeBytes
/ (BYTES_PER_SAMPLE * channelCount * (long) sampleRateInHz);
return sizeUs + MIN_BUFFERED_TIME_PADDING_US;
}
......@@ -301,8 +324,8 @@ class AudioSinkAudioTrackImpl {
* the shared memory buffers.
*/
@CalledByNative
private void init(
@AudioContentType int castContentType, int sampleRateInHz, int bytesPerBuffer) {
private void init(@AudioContentType int castContentType, int channelCount, int sampleRateInHz,
int bytesPerBuffer) {
mTag = TAG + "(" + castContentType + ":" + (sInstanceCounter++) + ")";
// Setup throttled logs: pass the first 5, then every 1sec, reset after 5.
......@@ -312,7 +335,7 @@ class AudioSinkAudioTrackImpl {
Log.i(mTag,
"Init:"
+ " sampleRateInHz=" + sampleRateInHz
+ " channelCount=" + channelCount + " sampleRateInHz=" + sampleRateInHz
+ " bytesPerBuffer=" + bytesPerBuffer);
if (mIsInitialized) {
......@@ -325,6 +348,7 @@ class AudioSinkAudioTrackImpl {
return;
}
mSampleRateInHz = sampleRateInHz;
mChannelCount = channelCount;
int usageType = CAST_TYPE_TO_ANDROID_USAGE_TYPE_MAP.get(castContentType);
int contentType = CAST_TYPE_TO_ANDROID_CONTENT_TYPE_MAP.get(castContentType);
......@@ -337,9 +361,11 @@ class AudioSinkAudioTrackImpl {
}
// AudioContentType.ALARM doesn't get a sessionId.
int channelConfig = getChannelConfig(mChannelCount);
int bufferSizeInBytes = MIN_BUFFER_SIZE_MULTIPLIER
* AudioTrack.getMinBufferSize(mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
int bufferSizeInMs = 1000 * bufferSizeInBytes / (BYTES_PER_FRAME * mSampleRateInHz);
* AudioTrack.getMinBufferSize(mSampleRateInHz, channelConfig, AUDIO_FORMAT);
int bufferSizeInMs =
1000 * bufferSizeInBytes / (BYTES_PER_SAMPLE * mChannelCount * mSampleRateInHz);
Log.i(mTag,
"Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + bufferSizeInMs
+ "ms) usageType=" + usageType + " contentType=" + contentType
......@@ -359,7 +385,7 @@ class AudioSinkAudioTrackImpl {
.setAudioFormat(new AudioFormat.Builder()
.setEncoding(AUDIO_FORMAT)
.setSampleRate(mSampleRateInHz)
.setChannelMask(CHANNEL_CONFIG)
.setChannelMask(channelConfig)
.build());
if (sessionId != AudioManager.ERROR) builder.setSessionId(sessionId);
mAudioTrack = builder.build();
......@@ -368,11 +394,11 @@ class AudioSinkAudioTrackImpl {
// Using pre-M API.
if (sessionId == AudioManager.ERROR) {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes,
mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM);
} else {
mAudioTrack = new AudioTrack(CAST_TYPE_TO_ANDROID_STREAM_TYPE.get(castContentType),
mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT, bufferSizeInBytes,
mSampleRateInHz, channelConfig, AUDIO_FORMAT, bufferSizeInBytes,
AudioTrack.MODE_STREAM, sessionId);
}
}
......@@ -548,13 +574,13 @@ class AudioSinkAudioTrackImpl {
}
}
int framesWritten = bytesWritten / BYTES_PER_FRAME;
int framesWritten = bytesWritten / (BYTES_PER_SAMPLE * mChannelCount);
mTotalFramesWritten += framesWritten;
if (DEBUG_LEVEL >= 3) {
Log.i(mTag,
" wrote " + bytesWritten + "/" + sizeInBytes
+ " total_bytes_written=" + (mTotalFramesWritten * BYTES_PER_FRAME)
" wrote " + bytesWritten + "/" + sizeInBytes + " total_bytes_written="
+ (mTotalFramesWritten * BYTES_PER_SAMPLE * mChannelCount)
+ " took:" + (SystemClock.elapsedRealtime() - beforeMsecs) + "ms");
}
......
......@@ -203,6 +203,8 @@ class CastAudioDecoderImpl : public CastAudioDecoder {
}
if (decoded->channel_count() != config_.channel_number) {
LOG(WARNING) << "channel_count changed to " << decoded->channel_count()
<< " from " << config_.channel_number;
config_.channel_number = decoded->channel_count();
decoded_bus_.reset();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment