Commit 45143094 authored by Anurag Agrawal, committed by Commit Bot

Use non-blocking mode for CMA backend audio code: initial implementation

Bug: http://b/111500583

[Audio] Use NON_BLOCKING mode in CMA backend audio code.

Ref: b/110722296
Change-Id: I04879e148e832c795fe2066143d8ef4e29ec5315
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1715474
Reviewed-by: Kenneth MacKay <kmackay@chromium.org>
Commit-Queue: Anurag Agrawal <anuragagrawal@google.com>
Cr-Commit-Position: refs/heads/master@{#684188}
parent 3e9fdd01
......@@ -12,7 +12,6 @@ import android.media.AudioManager;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.os.Build;
import android.os.SystemClock;
import android.support.annotation.IntDef;
import android.util.SparseIntArray;
......@@ -110,10 +109,14 @@ class AudioSinkAudioTrackImpl {
private static final long UNDERRUN_LOG_THROTTLE_PERIOD = SEC_IN_NSEC;
// Internally Android fetches data from AudioTrack buffer in periods of 20ms.
private static final long ANDROID_AUDIO_PERIOD_SIZE_US = 20000;
private static final long ANDROID_AUDIO_PERIOD_SIZE_USEC = 20000;
// Minimum amount of data written to the AudioTrack before we can call play().
// This is needed to avoid hitting a (false) underrun.
private static final long PLAY_START_THRESHOLD_USEC = 40000;
// Threshold at which we start logging low buffer warnings.
private static final long VERY_LOW_BUFFER_LEVEL = ANDROID_AUDIO_PERIOD_SIZE_US;
private static final long VERY_LOW_BUFFER_LEVEL_USEC = ANDROID_AUDIO_PERIOD_SIZE_USEC;
private static long sInstanceCounter;
......@@ -132,7 +135,7 @@ class AudioSinkAudioTrackImpl {
private static final long MAX_TIME_IGNORING_TSTAMPS_NSECS = SEC_IN_NSEC;
// Additional padding for minimum buffer time, determined experimentally.
private static final long MIN_BUFFERED_TIME_PADDING_US = ANDROID_AUDIO_PERIOD_SIZE_US;
private static final long MIN_BUFFERED_TIME_PADDING_USEC = ANDROID_AUDIO_PERIOD_SIZE_USEC;
private static AudioManager sAudioManager;
......@@ -146,6 +149,7 @@ class AudioSinkAudioTrackImpl {
private ThrottledLog mBufferLevelWarningLog;
private ThrottledLog mUnderrunWarningLog;
private ThrottledLog mTStampJitterWarningLog;
private ThrottledLog mStatisticsLog;
@IntDef({ReferenceTimestampState.STARTING_UP, ReferenceTimestampState.STABLE,
ReferenceTimestampState.RESYNCING_AFTER_PAUSE,
......@@ -174,6 +178,9 @@ class AudioSinkAudioTrackImpl {
private AudioTrack mAudioTrack;
private long mBufferSizeInSec; // Size of AudioTrack Buffer allocated.
private long mBufferLevelUsec;
// Timestamping logic for RenderingDelay calculations. See also the description for
// getNewFramePos0Timestamp() for additional information.
private long mRefNanoTimeAtFramePos0; // Reference time used to interpolate new timestamps at
......@@ -213,7 +220,7 @@ class AudioSinkAudioTrackImpl {
* Converts the given nanoseconds value into microseconds with proper rounding. It is assumed
* that the value given is positive.
*/
private static long convertNsecsToUsecs(long nsecs) {
private static long nSecToUsec(long nsecs) {
return (nsecs + 500) / 1000;
}
......@@ -229,7 +236,7 @@ class AudioSinkAudioTrackImpl {
/**
 * Returns the minimum amount of buffered time (in usec) needed for the given sample
 * rate: the platform's minimum AudioTrack buffer size expressed as a playback
 * duration, plus an experimentally determined padding.
 *
 * Note: the diff residue had left both the old (MIN_BUFFERED_TIME_PADDING_US) and new
 * (MIN_BUFFERED_TIME_PADDING_USEC) return statements in place, which is unreachable
 * code; only the renamed-constant version is kept.
 */
public static long getMinimumBufferedTime(int sampleRateInHz) {
    int sizeBytes = AudioTrack.getMinBufferSize(sampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
    // bytes / (bytes-per-frame * frames-per-second), scaled to microseconds.
    long sizeUs = SEC_IN_USEC * (long) sizeBytes / (BYTES_PER_FRAME * (long) sampleRateInHz);
    return sizeUs + MIN_BUFFERED_TIME_PADDING_USEC;
}
@CalledByNative
......@@ -286,12 +293,6 @@ class AudioSinkAudioTrackImpl {
return mLastTimestampUpdateNsec != NO_TIMESTAMP;
}
/**
 * Converts a frame count into the equivalent nanoTime duration at the current sample
 * rate, rounded to the nearest nanosecond (all values assumed non-negative).
 */
private long convertFramesToNanoTime(long numOfFrames) {
    long scaledNanos = SEC_IN_NSEC * numOfFrames;
    // Adding half the divisor before dividing yields round-to-nearest.
    return (scaledNanos + mSampleRateInHz / 2) / mSampleRateInHz;
}
/**
* Initializes the instance by creating the AudioTrack object and allocating
* the shared memory buffers.
......@@ -305,6 +306,7 @@ class AudioSinkAudioTrackImpl {
mBufferLevelWarningLog = new ThrottledLog(Log::w, 5, 1000, 5000);
mUnderrunWarningLog = new ThrottledLog(Log::w, 5, 1000, 5000);
mTStampJitterWarningLog = new ThrottledLog(Log::w, 5, 1000, 5000);
mStatisticsLog = new ThrottledLog(Log::i, 1, 1000, 5000);
Log.i(mTag,
"Init:"
......@@ -335,10 +337,10 @@ class AudioSinkAudioTrackImpl {
int bufferSizeInBytes = MIN_BUFFER_SIZE_MULTIPLIER
* AudioTrack.getMinBufferSize(mSampleRateInHz, CHANNEL_CONFIG, AUDIO_FORMAT);
int bufferSizeInMs = 1000 * bufferSizeInBytes / (BYTES_PER_FRAME * mSampleRateInHz);
mBufferSizeInSec = bytesToSec(bufferSizeInBytes);
Log.i(mTag,
"Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + bufferSizeInMs
+ "ms) usageType=" + usageType + " contentType=" + contentType
"Init: create an AudioTrack of size=" + bufferSizeInBytes + " (" + mBufferSizeInSec
+ "sec) usageType=" + usageType + " contentType=" + contentType
+ " with session-id=" + sessionId);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
......@@ -410,16 +412,10 @@ class AudioSinkAudioTrackImpl {
return mAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED;
}
/** Returns true when the underlying AudioTrack is currently in the PLAYING state. */
private boolean isPlaying() {
    int state = mAudioTrack.getPlayState();
    return state == AudioTrack.PLAYSTATE_PLAYING;
}
/** Returns true when the underlying AudioTrack is currently in the PAUSED state. */
private boolean isPaused() {
    int state = mAudioTrack.getPlayState();
    return state == AudioTrack.PLAYSTATE_PAUSED;
}
/** Stops the AudioTrack and returns an estimate of the time it takes for the remaining data
* left in the internal queue to be played out (in usecs). */
/**
* Stops the AudioTrack and returns an estimate of the time it takes for the remaining data
* left in the internal queue to be played out (in usecs).
*/
@CalledByNative
private long prepareForShutdown() {
long playtimeLeftNsecs;
......@@ -439,7 +435,7 @@ class AudioSinkAudioTrackImpl {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
long most_frames_left =
Math.min(mTotalFramesWritten, mAudioTrack.getBufferSizeInFrames());
playtimeLeftNsecs = convertFramesToNanoTime(most_frames_left);
playtimeLeftNsecs = SEC_IN_NSEC * framesToSec(most_frames_left);
} else {
// Using pre-M API. Don't know how many frames there are, so assume the worst case.
playtimeLeftNsecs = 0;
......@@ -449,8 +445,10 @@ class AudioSinkAudioTrackImpl {
}
@CalledByNative
/** Closes the instance by stopping playback and releasing the AudioTrack
* object. */
/**
* Closes the instance by stopping playback and releasing the AudioTrack
* object.
*/
private void close() {
Log.i(mTag, "Close AudioSinkAudioTrackImpl!");
if (!mIsInitialized) {
......@@ -475,7 +473,7 @@ class AudioSinkAudioTrackImpl {
}
}
int getUnderrunCount() {
private int getUnderrunCount() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
return mAudioTrack.getUnderrunCount();
}
......@@ -483,18 +481,62 @@ class AudioSinkAudioTrackImpl {
return 0;
}
/**
 * Converts the given number of bytes into the equivalent frame count, rounding to the
 * nearest frame. (A stale, unterminated comment opener left over from the diff — the
 * old writePcm() doc line — preceded this method and would not compile; it is removed.)
 */
private long bytesToFrames(long bytes) {
    return (bytes + BYTES_PER_FRAME / 2) / BYTES_PER_FRAME;
}
/** Convert the given number of frames into an equivalent seconds */
private long framesToSec(long frames) {
    // NOTE(review): integer division truncates toward zero, so any duration shorter
    // than one full second maps to 0. Callers that scale the result back up (e.g.
    // SEC_IN_USEC * framesToSec(...) in checkBufferLevel/waitUntilBufferAvail) lose
    // all sub-second precision — confirm this coarse granularity is intended.
    return frames / mSampleRateInHz;
}
/**
 * Converts a byte count into the equivalent duration in whole seconds, using a
 * round-to-nearest byte-to-frame conversion followed by frame-to-second conversion.
 */
private long bytesToSec(long bytes) {
    long frames = bytesToFrames(bytes);
    return framesToSec(frames);
}
/**
 * Starts playback once enough audio has been written to the AudioTrack to avoid
 * hitting a (false) underrun right after play().
 */
private void startPlayingIfItIsTime() {
    // PLAY_START_THRESHOLD_USEC is a duration in microseconds; convert the written
    // frame count into microseconds before comparing. (The original compared raw
    // frames against microseconds — a unit mismatch that at 48 kHz delays playback
    // start until ~833 ms of data is buffered instead of the intended 40 ms.)
    long writtenUsec = SEC_IN_USEC * mTotalFramesWritten / mSampleRateInHz;
    if (writtenUsec > PLAY_START_THRESHOLD_USEC) {
        play();
    }
}
/**
 * Sleeps the calling thread until enough space should be available in the AudioTrack
 * buffer to accept {@code bytes} more bytes of PCM data. The sleep time is the
 * playback duration of {@code bytes}, rounded up to a multiple of the internal
 * Android audio fetch period, and capped at half the total buffer duration.
 */
private void waitUntilBufferAvail(int bytes) {
    // NOTE(review): bytesToSec() truncates to whole seconds, so for any chunk shorter
    // than one second this computes 0 and we sleep 0 ms (effectively a busy spin) —
    // confirm a usec-granularity conversion isn't intended here.
    long sleepTimeUsec = SEC_IN_USEC * bytesToSec(bytes);
    long bufferSizeInUsec = SEC_IN_USEC * mBufferSizeInSec;
    if (sleepTimeUsec >= bufferSizeInUsec / 2) {
        // Never sleep longer than half the total buffer duration.
        sleepTimeUsec = bufferSizeInUsec / 2;
    } else {
        // Round up to the next multiple of the Android audio fetch period.
        long d = sleepTimeUsec / ANDROID_AUDIO_PERIOD_SIZE_USEC;
        if (d * ANDROID_AUDIO_PERIOD_SIZE_USEC < sleepTimeUsec) {
            sleepTimeUsec = (d + 1) * ANDROID_AUDIO_PERIOD_SIZE_USEC;
        }
    }
    if (DEBUG_LEVEL >= 1) {
        Log.i(mTag, "Buffer full, sleep for " + sleepTimeUsec + "usec");
    }
    try {
        Thread.sleep(sleepTimeUsec / 1000);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers up the stack can observe the
        // interruption (the original swallowed it, losing the signal).
        Thread.currentThread().interrupt();
        Log.i(mTag, "Sleep was interrupted: " + ex);
    }
}
/**
* Writes the PCM data of the given size into the AudioTrack object. The
* PCM data is provided through the memory-mapped ByteBuffer.
*
* Returns the number of bytes written into the AudioTrack object, -1 for
* error.
*/
@CalledByNative
private int writePcm(int sizeInBytes) {
private int writePcm(int totalBytesToWrite) {
if (DEBUG_LEVEL >= 3) {
Log.i(mTag,
"Writing new PCM data:"
+ " sizeInBytes=" + sizeInBytes + " state=" + getPlayStateString()
"Writing new PCM data [" + totalBytesToWrite + "] state=" + getPlayStateString()
+ " underruns=" + mLastUnderrunCount);
}
......@@ -504,15 +546,22 @@ class AudioSinkAudioTrackImpl {
}
// Check buffer level before feeding in new data.
if (haveValidRefPoint()) checkBufferLevel();
checkBufferLevel();
// Setup the PCM ByteBuffer correctly.
mPcmBuffer.limit(sizeInBytes);
mPcmBuffer.position(0);
mPcmBuffer.limit(totalBytesToWrite);
long maxTimeToWriteNsec = 2 * bytesToSec(totalBytesToWrite) * SEC_IN_NSEC;
long startTimeNsec = System.nanoTime();
int bytesLeftToWrite = totalBytesToWrite;
int bytesWritten = 0;
// Feed into AudioTrack - blocking call.
long beforeMsecs = SystemClock.elapsedRealtime();
int bytesWritten = mAudioTrack.write(mPcmBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
while (bytesLeftToWrite > 0) {
mPcmBuffer.position(totalBytesToWrite - bytesLeftToWrite);
long beforeNsecs = System.nanoTime();
bytesWritten =
mAudioTrack.write(mPcmBuffer, bytesLeftToWrite, AudioTrack.WRITE_NON_BLOCKING);
if (bytesWritten < 0) {
int error = bytesWritten;
......@@ -520,49 +569,47 @@ class AudioSinkAudioTrackImpl {
return error;
}
mTotalFramesWritten = bytesToFrames(bytesWritten);
bytesLeftToWrite -= bytesWritten;
if (isStopped()) {
// Data was written, start playing now.
play();
// If not all data fit on the previous write() call (since we were not in PLAYING state
// it didn't block), do a second (now blocking) call to write().
int bytesLeft = sizeInBytes - bytesWritten;
if (bytesLeft > 0) {
mPcmBuffer.position(bytesWritten);
int moreBytesWritten =
mAudioTrack.write(mPcmBuffer, bytesLeft, AudioTrack.WRITE_BLOCKING);
if (moreBytesWritten < 0) {
int error = moreBytesWritten;
Log.e(mTag, "Couldn't write into AudioTrack (" + error + ")");
return error;
}
bytesWritten += moreBytesWritten;
}
startPlayingIfItIsTime();
}
int framesWritten = bytesWritten / BYTES_PER_FRAME;
mTotalFramesWritten += framesWritten;
if (DEBUG_LEVEL >= 3) {
long lastRenderingDelayUsec =
(mLastRenderingDelayUsecs == NO_TIMESTAMP) ? -1 : mLastRenderingDelayUsecs;
Log.i(mTag,
" wrote " + bytesWritten + "/" + sizeInBytes
" wrote " + bytesWritten + "/" + bytesWritten
+ " total_bytes_written=" + (mTotalFramesWritten * BYTES_PER_FRAME)
+ " took:" + (SystemClock.elapsedRealtime() - beforeMsecs) + "ms");
+ " took:" + (elapsedNsec(beforeNsecs) / MSEC_IN_NSEC) + "ms"
+ " RenderingDelayUsec=" + lastRenderingDelayUsec
+ " BufferLevelUsec=" + mBufferLevelUsec);
}
if (bytesWritten < sizeInBytes && isPaused()) {
// We are in PAUSED state, in which case the write() is non-blocking. If not all data
// was written, we will come back here once we transition back into PLAYING state.
return bytesWritten;
if (bytesLeftToWrite > 0) {
waitUntilBufferAvail(bytesLeftToWrite);
checkBufferLevel();
maxTimeToWriteNsec = 2 * bytesToSec(bytesLeftToWrite) * SEC_IN_NSEC;
startTimeNsec = System.nanoTime();
}
updateSampleRateMeasure(framesWritten);
if (bytesLeftToWrite > 0 && elapsedNsec(startTimeNsec) > maxTimeToWriteNsec) {
Log.e(mTag, "Took too long to write all data, abort!");
break;
}
}
int totalBytesWritten = totalBytesToWrite - bytesLeftToWrite;
updateSampleRateMeasure(bytesToFrames(totalBytesWritten));
updateRenderingDelay();
updateRefPointTimestamp();
// TODO(ckuiper): Log key statistics (SR and underruns, e.g.) in regular intervals
long lastRenderingDelayUsec =
(mLastRenderingDelayUsecs == NO_TIMESTAMP) ? -1 : mLastRenderingDelayUsecs;
mStatisticsLog.log(mTag,
"SampleRateHz=" + mSampleRateInHz + " RenderingDelayUsec=" + lastRenderingDelayUsec
+ " BufferLevelUsec=" + mBufferLevelUsec);
return bytesWritten;
return totalBytesWritten;
}
/** Returns the elapsed time from the given start_time until now, in nsec. */
......@@ -572,13 +619,13 @@ class AudioSinkAudioTrackImpl {
private void checkBufferLevel() {
long bufferLevel = mTotalFramesWritten - mAudioTrack.getPlaybackHeadPosition();
long bufferLevelUsec = convertNsecsToUsecs(convertFramesToNanoTime(bufferLevel));
if (bufferLevelUsec <= VERY_LOW_BUFFER_LEVEL) {
mBufferLevelUsec = SEC_IN_USEC * framesToSec(bufferLevel);
if (mBufferLevelUsec <= VERY_LOW_BUFFER_LEVEL_USEC) {
long lastRenderingDelayUsec =
(mLastRenderingDelayUsecs == NO_TIMESTAMP) ? -1 : mLastRenderingDelayUsecs;
boolean hitUnderrun = (getUnderrunCount() != mLastUnderrunCount);
mBufferLevelWarningLog.log(mTag,
"Low buffer level=" + bufferLevelUsec + "us "
"Low buffer level=" + mBufferLevelUsec + "us "
+ " RD=" + lastRenderingDelayUsec + (hitUnderrun ? "us *" : "us"));
}
}
......@@ -613,8 +660,8 @@ class AudioSinkAudioTrackImpl {
// Interpolate to get proper Rendering delay.
long playoutTimeNsecs = getInterpolatedTStampNsecs(mTotalFramesWritten);
long playoutTimeUsecs = convertNsecsToUsecs(playoutTimeNsecs);
long nowUsecs = convertNsecsToUsecs(System.nanoTime());
long playoutTimeUsecs = nSecToUsec(playoutTimeNsecs);
long nowUsecs = nSecToUsec(System.nanoTime());
long delayUsecs = playoutTimeUsecs - nowUsecs;
// Populate RenderingDelay return value for native land.
......@@ -647,14 +694,14 @@ class AudioSinkAudioTrackImpl {
return NO_TIMESTAMP;
}
mOriginalFramePosOfLastTimestamp = ts.framePosition;
return ts.nanoTime - convertFramesToNanoTime(ts.framePosition);
return ts.nanoTime - (SEC_IN_NSEC * framesToSec(ts.framePosition));
}
/**
 * Returns a timestamp for the given frame position, interpolated from the reference
 * timestamp. (Diff residue had left both the old convertFramesToNanoTime() return and
 * the new framesToSec() return in place; only the new version is kept.)
 */
private long getInterpolatedTStampNsecs(long framePosition) {
    // NOTE(review): framesToSec() truncates to whole seconds, so this interpolation
    // loses all sub-second precision compared to the removed convertFramesToNanoTime()
    // path — confirm this is acceptable for RenderingDelay accuracy.
    return mRefNanoTimeAtFramePos0 + (SEC_IN_NSEC * framesToSec(framePosition));
}
/** Checks for underruns and if detected invalidates the reference point timestamp. */
......@@ -780,7 +827,7 @@ class AudioSinkAudioTrackImpl {
long devNsec = mRefNanoTimeAtFramePos0 - newNanoTimeAtFramePos0;
if (Math.abs(devNsec) > TSTAMP_DEV_THRESHOLD_TO_IGNORE_NSEC) {
mTStampJitterWarningLog.log(
mTag, "Too jittery timestamp (" + convertNsecsToUsecs(devNsec) + ")");
mTag, "Too jittery timestamp (" + nSecToUsec(devNsec) + ")");
long timeSinceLastGoodTstamp = elapsedNsec(mLastTimestampUpdateNsec);
if (timeSinceLastGoodTstamp <= MAX_TIME_IGNORING_TSTAMPS_NSECS) {
return; // Ignore this one.
......@@ -800,8 +847,8 @@ class AudioSinkAudioTrackImpl {
// Got a new value.
if (DEBUG_LEVEL >= 1) {
long dev1 = convertNsecsToUsecs(prevRefNanoTimeAtFramePos0 - newNanoTimeAtFramePos0);
long dev2 = convertNsecsToUsecs(prevRefNanoTimeAtFramePos0 - mRefNanoTimeAtFramePos0);
long dev1 = nSecToUsec(prevRefNanoTimeAtFramePos0 - newNanoTimeAtFramePos0);
long dev2 = nSecToUsec(prevRefNanoTimeAtFramePos0 - mRefNanoTimeAtFramePos0);
Log.i(mTag,
"Updated mRefNanoTimeAtFramePos0=" + mRefNanoTimeAtFramePos0 / 1000 + " us ("
+ dev1 + "/" + dev2 + ")");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment