Commit af1afa54 authored by Andrew Scherkus's avatar Andrew Scherkus

Have AudioRendererImpl advance time until it's told to stop.

This removes the hacky fall-back-to-interpolated-time code used when audio
ends before video. Since we still receive audio render callbacks during
this time, we can simply account for how much silence we've written to
compute the media time.

More importantly, this brings AudioRendererImpl's TimeSource
implementation in line with how WallClockTimeSource operates and gets
us one step closer to removing TimeDeltaInterpolator entirely.

BUG=370634
R=dalecurtis@chromium.org, xhwang@chromium.org

Review URL: https://codereview.chromium.org/518613002

Cr-Commit-Position: refs/heads/master@{#293343}
parent 1d25bf76
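A minimal standalone sketch of the accounting described above (illustrative numbers, 1.0x playback assumed; this is not the Chromium code, which appears in the diff below):

  #include <cstdio>

  // Illustration only: after end of stream, each render callback is treated
  // as a full buffer of written silence, so media time keeps advancing
  // instead of freezing at the last decoded frame.
  int main() {
    const int sample_rate = 48000;     // hypothetical device sample rate
    const int requested_frames = 512;  // hypothetical callback size

    double media_time_ms = 0.0;
    for (int callback = 0; callback < 10; ++callback) {
      const int silent_frames_accounted = requested_frames;
      media_time_ms += 1000.0 * silent_frames_accounted / sample_rate;
    }

    // Prints ~106.7 ms: time advanced across ten silent callbacks at 1.0x.
    std::printf("advanced %.1f ms\n", media_time_ms);
    return 0;
  }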
@@ -18,8 +18,8 @@ AudioClock::AudioClock(base::TimeDelta start_timestamp, int sample_rate)
static_cast<double>(base::Time::kMicrosecondsPerSecond) /
sample_rate),
total_buffered_frames_(0),
current_media_timestamp_(start_timestamp),
audio_data_buffered_(0) {
front_timestamp_(start_timestamp),
back_timestamp_(start_timestamp) {
}
AudioClock::~AudioClock() {
@@ -35,7 +35,7 @@ void AudioClock::WroteAudio(int frames_written,
DCHECK_GE(playback_rate, 0);
// First write: initialize buffer with silence.
if (start_timestamp_ == current_media_timestamp_ && buffered_.empty())
if (start_timestamp_ == front_timestamp_ && buffered_.empty())
PushBufferedAudioData(delay_frames, 0.0f);
// Move frames from |buffered_| into the computed timestamp based on
@@ -45,24 +45,24 @@ void AudioClock::WroteAudio(int frames_written,
// reallocations in cases where |buffered_| gets emptied.
int64_t frames_played =
std::max(INT64_C(0), total_buffered_frames_ - delay_frames);
current_media_timestamp_ += ComputeBufferedMediaTime(frames_played);
front_timestamp_ += ComputeBufferedMediaTime(frames_played);
PushBufferedAudioData(frames_written, playback_rate);
PushBufferedAudioData(frames_requested - frames_written, 0.0f);
PopBufferedAudioData(frames_played);
back_timestamp_ += base::TimeDelta::FromMicroseconds(
frames_written * playback_rate * microseconds_per_frame_);
// Update cached values.
double scaled_frames = 0;
double scaled_frames_at_same_rate = 0;
bool found_silence = false;
audio_data_buffered_ = false;
for (size_t i = 0; i < buffered_.size(); ++i) {
if (buffered_[i].playback_rate == 0) {
found_silence = true;
continue;
}
audio_data_buffered_ = true;
// Any buffered silence breaks our contiguous stretch of audio data.
if (found_silence)
break;
@@ -80,12 +80,12 @@ void AudioClock::WroteAudio(int frames_written,
microseconds_per_frame_);
}
base::TimeDelta AudioClock::CurrentMediaTimestampSinceWriting(
base::TimeDelta AudioClock::TimestampSinceWriting(
base::TimeDelta time_since_writing) const {
int64_t frames_played_since_writing = std::min(
total_buffered_frames_,
static_cast<int64_t>(time_since_writing.InSecondsF() * sample_rate_));
return current_media_timestamp_ +
return front_timestamp_ +
ComputeBufferedMediaTime(frames_played_since_writing);
}
......
@@ -16,6 +16,36 @@ namespace media {
// estimating the amount of delay in wall clock time. Takes changes in playback
// rate into account to handle scenarios where multiple rates may be present in
// a playback pipeline with large delay.
//
//
// USAGE
//
// Prior to starting audio playback, construct an AudioClock with an initial
// media timestamp and a sample rate matching the sample rate the audio device
// was opened at.
//
// Each time the audio rendering callback is executed, call WroteAudio() once
// (and only once!) with information on what was written:
// 1) How many frames of audio data were requested
// 2) How many frames of audio data were provided
// 3) The playback rate of the provided audio data
// 4) The current amount of delay, in frames
//
// After a call to WroteAudio(), clients can inspect the resulting media
// timestamp. This can be used for UI purposes, synchronizing video, etc...
//
//
// DETAILS
//
// Silence (whether caused by the initial audio delay or by failing to write
// the requested number of frames due to underflow) is also modeled and will cause
// the media timestamp to stop increasing until all known silence has been
// played. AudioClock's model is initialized with silence during the first call
// to WroteAudio() using the delay value.
//
// Playback rates are tracked for translating frame durations into media
// durations. Since silence doesn't affect media timestamps, it also isn't
// affected by playback rates.
class MEDIA_EXPORT AudioClock {
public:
AudioClock(base::TimeDelta start_timestamp, int sample_rate);
@@ -29,15 +59,29 @@ class MEDIA_EXPORT AudioClock {
int delay_frames,
float playback_rate);
// Calculates the current media timestamp taking silence and changes in
// playback rate into account.
base::TimeDelta current_media_timestamp() const {
return current_media_timestamp_;
}
// Returns the bounds of media data currently buffered by the audio hardware,
// taking silence and changes in playback rate into account. Buffered audio
// structure and timestamps are updated with every call to WroteAudio().
//
// start_timestamp = 1000 ms                             sample_rate = 40 Hz
// +-----------------------+-----------------------+-----------------------+
// |   10 frames silence   |    20 frames @ 1.0x   |    20 frames @ 0.5x   |
// |     = 250 ms (wall)   |     = 500 ms (wall)   |     = 500 ms (wall)   |
// |     = 0 ms (media)    |     = 500 ms (media)  |     = 250 ms (media)  |
// +-----------------------+-----------------------+-----------------------+
// ^                                                                       ^
// front_timestamp() is equal to |start_timestamp| since no media data has
// been played yet.
//
// back_timestamp() is equal to the amount of media tracked by AudioClock,
// which would be 1000 + 500 + 250 = 1750 ms.
base::TimeDelta front_timestamp() const { return front_timestamp_; }
base::TimeDelta back_timestamp() const { return back_timestamp_; }
// Clients can provide |time_since_writing| to simulate the passage of time
// since last writing audio to get a more accurate current media timestamp.
base::TimeDelta CurrentMediaTimestampSinceWriting(
//
// The value will be bounded between front_timestamp() and back_timestamp().
base::TimeDelta TimestampSinceWriting(
base::TimeDelta time_since_writing) const;
// Returns the amount of contiguous media time buffered at the head of the
@@ -53,10 +97,6 @@ class MEDIA_EXPORT AudioClock {
return contiguous_audio_data_buffered_at_same_rate_;
}
// Returns true if there is any audio data buffered by the audio hardware,
// even if there is silence mixed in.
bool audio_data_buffered() const { return audio_data_buffered_; }
private:
// Even with a ridiculously high sample rate of 256 kHz, using 64 bits will
// permit tracking up to 416999965 days' worth of time (that's 1141 millennia).
@@ -81,10 +121,10 @@
std::deque<AudioData> buffered_;
int64_t total_buffered_frames_;
base::TimeDelta current_media_timestamp_;
base::TimeDelta front_timestamp_;
base::TimeDelta back_timestamp_;
// Cached results of last call to WroteAudio().
bool audio_data_buffered_;
base::TimeDelta contiguous_audio_data_buffered_;
base::TimeDelta contiguous_audio_data_buffered_at_same_rate_;
......
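A sketch of driving the new AudioClock API per the USAGE comment above, reusing the numbers from the diagram (it assumes Chromium's base/ and media/ targets; the function name and delay values are invented for illustration so that nothing has been played out of the device yet):

  #include "base/time/time.h"
  #include "media/base/audio_clock.h"

  // Hypothetical driver: start_timestamp = 1000 ms, sample_rate = 40 Hz.
  void AudioClockUsageSketch() {
    media::AudioClock clock(base::TimeDelta::FromMilliseconds(1000), 40);

    // Callback 1: 20 frames written at 1.0x with 10 frames of initial device
    // delay. The first call models that delay as 10 frames of silence.
    clock.WroteAudio(20 /* written */, 20 /* requested */,
                     10 /* delay_frames */, 1.0f /* playback_rate */);

    // Callback 2: 20 frames written at 0.5x. The device still holds everything
    // written so far (10 + 20 = 30 frames of delay), so nothing has played.
    clock.WroteAudio(20, 20, 30, 0.5f);

    // clock.front_timestamp() == 1000 ms  (no media has been played yet)
    // clock.back_timestamp()  == 1750 ms  (1000 + 500 + 250)
    // clock.TimestampSinceWriting(base::TimeDelta::FromMilliseconds(250))
    //     == 1000 ms, since the first 10 frames to drain are silence.
  }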
@@ -148,6 +148,7 @@ void AudioRendererImpl::SetMediaTime(base::TimeDelta time) {
DCHECK_EQ(state_, kFlushed);
start_timestamp_ = time;
ended_timestamp_ = kInfiniteDuration();
audio_clock_.reset(new AudioClock(time, audio_parameters_.sample_rate()));
}
@@ -547,7 +548,6 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
const int delay_frames = static_cast<int>(playback_delay.InSecondsF() *
audio_parameters_.sample_rate());
int frames_written = 0;
base::Closure time_cb;
{
base::AutoLock auto_lock(lock_);
@@ -587,46 +587,59 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
frames_written =
algorithm_->FillBuffer(audio_bus, requested_frames, playback_rate_);
}
audio_clock_->WroteAudio(
frames_written, requested_frames, delay_frames, playback_rate_);
// Per the TimeSource API, the media time should always increase even after
// we've rendered all known audio data. Doing so simplifies scenarios where
// we have other sources of media data that need to be scheduled after the
// audio data has ended.
//
// That said, we don't want to advance time while underflowed, since we know
// more decoded frames will eventually arrive. If we did, we would throw
// things out of sync when those decoded frames arrive.
int frames_after_end_of_stream = 0;
if (frames_written == 0) {
if (received_end_of_stream_ && !rendered_end_of_stream_ &&
!audio_clock_->audio_data_buffered()) {
rendered_end_of_stream_ = true;
task_runner_->PostTask(FROM_HERE, ended_cb_);
} else if (!received_end_of_stream_ && state_ == kPlaying) {
if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
algorithm_->IncreaseQueueCapacity();
SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
}
if (received_end_of_stream_) {
if (ended_timestamp_ == kInfiniteDuration())
ended_timestamp_ = audio_clock_->back_timestamp();
frames_after_end_of_stream = requested_frames;
} else if (state_ == kPlaying &&
buffering_state_ != BUFFERING_HAVE_NOTHING) {
algorithm_->IncreaseQueueCapacity();
SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
}
}
audio_clock_->WroteAudio(frames_written + frames_after_end_of_stream,
requested_frames,
delay_frames,
playback_rate_);
if (CanRead_Locked()) {
task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioRendererImpl::AttemptRead,
weak_factory_.GetWeakPtr()));
}
// Firing |ended_cb_| means we no longer need to run |time_cb_|.
if (!rendered_end_of_stream_ &&
last_timestamp_update_ != audio_clock_->current_media_timestamp()) {
if (last_timestamp_update_ != audio_clock_->front_timestamp()) {
// Since |max_time| uses linear interpolation, only provide an upper bound
// that is for audio data at the same playback rate. Failing to do so can
// make time jump backwards when the linear interpolated time advances
// past buffered regions of audio at different rates.
last_timestamp_update_ = audio_clock_->current_media_timestamp();
last_timestamp_update_ = audio_clock_->front_timestamp();
base::TimeDelta max_time =
last_timestamp_update_ +
audio_clock_->contiguous_audio_data_buffered_at_same_rate();
time_cb = base::Bind(time_cb_, last_timestamp_update_, max_time);
task_runner_->PostTask(
FROM_HERE, base::Bind(time_cb_, last_timestamp_update_, max_time));
if (last_timestamp_update_ >= ended_timestamp_ &&
!rendered_end_of_stream_) {
rendered_end_of_stream_ = true;
task_runner_->PostTask(FROM_HERE, ended_cb_);
}
}
}
if (!time_cb.is_null())
task_runner_->PostTask(FROM_HERE, time_cb);
DCHECK_LE(frames_written, requested_frames);
return frames_written;
}
......
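To make the new ended timing concrete, a rough standalone estimate (my reading of the logic above; illustrative numbers, 1.0x playback, callback granularity ignored): |ended_timestamp_| captures the back timestamp when the stream ends, and |ended_cb_| fires once the front timestamp catches up, i.e. after the audio still queued in the device drains.

  #include <cstdio>

  // Illustration only: estimates how long after the last decoded frame the
  // ended callback fires, given a hypothetical device delay at end of stream.
  int main() {
    const int sample_rate = 48000;  // hypothetical device rate
    const int delay_frames = 4096;  // frames still queued in the device at EOS
    const double drain_ms = 1000.0 * delay_frames / sample_rate;

    // Prints ~85.3 ms: roughly how much later front_timestamp() reaches
    // ended_timestamp_ and |ended_cb_| is posted.
    std::printf("ended fires ~%.1f ms after end of stream\n", drain_ms);
    return 0;
  }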
@@ -249,6 +249,7 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioClock> audio_clock_;
base::TimeDelta start_timestamp_;
base::TimeDelta ended_timestamp_;
base::TimeDelta last_timestamp_update_;
// End variables which must be accessed under |lock_|. ----------------------
......
@@ -501,14 +501,6 @@ void RendererImpl::OnAudioRendererEnded() {
DCHECK(!audio_ended_);
audio_ended_ = true;
// Start clock since there is no more audio to trigger clock updates.
{
base::TimeDelta duration = get_duration_cb_.Run();
base::AutoLock auto_lock(interpolator_lock_);
interpolator_->SetUpperBound(duration);
StartClockIfWaitingForTimeUpdate_Locked();
}
RunEndedCallbackIfNeeded();
}
@@ -536,10 +528,8 @@ void RendererImpl::RunEndedCallbackIfNeeded() {
return;
{
base::TimeDelta duration = get_duration_cb_.Run();
base::AutoLock auto_lock(interpolator_lock_);
PauseClockAndStopTicking_Locked();
interpolator_->SetBounds(duration, duration);
}
ended_cb_.Run();
......
@@ -401,34 +401,6 @@ TEST_F(RendererImplTest, GetMediaTime) {
EXPECT_EQ(kAudioUpdateMaxTimeMs, GetMediaTimeMs());
}
TEST_F(RendererImplTest, AudioStreamShorterThanVideo) {
// Replace what's used for interpolating to simulate wall clock time.
renderer_impl_->SetTimeDeltaInterpolatorForTesting(
new TimeDeltaInterpolator(&test_tick_clock_));
InitializeWithAudioAndVideo();
Play();
EXPECT_EQ(kStartPlayingTimeInMs, GetMediaTimeMs());
// Verify that the clock doesn't advance since it hasn't been started by
// a time update from the audio stream.
EXPECT_FALSE(IsMediaTimeAdvancing());
// Signal end of audio stream.
audio_ended_cb_.Run();
base::RunLoop().RunUntilIdle();
// Verify that the clock advances.
EXPECT_TRUE(IsMediaTimeAdvancing());
// Signal end of video stream and make sure OnEnded() callback occurs.
EXPECT_CALL(time_source_, StopTicking());
EXPECT_CALL(callbacks_, OnEnded());
video_ended_cb_.Run();
base::RunLoop().RunUntilIdle();
}
TEST_F(RendererImplTest, AudioTimeUpdateDuringFlush) {
// Replace what's used for interpolating to simulate wall clock time.
renderer_impl_->SetTimeDeltaInterpolatorForTesting(
......