Commit a3b17371 authored by scherkus's avatar scherkus Committed by Commit bot

Introduce media::AudioClock::TimeUntilPlayback().

Given a media timestamp within {front,back}_timestamp(), will return the amount
of wall time until being played by the audio hardware.

This function will be used with the upcoming video frame scheduler.

BUG=370634

Review URL: https://codereview.chromium.org/591603003

Cr-Commit-Position: refs/heads/master@{#296052}
parent 773a32c2
...@@ -89,6 +89,43 @@ base::TimeDelta AudioClock::TimestampSinceWriting( ...@@ -89,6 +89,43 @@ base::TimeDelta AudioClock::TimestampSinceWriting(
ComputeBufferedMediaTime(frames_played_since_writing); ComputeBufferedMediaTime(frames_played_since_writing);
} }
// Returns the wall-clock delay until |timestamp| reaches the audio hardware.
//
// |timestamp| must lie within [front_timestamp_, back_timestamp_], i.e. it
// must refer to media time that is currently buffered.
base::TimeDelta AudioClock::TimeUntilPlayback(base::TimeDelta timestamp) const {
  DCHECK(timestamp >= front_timestamp_);
  DCHECK(timestamp <= back_timestamp_);

  // Walk the buffered audio from the front, accumulating the number of
  // hardware frames that must play out before |timestamp| is reached.
  int64_t frames_before_target = 0;
  double target_us = timestamp.InMicroseconds();
  double block_start_us = front_timestamp_.InMicroseconds();
  for (size_t i = 0; i < buffered_.size(); ++i) {
    // Silent blocks advance wall time but not media time, so their frames are
    // always counted in full before any non-silent audio.
    if (buffered_[i].playback_rate == 0) {
      frames_before_target += buffered_[i].frames;
      continue;
    }

    // Media time covered by this block of frames at its playback rate.
    double block_duration_us = buffered_[i].frames * buffered_[i].playback_rate *
                               microseconds_per_frame_;
    double block_end_us = block_start_us + block_duration_us;

    // If the target falls inside this block, count only the proportional
    // share of the block's frames and stop scanning.
    if (target_us <= block_end_us) {
      frames_before_target +=
          buffered_[i].frames * (target_us - block_start_us) / block_duration_us;
      break;
    }

    block_start_us = block_end_us;
    frames_before_target += buffered_[i].frames;
  }

  // Frames convert back to wall time at the hardware rate, independent of the
  // playback rate of the buffered media.
  return base::TimeDelta::FromMicroseconds(frames_before_target *
                                           microseconds_per_frame_);
}
AudioClock::AudioData::AudioData(int64_t frames, float playback_rate)
    : frames(frames), playback_rate(playback_rate) {
}
......
...@@ -84,6 +84,12 @@ class MEDIA_EXPORT AudioClock { ...@@ -84,6 +84,12 @@ class MEDIA_EXPORT AudioClock {
base::TimeDelta TimestampSinceWriting( base::TimeDelta TimestampSinceWriting(
base::TimeDelta time_since_writing) const; base::TimeDelta time_since_writing) const;
// Returns the amount of wall time until |timestamp| will be played by the
// audio hardware.
//
// |timestamp| must be within front_timestamp() and back_timestamp().
base::TimeDelta TimeUntilPlayback(base::TimeDelta timestamp) const;
// Returns the amount of contiguous media time buffered at the head of the
// audio hardware buffer. Silence introduced into the audio hardware buffer is
// treated as a break in media time.
......
...@@ -39,6 +39,11 @@ class AudioClockTest : public testing::Test { ...@@ -39,6 +39,11 @@ class AudioClockTest : public testing::Test {
milliseconds)).InMilliseconds(); milliseconds)).InMilliseconds();
} }
int TimeUntilPlaybackInMilliseconds(int timestamp_ms) {
return clock_.TimeUntilPlayback(base::TimeDelta::FromMilliseconds(
timestamp_ms)).InMilliseconds();
}
int ContiguousAudioDataBufferedInDays() { int ContiguousAudioDataBufferedInDays() {
return clock_.contiguous_audio_data_buffered().InDays(); return clock_.contiguous_audio_data_buffered().InDays();
} }
...@@ -278,8 +283,9 @@ TEST_F(AudioClockTest, ZeroDelay) { ...@@ -278,8 +283,9 @@ TEST_F(AudioClockTest, ZeroDelay) {
TEST_F(AudioClockTest, TimestampSinceLastWriting) { TEST_F(AudioClockTest, TimestampSinceLastWriting) {
// Construct an audio clock with the following representation: // Construct an audio clock with the following representation:
// //
// |- existing delay -|------------ calls to WroteAudio() -----------------|
// +-------------------+----------------+------------------+----------------+ // +-------------------+----------------+------------------+----------------+
// | 10 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x | // | 20 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x |
// +-------------------+----------------+------------------+----------------+ // +-------------------+----------------+------------------+----------------+
// Media timestamp: 0 1000 1500 3500 // Media timestamp: 0 1000 1500 3500
// Wall clock time: 2000 3000 4000 5000 // Wall clock time: 2000 3000 4000 5000
...@@ -287,6 +293,7 @@ TEST_F(AudioClockTest, TimestampSinceLastWriting) { ...@@ -287,6 +293,7 @@ TEST_F(AudioClockTest, TimestampSinceLastWriting) {
WroteAudio(10, 10, 40, 0.5); WroteAudio(10, 10, 40, 0.5);
WroteAudio(10, 10, 40, 2.0); WroteAudio(10, 10, 40, 2.0);
EXPECT_EQ(0, FrontTimestampInMilliseconds()); EXPECT_EQ(0, FrontTimestampInMilliseconds());
EXPECT_EQ(3500, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds()); EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Simulate passing 2000ms of initial delay in the audio hardware. // Simulate passing 2000ms of initial delay in the audio hardware.
...@@ -314,6 +321,38 @@ TEST_F(AudioClockTest, TimestampSinceLastWriting) { ...@@ -314,6 +321,38 @@ TEST_F(AudioClockTest, TimestampSinceLastWriting) {
EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(6000)); EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(6000));
} }
TEST_F(AudioClockTest, TimeUntilPlayback) {
// Construct an audio clock with the following representation:
//
// existing
// |- delay -|------------------ calls to WroteAudio() ------------------|
// +------------+---------+------------+-----------+------------+-----------+
// | 20 silence | 10 @ 1x | 10 silence | 10 @ 0.5x | 10 silence | 10 @ 2.0x |
// +------------+---------+------------+-----------+------------+-----------+
// Media: 0 1000 1000 1500 1500 3500
// Wall: 2000 3000 4000 5000 6000 7000
//
// Each frame corresponds to 100ms of wall time (10 frames == 1000ms).
// NOTE(review): assumes WroteAudio(frames_written, frames_requested,
// delay_frames, playback_rate) per the fixture helper — confirm.
WroteAudio(10, 10, 60, 1.0);
WroteAudio(0, 10, 60, 1.0);
WroteAudio(10, 10, 60, 0.5);
WroteAudio(0, 10, 60, 0.5);
WroteAudio(10, 10, 60, 2.0);
EXPECT_EQ(0, FrontTimestampInMilliseconds());
EXPECT_EQ(3500, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Media timestamp zero has to wait for silence to pass.
EXPECT_EQ(2000, TimeUntilPlaybackInMilliseconds(0));
// From then on out it's simply adding up the number of frames and taking
// silence into account.
// 500ms at 1x is 5 frames past the 20-frame lead-in: 2000 + 500.
EXPECT_EQ(2500, TimeUntilPlaybackInMilliseconds(500));
// End of the 1x block: 20 + 10 frames == 3000ms.
EXPECT_EQ(3000, TimeUntilPlaybackInMilliseconds(1000));
// 1250ms is halfway through the 0.5x block (which spans media 1000-1500),
// so the intervening 10 frames of silence are also counted: 45 frames.
EXPECT_EQ(4500, TimeUntilPlaybackInMilliseconds(1250));
EXPECT_EQ(5000, TimeUntilPlaybackInMilliseconds(1500));
// 2500ms is halfway through the 2x block (media 1500-3500): 65 frames,
// including both silent gaps.
EXPECT_EQ(6500, TimeUntilPlaybackInMilliseconds(2500));
EXPECT_EQ(7000, TimeUntilPlaybackInMilliseconds(3500));
}
TEST_F(AudioClockTest, SupportsYearsWorthOfAudioData) { TEST_F(AudioClockTest, SupportsYearsWorthOfAudioData) {
// Use number of frames that would be likely to overflow 32-bit integer math. // Use number of frames that would be likely to overflow 32-bit integer math.
const int huge_amount_of_frames = std::numeric_limits<int>::max(); const int huge_amount_of_frames = std::numeric_limits<int>::max();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment