Commit 2b44aea3 authored by jrummell@chromium.org's avatar jrummell@chromium.org

Add new class AudioBufferQueue.

As part of the work to simplify the handling of audio data, this CL adds a
class that maintains a queue of audio buffers. Use of this class will come in
a subsequent CL.

BUG=248989

Review URL: https://chromiumcodereview.appspot.com/17112016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@207761 0039d316-1c4b-4281-b951-d872f2087c98
parent 06743316
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/audio_buffer_queue.h"
#include <algorithm>
#include "base/logging.h"
#include "media/base/audio_bus.h"
#include "media/base/buffers.h"
namespace media {
// Constructs an empty queue. Clear() establishes the initial state so that
// construction and resetting share a single code path.
AudioBufferQueue::AudioBufferQueue() {
  Clear();
}
// Trivial destructor; queued buffers are released via scoped_refptr.
AudioBufferQueue::~AudioBufferQueue() {}
// Resets the queue to its freshly-constructed state: no buffers, zero frames
// available, and an unknown current time.
void AudioBufferQueue::Clear() {
  current_buffer_offset_ = 0;
  frames_ = 0;
  current_time_ = kNoTimestamp();
  buffers_.clear();
  // The iterator must be re-anchored after clear(), which invalidates it.
  current_buffer_ = buffers_.begin();
}
// Adds |buffer_in| at the tail of the queue and updates bookkeeping.
void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
  const bool was_empty = buffers_.empty();
  if (was_empty) {
    // The first buffer written establishes the queue's current time.
    DCHECK_EQ(frames_, 0);
    current_time_ = buffer_in->timestamp();
  }

  // push_back() on a deque invalidates all outstanding iterators, so
  // re-anchor |current_buffer_| at the front afterwards.
  buffers_.push_back(buffer_in);
  current_buffer_ = buffers_.begin();

  // Account for the newly available frames; the CHECK guards against
  // integer overflow of the running total.
  frames_ += buffer_in->frame_count();
  CHECK_GT(frames_, 0);
}
// Copies up to |frames| frames into |dest| and advances the read position by
// the number of frames actually read, which is returned.
int AudioBufferQueue::ReadFrames(int frames, AudioBus* dest) {
  DCHECK_GE(dest->frames(), frames);
  return InternalRead(frames, true /* advance_position */,
                      0 /* forward_offset */, dest);
}
// Copies up to |frames| frames, starting |forward_offset| frames past the
// current position, into |dest| without moving the current position.
int AudioBufferQueue::PeekFrames(int frames,
                                 int forward_offset,
                                 AudioBus* dest) {
  DCHECK_GE(dest->frames(), frames);
  return InternalRead(frames, false /* advance_position */, forward_offset,
                      dest);
}
// Advances the current position by exactly |frames| frames without copying
// any data. Seeking past the end of the buffered data is a caller error.
void AudioBufferQueue::SeekFrames(int frames) {
  CHECK_LE(frames, frames_);
  const int frames_advanced = InternalRead(frames, true /* advance_position */,
                                           0 /* forward_offset */, NULL);
  DCHECK_EQ(frames_advanced, frames);
}
// Core routine shared by ReadFrames(), PeekFrames() and SeekFrames().
//
// Walks the buffer queue starting |forward_offset| frames past the current
// position, copying up to |frames| frames into |dest| (no copying when
// |dest| is NULL). Returns the number of frames actually consumed. When
// |advance_position| is true, the queue's position, frame count, current
// time and head buffers are all updated to reflect the read; otherwise the
// queue state is left untouched.
int AudioBufferQueue::InternalRead(int frames,
                                   bool advance_position,
                                   int forward_offset,
                                   AudioBus* dest) {
  // Counts how many frames are actually read from the buffer queue.
  int taken = 0;

  // Work on local copies of the position so that a peek
  // (|advance_position| == false) leaves the member state unmodified.
  BufferQueue::iterator current_buffer = current_buffer_;
  int current_buffer_offset = current_buffer_offset_;

  int frames_to_skip = forward_offset;
  while (taken < frames) {
    // |current_buffer| is valid since the first time this buffer is appended
    // with data. Make sure there is data to be processed.
    if (current_buffer == buffers_.end())
      break;

    scoped_refptr<AudioBuffer> buffer = *current_buffer;

    int remaining_frames_in_buffer =
        buffer->frame_count() - current_buffer_offset;

    if (frames_to_skip > 0) {
      // If there are frames to skip, do it first. May need to skip into
      // subsequent buffers, hence the min() against the current remainder.
      int skipped = std::min(remaining_frames_in_buffer, frames_to_skip);
      current_buffer_offset += skipped;
      frames_to_skip -= skipped;
    } else {
      // Find the right amount to copy from the current buffer. We shall copy
      // no more than |frames| frames in total and each single step copies no
      // more than the current buffer size.
      int copied = std::min(frames - taken, remaining_frames_in_buffer);

      // If |dest| is NULL (i.e. a seek), there's no need to copy.
      if (dest)
        buffer->ReadFrames(copied, current_buffer_offset, taken, dest);

      // Increase total number of frames copied, which regulates when to end
      // this loop.
      taken += copied;

      // We have read |copied| frames from the current buffer. Advance the
      // offset.
      current_buffer_offset += copied;
    }

    // Has the current buffer been fully consumed?
    if (current_buffer_offset == buffer->frame_count()) {
      if (advance_position) {
        // The next buffer may not have a timestamp, so update the current
        // timestamp before switching to the next buffer.
        UpdateCurrentTime(current_buffer, current_buffer_offset);
      }

      // If we are at the last buffer, no more data to be copied, so stop.
      BufferQueue::iterator next = current_buffer + 1;
      if (next == buffers_.end())
        break;

      // Advances the iterator.
      current_buffer = next;
      current_buffer_offset = 0;
    }
  }

  if (advance_position) {
    // Update the appropriate values since |taken| frames have been copied out.
    frames_ -= taken;
    DCHECK_GE(frames_, 0);
    // NOTE(review): this DCHECK inspects the pre-read |current_buffer_| (it
    // is reassigned only on the next line); presumably intentional since the
    // old iterator is still valid here — confirm the intent.
    DCHECK(current_buffer_ != buffers_.end() || frames_ == 0);
    current_buffer_ = current_buffer;
    current_buffer_offset_ = current_buffer_offset;

    // Recompute the time for the final (possibly mid-buffer) position.
    UpdateCurrentTime(current_buffer_, current_buffer_offset_);

    // Remove any buffers before the current buffer as there is no going
    // backwards.
    buffers_.erase(buffers_.begin(), current_buffer_);
  }

  return taken;
}
// Derives |current_time_| from |buffer|'s timestamp plus the fraction of its
// duration corresponding to |offset| frames, rounded to the nearest
// microsecond. Does nothing when |buffer| is past-the-end or carries no
// timestamp, leaving the last known time in place.
void AudioBufferQueue::UpdateCurrentTime(BufferQueue::iterator buffer,
                                         int offset) {
  if (buffer == buffers_.end() || (*buffer)->timestamp() == kNoTimestamp())
    return;

  const scoped_refptr<AudioBuffer>& b = *buffer;
  // Linear interpolation within the buffer: offset / frame_count of the
  // buffer's duration, computed in floating point then rounded.
  double time_offset = (b->duration().InMicroseconds() * offset) /
                       static_cast<double>(b->frame_count());
  current_time_ = b->timestamp() + base::TimeDelta::FromMicroseconds(
                                       static_cast<int64>(time_offset + 0.5));
}
} // namespace media
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
#define MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
#include <deque>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_buffer.h"
#include "media/base/media_export.h"
namespace media {
class AudioBus;
// A queue of AudioBuffers to support reading of arbitrary chunks of a media
// data source. Audio data can be copied into an AudioBus for output. The
// current position can be forwarded to anywhere in the buffered data.
//
// This class is not inherently thread-safe. Concurrent access must be
// externally serialized.
class MEDIA_EXPORT AudioBufferQueue {
 public:
  AudioBufferQueue();
  ~AudioBufferQueue();

  // Clears the buffer queue, resetting the frame count and current time.
  void Clear();

  // Appends |buffer_in| to this queue. The first buffer appended to an empty
  // queue establishes the current time.
  void Append(const scoped_refptr<AudioBuffer>& buffer_in);

  // Reads a maximum of |frames| frames into |dest| from the current position.
  // Returns the number of frames read. The current position will advance by
  // the amount of frames read.
  int ReadFrames(int frames, AudioBus* dest);

  // Copies up to |frames| frames from current position to |dest|. Returns
  // number of frames copied. Doesn't advance current position. Starts at
  // |forward_offset| from current position.
  int PeekFrames(int frames, int forward_offset, AudioBus* dest);

  // Moves the current position forward by |frames| frames. If |frames|
  // exceeds frames available, the seek operation will fail (CHECK).
  void SeekFrames(int frames);

  // Returns the number of frames buffered beyond the current position.
  int frames() const { return frames_; }

  // Returns the current timestamp, taking into account the current offset.
  // The value is calculated based on the timestamp of the current buffer. If
  // the current buffer has no timestamp set, returns the value that
  // corresponds to the last position in a buffer that did have a timestamp.
  // kNoTimestamp() is returned if none of the buffers read from had a
  // timestamp set.
  base::TimeDelta current_time() const { return current_time_; }

 private:
  // Definition of the buffer queue.
  typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;

  // An internal method shared by ReadFrames() and SeekFrames() that actually
  // does reading. It reads a maximum of |frames| frames into |dest|. Returns
  // the number of frames read. The current position will be moved forward by
  // the number of frames read if |advance_position| is set. If |dest| is
  // NULL, only the current position will advance but no data will be copied.
  // |forward_offset| can be used to skip frames before reading.
  int InternalRead(int frames,
                   bool advance_position,
                   int forward_offset,
                   AudioBus* dest);

  // Updates |current_time_| with the time that corresponds to the specified
  // position in the buffer.
  void UpdateCurrentTime(BufferQueue::iterator buffer, int offset);

  // Iterator pointing at the buffer that holds the current read position.
  BufferQueue::iterator current_buffer_;

  // Queue of buffers; buffers before |current_buffer_| are erased as the
  // position advances (see InternalRead()).
  BufferQueue buffers_;

  // Frame offset of the current position inside |*current_buffer_|.
  int current_buffer_offset_;

  // Number of frames available to be read in the buffer.
  int frames_;

  // Keeps track of the most recent time we've seen in case |buffers_| is
  // empty when our owner asks what time it is.
  base::TimeDelta current_time_;

  DISALLOW_COPY_AND_ASSIGN(AudioBufferQueue);
};
} // namespace media
#endif // MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
This diff is collapsed.
......@@ -6,79 +6,11 @@
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
// Builds an interleaved AudioBuffer whose samples are the arithmetic
// progression start, start + increment, start + 2 * increment, ...
// |format| must be one of the interleaved sample formats; no check is done
// that |format| matches T. Duration is one second per frame.
template <class T>
static scoped_refptr<AudioBuffer> MakeInterleavedBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    const base::TimeDelta start_time) {
  DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
         format == kSampleFormatS32 || format == kSampleFormatF32);

  // Create a block of memory with values:
  //   start
  //   start + increment
  //   start + 2 * increment, ...
  // Since this is interleaved data, channel 0 data will be:
  //   start
  //   start + channels * increment
  //   start + 2 * channels * increment, ...
  int buffer_size = frames * channels * sizeof(T);
  scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
  uint8* data[] = { memory.get() };
  T* buffer = reinterpret_cast<T*>(memory.get());
  for (int i = 0; i < frames * channels; ++i) {
    buffer[i] = start;
    start += increment;
  }

  // Duration is 1 second per frame (for simplicity).
  base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
  return AudioBuffer::CopyFrom(
      format, channels, frames, data, start_time, duration);
}
// Builds a planar AudioBuffer: samples are generated in channel order, so
// channel 1's values continue where channel 0's end. |format| must be one of
// the planar sample formats; no check is done that |format| matches T.
// Duration is one second per frame.
template <class T>
static scoped_refptr<AudioBuffer> MakePlanarBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    const base::TimeDelta start_time) {
  DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);

  // Create multiple blocks of data, one for each channel.
  // Values in channel 0 will be:
  //   start
  //   start + increment
  //   start + 2 * increment, ...
  // Values in channel 1 will be:
  //   start + frames * increment
  //   start + (frames + 1) * increment
  //   start + (frames + 2) * increment, ...
  int buffer_size = frames * sizeof(T);
  scoped_ptr<uint8*[]> data(new uint8*[channels]);
  scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
  for (int i = 0; i < channels; ++i) {
    data.get()[i] = memory.get() + i * buffer_size;
    T* buffer = reinterpret_cast<T*>(data.get()[i]);
    for (int j = 0; j < frames; ++j) {
      buffer[j] = start;
      start += increment;
    }
  }

  // Duration is 1 second per frame (for simplicity).
  base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
  return AudioBuffer::CopyFrom(
      format, channels, frames, data.get(), start_time, duration);
}
static void VerifyResult(float* channel_data,
int frames,
float start,
......@@ -95,7 +27,7 @@ TEST(AudioBufferTest, CopyFrom) {
const int channels = 1;
const int frames = 8;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
kSampleFormatU8, channels, 1, 1, frames, start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(buffer->timestamp(), start_time);
......@@ -129,7 +61,7 @@ TEST(AudioBufferTest, ReadU8) {
const int channels = 4;
const int frames = 4;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, start_time);
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
......@@ -148,7 +80,7 @@ TEST(AudioBufferTest, ReadS16) {
const int channels = 2;
const int frames = 10;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int16>(
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
......@@ -172,7 +104,7 @@ TEST(AudioBufferTest, ReadS32) {
const int channels = 2;
const int frames = 6;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int32>(
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
......@@ -194,7 +126,7 @@ TEST(AudioBufferTest, ReadF32) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<float>(
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time);
// Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
......@@ -215,7 +147,7 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<int16>(
scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
......@@ -247,7 +179,7 @@ TEST(AudioBufferTest, ReadF32Planar) {
const int channels = 4;
const int frames = 100;
const base::TimeDelta start_time;
scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<float>(
scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 1.0f, 1.0f, frames, start_time);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
......
......@@ -5,9 +5,12 @@
#include "media/base/test_helpers.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop.h"
#include "base/test/test_timeouts.h"
#include "base/time.h"
#include "base/timer.h"
#include "media/base/audio_buffer.h"
#include "media/base/bind_to_loop.h"
#include "ui/gfx/rect.h"
......@@ -143,4 +146,100 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
return kLargeSize;
}
// Builds an interleaved AudioBuffer whose samples form the arithmetic
// progression start, start + increment, start + 2 * increment, ...
// With interleaved layout, channel 0 therefore receives every |channels|-th
// value. No check is done that |format| matches T, only that |format| is an
// interleaved format. Duration is fixed at one second per frame so that
// timestamp math in tests stays trivial.
template <class T>
scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    base::TimeDelta start_time) {
  DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
         format == kSampleFormatS32 || format == kSampleFormatF32);

  // Fill one contiguous block with the progression; interleaving is implied
  // by writing all channels of frame 0, then frame 1, and so on.
  const int total_samples = frames * channels;
  scoped_ptr<uint8[]> memory(new uint8[total_samples * sizeof(T)]);
  T* samples = reinterpret_cast<T*>(memory.get());
  T value = start;
  for (int i = 0; i < total_samples; ++i) {
    samples[i] = value;
    value += increment;
  }

  // Interleaved formats use a single channel pointer.
  uint8* data[] = { memory.get() };
  return AudioBuffer::CopyFrom(format, channels, frames, data, start_time,
                               base::TimeDelta::FromSeconds(frames));
}
// Builds a planar AudioBuffer. Samples are generated in channel order, so
// channel 0 holds start, start + increment, ... and channel 1 continues
// where channel 0 ended (start + frames * increment, ...). No check is done
// that |format| matches T, only that |format| is a planar format. Duration
// is fixed at one second per frame.
template <class T>
scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    base::TimeDelta start_time) {
  DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);

  // One backing allocation carved into |channels| consecutive planes, plus a
  // per-channel pointer table as required by AudioBuffer::CopyFrom().
  int channel_size = frames * sizeof(T);
  scoped_ptr<uint8[]> memory(new uint8[channels * channel_size]);
  scoped_ptr<uint8*[]> data(new uint8*[channels]);
  T value = start;
  for (int ch = 0; ch < channels; ++ch) {
    uint8* plane = memory.get() + ch * channel_size;
    data.get()[ch] = plane;
    T* samples = reinterpret_cast<T*>(plane);
    for (int frame = 0; frame < frames; ++frame) {
      samples[frame] = value;
      value += increment;
    }
  }

  return AudioBuffer::CopyFrom(format, channels, frames, data.get(),
                               start_time,
                               base::TimeDelta::FromSeconds(frames));
}
// Instantiate all the types of MakeInterleavedAudioBuffer() and
// MakePlanarAudioBuffer() needed. Explicit instantiation keeps the template
// definitions in this .cc file while letting test binaries link against the
// declarations in test_helpers.h. The macro body deliberately omits the
// trailing semicolon; each use site supplies it.
#define DEFINE_INTERLEAVED_INSTANCE(type)                               \
  template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
      SampleFormat format,                                              \
      int channels,                                                     \
      type start,                                                       \
      type increment,                                                   \
      int frames,                                                       \
      base::TimeDelta start_time)
DEFINE_INTERLEAVED_INSTANCE(uint8);
DEFINE_INTERLEAVED_INSTANCE(int16);
DEFINE_INTERLEAVED_INSTANCE(int32);
DEFINE_INTERLEAVED_INSTANCE(float);
// Explicitly instantiates MakePlanarAudioBuffer<type>(). Like
// DEFINE_INTERLEAVED_INSTANCE, the macro body omits the trailing semicolon
// and each use site supplies it. (Previously the macro itself ended in ';',
// so every use expanded to ');;' — a stray empty declaration at namespace
// scope, which is ill-formed before C++11 and triggers -pedantic warnings.)
#define DEFINE_PLANAR_INSTANCE(type)                               \
  template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
      SampleFormat format,                                         \
      int channels,                                                \
      type start,                                                  \
      type increment,                                              \
      int frames,                                                  \
      base::TimeDelta start_time)
DEFINE_PLANAR_INSTANCE(int16);
DEFINE_PLANAR_INSTANCE(float);
} // namespace media
......@@ -8,16 +8,20 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "media/base/pipeline_status.h"
#include "media/base/sample_format.h"
#include "media/base/video_decoder_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "ui/gfx/size.h"
namespace base {
class MessageLoop;
class TimeDelta;
}
namespace media {
class AudioBuffer;
// Return a callback that expects to be run once.
base::Closure NewExpectedClosure();
PipelineStatusCB NewExpectedStatusCB(PipelineStatus status);
......@@ -79,6 +83,51 @@ class TestVideoConfig {
DISALLOW_IMPLICIT_CONSTRUCTORS(TestVideoConfig);
};
// Create an AudioBuffer containing |frames| frames of data, where each sample
// is of type T. Each frame will have the data from |channels| channels
// interleaved. |start| and |increment| are used to specify the values for the
// samples. Since this is interleaved data, channel 0 data will be:
//   |start|
//   |start| + |channels| * |increment|
//   |start| + 2 * |channels| * |increment|, and so on.
// Data for subsequent channels is similar. No check is done that |format|
// requires data to be of type T, but it is verified that |format| is an
// interleaved format.
//
// |start_time| will be used as the start time for the samples. Duration is
// set to 1 second per frame, to simplify calculations.
template <class T>
scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    base::TimeDelta start_time);
// Create an AudioBuffer containing |frames| frames of data, where each sample
// is of type T. Since this is planar data, there will be a block for each of
// |channels| channels. |start| and |increment| are used to specify the values
// for the samples, which are created in channel order. Since this is planar
// data, channel 0 data will be:
//   |start|
//   |start| + |increment|
//   |start| + 2 * |increment|, and so on.
// Data for channel 1 will follow where channel 0 ends. Subsequent channels
// are similar. No check is done that |format| requires data to be of type T,
// but it is verified that |format| is a planar format.
//
// |start_time| will be used as the start time for the samples. Duration is
// set to 1 second per frame, to simplify calculations.
template <class T>
scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
    SampleFormat format,
    int channels,
    T start,
    T increment,
    int frames,
    base::TimeDelta start_time);
} // namespace media
#endif // MEDIA_BASE_TEST_HELPERS_H_
......@@ -198,6 +198,8 @@
'base/android/media_resource_getter.h',
'base/audio_buffer.cc',
'base/audio_buffer.h',
'base/audio_buffer_queue.cc',
'base/audio_buffer_queue.h',
'base/audio_capturer_source.h',
'base/audio_converter.cc',
'base/audio_converter.h',
......@@ -894,6 +896,7 @@
'base/android/media_codec_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
'base/audio_buffer_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
'base/audio_bus_unittest.cc',
'base/audio_converter_unittest.cc',
'base/audio_fifo_unittest.cc',
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment