Switch AudioRenderSink::Callback to use AudioBus.

As titled, switches everything over to using the AudioBus
class instead of const std::vector<float*>.  Allows removal
of lots of crufty allocations and memsets.

BUG=114700
TEST=unit tests, layout tests, try bots.  Nothing should change.


Review URL: https://chromiumcodereview.appspot.com/10823175

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@150906 0039d316-1c4b-4281-b951-d872f2087c98
parent 88031984
......@@ -4,6 +4,8 @@
#include "content/renderer/media/render_audiosourceprovider.h"
#include <vector>
#include "base/basictypes.h"
#include "base/command_line.h"
#include "base/logging.h"
......@@ -77,9 +79,12 @@ void RenderAudioSourceProvider::provideInput(
for (size_t i = 0; i < audio_data.size(); ++i)
v[i] = audio_data[i];
scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::WrapVector(
number_of_frames, v);
// TODO(crogers): figure out if we should volume scale here or in common
// WebAudio code. In any case we need to take care of volume.
renderer_->Render(v, number_of_frames, 0);
renderer_->Render(audio_bus.get(), 0);
} else {
// Provide silence if the source is not running.
for (size_t i = 0; i < audio_data.size(); ++i)
......
......@@ -22,8 +22,6 @@
#ifndef CONTENT_RENDERER_MEDIA_RENDER_AUDIOSOURCEPROVIDER_H_
#define CONTENT_RENDERER_MEDIA_RENDER_AUDIOSOURCEPROVIDER_H_
#include <vector>
#include "base/synchronization/lock.h"
#include "media/base/audio_renderer_sink.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
......
......@@ -42,20 +42,20 @@ double RendererWebAudioDeviceImpl::sampleRate() {
return 44100.0;
}
int RendererWebAudioDeviceImpl::Render(const std::vector<float*>& audio_data,
int number_of_frames,
int RendererWebAudioDeviceImpl::Render(media::AudioBus* audio_bus,
int audio_delay_milliseconds) {
// Make the client callback to get rendered audio.
DCHECK(client_callback_);
if (client_callback_) {
// Wrap the pointers using WebVector.
WebVector<float*> web_audio_data(audio_data.size());
for (size_t i = 0; i < audio_data.size(); ++i)
web_audio_data[i] = audio_data[i];
WebVector<float*> web_audio_data(
static_cast<size_t>(audio_bus->channels()));
for (int i = 0; i < audio_bus->channels(); ++i)
web_audio_data[i] = audio_bus->channel(i);
client_callback_->render(web_audio_data, number_of_frames);
client_callback_->render(web_audio_data, audio_bus->frames());
}
return number_of_frames;
return audio_bus->frames();
}
void RendererWebAudioDeviceImpl::OnRenderError() {
......
......@@ -5,17 +5,11 @@
#ifndef CONTENT_RENDERER_MEDIA_MEDIA_RENDERER_WEBAUDIODEVICE_IMPL_H_
#define CONTENT_RENDERER_MEDIA_MEDIA_RENDERER_WEBAUDIODEVICE_IMPL_H_
#include <vector>
#include "base/memory/ref_counted.h"
#include "media/base/audio_renderer_sink.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebAudioDevice.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
namespace media {
class AudioRendererSink;
}
class RendererWebAudioDeviceImpl
: public WebKit::WebAudioDevice,
public media::AudioRendererSink::RenderCallback {
......@@ -30,8 +24,7 @@ class RendererWebAudioDeviceImpl
virtual double sampleRate();
// AudioRendererSink::RenderCallback implementation.
virtual int Render(const std::vector<float*>& audio_data,
int number_of_frames,
virtual int Render(media::AudioBus* audio_bus,
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
......
......@@ -174,10 +174,9 @@ int32_t WebRtcAudioDeviceImpl::Release() {
}
int WebRtcAudioDeviceImpl::Render(
const std::vector<float*>& audio_data,
int number_of_frames,
media::AudioBus* audio_bus,
int audio_delay_milliseconds) {
DCHECK_LE(number_of_frames, output_buffer_size());
DCHECK_LE(audio_bus->frames(), output_buffer_size());
{
base::AutoLock auto_lock(lock_);
......@@ -185,7 +184,7 @@ int WebRtcAudioDeviceImpl::Render(
output_delay_ms_ = audio_delay_milliseconds;
}
const int channels = audio_data.size();
const int channels = audio_bus->channels();
DCHECK_LE(channels, output_channels());
int samples_per_sec = output_sample_rate();
......@@ -205,7 +204,7 @@ int WebRtcAudioDeviceImpl::Render(
// Get audio samples in blocks of 10 milliseconds from the registered
// webrtc::AudioTransport source. Keep reading until our internal buffer
// is full.
while (accumulated_audio_samples < number_of_frames) {
while (accumulated_audio_samples < audio_bus->frames()) {
// Get 10ms and append output to temporary byte buffer.
audio_transport_callback_->NeedMorePlayData(samples_per_10_msec,
bytes_per_sample_,
......@@ -222,13 +221,13 @@ int WebRtcAudioDeviceImpl::Render(
for (int channel_index = 0; channel_index < channels; ++channel_index) {
media::DeinterleaveAudioChannel(
output_buffer_.get(),
audio_data[channel_index],
audio_bus->channel(channel_index),
channels,
channel_index,
bytes_per_sample_,
number_of_frames);
audio_bus->frames());
}
return number_of_frames;
return audio_bus->frames();
}
void WebRtcAudioDeviceImpl::OnRenderError() {
......@@ -237,11 +236,10 @@ void WebRtcAudioDeviceImpl::OnRenderError() {
LOG(ERROR) << "OnRenderError()";
}
void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data,
int number_of_frames,
void WebRtcAudioDeviceImpl::Capture(media::AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume) {
DCHECK_LE(number_of_frames, input_buffer_size());
DCHECK_LE(audio_bus->frames(), input_buffer_size());
#if defined(OS_WIN) || defined(OS_MACOSX)
DCHECK_LE(volume, 1.0);
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
......@@ -261,15 +259,15 @@ void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data,
output_delay_ms = output_delay_ms_;
}
const int channels = audio_data.size();
const int channels = audio_bus->channels();
DCHECK_LE(channels, input_channels());
uint32_t new_mic_level = 0;
// Interleave, scale, and clip input to int and store result in
// a local byte buffer.
media::InterleaveFloatToInt(audio_data,
media::InterleaveFloatToInt(audio_bus,
input_buffer_.get(),
number_of_frames,
audio_bus->frames(),
input_audio_parameters_.bits_per_sample() / 8);
int samples_per_sec = input_sample_rate();
......@@ -291,7 +289,7 @@ void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data,
// Write audio samples in blocks of 10 milliseconds to the registered
// webrtc::AudioTransport sink. Keep writing until our internal byte
// buffer is empty.
while (accumulated_audio_samples < number_of_frames) {
while (accumulated_audio_samples < audio_bus->frames()) {
// Deliver 10ms of recorded 16-bit linear PCM audio.
audio_transport_callback_->RecordedDataIsAvailable(
audio_byte_buffer,
......
......@@ -6,7 +6,6 @@
#define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
......@@ -218,14 +217,12 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
virtual int32_t Release() OVERRIDE;
// media::AudioRendererSink::RenderCallback implementation.
virtual int Render(const std::vector<float*>& audio_data,
int number_of_frames,
virtual int Render(media::AudioBus* audio_bus,
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
// AudioInputDevice::CaptureCallback implementation.
virtual void Capture(const std::vector<float*>& audio_data,
int number_of_frames,
virtual void Capture(media::AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume) OVERRIDE;
virtual void OnCaptureError() OVERRIDE;
......
......@@ -13,6 +13,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "media/audio/audio_util.h"
#include "media/base/audio_bus.h"
using base::PlatformThread;
......@@ -188,23 +189,17 @@ AudioDeviceThread::Callback::Callback(
CHECK_NE(samples_per_ms_, 0);
}
AudioDeviceThread::Callback::~Callback() {
for (size_t i = 0; i < audio_data_.size(); ++i)
base::AlignedFree(audio_data_[i]);
}
AudioDeviceThread::Callback::~Callback() {}
void AudioDeviceThread::Callback::InitializeOnAudioThread() {
DCHECK(audio_data_.empty());
DCHECK(!audio_bus_.get());
MapSharedMemory();
DCHECK(shared_memory_.memory() != NULL);
// Allocate buffer with a 16-byte alignment to allow SSE optimizations.
audio_data_.reserve(audio_parameters_.channels());
for (int i = 0; i < audio_parameters_.channels(); ++i) {
audio_data_.push_back(static_cast<float*>(base::AlignedAlloc(
sizeof(float) * audio_parameters_.frames_per_buffer(), 16)));
}
// TODO(dalecurtis): Instead of creating a new AudioBus and memcpy'ing into
// the shared memory we should wrap the shared memory.
audio_bus_ = AudioBus::Create(audio_parameters_);
}
} // namespace media.
......@@ -5,10 +5,9 @@
#ifndef MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
#define MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
#include <vector>
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/shared_memory.h"
#include "base/sync_socket.h"
#include "base/synchronization/lock.h"
......@@ -18,6 +17,7 @@
class MessageLoop;
namespace media {
class AudioBus;
// Data transfer between browser and render process uses a combination
// of sync sockets and shared memory. To read from the socket and render
......@@ -60,7 +60,7 @@ class MEDIA_EXPORT AudioDeviceThread {
// Audio bus allocated in InitializeOnAudioThread() based on
// info from audio_parameters_.
std::vector<float*> audio_data_;
scoped_ptr<AudioBus> audio_bus_;
base::SharedMemory shared_memory_;
const int memory_length_;
......
......@@ -10,6 +10,7 @@
#include "base/time.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_util.h"
#include "media/base/audio_bus.h"
namespace media {
......@@ -328,24 +329,23 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
int audio_delay_milliseconds = pending_data / bytes_per_ms_;
int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
const size_t number_of_frames = audio_parameters_.frames_per_buffer();
const int bytes_per_sample = sizeof(memory[0]);
// Deinterleave each channel and convert to 32-bit floating-point
// with nominal range -1.0 -> +1.0.
for (int channel_index = 0; channel_index < audio_parameters_.channels();
for (int channel_index = 0; channel_index < audio_bus_->channels();
++channel_index) {
DeinterleaveAudioChannel(memory,
audio_data_[channel_index],
audio_parameters_.channels(),
audio_bus_->channel(channel_index),
audio_bus_->channels(),
channel_index,
bytes_per_sample,
number_of_frames);
audio_bus_->frames());
}
// Deliver captured data to the client in floating point format
// and update the audio-delay measurement.
capture_callback_->Capture(audio_data_, number_of_frames,
capture_callback_->Capture(audio_bus_.get(),
audio_delay_milliseconds, volume);
}
......
......@@ -88,8 +88,7 @@ class MEDIA_EXPORT AudioInputDevice
public:
class MEDIA_EXPORT CaptureCallback {
public:
virtual void Capture(const std::vector<float*>& audio_data,
int number_of_frames,
virtual void Capture(AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume) = 0;
virtual void OnCaptureError() = 0;
......
......@@ -266,17 +266,18 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback");
// Update the audio-delay measurement then ask client to render audio.
size_t num_frames = render_callback_->Render(audio_data_,
audio_parameters_.frames_per_buffer(), audio_delay_milliseconds);
size_t num_frames = render_callback_->Render(
audio_bus_.get(), audio_delay_milliseconds);
// Interleave, scale, and clip to int.
// TODO(crogers/vrk): Figure out a way to avoid the float -> int -> float
// conversions that happen in the <audio> and WebRTC scenarios.
InterleaveFloatToInt(audio_data_, shared_memory_.memory(),
num_frames, audio_parameters_.bits_per_sample() / 8);
// TODO(dalecurtis): Remove this when we have float everywhere.
InterleaveFloatToInt(
audio_bus_.get(), shared_memory_.memory(), num_frames,
audio_parameters_.bits_per_sample() / 8);
// Let the host know we are done.
SetActualDataSizeInBytes(&shared_memory_, memory_length_,
SetActualDataSizeInBytes(
&shared_memory_, memory_length_,
num_frames * audio_parameters_.GetBytesPerFrame());
}
......
......@@ -35,9 +35,7 @@ class MockRenderCallback : public AudioRendererSink::RenderCallback {
MockRenderCallback() {}
virtual ~MockRenderCallback() {}
MOCK_METHOD3(Render, int(const std::vector<float*>& audio_data,
int number_of_frames,
int audio_delay_milliseconds));
MOCK_METHOD2(Render, int(AudioBus* audio_bus, int audio_delay_milliseconds));
MOCK_METHOD0(OnRenderError, void());
};
......@@ -87,19 +85,6 @@ ACTION_P(QuitLoop, loop) {
loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
}
// Zeros out |number_of_frames| in all channel buffers pointed to by
// the |audio_data| vector.
void ZeroAudioData(int number_of_frames,
const std::vector<float*>& audio_data) {
std::vector<float*>::const_iterator it = audio_data.begin();
for (; it != audio_data.end(); ++it) {
float* channel = *it;
for (int j = 0; j < number_of_frames; ++j) {
channel[j] = 0.0f;
}
}
}
} // namespace.
class AudioOutputDeviceTest : public testing::Test {
......@@ -222,21 +207,10 @@ TEST_F(AudioOutputDeviceTest, CreateStream) {
// So, for the sake of this test, we consider the call to Render a sign
// of success and quit the loop.
// A note on the call to ZeroAudioData():
// Valgrind caught a bug in AudioOutputDevice::AudioThreadCallback::Process()
// whereby we always interleaved all the frames in the buffer regardless
// of how many were actually rendered. So to keep the benefits of that
// test, we explicitly pass 0 in here as the number of frames to
// ZeroAudioData(). Other tests might want to pass the requested number
// by using WithArgs<1, 0>(Invoke(&ZeroAudioData)) and set the return
// value accordingly.
const int kNumberOfFramesToProcess = 0;
EXPECT_CALL(callback_, Render(_, _, _))
EXPECT_CALL(callback_, Render(_, _))
.WillOnce(DoAll(
WithArgs<0>(Invoke(
testing::CreateFunctor(&ZeroAudioData,
kNumberOfFramesToProcess))),
QuitLoop(io_loop_.message_loop_proxy()),
Return(kNumberOfFramesToProcess)));
......
......@@ -22,6 +22,7 @@
#include "base/time.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/audio_util.h"
#include "media/base/audio_bus.h"
#if defined(OS_MACOSX)
#include "media/audio/mac/audio_low_latency_input_mac.h"
......@@ -230,7 +231,7 @@ bool DeinterleaveAudioChannel(void* source,
// |Format| is the destination type, |Fixed| is a type larger than |Format|
// such that operations can be made without overflowing.
template<class Format, class Fixed>
static void InterleaveFloatToInt(const std::vector<float*>& source,
static void InterleaveFloatToInt(const AudioBus* source,
void* dst_bytes, size_t number_of_frames) {
Format* destination = reinterpret_cast<Format*>(dst_bytes);
Fixed max_value = std::numeric_limits<Format>::max();
......@@ -243,9 +244,9 @@ static void InterleaveFloatToInt(const std::vector<float*>& source,
min_value = -(bias - 1);
}
int channels = source.size();
int channels = source->channels();
for (int i = 0; i < channels; ++i) {
float* channel_data = source[i];
const float* channel_data = source->channel(i);
for (size_t j = 0; j < number_of_frames; ++j) {
Fixed sample = max_value * channel_data[j];
if (sample > max_value)
......@@ -258,7 +259,7 @@ static void InterleaveFloatToInt(const std::vector<float*>& source,
}
}
void InterleaveFloatToInt(const std::vector<float*>& source, void* dst,
void InterleaveFloatToInt(const AudioBus* source, void* dst,
size_t number_of_frames, int bytes_per_sample) {
switch (bytes_per_sample) {
case 1:
......
......@@ -6,7 +6,6 @@
#define MEDIA_AUDIO_AUDIO_UTIL_H_
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "media/base/channel_layout.h"
......@@ -17,6 +16,7 @@ class SharedMemory;
}
namespace media {
class AudioBus;
// For all audio functions 3 audio formats are supported:
// 8 bits unsigned 0 to 255.
......@@ -86,7 +86,7 @@ MEDIA_EXPORT bool DeinterleaveAudioChannel(void* source,
// The number of channels is taken from |audio_bus|.
// The |destination| buffer is assumed to be large enough to hold the
// result. Thus it must be at least size: number_of_frames * channels.
MEDIA_EXPORT void InterleaveFloatToInt(const std::vector<float*>& source,
MEDIA_EXPORT void InterleaveFloatToInt(const AudioBus* audio_bus,
void* destination,
size_t number_of_frames,
int bytes_per_sample);
......
......@@ -24,15 +24,11 @@ void NullAudioSink::Initialize(const AudioParameters& params,
DCHECK(!initialized_);
params_ = params;
audio_data_.reserve(params.channels());
for (int i = 0; i < params.channels(); ++i) {
float* channel_data = new float[params.frames_per_buffer()];
audio_data_.push_back(channel_data);
}
audio_bus_ = AudioBus::Create(params_);
if (hash_audio_for_testing_) {
md5_channel_contexts_.reset(new base::MD5Context[params.channels()]);
for (int i = 0; i < params.channels(); i++)
md5_channel_contexts_.reset(new base::MD5Context[params_.channels()]);
for (int i = 0; i < params_.channels(); i++)
base::MD5Init(&md5_channel_contexts_[i]);
}
......@@ -73,8 +69,6 @@ void NullAudioSink::SetPlaying(bool is_playing) {
NullAudioSink::~NullAudioSink() {
DCHECK(!thread_.IsRunning());
for (size_t i = 0; i < audio_data_.size(); ++i)
delete [] audio_data_[i];
}
void NullAudioSink::FillBufferTask() {
......@@ -83,16 +77,15 @@ void NullAudioSink::FillBufferTask() {
base::TimeDelta delay;
// Only consume buffers when actually playing.
if (playing_) {
int requested_frames = params_.frames_per_buffer();
int frames_received = callback_->Render(audio_data_, requested_frames, 0);
int frames_received = callback_->Render(audio_bus_.get(), 0);
int frames_per_millisecond =
params_.sample_rate() / base::Time::kMillisecondsPerSecond;
if (hash_audio_for_testing_ && frames_received > 0) {
DCHECK_EQ(sizeof(float), sizeof(uint32));
int channels = audio_data_.size();
int channels = audio_bus_->channels();
for (int channel_idx = 0; channel_idx < channels; ++channel_idx) {
float* channel = audio_data_[channel_idx];
float* channel = audio_bus_->channel(channel_idx);
for (int frame_idx = 0; frame_idx < frames_received; frame_idx++) {
// Convert float to uint32 w/o conversion loss.
uint32 frame = base::ByteSwapToLE32(
......@@ -135,7 +128,7 @@ std::string NullAudioSink::GetAudioHashForTesting() {
// Hash all channels into the first channel.
base::MD5Digest digest;
for (size_t i = 1; i < audio_data_.size(); i++) {
for (int i = 1; i < audio_bus_->channels(); i++) {
base::MD5Final(&digest, &md5_channel_contexts_[i]);
base::MD5Update(&md5_channel_contexts_[0], base::StringPiece(
reinterpret_cast<char*>(&digest), sizeof(base::MD5Digest)));
......
......@@ -13,13 +13,13 @@
// audio device or we haven't written an audio implementation for a particular
// platform yet.
#include <vector>
#include "base/md5.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "media/base/audio_renderer_sink.h"
namespace media {
class AudioBus;
class MEDIA_EXPORT NullAudioSink
: NON_EXPORTED_BASE(public AudioRendererSink) {
......@@ -53,7 +53,7 @@ class MEDIA_EXPORT NullAudioSink
void SetPlaying(bool is_playing);
// A buffer rendered into during FillBufferTask() to advance playback.
std::vector<float*> audio_data_;
scoped_ptr<AudioBus> audio_bus_;
AudioParameters params_;
bool initialized_;
......
......@@ -46,11 +46,6 @@ AudioRendererMixer::~AudioRendererMixer() {
// AudioRendererSinks must be stopped before being destructed.
audio_sink_->Stop();
// Clean up |mixer_input_audio_data_|.
for (size_t i = 0; i < mixer_input_audio_data_.size(); ++i)
base::AlignedFree(mixer_input_audio_data_[i]);
mixer_input_audio_data_.clear();
// Ensures that all mixer inputs have stopped themselves prior to destruction
// and have called RemoveMixerInput().
DCHECK_EQ(mixer_inputs_.size(), 0U);
......@@ -68,45 +63,40 @@ void AudioRendererMixer::RemoveMixerInput(
mixer_inputs_.erase(input);
}
int AudioRendererMixer::Render(const std::vector<float*>& audio_data,
int number_of_frames,
int AudioRendererMixer::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
current_audio_delay_milliseconds_ = audio_delay_milliseconds;
if (resampler_.get())
resampler_->Resample(audio_data, number_of_frames);
resampler_->Resample(audio_bus, audio_bus->frames());
else
ProvideInput(audio_data, number_of_frames);
ProvideInput(audio_bus);
// Always return the full number of frames requested, ProvideInput() will pad
// with silence if it wasn't able to acquire enough data.
return number_of_frames;
return audio_bus->frames();
}
void AudioRendererMixer::ProvideInput(const std::vector<float*>& audio_data,
int number_of_frames) {
void AudioRendererMixer::ProvideInput(AudioBus* audio_bus) {
base::AutoLock auto_lock(mixer_inputs_lock_);
// Allocate staging area for each mixer input's audio data on first call. We
// won't know how much to allocate until here because of resampling.
if (mixer_input_audio_data_.size() == 0) {
mixer_input_audio_data_.reserve(audio_data.size());
for (size_t i = 0; i < audio_data.size(); ++i) {
// Allocate audio data with a 16-byte alignment for SSE optimizations.
mixer_input_audio_data_.push_back(static_cast<float*>(
base::AlignedAlloc(sizeof(float) * number_of_frames, 16)));
}
mixer_input_audio_data_size_ = number_of_frames;
// won't know how much to allocate until here because of resampling. Ensure
// our intermediate AudioBus is sized exactly as the original. Resize should
// only happen once due to the way the resampler works.
if (!mixer_input_audio_bus_.get() ||
mixer_input_audio_bus_->frames() != audio_bus->frames()) {
mixer_input_audio_bus_ =
AudioBus::Create(audio_bus->channels(), audio_bus->frames());
}
// Sanity check our inputs.
DCHECK_LE(number_of_frames, mixer_input_audio_data_size_);
DCHECK_EQ(audio_data.size(), mixer_input_audio_data_.size());
DCHECK_EQ(audio_bus->frames(), mixer_input_audio_bus_->frames());
DCHECK_EQ(audio_bus->channels(), mixer_input_audio_bus_->channels());
// Zero |audio_data| so we're mixing into a clean buffer and return silence if
// Zero |audio_bus| so we're mixing into a clean buffer and return silence if
// we couldn't get enough data from our inputs.
for (size_t i = 0; i < audio_data.size(); ++i)
memset(audio_data[i], 0, number_of_frames * sizeof(*audio_data[i]));
audio_bus->Zero();
// Have each mixer render its data into an output buffer then mix the result.
for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin();
......@@ -121,15 +111,14 @@ void AudioRendererMixer::ProvideInput(const std::vector<float*>& audio_data,
continue;
int frames_filled = input->callback()->Render(
mixer_input_audio_data_, number_of_frames,
current_audio_delay_milliseconds_);
mixer_input_audio_bus_.get(), current_audio_delay_milliseconds_);
if (frames_filled == 0)
continue;
// Volume adjust and mix each mixer input into |audio_data| after rendering.
for (size_t j = 0; j < audio_data.size(); ++j) {
VectorFMAC(
mixer_input_audio_data_[j], volume, frames_filled, audio_data[j]);
// Volume adjust and mix each mixer input into |audio_bus| after rendering.
for (int i = 0; i < audio_bus->channels(); ++i) {
VectorFMAC(mixer_input_audio_bus_->channel(i), volume, frames_filled,
audio_bus->channel(i));
}
// No need to clamp values as InterleaveFloatToInt() will take care of this
......
......@@ -6,7 +6,6 @@
#define MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
#include <set>
#include <vector>
#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
......@@ -38,16 +37,14 @@ class MEDIA_EXPORT AudioRendererMixer
FRIEND_TEST_ALL_PREFIXES(AudioRendererMixerTest, VectorFMACBenchmark);
// AudioRendererSink::RenderCallback implementation.
virtual int Render(const std::vector<float*>& audio_data,
int number_of_frames,
virtual int Render(AudioBus* audio_bus,
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
// Handles mixing and volume adjustment. Renders |number_of_frames| into
// |audio_data|. When resampling is necessary, ProvideInput() will be called
// Handles mixing and volume adjustment. Fully fills |audio_bus| with mixed
// audio data. When resampling is necessary, ProvideInput() will be called
// by MultiChannelResampler when more data is necessary.
void ProvideInput(const std::vector<float*>& audio_data,
int number_of_frames);
void ProvideInput(AudioBus* audio_bus);
// Multiply each element of |src| (up to |len|) by |scale| and add to |dest|.
static void VectorFMAC(const float src[], float scale, int len, float dest[]);
......@@ -68,8 +65,7 @@ class MEDIA_EXPORT AudioRendererMixer
base::Lock mixer_inputs_lock_;
// Intermediate buffer for rendering audio data from each mixer input.
int mixer_input_audio_data_size_;
std::vector<float*> mixer_input_audio_data_;
scoped_ptr<AudioBus> mixer_input_audio_bus_;
// Handles resampling post-mixing.
scoped_ptr<MultiChannelResampler> resampler_;
......
......@@ -191,19 +191,8 @@ class AudioRendererMixerTest
input_parameters_, output_parameters_, sink_));
mixer_callback_ = sink_->callback();
// TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
// allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
audio_data_.reserve(output_parameters_.channels());
for (int i = 0; i < output_parameters_.channels(); ++i)
audio_data_.push_back(new float[output_parameters_.frames_per_buffer()]);
// TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
// allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
expected_audio_data_.reserve(output_parameters_.channels());
for (int i = 0; i < output_parameters_.channels(); ++i) {
expected_audio_data_.push_back(
new float[output_parameters_.frames_per_buffer()]);
}
audio_bus_ = AudioBus::Create(output_parameters_);
expected_audio_bus_ = AudioBus::Create(output_parameters_);
// Allocate one callback for generating expected results.
double step = kSineCycles / static_cast<double>(
......@@ -241,13 +230,13 @@ class AudioRendererMixerTest
}
bool ValidateAudioData(int index, int frames, float scale) {
for (size_t i = 0; i < audio_data_.size(); ++i) {
for (int i = 0; i < audio_bus_->channels(); ++i) {
for (int j = index; j < frames; j++) {
double error = fabs(
audio_data_[i][j] - expected_audio_data_[i][j] * scale);
double error = fabs(audio_bus_->channel(i)[j] -
expected_audio_bus_->channel(i)[j] * scale);
if (error > epsilon_) {
EXPECT_NEAR(
expected_audio_data_[i][j] * scale, audio_data_[i][j], epsilon_)
EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale,
audio_bus_->channel(i)[j], epsilon_)
<< " i=" << i << ", j=" << j;
return false;
}
......@@ -257,8 +246,6 @@ class AudioRendererMixerTest
}
bool RenderAndValidateAudioData(float scale) {
int request_frames = output_parameters_.frames_per_buffer();
// Half fill won't be exactly half when resampling since the resampler
// will have enough data to fill out more of the buffer based on its
// internal buffer and kernel size. So special case some of the checks.
......@@ -269,19 +256,16 @@ class AudioRendererMixerTest
for (size_t i = 0; i < fake_callbacks_.size(); ++i)
fake_callbacks_[i]->set_half_fill(true);
expected_callback_->set_half_fill(true);
for (size_t i = 0; i < expected_audio_data_.size(); ++i) {
memset(expected_audio_data_[i], 0,
sizeof(*expected_audio_data_[i]) * request_frames);
}
expected_audio_bus_->Zero();
}
// Render actual audio data.
int frames = mixer_callback_->Render(audio_data_, request_frames, 0);
if (frames != request_frames)
int frames = mixer_callback_->Render(audio_bus_.get(), 0);
if (frames != audio_bus_->frames())
return false;
// Render expected audio data (without scaling).
expected_callback_->Render(expected_audio_data_, request_frames, 0);
expected_callback_->Render(expected_audio_bus_.get(), 0);
if (half_fill_) {
// Verify first half of audio data for both resampling and non-resampling.
......@@ -296,11 +280,12 @@ class AudioRendererMixerTest
}
}
// Fill |audio_data_| fully with |value|.
// Fill |audio_bus_| fully with |value|.
void FillAudioData(float value) {
for (size_t i = 0; i < audio_data_.size(); ++i)
std::fill(audio_data_[i],
audio_data_[i] + output_parameters_.frames_per_buffer(), value);
for (int i = 0; i < audio_bus_->channels(); ++i) {
std::fill(audio_bus_->channel(i),
audio_bus_->channel(i) + audio_bus_->frames(), value);
}
}
// Verify silence when mixer inputs are in pre-Start() and post-Start().
......@@ -414,26 +399,21 @@ class AudioRendererMixerTest
mixer_inputs_[i]->Stop();
}
// Verify we get silence back; fill |audio_data_| before hand to be sure.
// Verify we get silence back; fill |audio_bus_| before hand to be sure.
FillAudioData(1.0f);
EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
}
protected:
virtual ~AudioRendererMixerTest() {
for (size_t i = 0; i < audio_data_.size(); ++i)
delete [] audio_data_[i];
for (size_t i = 0; i < expected_audio_data_.size(); ++i)
delete [] expected_audio_data_[i];
}
virtual ~AudioRendererMixerTest() {}
scoped_refptr<MockAudioRendererSink> sink_;
scoped_ptr<AudioRendererMixer> mixer_;
AudioRendererSink::RenderCallback* mixer_callback_;
AudioParameters input_parameters_;
AudioParameters output_parameters_;
std::vector<float*> audio_data_;
std::vector<float*> expected_audio_data_;
scoped_ptr<AudioBus> audio_bus_;
scoped_ptr<AudioBus> expected_audio_bus_;
std::vector< scoped_refptr<AudioRendererMixerInput> > mixer_inputs_;
ScopedVector<FakeAudioRenderCallback> fake_callbacks_;
scoped_ptr<FakeAudioRenderCallback> expected_callback_;
......
......@@ -9,6 +9,7 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "media/base/media_export.h"
namespace media {
......@@ -22,14 +23,9 @@ class AudioRendererSink
public:
class RenderCallback {
public:
// Fills entire buffer of length |number_of_frames| but returns actual
// number of frames it got from its source (|number_of_frames| in case of
// continuous stream). That actual number of frames is passed to host
// together with PCM audio data and host is free to use or ignore it.
// TODO(crogers): use base:Callback instead.
virtual int Render(const std::vector<float*>& audio_data,
int number_of_frames,
int audio_delay_milliseconds) = 0;
// Attempts to completely fill all channels of |audio_bus|, returns actual
// number of frames filled.
virtual int Render(AudioBus* audio_bus, int audio_delay_milliseconds) = 0;
// Signals an error has occurred.
virtual void OnRenderError() = 0;
......
......@@ -19,21 +19,21 @@ FakeAudioRenderCallback::FakeAudioRenderCallback(double step)
FakeAudioRenderCallback::~FakeAudioRenderCallback() {}
int FakeAudioRenderCallback::Render(const std::vector<float*>& audio_data,
int number_of_frames,
int FakeAudioRenderCallback::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
int number_of_frames = audio_bus->frames();
if (half_fill_)
number_of_frames /= 2;
// Fill first channel with a sine wave.
for (int i = 0; i < number_of_frames; ++i)
audio_data[0][i] = sin(2 * M_PI * (x_ + step_ * i));
audio_bus->channel(0)[i] = sin(2 * M_PI * (x_ + step_ * i));
x_ += number_of_frames * step_;
// Copy first channel into the rest of the channels.
for (size_t i = 1; i < audio_data.size(); ++i)
memcpy(audio_data[i], audio_data[0],
number_of_frames * sizeof(*audio_data[0]));
for (int i = 1; i < audio_bus->channels(); ++i)
memcpy(audio_bus->channel(i), audio_bus->channel(0),
number_of_frames * sizeof(*audio_bus->channel(i)));
return number_of_frames;
}
......
......@@ -5,8 +5,6 @@
#ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
#define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
#include <vector>
#include "media/base/audio_renderer_sink.h"
#include "testing/gmock/include/gmock/gmock.h"
......@@ -24,8 +22,7 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback {
// Renders a sine wave into the provided audio data buffer. If |half_fill_|
// is set, will only fill half the buffer.
int Render(const std::vector<float*>& audio_data, int number_of_frames,
int audio_delay_milliseconds) OVERRIDE;
int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE;
MOCK_METHOD0(OnRenderError, void());
// Toggles only filling half the requested amount during Render().
......
......@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "media/base/audio_bus.h"
namespace media {
......@@ -14,7 +15,6 @@ MultiChannelResampler::MultiChannelResampler(int channels,
double io_sample_rate_ratio,
const ReadCB& read_cb)
: last_frame_count_(0),
first_frame_count_(0),
read_cb_(read_cb) {
// Allocate each channel's resampler.
resamplers_.reserve(channels);
......@@ -24,17 +24,10 @@ MultiChannelResampler::MultiChannelResampler(int channels,
}
}
MultiChannelResampler::~MultiChannelResampler() {
// Clean up |resampler_audio_data_|. Skip the first channel since we never
// allocated that, but just used the destination passed into ProvideInput().
for (size_t i = 1; i < resampler_audio_data_.size(); ++i)
delete [] resampler_audio_data_[i];
resampler_audio_data_.clear();
}
MultiChannelResampler::~MultiChannelResampler() {}
void MultiChannelResampler::Resample(const std::vector<float*>& destination,
int frames) {
DCHECK_EQ(destination.size(), resamplers_.size());
void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) {
DCHECK_EQ(static_cast<size_t>(audio_bus->channels()), resamplers_.size());
// We need to ensure that SincResampler only calls ProvideInput once for each
// channel. To ensure this, we chunk the number of requested frames into
......@@ -55,7 +48,8 @@ void MultiChannelResampler::Resample(const std::vector<float*>& destination,
// the first channel, then it will call it for the remaining channels,
// since they all buffer in the same way and are processing the same
// number of frames.
resamplers_[i]->Resample(destination[i] + frames_done, frames_this_time);
resamplers_[i]->Resample(
audio_bus->channel(i) + frames_done, frames_this_time);
}
frames_done += frames_this_time;
......@@ -66,32 +60,37 @@ void MultiChannelResampler::ProvideInput(int channel, float* destination,
int frames) {
// Get the data from the multi-channel provider when the first channel asks
// for it. For subsequent channels, we can just dish out the channel data
// from that (stored in |resampler_audio_data_|).
// from that (stored in |resampler_audio_bus_|).
if (channel == 0) {
// Allocate staging arrays on the first request.
if (resampler_audio_data_.size() == 0) {
first_frame_count_ = frames;
// Skip allocation of the first buffer, since we'll use |destination|
// directly for that.
resampler_audio_data_.reserve(resamplers_.size());
// Allocate staging arrays on the first request and if the frame size or
// |destination| changes (should only happen once).
if (!resampler_audio_bus_.get() ||
resampler_audio_bus_->frames() != frames ||
wrapped_resampler_audio_bus_->channel(0) != destination) {
resampler_audio_bus_ = AudioBus::Create(resamplers_.size(), frames);
// Create a channel vector based on |resampler_audio_bus_| but using
// |destination| directly for the first channel and then wrap it in a new
// AudioBus so we can avoid an extra memcpy later.
resampler_audio_data_.clear();
resampler_audio_data_.reserve(resampler_audio_bus_->channels());
resampler_audio_data_.push_back(destination);
for (size_t i = 1; i < resamplers_.size(); ++i)
resampler_audio_data_.push_back(new float[frames]);
} else {
DCHECK_LE(frames, first_frame_count_);
resampler_audio_data_[0] = destination;
for (int i = 1; i < resampler_audio_bus_->channels(); ++i)
resampler_audio_data_.push_back(resampler_audio_bus_->channel(i));
wrapped_resampler_audio_bus_ = AudioBus::WrapVector(
frames, resampler_audio_data_);
}
last_frame_count_ = frames;
read_cb_.Run(resampler_audio_data_, frames);
read_cb_.Run(wrapped_resampler_audio_bus_.get());
} else {
// All channels must ask for the same amount. This should always be the
// case, but let's just make sure.
DCHECK_EQ(frames, last_frame_count_);
// Copy the channel data from what we received from |read_cb_|.
memcpy(destination, resampler_audio_data_[channel],
sizeof(*resampler_audio_data_[channel]) * frames);
memcpy(destination, resampler_audio_bus_->channel(channel),
sizeof(*resampler_audio_bus_->channel(channel)) * frames);
}
}
......
......@@ -13,16 +13,16 @@
#include "media/base/sinc_resampler.h"
namespace media {
class AudioBus;
// MultiChannelResampler is a multi channel wrapper for SincResampler; allowing
// high quality sample rate conversion of multiple channels at once.
class MEDIA_EXPORT MultiChannelResampler {
public:
// Callback type for providing more data into the resampler. Expects |frames|
// of data for all channels to be rendered into |destination|; zero padded if
// not enough frames are available to satisfy the request.
typedef base::Callback<void(const std::vector<float*>& destination,
int frames)> ReadCB;
// Callback type for providing more data into the resampler. Expects AudioBus
// to be completely filled with data upon return; zero padded if not enough
// frames are available to satisfy the request.
typedef base::Callback<void(AudioBus* audio_bus)> ReadCB;
// Constructs a MultiChannelResampler with the specified |read_cb|, which is
// used to acquire audio data for resampling. |io_sample_rate_ratio| is the
......@@ -31,8 +31,8 @@ class MEDIA_EXPORT MultiChannelResampler {
const ReadCB& read_cb);
virtual ~MultiChannelResampler();
// Resample |frames| of data from |read_cb_| into |destination|.
void Resample(const std::vector<float*>& destination, int frames);
// Resamples |frames| of data from |read_cb_| into AudioBus.
void Resample(AudioBus* audio_bus, int frames);
private:
// SincResampler::ReadCB implementation. ProvideInput() will be called for
......@@ -43,17 +43,15 @@ class MEDIA_EXPORT MultiChannelResampler {
// frames for every channel.
int last_frame_count_;
// Sanity check to ensure |resampler_audio_data_| is properly allocated.
int first_frame_count_;
// Source of data for resampling.
ReadCB read_cb_;
// Each channel has its own high quality resampler.
ScopedVector<SincResampler> resamplers_;
// Buffer for audio data going into SincResampler from ReadCB. Owned by this
// class and only temporarily passed out to ReadCB when data is required.
// Buffers for audio data going into SincResampler from ReadCB.
scoped_ptr<AudioBus> resampler_audio_bus_;
scoped_ptr<AudioBus> wrapped_resampler_audio_bus_;
std::vector<float*> resampler_audio_data_;
};
......
......@@ -8,6 +8,7 @@
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"
#include "media/base/multi_channel_resampler.h"
#include "testing/gtest/include/gtest/gtest.h"
......@@ -37,33 +38,20 @@ class MultiChannelResamplerTest
: public testing::TestWithParam<int> {
public:
MultiChannelResamplerTest() {}
virtual ~MultiChannelResamplerTest() {
if (!audio_data_.empty()) {
for (size_t i = 0; i < audio_data_.size(); ++i)
delete [] audio_data_[i];
audio_data_.clear();
}
}
virtual ~MultiChannelResamplerTest() {}
void InitializeAudioData(int channels, int frames) {
frames_ = frames;
audio_data_.reserve(channels);
for (int i = 0; i < channels; ++i) {
audio_data_.push_back(new float[frames]);
// Zero initialize so we can be sure every value has been provided.
memset(audio_data_[i], 0, sizeof(*audio_data_[i]) * frames);
}
audio_bus_ = AudioBus::Create(channels, frames);
}
// MultiChannelResampler::MultiChannelAudioSourceProvider implementation, just
// fills the provided audio_data with |kFillValue|.
virtual void ProvideInput(const std::vector<float*>& audio_data,
int number_of_frames) {
EXPECT_EQ(audio_data.size(), audio_data_.size());
for (size_t i = 0; i < audio_data.size(); ++i)
for (int j = 0; j < number_of_frames; ++j)
audio_data[i][j] = kFillValue;
virtual void ProvideInput(AudioBus* audio_bus) {
EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
for (int i = 0; i < audio_bus->channels(); ++i)
for (int j = 0; j < audio_bus->frames(); ++j)
audio_bus->channel(i)[j] = kFillValue;
}
void MultiChannelTest(int channels, int frames, double expected_max_rms_error,
......@@ -73,7 +61,7 @@ class MultiChannelResamplerTest
channels, kScaleFactor, base::Bind(
&MultiChannelResamplerTest::ProvideInput,
base::Unretained(this)));
resampler.Resample(audio_data_, frames);
resampler.Resample(audio_bus_.get(), frames);
TestValues(expected_max_rms_error, expected_max_error);
}
......@@ -91,19 +79,19 @@ class MultiChannelResamplerTest
// Calculate Root-Mean-Square-Error for the resampling.
double max_error = 0.0;
double sum_of_squares = 0.0;
for (size_t i = 0; i < audio_data_.size(); ++i) {
for (int i = 0; i < audio_bus_->channels(); ++i) {
for (int j = 0; j < frames_; ++j) {
// Ensure all values are accounted for.
ASSERT_NE(audio_data_[i][j], 0);
ASSERT_NE(audio_bus_->channel(i)[j], 0);
double error = fabs(audio_data_[i][j] - kFillValue);
double error = fabs(audio_bus_->channel(i)[j] - kFillValue);
max_error = std::max(max_error, error);
sum_of_squares += error * error;
}
}
double rms_error = sqrt(
sum_of_squares / (frames_ * audio_data_.size()));
sum_of_squares / (frames_ * audio_bus_->channels()));
EXPECT_LE(rms_error, expected_max_rms_error);
EXPECT_LE(max_error, expected_max_error);
......@@ -111,7 +99,7 @@ class MultiChannelResamplerTest
protected:
int frames_;
std::vector<float*> audio_data_;
scoped_ptr<AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest);
};
......
......@@ -81,6 +81,8 @@ class MEDIA_EXPORT SincResampler {
float* const r3_;
float* const r4_;
float* const r5_;
DISALLOW_COPY_AND_ASSIGN(SincResampler);
};
} // namespace media
......
......@@ -325,13 +325,11 @@ bool AudioRendererImpl::IsBeforePrerollTime(
(buffer->GetTimestamp() + buffer->GetDuration()) < preroll_timestamp_;
}
int AudioRendererImpl::Render(const std::vector<float*>& audio_data,
int number_of_frames,
int AudioRendererImpl::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
if (stopped_ || GetPlaybackRate() == 0.0f) {
// Output silence if stopped.
for (size_t i = 0; i < audio_data.size(); ++i)
memset(audio_data[i], 0, sizeof(float) * number_of_frames);
audio_bus->Zero();
return 0;
}
......@@ -348,30 +346,29 @@ int AudioRendererImpl::Render(const std::vector<float*>& audio_data,
int bytes_per_frame = audio_parameters_.GetBytesPerFrame();
const int buf_size = number_of_frames * bytes_per_frame;
const int buf_size = audio_bus->frames() * bytes_per_frame;
scoped_array<uint8> buf(new uint8[buf_size]);
int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay);
int frames_filled = FillBuffer(buf.get(), audio_bus->frames(), request_delay);
int bytes_filled = frames_filled * bytes_per_frame;
DCHECK_LE(bytes_filled, buf_size);
UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now());
// Deinterleave each audio channel.
int channels = audio_data.size();
int channels = audio_bus->channels();
for (int channel_index = 0; channel_index < channels; ++channel_index) {
media::DeinterleaveAudioChannel(buf.get(),
audio_data[channel_index],
audio_bus->channel(channel_index),
channels,
channel_index,
bytes_per_frame / channels,
frames_filled);
// If FillBuffer() didn't give us enough data then zero out the remainder.
if (frames_filled < number_of_frames) {
int frames_to_zero = number_of_frames - frames_filled;
memset(audio_data[channel_index] + frames_filled,
0,
sizeof(float) * frames_to_zero);
if (frames_filled < audio_bus->frames()) {
int frames_to_zero = audio_bus->frames() - frames_filled;
memset(audio_bus->channel(channel_index) + frames_filled, 0,
sizeof(*audio_bus->channel(channel_index)) * frames_to_zero);
}
}
return frames_filled;
......
......@@ -119,8 +119,7 @@ class MEDIA_EXPORT AudioRendererImpl
void DoPause();
// media::AudioRendererSink::RenderCallback implementation.
virtual int Render(const std::vector<float*>& audio_data,
int number_of_frames,
virtual int Render(AudioBus* audio_bus,
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment