Commit aaf48c8b authored by henrika@chromium.org's avatar henrika@chromium.org

Re-enables three WebRTC unit tests in content.

It looks like some disabled unit tests could easily be enabled again just by extending the MockMediaInternals class.

BUG=23533039
TEST=content_unittests

Review URL: https://chromiumcodereview.appspot.com/23533039

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@221784 0039d316-1c4b-4281-b951-d872f2087c98
parent f53194f2
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "base/basictypes.h" #include "base/basictypes.h"
#include "content/browser/media/media_internals.h" #include "content/browser/media/media_internals.h"
#include "content/public/browser/media_observer.h" #include "content/public/browser/media_observer.h"
#include "media/audio/audio_parameters.h"
#include "media/base/media_log_event.h" #include "media/base/media_log_event.h"
#include "testing/gmock/include/gmock/gmock.h" #include "testing/gmock/include/gmock/gmock.h"
...@@ -47,6 +48,10 @@ class MockMediaInternals : public MediaInternals { ...@@ -47,6 +48,10 @@ class MockMediaInternals : public MediaInternals {
void(void* host, int stream_id)); void(void* host, int stream_id));
MOCK_METHOD3(OnSetAudioStreamPlaying, MOCK_METHOD3(OnSetAudioStreamPlaying,
void(void* host, int stream_id, bool playing)); void(void* host, int stream_id, bool playing));
MOCK_METHOD4(OnAudioStreamCreated,
void(void* host, int stream_id,
const media::AudioParameters& params,
const std::string& input_device_id));
MOCK_METHOD3(OnSetAudioStreamStatus, MOCK_METHOD3(OnSetAudioStreamStatus,
void(void* host, int stream_id, const std::string& status)); void(void* host, int stream_id, const std::string& status));
MOCK_METHOD3(OnSetAudioStreamVolume, MOCK_METHOD3(OnSetAudioStreamVolume,
......
...@@ -120,6 +120,7 @@ void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data, ...@@ -120,6 +120,7 @@ void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data,
DCHECK_LE(number_of_frames, output_buffer_size()); DCHECK_LE(number_of_frames, output_buffer_size());
{ {
base::AutoLock auto_lock(lock_); base::AutoLock auto_lock(lock_);
DCHECK(audio_transport_callback_);
// Store the reported audio delay locally. // Store the reported audio delay locally.
output_delay_ms_ = audio_delay_milliseconds; output_delay_ms_ = audio_delay_milliseconds;
} }
......
...@@ -496,9 +496,14 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) { ...@@ -496,9 +496,14 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
// be utilized to implement the actual audio path. The test registers a // be utilized to implement the actual audio path. The test registers a
// webrtc::VoEExternalMedia implementation to hijack the output audio and // webrtc::VoEExternalMedia implementation to hijack the output audio and
// verify that streaming starts correctly. // verify that streaming starts correctly.
// Disabled when running headless since the bots don't have the required config. // TODO(henrika): include on Android as well, as soon as all race conditions
// Flaky, http://crbug.com/167299 . // in OpenSLES are resolved.
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) { #if defined(OS_ANDROID)
#define MAYBE_StartPlayout DISABLED_StartPlayout
#else
#define MAYBE_StartPlayout StartPlayout
#endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
if (!has_output_devices_) { if (!has_output_devices_) {
LOG(WARNING) << "No output device detected."; LOG(WARNING) << "No output device detected.";
return; return;
...@@ -507,12 +512,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) { ...@@ -507,12 +512,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) {
scoped_ptr<media::AudioHardwareConfig> config = scoped_ptr<media::AudioHardwareConfig> config =
CreateRealHardwareConfig(audio_manager_.get()); CreateRealHardwareConfig(audio_manager_.get());
SetAudioHardwareConfig(config.get()); SetAudioHardwareConfig(config.get());
media::AudioParameters params(config->GetOutputConfig());
if (!HardwareSampleRatesAreValid()) if (!HardwareSampleRatesAreValid())
return; return;
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1); OnAudioStreamCreated(_, 1, params, StrEq(""))).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamPlaying(_, 1, true)).Times(1); OnSetAudioStreamPlaying(_, 1, true)).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
...@@ -520,33 +526,30 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) { ...@@ -520,33 +526,30 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) {
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnDeleteAudioStream(_, 1)).Times(AnyNumber()); OnDeleteAudioStream(_, 1)).Times(AnyNumber());
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
ASSERT_TRUE(engine.valid()); ASSERT_TRUE(engine.valid());
ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
ASSERT_TRUE(base.valid()); ASSERT_TRUE(base.valid());
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
int err = base->Init(webrtc_audio_device.get()); int err = base->Init(webrtc_audio_device.get());
ASSERT_EQ(0, err); ASSERT_EQ(0, err);
int ch = base->CreateChannel();
EXPECT_NE(-1, ch);
ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get()); ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
ASSERT_TRUE(external_media.valid()); ASSERT_TRUE(external_media.valid());
base::WaitableEvent event(false, false); base::WaitableEvent event(false, false);
scoped_ptr<WebRTCMediaProcessImpl> media_process( scoped_ptr<WebRTCMediaProcessImpl> media_process(
new WebRTCMediaProcessImpl(&event)); new WebRTCMediaProcessImpl(&event));
int ch = base->CreateChannel();
EXPECT_NE(-1, ch);
EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing( EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
ch, webrtc::kPlaybackPerChannel, *media_process.get())); ch, webrtc::kPlaybackPerChannel, *media_process.get()));
EXPECT_EQ(0, base->StartPlayout(ch)); EXPECT_EQ(0, base->StartPlayout(ch));
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play(); renderer->Play();
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
...@@ -674,9 +677,14 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) { ...@@ -674,9 +677,14 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
} }
// Uses WebRtcAudioDeviceImpl to play a local wave file. // Uses WebRtcAudioDeviceImpl to play a local wave file.
// Disabled when running headless since the bots don't have the required config. // TODO(henrika): include on Android as well as soon as alla race conditions
// Flaky, http://crbug.com/167298 . // in OpenSLES are resolved.
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) { #if defined(OS_ANDROID)
#define MAYBE_PlayLocalFile DISABLED_PlayLocalFile
#else
#define MAYBE_PlayLocalFile PlayLocalFile
#endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
if (!has_output_devices_) { if (!has_output_devices_) {
LOG(WARNING) << "No output device detected."; LOG(WARNING) << "No output device detected.";
return; return;
...@@ -688,12 +696,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) { ...@@ -688,12 +696,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) {
scoped_ptr<media::AudioHardwareConfig> config = scoped_ptr<media::AudioHardwareConfig> config =
CreateRealHardwareConfig(audio_manager_.get()); CreateRealHardwareConfig(audio_manager_.get());
SetAudioHardwareConfig(config.get()); SetAudioHardwareConfig(config.get());
media::AudioParameters params(config->GetOutputConfig());
if (!HardwareSampleRatesAreValid()) if (!HardwareSampleRatesAreValid())
return; return;
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1); OnAudioStreamCreated(_, 1, params, StrEq(""))).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamPlaying(_, 1, true)).Times(1); OnSetAudioStreamPlaying(_, 1, true)).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
...@@ -701,23 +710,21 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) { ...@@ -701,23 +710,21 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) {
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnDeleteAudioStream(_, 1)).Times(AnyNumber()); OnDeleteAudioStream(_, 1)).Times(AnyNumber());
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
ASSERT_TRUE(engine.valid()); ASSERT_TRUE(engine.valid());
ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
ASSERT_TRUE(base.valid()); ASSERT_TRUE(base.valid());
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
int err = base->Init(webrtc_audio_device.get()); int err = base->Init(webrtc_audio_device.get());
ASSERT_EQ(0, err); ASSERT_EQ(0, err);
int ch = base->CreateChannel(); int ch = base->CreateChannel();
EXPECT_NE(-1, ch); EXPECT_NE(-1, ch);
EXPECT_EQ(0, base->StartPlayout(ch)); EXPECT_EQ(0, base->StartPlayout(ch));
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play(); renderer->Play();
ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get()); ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
...@@ -733,7 +740,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) { ...@@ -733,7 +740,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) {
// Play 2 seconds worth of audio and then quit. // Play 2 seconds worth of audio and then quit.
message_loop_.PostDelayedTask(FROM_HERE, message_loop_.PostDelayedTask(FROM_HERE,
base::MessageLoop::QuitClosure(), base::MessageLoop::QuitClosure(),
base::TimeDelta::FromSeconds(6)); base::TimeDelta::FromSeconds(2));
message_loop_.Run(); message_loop_.Run();
renderer->Stop(); renderer->Stop();
...@@ -767,10 +774,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) { ...@@ -767,10 +774,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
scoped_ptr<media::AudioHardwareConfig> config = scoped_ptr<media::AudioHardwareConfig> config =
CreateRealHardwareConfig(audio_manager_.get()); CreateRealHardwareConfig(audio_manager_.get());
SetAudioHardwareConfig(config.get()); SetAudioHardwareConfig(config.get());
media::AudioParameters params(config->GetOutputConfig());
if (!HardwareSampleRatesAreValid()) if (!HardwareSampleRatesAreValid())
return; return;
EXPECT_CALL(media_observer(),
OnAudioStreamCreated(_, 1, params, StrEq(""))).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamPlaying(_, 1, true)); OnSetAudioStreamPlaying(_, 1, true));
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
...@@ -778,17 +788,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) { ...@@ -778,17 +788,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnDeleteAudioStream(_, 1)).Times(AnyNumber()); OnDeleteAudioStream(_, 1)).Times(AnyNumber());
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
ASSERT_TRUE(engine.valid()); ASSERT_TRUE(engine.valid());
ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
ASSERT_TRUE(base.valid()); ASSERT_TRUE(base.valid());
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new WebRtcAudioDeviceImpl());
int err = base->Init(webrtc_audio_device.get()); int err = base->Init(webrtc_audio_device.get());
ASSERT_EQ(0, err); ASSERT_EQ(0, err);
...@@ -826,6 +832,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) { ...@@ -826,6 +832,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get())); EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
EXPECT_EQ(0, base->StartPlayout(ch)); EXPECT_EQ(0, base->StartPlayout(ch));
EXPECT_EQ(0, base->StartSend(ch)); EXPECT_EQ(0, base->StartSend(ch));
scoped_refptr<WebRtcAudioRenderer> renderer =
new WebRtcAudioRenderer(kRenderViewId);
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play(); renderer->Play();
LOG(INFO) << ">> You should now be able to hear yourself in loopback..."; LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
...@@ -900,7 +909,15 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) { ...@@ -900,7 +909,15 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
EXPECT_EQ(0, base->Terminate()); EXPECT_EQ(0, base->Terminate());
} }
TEST_F(MAYBE_WebRTCAudioDeviceTest, WebRtcPlayoutSetupTime) {
// TODO(henrika): include on Android as well, as soon as all race conditions
// in OpenSLES are resolved.
#if defined(OS_ANDROID)
#define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime
#else
#define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime
#endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
if (!has_output_devices_) { if (!has_output_devices_) {
LOG(WARNING) << "No output device detected."; LOG(WARNING) << "No output device detected.";
return; return;
...@@ -909,10 +926,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, WebRtcPlayoutSetupTime) { ...@@ -909,10 +926,13 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, WebRtcPlayoutSetupTime) {
scoped_ptr<media::AudioHardwareConfig> config = scoped_ptr<media::AudioHardwareConfig> config =
CreateRealHardwareConfig(audio_manager_.get()); CreateRealHardwareConfig(audio_manager_.get());
SetAudioHardwareConfig(config.get()); SetAudioHardwareConfig(config.get());
media::AudioParameters params(config->GetOutputConfig());
if (!HardwareSampleRatesAreValid()) if (!HardwareSampleRatesAreValid())
return; return;
EXPECT_CALL(media_observer(),
OnAudioStreamCreated(_, 1, params, StrEq(""))).Times(1);
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
OnSetAudioStreamStatus(_, 1, _)).Times(AnyNumber()); OnSetAudioStreamStatus(_, 1, _)).Times(AnyNumber());
EXPECT_CALL(media_observer(), EXPECT_CALL(media_observer(),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment