Commit 807b6733 authored by tommi@chromium.org

Turn audio ducking on by default on Windows again.

This changes the behavior for opting output devices into ducking so that ducking
is only enabled if an input device has already been opened with ducking enabled.

Entering the ducking session is now done for local streams as well, including
HTMLMediaElement audio rendering.  This allows the communication application to
also play back unducked ambient sounds.

On the browser side, when we open the default communication device(s) (input and
output), we now initialize them into a session that is separate from other
audio.  This makes it much easier to observe, control and troubleshoot what is
happening during a comm session.  It also fixes a problem whereby opening an
output communications device before opening an input communications device
would unexpectedly cause the output device itself to get ducked (rather than
opted out of ducking).
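
In rough outline, the rule this change introduces on the renderer side is: an
output stream joins the ducking session if and only if a ducking-enabled input
stream is already open.  A minimal sketch of that rule (simplified, standalone
types; IsAudioDuckingActive and AudioParameters::DUCKING are names from this
patch, everything else here is illustrative):

```cpp
// Sketch only: simplified stand-ins for the real Chromium types.
#include <vector>

enum Effects { NO_EFFECTS = 0, DUCKING = 1 << 0 };

struct InputDevice {
  int effects;  // Bitmask of AudioParameters-style effect flags.
};

// Mirrors MediaStreamDispatcher::IsAudioDuckingActive(): true if any open
// audio input device was opened with the DUCKING effect.
bool IsAudioDuckingActive(const std::vector<InputDevice>& open_inputs) {
  for (const InputDevice& device : open_inputs) {
    if (device.effects & DUCKING)
      return true;
  }
  return false;
}

// Mirrors GetCurrentDuckingFlag() in the diff below: a new output stream
// opts into the ducking session only when such an input is already open.
int EffectsForNewOutputStream(const std::vector<InputDevice>& open_inputs) {
  return IsAudioDuckingActive(open_inputs) ? DUCKING : NO_EFFECTS;
}
```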

Review URL: https://codereview.chromium.org/367923004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@281814 0039d316-1c4b-4281-b951-d872f2087c98
parent c4cf4ee1
@@ -62,10 +62,7 @@ struct {
{ MediaAudioConstraints::kGoogTypingNoiseDetection, true },
{ MediaAudioConstraints::kGoogExperimentalNoiseSuppression, false },
#if defined(OS_WIN)
// TODO(tommi): Turn this back to |true| on Windows when ducking issues
// have been resolved.
// Bugs: crbug/391414, crbug/391247.
{ kMediaStreamAudioDucking, false },
{ kMediaStreamAudioDucking, true },
#else
{ kMediaStreamAudioDucking, false },
#endif
@@ -336,10 +336,7 @@ TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
}
EXPECT_FALSE(audio_constraints.NeedsAudioProcessing());
#if defined(OS_WIN)
// TODO(tommi): Turn this back to EXPECT_TRUE on Windows when ducking issues
// have been resolved.
// Bugs: crbug/391414, crbug/391247.
EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#else
EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#endif
@@ -10,6 +10,7 @@
#include "content/renderer/media/media_stream_dispatcher_eventhandler.h"
#include "content/renderer/render_thread_impl.h"
#include "content/renderer/render_view_impl.h"
#include "media/audio/audio_parameters.h"
#include "third_party/WebKit/public/web/WebUserGestureIndicator.h"
#include "url/gurl.h"
@@ -105,6 +106,7 @@ void MediaStreamDispatcher::CancelGenerateStream(
void MediaStreamDispatcher::StopStreamDevice(
const StreamDeviceInfo& device_info) {
DCHECK(main_loop_->BelongsToCurrentThread());
DVLOG(1) << "MediaStreamDispatcher::StopStreamDevice"
<< ", {device_id = " << device_info.device.id << "}";
// Remove |device_info| from all streams in |label_stream_map_|.
@@ -383,6 +385,7 @@ void MediaStreamDispatcher::OnDeviceOpenFailed(int request_id) {
int MediaStreamDispatcher::audio_session_id(const std::string& label,
int index) {
DCHECK(main_loop_->BelongsToCurrentThread());
LabelStreamMap::iterator it = label_stream_map_.find(label);
if (it == label_stream_map_.end() ||
it->second.audio_array.size() <= static_cast<size_t>(index)) {
@@ -392,11 +395,13 @@ int MediaStreamDispatcher::audio_session_id(const std::string& label,
}
bool MediaStreamDispatcher::IsStream(const std::string& label) {
DCHECK(main_loop_->BelongsToCurrentThread());
return label_stream_map_.find(label) != label_stream_map_.end();
}
int MediaStreamDispatcher::video_session_id(const std::string& label,
int index) {
DCHECK(main_loop_->BelongsToCurrentThread());
LabelStreamMap::iterator it = label_stream_map_.find(label);
if (it == label_stream_map_.end() ||
it->second.video_array.size() <= static_cast<size_t>(index)) {
@@ -405,4 +410,19 @@ int MediaStreamDispatcher::video_session_id(const std::string& label,
return it->second.video_array[index].session_id;
}
bool MediaStreamDispatcher::IsAudioDuckingActive() const {
DCHECK(main_loop_->BelongsToCurrentThread());
LabelStreamMap::const_iterator stream_it = label_stream_map_.begin();
while (stream_it != label_stream_map_.end()) {
const StreamDeviceInfoArray& audio_array = stream_it->second.audio_array;
for (StreamDeviceInfoArray::const_iterator device_it = audio_array.begin();
device_it != audio_array.end(); ++device_it) {
if (device_it->device.input.effects & media::AudioParameters::DUCKING)
return true;
}
++stream_it;
}
return false;
}
} // namespace content
@@ -95,6 +95,11 @@ class CONTENT_EXPORT MediaStreamDispatcher
// Returns an audio session_id given a label and an index.
virtual int audio_session_id(const std::string& label, int index);
// Returns true if an audio input stream that was opened with audio ducking
// enabled is currently active.  This information is used when playing out
// audio so that rendered audio can be excluded from the ducking operation.
bool IsAudioDuckingActive() const;
protected:
int GetNextIpcIdForTest() { return next_ipc_id_; }
@@ -102,6 +107,7 @@ class CONTENT_EXPORT MediaStreamDispatcher
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, BasicVideoDevice);
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, TestFailure);
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, CancelGenerateStream);
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, CheckDuckingState);
struct Request;
@@ -12,6 +12,7 @@
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_dispatcher.h"
#include "content/renderer/media/media_stream_dispatcher_eventhandler.h"
#include "media/audio/audio_parameters.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
@@ -410,4 +411,66 @@ TEST_F(MediaStreamDispatcherTest, DeviceClosed) {
StreamDeviceInfo::kNoId);
}
TEST_F(MediaStreamDispatcherTest, CheckDuckingState) {
scoped_ptr<MediaStreamDispatcher> dispatcher(new MediaStreamDispatcher(NULL));
scoped_ptr<MockMediaStreamDispatcherEventHandler>
handler(new MockMediaStreamDispatcherEventHandler);
StreamOptions components(true, false); // audio only.
int ipc_request_id1 = dispatcher->next_ipc_id_;
dispatcher->GenerateStream(kRequestId1, handler.get()->AsWeakPtr(),
components, GURL());
EXPECT_EQ(1u, dispatcher->requests_.size());
// Ducking isn't active at this point.
EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
// Complete the creation of stream1 with a single audio track that has
// ducking enabled.
StreamDeviceInfoArray audio_device_array(1);
StreamDeviceInfo& audio_device_info = audio_device_array[0];
audio_device_info.device.name = "Microphone";
audio_device_info.device.type = kAudioType;
audio_device_info.session_id = kAudioSessionId;
audio_device_info.device.input.effects = media::AudioParameters::DUCKING;
StreamDeviceInfoArray video_device_array; // Empty for this test.
const char kStreamLabel[] = "stream1";
dispatcher->OnMessageReceived(MediaStreamMsg_StreamGenerated(
kRouteId, ipc_request_id1, kStreamLabel,
audio_device_array, video_device_array));
EXPECT_EQ(handler->request_id_, kRequestId1);
EXPECT_EQ(0u, dispatcher->requests_.size());
// Ducking should now be reported as active.
EXPECT_TRUE(dispatcher->IsAudioDuckingActive());
// Stop the device (removes the stream).
dispatcher->OnMessageReceived(
MediaStreamMsg_DeviceStopped(kRouteId, kStreamLabel,
handler->audio_device_));
// Ducking should now be reported as inactive again.
EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
// Now do the same sort of test with the DUCKING flag off.
audio_device_info.device.input.effects =
media::AudioParameters::ECHO_CANCELLER;
dispatcher->OnMessageReceived(MediaStreamMsg_StreamGenerated(
kRouteId, ipc_request_id1, kStreamLabel,
audio_device_array, video_device_array));
EXPECT_EQ(handler->request_id_, kRequestId1);
EXPECT_EQ(0u, dispatcher->requests_.size());
// Ducking should still be reported as not active.
EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
// Stop the device (removes the stream).
dispatcher->OnMessageReceived(
MediaStreamMsg_DeviceStopped(kRouteId, kStreamLabel,
handler->audio_device_));
}
} // namespace content
@@ -98,6 +98,14 @@ void HarmonizeConstraintsAndEffects(RTCMediaConstraints* constraints,
}
DVLOG(1) << "Disabling constraint: "
<< kConstraintEffectMap[i].constraint;
} else if (kConstraintEffectMap[i].effect ==
media::AudioParameters::DUCKING && value && !is_mandatory) {
// Special handling of the DUCKING flag that sets the optional
// constraint to |false| to match what the device will support.
constraints->AddOptional(kConstraintEffectMap[i].constraint,
webrtc::MediaConstraintsInterface::kValueFalse, true);
// No need to modify |effects| since the ducking flag is already off.
DCHECK((*effects & media::AudioParameters::DUCKING) == 0);
}
}
}
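
The special case above only fires for an optional (non-mandatory) constraint.
A hedged sketch of the same idea with simplified stand-in types (the real code
goes through RTCMediaConstraints and webrtc::MediaConstraintsInterface; the
constraint key and container here are illustrative):

```cpp
// Sketch only: rewrite an optional ducking constraint to match the device.
#include <map>
#include <string>

enum Effects { NO_EFFECTS = 0, DUCKING = 1 << 0 };

// Hypothetical stand-in for the optional constraint set.
typedef std::map<std::string, bool> OptionalConstraints;

// If ducking was requested optionally but the opened device did not deliver
// it (no DUCKING bit in |effects|), flip the optional constraint to false so
// downstream consumers see what the device actually supports.
void HarmonizeDucking(int effects, OptionalConstraints* constraints) {
  OptionalConstraints::iterator it = constraints->find("googDucking");
  if (it != constraints->end() && it->second && !(effects & DUCKING))
    it->second = false;
}
```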
@@ -9,8 +9,10 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/media_stream_dispatcher.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_logging.h"
#include "content/renderer/render_view_impl.h"
#include "media/audio/audio_output_device.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/sample_rates.h"
@@ -181,6 +183,19 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
OnPlayStateChanged on_play_state_changed_;
};
// Returns either AudioParameters::NO_EFFECTS or AudioParameters::DUCKING
// depending on whether or not an input device is currently open with
// ducking enabled.
int GetCurrentDuckingFlag(int render_view_id) {
RenderViewImpl* render_view = RenderViewImpl::FromRoutingID(render_view_id);
if (render_view && render_view->media_stream_dispatcher() &&
render_view->media_stream_dispatcher()->IsAudioDuckingActive()) {
return media::AudioParameters::DUCKING;
}
return media::AudioParameters::NO_EFFECTS;
}
} // namespace
WebRtcAudioRenderer::WebRtcAudioRenderer(
@@ -200,24 +215,18 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(
start_ref_count_(0),
audio_delay_milliseconds_(0),
fifo_delay_milliseconds_(0),
// TODO(tommi): Ducking is currently not set on sink_params due to an
// issue on Windows that causes the ducked state to be pinned if an output
// stream is opened before an input stream (both in communication mode).
// Furthermore the input stream may not be associated with the output
// stream, which results in the output stream getting incorrectly ducked.
// What should happen here is that the ducking flag should be raised
// iff an input device is currently open with ducking set.
// Bugs: crbug/391414, crbug/391247.
sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 0, sample_rate, 16,
frames_per_buffer, media::AudioParameters::NO_EFFECTS) {
frames_per_buffer,
GetCurrentDuckingFlag(source_render_view_id)) {
WebRtcLogMessage(base::StringPrintf(
"WAR::WAR. source_render_view_id=%d"
", session_id=%d, sample_rate=%d, frames_per_buffer=%d",
", session_id=%d, sample_rate=%d, frames_per_buffer=%d, effects=%i",
source_render_view_id,
session_id,
sample_rate,
frames_per_buffer));
frames_per_buffer,
sink_params_.effects()));
}
WebRtcAudioRenderer::~WebRtcAudioRenderer() {
@@ -10,7 +10,9 @@
#include "base/metrics/histogram.h"
#include "base/synchronization/lock.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/media_stream_dispatcher.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/render_view_impl.h"
#include "media/audio/audio_output_device.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"
@@ -93,44 +95,6 @@ void WebRtcLocalAudioRenderer::OnSetFormat(
capture_thread_checker_.DetachFromThread();
DCHECK(capture_thread_checker_.CalledOnValidThread());
// Reset the |source_params_|, |sink_params_| and |loopback_fifo_| to match
// the new format.
{
base::AutoLock auto_lock(thread_lock_);
if (source_params_ == params)
return;
source_params_ = params;
sink_params_ = media::AudioParameters(source_params_.format(),
source_params_.channel_layout(), source_params_.channels(),
source_params_.input_channels(), source_params_.sample_rate(),
source_params_.bits_per_sample(),
#if defined(OS_ANDROID)
// On Android, input and output use the same sample rate. In order to
// use the low latency mode, we need to use the buffer size suggested by
// the AudioManager for the sink. It will later be used to decide
// the buffer size of the shared memory buffer.
frames_per_buffer_,
#else
2 * source_params_.frames_per_buffer(),
#endif
// If DUCKING is enabled on the source, it needs to be enabled on the
// sink as well.
source_params_.effects());
// TODO(henrika): we could add a more dynamic solution here but I prefer
// a fixed size combined with bad audio at overflow. The alternative is
// that we start to build up latency and that can be more difficult to
// detect. Tests have shown that the FIFO never contains more than 2 or 3
// audio frames but I have selected a max size of ten buffers just
// in case since these tests were performed on a 16 core, 64GB Win 7
// machine. We could also add some sort of error notifier in this area if
// the FIFO overflows.
loopback_fifo_.reset(new media::AudioFifo(
params.channels(), 10 * params.frames_per_buffer()));
}
// Post a task on the main render thread to reconfigure the |sink_| with the
// new format.
message_loop_->PostTask(
@@ -278,10 +242,11 @@ void WebRtcLocalAudioRenderer::MaybeStartSink() {
if (!sink_.get() || !source_params_.IsValid())
return;
base::AutoLock auto_lock(thread_lock_);
// Clear up the old data in the FIFO.
loopback_fifo_->Clear();
{
// Clear up the old data in the FIFO.
base::AutoLock auto_lock(thread_lock_);
loopback_fifo_->Clear();
}
if (!sink_params_.IsValid() || !playing_ || !volume_ || sink_started_)
return;
@@ -300,6 +265,59 @@ void WebRtcLocalAudioRenderer::ReconfigureSink(
DVLOG(1) << "WebRtcLocalAudioRenderer::ReconfigureSink()";
int implicit_ducking_effect = 0;
RenderViewImpl* render_view =
RenderViewImpl::FromRoutingID(source_render_view_id_);
if (render_view &&
render_view->media_stream_dispatcher() &&
render_view->media_stream_dispatcher()->IsAudioDuckingActive()) {
DVLOG(1) << "Forcing DUCKING to be ON for output";
implicit_ducking_effect = media::AudioParameters::DUCKING;
} else {
DVLOG(1) << "DUCKING not forced ON for output";
}
if (source_params_ == params)
return;
// Reset the |source_params_|, |sink_params_| and |loopback_fifo_| to match
// the new format.
source_params_ = params;
sink_params_ = media::AudioParameters(source_params_.format(),
source_params_.channel_layout(), source_params_.channels(),
source_params_.input_channels(), source_params_.sample_rate(),
source_params_.bits_per_sample(),
#if defined(OS_ANDROID)
// On Android, input and output use the same sample rate. In order to
// use the low latency mode, we need to use the buffer size suggested by
// the AudioManager for the sink. It will later be used to decide
// the buffer size of the shared memory buffer.
frames_per_buffer_,
#else
2 * source_params_.frames_per_buffer(),
#endif
// If DUCKING is enabled on the source, it needs to be enabled on the
// sink as well.
source_params_.effects() | implicit_ducking_effect);
{
// TODO(henrika): we could add a more dynamic solution here but I prefer
// a fixed size combined with bad audio at overflow. The alternative is
// that we start to build up latency and that can be more difficult to
// detect. Tests have shown that the FIFO never contains more than 2 or 3
// audio frames but I have selected a max size of ten buffers just
// in case since these tests were performed on a 16 core, 64GB Win 7
// machine. We could also add some sort of error notifier in this area if
// the FIFO overflows.
media::AudioFifo* new_fifo = new media::AudioFifo(
params.channels(), 10 * params.frames_per_buffer());
base::AutoLock auto_lock(thread_lock_);
loopback_fifo_.reset(new_fifo);
}
if (!sink_)
return; // WebRtcLocalAudioRenderer has not yet been started.
@@ -309,6 +327,7 @@
sink_->Stop();
sink_started_ = false;
}
sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_,
source_render_frame_id_);
MaybeStartSink();
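
A side note on the FIFO swap above: the new AudioFifo is constructed before
thread_lock_ is taken, so the critical section shared with the audio thread
stays as short as possible.  A generic sketch of that idiom (illustrative
types, modern C++ rather than the Chromium classes):

```cpp
// Sketch: build the replacement object without holding the lock, then
// publish it in a brief critical section so the real-time audio thread
// never blocks on an allocation.
#include <memory>
#include <mutex>
#include <utility>

struct Fifo {
  Fifo(int channels, int frames) { /* allocate buffers here */ }
};

class Renderer {
 public:
  void ReplaceFifo(int channels, int frames) {
    // Allocation happens outside the lock.
    std::unique_ptr<Fifo> new_fifo(new Fifo(channels, frames));
    std::lock_guard<std::mutex> lock(lock_);
    fifo_ = std::move(new_fifo);  // Cheap pointer swap under the lock.
  }

 private:
  std::mutex lock_;
  std::unique_ptr<Fifo> fifo_;
};
```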
@@ -135,9 +135,11 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
base::TimeDelta total_render_time_;
// The audio parameters of the capture source.
// Must only be touched on the main thread.
media::AudioParameters source_params_;
// The audio parameters used by the sink.
// Must only be touched on the main thread.
media::AudioParameters sink_params_;
// Set when playing, cleared when paused.
@@ -9,6 +9,7 @@
#include "base/strings/utf_string_conversions.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/audio_bus.h"
using base::win::ScopedComPtr;
@@ -97,7 +98,9 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
}
}
WASAPIAudioInputStream::~WASAPIAudioInputStream() {}
WASAPIAudioInputStream::~WASAPIAudioInputStream() {
DCHECK(CalledOnValidThread());
}
bool WASAPIAudioInputStream::Open() {
DCHECK(CalledOnValidThread());
@@ -512,6 +515,11 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
DLOG(WARNING) << "Ducking has been requested for a non-default device. "
"Not supported.";
// We can't honor the requested effect flag, so turn it off and
// continue. We'll check this flag later to see if we've actually
// opened up the communications device, so it's important that it
// reflects the active state.
effects_ &= ~AudioParameters::DUCKING;
endpoint_device_.Release(); // Fall back on code below.
}
}
@@ -639,12 +647,14 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
// buffer is never smaller than the minimum buffer size needed to ensure
// that glitches do not occur between the periodic processing passes.
// This setting should lead to lowest possible latency.
HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
flags,
0, // hnsBufferDuration
0,
&format_,
NULL);
HRESULT hr = audio_client_->Initialize(
AUDCLNT_SHAREMODE_SHARED,
flags,
0, // hnsBufferDuration
0,
&format_,
(effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL);
if (FAILED(hr))
return hr;
@@ -157,8 +157,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
// A copy of the supplied AudioParameters' |effects|.
const int effects_;
// A copy of the supplied AudioParameters' |effects|. If ducking was
// specified (desired device=communications) but we ended up not being
// able to open the communications device, this flag will be cleared.
int effects_;
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
@@ -71,6 +71,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
source_(NULL),
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
@@ -120,7 +121,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
DCHECK(stop_render_event_.IsValid());
}
WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
}
bool WASAPIAudioOutputStream::Open() {
VLOG(1) << "WASAPIAudioOutputStream::Open()";
@@ -128,11 +131,19 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
DCHECK(!audio_client_);
DCHECK(!audio_render_client_);
// Will be set to true if we ended up opening the default communications
// device.
bool communications_device = false;
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
if (device_id_.empty() ||
CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
communications_device = (device_role_ == eCommunications);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
@@ -157,7 +168,8 @@ bool WASAPIAudioOutputStream::Open() {
// mode and using event-driven buffer handling.
hr = CoreAudioUtil::SharedModeInitialize(
audio_client, &format_, audio_samples_render_event_.Get(),
&endpoint_buffer_size_frames_);
&endpoint_buffer_size_frames_,
communications_device ? &kCommunicationsSessionId : NULL);
if (FAILED(hr))
return false;
@@ -198,7 +210,7 @@ bool WASAPIAudioOutputStream::Open() {
if (!audio_render_client)
return false;
// Store valid COM interfaces.
audio_client_ = audio_client;
audio_render_client_ = audio_render_client;
@@ -23,6 +23,12 @@ using base::win::ScopedHandle;
namespace media {
// See header file for documentation.
// {BE39AF4F-087C-423F-9303-234EC1E5B8EE}
const GUID kCommunicationsSessionId = {
0xbe39af4f, 0x87c, 0x423f, { 0x93, 0x3, 0x23, 0x4e, 0xc1, 0xe5, 0xb8, 0xee }
};
enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };
// Converts Microsoft's channel configuration to ChannelLayout.
@@ -731,10 +737,9 @@ ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
return static_cast<ChannelConfig>(format.dwChannelMask);
}
HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
const WAVEFORMATPCMEX* format,
HANDLE event_handle,
uint32* endpoint_buffer_size) {
HRESULT CoreAudioUtil::SharedModeInitialize(
IAudioClient* client, const WAVEFORMATPCMEX* format, HANDLE event_handle,
uint32* endpoint_buffer_size, const GUID* session_guid) {
DCHECK(IsSupported());
// Use default flags (i.e., don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
@@ -760,7 +765,7 @@ HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
0,
0,
reinterpret_cast<const WAVEFORMATEX*>(format),
NULL);
session_guid);
if (FAILED(hr)) {
DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
return hr;
@@ -200,10 +200,15 @@ class MEDIA_EXPORT CoreAudioUtil {
// If a valid event is provided in |event_handle|, the client will be
// initialized for event-driven buffer handling. If |event_handle| is set to
// NULL, event-driven buffer handling is not utilized.
// This function will initialize the audio client as part of the default
// audio session if NULL is passed for |session_guid|; otherwise the client
// will be associated with the specified session.
static HRESULT SharedModeInitialize(IAudioClient* client,
const WAVEFORMATPCMEX* format,
HANDLE event_handle,
uint32* endpoint_buffer_size);
uint32* endpoint_buffer_size,
const GUID* session_guid);
// TODO(henrika): add ExclusiveModeInitialize(...)
// Create an IAudioRenderClient client for an existing IAudioClient given by
@@ -230,6 +235,13 @@
DISALLOW_COPY_AND_ASSIGN(CoreAudioUtil);
};
// The special audio session identifier we use when opening up the default
// communication device.  As a result, a separate volume control is shown in
// the system's volume mixer, which makes it easier to control ducking and to
// observe its behavior.
// Use with |SharedModeInitialize|.
extern const GUID kCommunicationsSessionId;
} // namespace media
#endif // MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
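
For context, a minimal, hedged sketch of what joining this dedicated session
looks like at the raw WASAPI level (a standalone function rather than Chromium
code; assumes COM is already initialized on the calling thread, and error
handling is trimmed):

```cpp
// Sketch: open the default communications render device and initialize it
// into the communications audio session (shared mode).
#include <audioclient.h>
#include <mmdeviceapi.h>

// Same value as the kCommunicationsSessionId added by this patch.
// {BE39AF4F-087C-423F-9303-234EC1E5B8EE}
static const GUID kCommunicationsSessionId = {
  0xbe39af4f, 0x87c, 0x423f, { 0x93, 0x3, 0x23, 0x4e, 0xc1, 0xe5, 0xb8, 0xee }
};

bool InitializeCommunicationsClient(IAudioClient** out_client) {
  // eCommunications selects the default *communications* endpoint, which is
  // the device Chromium opens for WebRTC input and output.
  IMMDeviceEnumerator* enumerator = NULL;
  if (FAILED(CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                              __uuidof(IMMDeviceEnumerator),
                              reinterpret_cast<void**>(&enumerator)))) {
    return false;
  }
  IMMDevice* device = NULL;
  HRESULT hr =
      enumerator->GetDefaultAudioEndpoint(eRender, eCommunications, &device);
  enumerator->Release();
  if (FAILED(hr))
    return false;

  IAudioClient* client = NULL;
  hr = device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
                        reinterpret_cast<void**>(&client));
  device->Release();
  if (FAILED(hr))
    return false;

  WAVEFORMATEX* format = NULL;
  if (FAILED(client->GetMixFormat(&format))) {
    client->Release();
    return false;
  }

  // Passing the session GUID as the last argument is the key step: every
  // stream initialized with this GUID joins the same session, which gets its
  // own entry in the system volume mixer and is what ducking operates on.
  hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED, 0 /* stream flags */,
                          0 /* buffer duration */, 0 /* periodicity */,
                          format, &kCommunicationsSessionId);
  CoTaskMemFree(format);
  if (FAILED(hr)) {
    client->Release();
    return false;
  }
  *out_client = client;
  return true;
}
```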
@@ -346,13 +346,13 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
// Perform a shared-mode initialization without event-driven buffer handling.
uint32 endpoint_buffer_size = 0;
HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
// It is only possible to create a client once.
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_FALSE(SUCCEEDED(hr));
EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
@@ -360,7 +360,7 @@
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
EXPECT_TRUE(client);
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -373,7 +373,7 @@
EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
client, AUDCLNT_SHAREMODE_SHARED, &format));
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_TRUE(FAILED(hr));
EXPECT_EQ(hr, E_INVALIDARG);
@@ -389,7 +389,7 @@
EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
client, AUDCLNT_SHAREMODE_SHARED, &format));
hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
}
@@ -420,7 +420,7 @@ TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
// Do a proper initialization and verify that it works this time.
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
render_client = CoreAudioUtil::CreateRenderClient(client);
EXPECT_TRUE(render_client);
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -432,7 +432,7 @@
// Do a proper initialization and verify that it works this time.
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
capture_client = CoreAudioUtil::CreateCaptureClient(client);
EXPECT_TRUE(capture_client);
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -454,7 +454,7 @@ TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
&format)));
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
&endpoint_buffer_size);
&endpoint_buffer_size, NULL);
EXPECT_GT(endpoint_buffer_size, 0u);
ScopedComPtr<IAudioRenderClient> render_client(