Commit b67648ad authored by Oskar Sundbom, committed by Commit Bot

Win: Remove support for the voice processing DMO echo canceller

After experimenting with it, we've decided not to move forward. Since the
implementation is causing some issues, it's best to remove it.

Bug: 888783, 845187
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I4ef9622340266930737f9a2597c33375702f53b8
Reviewed-on: https://chromium-review.googlesource.com/c/1273069
Commit-Queue: Oskar Sundbom <ossu@chromium.org>
Reviewed-by: Henrik Andreasson <henrika@chromium.org>
Reviewed-by: Henrik Grunell <grunell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#598722}
parent 793f7c35
@@ -231,13 +231,9 @@ source_set("audio") {
]
libs += [
"dmoguids.lib",
"dxguid.lib",
"msdmo.lib",
"setupapi.lib",
"strmiids.lib",
"winmm.lib",
"wmcodecdspuuid.lib",
]
}
......
@@ -58,7 +58,6 @@
#include <Audioclient.h>
#include <MMDeviceAPI.h>
#include <dmo.h>
#include <endpointvolume.h>
#include <stddef.h>
#include <stdint.h>
@@ -94,12 +93,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
public:
// The ctor takes all the usual parameters, plus |manager|, which is the
// audio manager that is creating this object.
WASAPIAudioInputStream(
AudioManagerWin* manager,
const AudioParameters& params,
const std::string& device_id,
const AudioManager::LogCallback& log_callback,
AudioManagerBase::VoiceProcessingMode voice_processing_mode);
WASAPIAudioInputStream(AudioManagerWin* manager,
const AudioParameters& params,
const std::string& device_id,
const AudioManager::LogCallback& log_callback);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
@@ -119,27 +116,16 @@ class MEDIA_EXPORT WASAPIAudioInputStream
bool started() const { return started_; }
private:
// DelegateSimpleThread::Delegate implementation. Calls either
// RunWithAudioCaptureClient() or RunWithDmo().
// DelegateSimpleThread::Delegate implementation.
void Run() override;
// Waits for an event that the audio capture client has data ready.
bool RunWithAudioCaptureClient();
// Polls the DMO (voice processing component) for data every 5 ms.
bool RunWithDmo();
// Pulls capture data from the audio capture client and pushes it to the sink.
// Pulls capture data from the endpoint device and pushes it to the sink.
void PullCaptureDataAndPushToSink();
// Pulls capture data from the DMO and pushes it to the sink.
void PullDmoCaptureDataAndPushToSink();
// Issues the OnError() callback to the |sink_|.
void HandleError(HRESULT err);
// The Open() method is divided into these sub methods when not using the
// voice processing DMO.
// The Open() method is divided into these sub methods.
HRESULT SetCaptureDevice();
HRESULT GetAudioEngineStreamFormat();
// Returns whether the desired format is supported or not and writes the
@@ -155,15 +141,6 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// the format.
void MaybeReportFormatRelatedInitError(HRESULT hr) const;
// The Open() method is divided into these sub methods when using the voice
// processing DMO. In addition, SetupConverterAndStoreFormatInfo() above is
// also called.
bool InitializeDmo();
bool SetDmoProperties();
bool SetDmoFormat();
bool SetDmoDevices(IPropertyStore* ps);
bool CreateDummyRenderClientsForDmo();
// AudioConverter::InputCallback implementation.
double ProvideInput(AudioBus* audio_bus, uint32_t frames_delayed) override;
@@ -215,10 +192,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
WAVEFORMATEX output_format_;
// Contains the audio format we get data from the audio engine in. Set to
// |output_format_| at construction and might be changed to a close match
// if the audio engine doesn't support the originally set format, or to the
// format the voice capture DMO outputs if it's used. Note that this is also
// the format after the fifo, i.e. the input format to the converter if any.
// |output_format_| at construction and might be changed to a close match if
// the audio engine doesn't support the originally set format. Note that this
// is also the format after the fifo, i.e. the input format to the converter
// if any.
WAVEFORMATEX input_format_;
bool opened_ = false;
@@ -237,8 +214,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// converter.
size_t packet_size_bytes_ = 0;
// Length of the audio endpoint buffer, or the buffer size used for the DMO.
// That is, the buffer size before the fifo.
// Length of the audio endpoint buffer, i.e. the buffer size before the fifo.
uint32_t endpoint_buffer_size_frames_ = 0;
// Contains the unique name of the selected endpoint device.
@@ -246,10 +222,6 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// device role and is not a valid ID as such.
std::string device_id_;
// Contains the unique name of the output device from which to cancel echo, in
// case voice processing is enabled, i.e. |use_voice_processing_| is true.
std::string output_device_id_for_aec_;
// Pointer to the object that will receive the recorded audio samples.
AudioInputCallback* sink_ = nullptr;
@@ -325,22 +297,6 @@ class MEDIA_EXPORT WASAPIAudioInputStream
UINT64 total_lost_frames_ = 0;
UINT64 largest_glitch_frames_ = 0;
// Indicates if the voice processing DMO should be used.
bool use_voice_processing_ = false;
// The voice processing DMO and its data buffer.
Microsoft::WRL::ComPtr<IMediaObject> voice_capture_dmo_;
Microsoft::WRL::ComPtr<IMediaBuffer> media_buffer_;
// Dummy rendering when using the DMO. The DMO requires audio rendering to the
// device it's set up to use, otherwise it won't produce any capture audio
// data. Normally, when the DMO is used there's a render stream, but it's not
// guaranteed so we need to support the lack of it. We do this by always
// opening a render client and rendering silence to it when the DMO is
// running.
Microsoft::WRL::ComPtr<IAudioClient> audio_client_for_render_;
Microsoft::WRL::ComPtr<IAudioRenderClient> audio_render_client_;
SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
......
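The comment removed in the hunk above explains why the DMO path needed dummy rendering: the voice capture DSP only produces capture data while something is being rendered to the output device it is configured against. For context, here is a minimal sketch of that silent-render keep-alive. It is not part of this change; it assumes a shared-mode IAudioClient already initialized for the render endpoint, and the function name RenderSilenceOnce is illustrative.

#include <audioclient.h>

// Illustrative only: fill the render endpoint's free space with silence so
// that a voice-capture DMO bound to that endpoint keeps producing data.
HRESULT RenderSilenceOnce(IAudioClient* client,
                          IAudioRenderClient* render_client) {
  UINT32 buffer_size_frames = 0;
  HRESULT hr = client->GetBufferSize(&buffer_size_frames);
  if (FAILED(hr))
    return hr;

  UINT32 padding_frames = 0;
  hr = client->GetCurrentPadding(&padding_frames);
  if (FAILED(hr))
    return hr;

  const UINT32 free_frames = buffer_size_frames - padding_frames;
  BYTE* data = nullptr;
  hr = render_client->GetBuffer(free_frames, &data);
  if (FAILED(hr))
    return hr;

  // AUDCLNT_BUFFERFLAGS_SILENT tells the audio engine to treat the buffer
  // as silence, so |data| does not need to be zeroed explicitly.
  return render_client->ReleaseBuffer(free_frames, AUDCLNT_BUFFERFLAGS_SILENT);
}

CoreAudioUtil::FillRenderEndpointBufferWithSilence(), which stays in core_audio_util_win (see the hunks further down), performs essentially this operation.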
@@ -170,16 +170,11 @@ static bool HasCoreAudioAndInputDevices(AudioManager* audio_man) {
// also allows the user to modify the default settings.
class AudioInputStreamWrapper {
public:
explicit AudioInputStreamWrapper(AudioManager* audio_manager,
bool use_voice_processing)
explicit AudioInputStreamWrapper(AudioManager* audio_manager)
: audio_man_(audio_manager) {
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
AudioDeviceDescription::kDefaultDeviceId, false, &default_params_)));
EXPECT_EQ(format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
if (use_voice_processing) {
default_params_.set_effects(default_params_.effects() |
AudioParameters::ECHO_CANCELLER);
}
frames_per_buffer_ = default_params_.frames_per_buffer();
}
@@ -227,9 +222,8 @@ class AudioInputStreamWrapper {
// Convenience method which creates a default AudioInputStream object.
static AudioInputStream* CreateDefaultAudioInputStream(
AudioManager* audio_manager,
bool use_voice_processing) {
AudioInputStreamWrapper aisw(audio_manager, use_voice_processing);
AudioManager* audio_manager) {
AudioInputStreamWrapper aisw(audio_manager);
AudioInputStream* ais = aisw.Create();
return ais;
}
@@ -264,9 +258,7 @@ class ScopedAudioInputStream {
DISALLOW_COPY_AND_ASSIGN(ScopedAudioInputStream);
};
// The test class. The boolean parameter specifies if voice processing should be
// used.
class WinAudioInputTest : public ::testing::TestWithParam<bool> {
class WinAudioInputTest : public ::testing::Test {
public:
WinAudioInputTest() {
audio_manager_ =
@@ -311,16 +303,15 @@ TEST_F(WinAudioInputTest, WASAPIAudioInputStreamEffects) {
media::AudioDeviceDescriptions device_descriptions;
device_info_accessor.GetAudioInputDeviceDescriptions(&device_descriptions);
// All devices in the device description list should have the experimental
// echo canceller capability.
// No device should have any effects.
for (const auto& device : device_descriptions) {
AudioParameters params =
device_info_accessor.GetInputStreamParameters(device.unique_id);
EXPECT_EQ(params.effects(), AudioParameters::EXPERIMENTAL_ECHO_CANCELLER);
EXPECT_EQ(params.effects(), AudioParameters::NO_EFFECTS);
}
// The two loopback devices are not included in the device description list
// above. They should have no effects.
// above. They should also have no effects.
AudioParameters params = device_info_accessor.GetInputStreamParameters(
AudioDeviceDescription::kLoopbackInputDeviceId);
EXPECT_EQ(params.effects(), AudioParameters::NO_EFFECTS);
@@ -331,27 +322,27 @@ TEST_F(WinAudioInputTest, WASAPIAudioInputStreamEffects) {
}
// Test Create(), Close() calling sequence.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager_.get(), GetParam()));
CreateDefaultAudioInputStream(audio_manager_.get()));
ais.Close();
}
// Test Open(), Close() calling sequence.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager_.get(), GetParam()));
CreateDefaultAudioInputStream(audio_manager_.get()));
EXPECT_TRUE(ais->Open());
ais.Close();
}
// Test Open(), Start(), Close() calling sequence.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager_.get(), GetParam()));
CreateDefaultAudioInputStream(audio_manager_.get()));
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
@@ -359,10 +350,10 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
}
// Test Open(), Start(), Stop(), Close() calling sequence.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager_.get(), GetParam()));
CreateDefaultAudioInputStream(audio_manager_.get()));
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
@@ -371,10 +362,10 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
}
// Test some additional calling sequences.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager_.get(), GetParam()));
CreateDefaultAudioInputStream(audio_manager_.get()));
// Open(), Open() should fail the second time.
EXPECT_TRUE(ais->Open());
@@ -396,7 +387,7 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
ais.Close();
}
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
int count = 0;
@@ -405,7 +396,7 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// Create default WASAPI input stream which records in stereo using
// the shared mixing rate. The default buffer size is 10ms.
AudioInputStreamWrapper aisw(audio_manager_.get(), GetParam());
AudioInputStreamWrapper aisw(audio_manager_.get());
ScopedAudioInputStream ais(aisw.Create());
EXPECT_TRUE(ais->Open());
@@ -479,7 +470,7 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
}
// Test that we can capture a stream in loopback.
TEST_P(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
TEST_F(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
AudioDeviceInfoAccessorForTests device_info_accessor(audio_manager_.get());
ABORT_AUDIO_TEST_IF_NOT(device_info_accessor.HasAudioOutputDevices() &&
CoreAudioUtil::IsSupported());
@@ -513,7 +504,7 @@ TEST_P(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
// To include disabled tests in test execution, just invoke the test program
// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
// environment variable to a value greater than 0.
TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
TEST_F(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
// Name of the output PCM file containing captured data. The output file
@@ -521,7 +512,7 @@ TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
// Example of full name: \src\build\Debug\out_stereo_10sec.pcm.
const char* file_name = "out_10sec.pcm";
AudioInputStreamWrapper aisw(audio_manager_.get(), GetParam());
AudioInputStreamWrapper aisw(audio_manager_.get());
ScopedAudioInputStream ais(aisw.Create());
ASSERT_TRUE(ais->Open());
@@ -535,7 +526,7 @@ TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
ais.Close();
}
TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamResampleToFile) {
TEST_F(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamResampleToFile) {
ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
// This is basically the same test as WASAPIAudioInputStreamRecordToFile
@@ -570,8 +561,6 @@ TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamResampleToFile) {
// Otherwise (e.g. 44.1kHz, 22.05kHz etc) we convert to 48kHz.
const int hw_sample_rate = params.sample_rate();
params.Reset(params.format(), test.layout, test.rate, test.frames);
if (GetParam())
params.set_effects(params.effects() | AudioParameters::ECHO_CANCELLER);
std::string file_name(base::StringPrintf(
"resampled_10sec_%i_to_%i_%s.pcm", hw_sample_rate, params.sample_rate(),
@@ -595,8 +584,4 @@ TEST_P(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamResampleToFile) {
}
}
INSTANTIATE_TEST_CASE_P(/* Intentionally left empty */,
WinAudioInputTest,
::testing::Bool());
} // namespace media
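With the boolean voice-processing parameter gone, the tests above no longer need to be value-parameterized; that is why each TEST_P becomes TEST_F and the INSTANTIATE_TEST_CASE_P boilerplate is deleted. As a generic gtest illustration of the two forms (the fixture and test names below are made up, not taken from the Chromium sources):

#include <gtest/gtest.h>

// Parameterized form: every TEST_P body runs once per value supplied by the
// INSTANTIATE_TEST_CASE_P line (here: false and true).
class ParamExampleTest : public ::testing::TestWithParam<bool> {};

TEST_P(ParamExampleTest, DoesSomething) {
  const bool flag = GetParam();
  EXPECT_TRUE(flag || !flag);  // Placeholder assertion.
}

INSTANTIATE_TEST_CASE_P(All, ParamExampleTest, ::testing::Bool());

// Plain form: one run per test, no instantiation macro required.
class PlainExampleTest : public ::testing::Test {};

TEST_F(PlainExampleTest, DoesSomething) {
  EXPECT_TRUE(true);  // Placeholder assertion.
}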
@@ -23,7 +23,6 @@
#include "base/strings/string_number_conversions.h"
#include "base/win/windows_version.h"
#include "media/audio/audio_device_description.h"
#include "media/audio/audio_features.h"
#include "media/audio/audio_io.h"
#include "media/audio/win/audio_device_listener_win.h"
#include "media/audio/win/audio_low_latency_input_win.h"
@@ -187,12 +186,6 @@ AudioParameters AudioManagerWin::GetInputStreamParameters(
if (user_buffer_size)
parameters.set_frames_per_buffer(user_buffer_size);
if (device_id != AudioDeviceDescription::kLoopbackInputDeviceId &&
device_id != AudioDeviceDescription::kLoopbackWithMuteDeviceId) {
parameters.set_effects(parameters.effects() |
AudioParameters::EXPERIMENTAL_ECHO_CANCELLER);
}
return parameters;
}
@@ -263,14 +256,7 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
const LogCallback& log_callback) {
// Used for both AUDIO_PCM_LOW_LATENCY and AUDIO_PCM_LINEAR.
DVLOG(1) << "MakeLowLatencyInputStream: " << device_id;
VoiceProcessingMode voice_processing_mode =
params.effects() & AudioParameters::ECHO_CANCELLER
? VoiceProcessingMode::kEnabled
: VoiceProcessingMode::kDisabled;
return new WASAPIAudioInputStream(this, params, device_id, log_callback,
voice_processing_mode);
return new WASAPIAudioInputStream(this, params, device_id, log_callback);
}
std::string AudioManagerWin::GetDefaultInputDeviceID() {
......
@@ -902,71 +902,4 @@ bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
return true;
}
HRESULT CoreAudioUtil::GetDeviceCollectionIndex(const std::string& device_id,
EDataFlow data_flow,
WORD* index) {
ComPtr<IMMDeviceEnumerator> enumerator = CreateDeviceEnumerator();
if (!enumerator.Get()) {
DLOG(ERROR) << "Failed to create device enumerator.";
return E_FAIL;
}
ComPtr<IMMDeviceCollection> device_collection;
HRESULT hr = enumerator->EnumAudioEndpoints(data_flow, DEVICE_STATE_ACTIVE,
&device_collection);
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to get device collection.";
return hr;
}
UINT number_of_devices = 0;
hr = device_collection->GetCount(&number_of_devices);
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to get device collection count.";
return hr;
}
ComPtr<IMMDevice> device;
for (WORD i = 0; i < number_of_devices; ++i) {
hr = device_collection->Item(i, &device);
if (FAILED(hr)) {
DLOG(WARNING) << "Failed to get device.";
continue;
}
ScopedCoMem<WCHAR> current_device_id;
hr = device->GetId(&current_device_id);
if (FAILED(hr)) {
DLOG(WARNING) << "Failed to get device id.";
continue;
}
if (base::UTF16ToUTF8(current_device_id.get()) == device_id) {
*index = i;
return S_OK;
}
}
DVLOG(1) << "No matching device found.";
return S_FALSE;
}
HRESULT CoreAudioUtil::SetBoolProperty(IPropertyStore* property_store,
REFPROPERTYKEY key,
VARIANT_BOOL value) {
base::win::ScopedPropVariant pv;
PROPVARIANT* pv_ptr = pv.Receive();
pv_ptr->vt = VT_BOOL;
pv_ptr->boolVal = value;
return property_store->SetValue(key, pv.get());
}
HRESULT CoreAudioUtil::SetVtI4Property(IPropertyStore* property_store,
REFPROPERTYKEY key,
LONG value) {
base::win::ScopedPropVariant pv;
PROPVARIANT* pv_ptr = pv.Receive();
pv_ptr->vt = VT_I4;
pv_ptr->lVal = value;
return property_store->SetValue(key, pv.get());
}
} // namespace media
@@ -207,23 +207,6 @@ class MEDIA_EXPORT CoreAudioUtil {
static bool FillRenderEndpointBufferWithSilence(
IAudioClient* client, IAudioRenderClient* render_client);
// Gets the device collection index for the device specified by |device_id|.
// If the device is found in the device collection, the index is written to
// |*index| and S_OK is returned. If the device is not found, S_FALSE is
// returned and |*index| is left unchanged. In case of an error, the error
// result is returned and |*index| is left unchanged.
static HRESULT GetDeviceCollectionIndex(const std::string& device_id,
EDataFlow data_flow,
WORD* index);
// Sets the property identified by |key| to |value| in |*property_store|.
static HRESULT SetBoolProperty(IPropertyStore* property_store,
REFPROPERTYKEY key,
VARIANT_BOOL value);
static HRESULT SetVtI4Property(IPropertyStore* property_store,
REFPROPERTYKEY key,
LONG value);
private:
CoreAudioUtil() {}
~CoreAudioUtil() {}
......
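The removed GetDeviceCollectionIndex() helper had a three-way contract: S_OK when the device is found, S_FALSE when it is not, and a failure HRESULT on error. Since S_FALSE also satisfies SUCCEEDED(), callers had to compare against S_OK explicitly. A hedged caller-side sketch of that contract, not taken from the Chromium tree (the function and variable names are illustrative):

#include <mmdeviceapi.h>
#include <string>
#include "media/audio/win/core_audio_util_win.h"

// Returns true and fills |*index| only when the capture device is found.
bool FindCaptureDeviceIndex(const std::string& device_id, WORD* index) {
  const HRESULT hr = media::CoreAudioUtil::GetDeviceCollectionIndex(
      device_id, eCapture, index);
  if (hr == S_OK)
    return true;   // |*index| now holds the device's collection index.
  // S_FALSE ("not found") and FAILED(hr) both leave |*index| unchanged.
  return false;
}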