Commit af041ca4 authored by Xiangjun Zhang, committed by Commit Bot

Mirroring Service: Add audio capturing client implementation.

Add the client-side audio mirroring code for the Mirroring Service.

Bug: 734672
Change-Id: I2a503fb5a059861a9cc4aa190a80134876eade4b
Reviewed-on: https://chromium-review.googlesource.com/1083626
Commit-Queue: Xiangjun Zhang <xjz@chromium.org>
Reviewed-by: Yuri Wiitala <miu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#568310}
parent 499278c4
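For orientation, a sketch of the capture-side audio path this change wires up (a summary of the files below, not new code; thread hops and error handling omitted):

// media::AudioInputDevice
//   -> CapturedAudioInput (media::AudioInputIPC, created per stream)
//   -> ResourceProvider::CreateAudioStream() (stream backed by the Audio Service)
//
// captured audio -> Session::AudioCapturingCallback::Capture()
//   -> AudioRtpStream::InsertAudio() -> media::cast::AudioSender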
......@@ -25,6 +25,8 @@ source_set("interface") {
source_set("service") {
sources = [
"captured_audio_input.cc",
"captured_audio_input.h",
"message_dispatcher.cc",
"message_dispatcher.h",
"mirror_settings.cc",
......@@ -64,6 +66,7 @@ source_set("service") {
"//media/cast:net",
"//media/cast:sender",
"//media/mojo/common:common",
"//media/mojo/interfaces",
"//media/mojo/interfaces:remoting",
"//mojo/public/cpp/bindings",
"//mojo/public/cpp/system",
......@@ -77,6 +80,7 @@ source_set("service") {
source_set("unittests") {
testonly = true
sources = [
"captured_audio_input_unittest.cc",
"fake_network_service.cc",
"fake_network_service.h",
"fake_video_capture_host.cc",
......@@ -104,6 +108,7 @@ source_set("unittests") {
"//media/cast:sender",
"//media/cast:test_support",
"//media/cast:test_support",
"//media/mojo/interfaces",
"//media/mojo/interfaces:remoting",
"//mojo/public/cpp/bindings",
"//net",
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/mirroring/service/captured_audio_input.h"
#include "base/logging.h"
#include "mojo/public/cpp/system/platform_handle.h"
namespace mirroring {
CapturedAudioInput::CapturedAudioInput(StreamCreatorCallback callback)
: stream_creator_callback_(std::move(callback)),
stream_client_binding_(this) {
DETACH_FROM_SEQUENCE(sequence_checker_);
DCHECK(!stream_creator_callback_.is_null());
}
CapturedAudioInput::~CapturedAudioInput() {}
void CapturedAudioInput::CreateStream(media::AudioInputIPCDelegate* delegate,
const media::AudioParameters& params,
bool automatic_gain_control,
uint32_t total_segments) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!automatic_gain_control); // Invalid to be true for screen capture.
DCHECK(delegate);
DCHECK(!delegate_);
delegate_ = delegate;
stream_creator_callback_.Run(this, params, total_segments);
}
void CapturedAudioInput::RecordStream() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(stream_.is_bound());
stream_->Record();
}
void CapturedAudioInput::SetVolume(double volume) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(stream_.is_bound());
stream_->SetVolume(volume);
}
void CapturedAudioInput::CloseStream() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
delegate_ = nullptr;
if (stream_client_binding_.is_bound())
stream_client_binding_.Unbind();
stream_.reset();
}
void CapturedAudioInput::SetOutputDeviceForAec(
const std::string& output_device_id) {
NOTREACHED();
}
void CapturedAudioInput::StreamCreated(
media::mojom::AudioInputStreamPtr stream,
media::mojom::AudioInputStreamClientRequest client_request,
media::mojom::ReadOnlyAudioDataPipePtr data_pipe,
bool initially_muted) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(delegate_);
DCHECK(!stream_);
DCHECK(!stream_client_binding_.is_bound());
stream_ = std::move(stream);
stream_client_binding_.Bind(std::move(client_request));
base::PlatformFile socket_handle;
auto result =
mojo::UnwrapPlatformFile(std::move(data_pipe->socket), &socket_handle);
DCHECK_EQ(result, MOJO_RESULT_OK);
base::ReadOnlySharedMemoryRegion& shared_memory_region =
data_pipe->shared_memory;
DCHECK(shared_memory_region.IsValid());
delegate_->OnStreamCreated(std::move(shared_memory_region), socket_handle,
initially_muted);
}
void CapturedAudioInput::OnError() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(delegate_);
delegate_->OnError();
}
void CapturedAudioInput::OnMutedStateChanged(bool is_muted) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(delegate_);
delegate_->OnMuted(is_muted);
}
} // namespace mirroring
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_MIRRORING_SERVICE_CAPTURED_AUDIO_INPUT_H_
#define COMPONENTS_MIRRORING_SERVICE_CAPTURED_AUDIO_INPUT_H_
#include "base/callback.h"
#include "base/macros.h"
#include "base/sequence_checker.h"
#include "components/mirroring/service/interface.h"
#include "media/audio/audio_input_ipc.h"
#include "media/mojo/interfaces/audio_input_stream.mojom.h"
#include "mojo/public/cpp/bindings/binding.h"
namespace mirroring {
// CapturedAudioInput handles the creation, initialization and control of an
// audio input stream created by Audio Service.
class CapturedAudioInput final : public media::AudioInputIPC,
public AudioStreamCreatorClient,
public media::mojom::AudioInputStreamClient {
public:
using StreamCreatorCallback =
base::RepeatingCallback<void(AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t total_segments)>;
explicit CapturedAudioInput(StreamCreatorCallback callback);
~CapturedAudioInput() override;
private:
// media::AudioInputIPC implementation.
void CreateStream(media::AudioInputIPCDelegate* delegate,
const media::AudioParameters& params,
bool automatic_gain_control,
uint32_t total_segments) override;
void RecordStream() override;
void SetVolume(double volume) override;
void CloseStream() override;
void SetOutputDeviceForAec(const std::string& output_device_id) override;
// AudioStreamCreatorClient implementation.
void StreamCreated(media::mojom::AudioInputStreamPtr stream,
media::mojom::AudioInputStreamClientRequest client_request,
media::mojom::ReadOnlyAudioDataPipePtr data_pipe,
bool initially_muted) override;
// media::mojom::AudioInputStreamClient implementation.
void OnError() override;
void OnMutedStateChanged(bool is_muted) override;
SEQUENCE_CHECKER(sequence_checker_);
const StreamCreatorCallback stream_creator_callback_;
mojo::Binding<media::mojom::AudioInputStreamClient> stream_client_binding_;
media::AudioInputIPCDelegate* delegate_ = nullptr;
media::mojom::AudioInputStreamPtr stream_;
DISALLOW_COPY_AND_ASSIGN(CapturedAudioInput);
};
} // namespace mirroring
#endif // COMPONENTS_MIRRORING_SERVICE_CAPTURED_AUDIO_INPUT_H_
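For context, a minimal usage sketch of how this class plugs into media::AudioInputDevice, mirroring the wiring Session adds later in this change; StartAudioCapture and its parameters are illustrative, not part of the CL:

#include <memory>
#include <utility>

#include "base/memory/scoped_refptr.h"
#include "base/threading/platform_thread.h"
#include "components/mirroring/service/captured_audio_input.h"
#include "media/audio/audio_input_device.h"
#include "media/base/audio_capturer_source.h"

namespace mirroring {

// Sketch only. |creator| is the StreamCreatorCallback that forwards the stream
// request to a ResourceProvider (Session::CreateAudioStream in this CL), and
// |capture_callback| receives the captured audio on a dedicated audio thread.
scoped_refptr<media::AudioInputDevice> StartAudioCapture(
    CapturedAudioInput::StreamCreatorCallback creator,
    const media::AudioParameters& params,
    media::AudioCapturerSource::CaptureCallback* capture_callback) {
  scoped_refptr<media::AudioInputDevice> device = new media::AudioInputDevice(
      std::make_unique<CapturedAudioInput>(std::move(creator)),
      base::ThreadPriority::NORMAL);
  device->Initialize(params, capture_callback);
  device->Start();
  // The caller keeps |device| alive and calls Stop() when capture should end.
  return device;
}

}  // namespace mirroring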
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/mirroring/service/captured_audio_input.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "media/base/audio_parameters.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
#include "mojo/public/cpp/system/buffer.h"
#include "mojo/public/cpp/system/platform_handle.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::InvokeWithoutArgs;
namespace mirroring {
namespace {
class MockStream final : public media::mojom::AudioInputStream {
public:
MOCK_METHOD0(Record, void());
MOCK_METHOD1(SetVolume, void(double));
};
class MockDelegate final : public media::AudioInputIPCDelegate {
public:
MockDelegate() {}
~MockDelegate() override {}
MOCK_METHOD1(StreamCreated, void(bool initially_muted));
MOCK_METHOD0(OnError, void());
MOCK_METHOD1(OnMuted, void(bool muted));
MOCK_METHOD0(OnIPCClosed, void());
void OnStreamCreated(base::ReadOnlySharedMemoryRegion shared_memory_region,
base::SyncSocket::Handle socket_handle,
bool initially_muted) override {
StreamCreated(initially_muted);
}
};
} // namespace
class CapturedAudioInputTest : public ::testing::Test {
public:
CapturedAudioInputTest() {}
~CapturedAudioInputTest() override {
scoped_task_environment_.RunUntilIdle();
}
void CreateMockStream(bool initially_muted,
AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t total_segments) {
EXPECT_EQ(base::SyncSocket::kInvalidHandle, socket_.handle());
EXPECT_FALSE(stream_);
media::mojom::AudioInputStreamPtr stream_ptr;
auto input_stream = std::make_unique<MockStream>();
stream_ = input_stream.get();
mojo::MakeStrongBinding(std::move(input_stream),
mojo::MakeRequest(&stream_ptr));
base::CancelableSyncSocket foreign_socket;
EXPECT_TRUE(
base::CancelableSyncSocket::CreatePair(&socket_, &foreign_socket));
client->StreamCreated(
std::move(stream_ptr), mojo::MakeRequest(&stream_client_),
{base::in_place, base::ReadOnlySharedMemoryRegion::Create(1024).region,
mojo::WrapPlatformFile(foreign_socket.Release())},
initially_muted);
}
protected:
void CreateStream(bool initially_muted) {
audio_input_ = std::make_unique<CapturedAudioInput>(
base::BindRepeating(&CapturedAudioInputTest::CreateMockStream,
base::Unretained(this), initially_muted));
base::RunLoop run_loop;
EXPECT_CALL(delegate_, StreamCreated(initially_muted))
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
audio_input_->CreateStream(&delegate_, media::AudioParameters(), false, 10);
run_loop.Run();
}
void CloseStream() {
EXPECT_TRUE(audio_input_);
audio_input_->CloseStream();
scoped_task_environment_.RunUntilIdle();
socket_.Close();
audio_input_.reset();
stream_ = nullptr;
}
void SignalStreamError() {
EXPECT_TRUE(stream_client_.is_bound());
base::RunLoop run_loop;
EXPECT_CALL(delegate_, OnError())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
stream_client_->OnError();
run_loop.Run();
}
void SignalMutedStateChanged(bool is_muted) {
EXPECT_TRUE(stream_client_.is_bound());
base::RunLoop run_loop;
EXPECT_CALL(delegate_, OnMuted(true))
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
stream_client_->OnMutedStateChanged(is_muted);
run_loop.Run();
}
void SetVolume(double volume) {
EXPECT_TRUE(audio_input_);
base::RunLoop run_loop;
EXPECT_CALL(*stream_, SetVolume(volume))
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
audio_input_->SetVolume(volume);
run_loop.Run();
}
void Record() {
EXPECT_TRUE(audio_input_);
base::RunLoop run_loop;
EXPECT_CALL(*stream_, Record())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
audio_input_->RecordStream();
run_loop.Run();
}
private:
base::test::ScopedTaskEnvironment scoped_task_environment_;
std::unique_ptr<media::AudioInputIPC> audio_input_;
MockDelegate delegate_;
MockStream* stream_ = nullptr;
media::mojom::AudioInputStreamClientPtr stream_client_;
base::CancelableSyncSocket socket_;
DISALLOW_COPY_AND_ASSIGN(CapturedAudioInputTest);
};
TEST_F(CapturedAudioInputTest, CreateStream) {
// Test that the initial muted state can be propagated to |delegate_|.
CreateStream(false);
CloseStream();
CreateStream(true);
CloseStream();
}
TEST_F(CapturedAudioInputTest, PropagatesStreamError) {
CreateStream(false);
SignalStreamError();
CloseStream();
}
TEST_F(CapturedAudioInputTest, PropagatesMutedStateChange) {
CreateStream(false);
SignalMutedStateChanged(true);
CloseStream();
}
TEST_F(CapturedAudioInputTest, SetVolume) {
CreateStream(false);
SetVolume(0.8);
CloseStream();
}
TEST_F(CapturedAudioInputTest, Record) {
CreateStream(false);
Record();
CloseStream();
}
} // namespace mirroring
......@@ -7,7 +7,10 @@
#include <string>
#include "media/base/audio_parameters.h"
#include "media/capture/mojom/video_capture.mojom.h"
#include "media/mojo/interfaces/audio_data_pipe.mojom.h"
#include "media/mojo/interfaces/audio_input_stream.mojom.h"
#include "net/base/ip_address.h"
#include "services/network/public/mojom/network_service.mojom.h"
......@@ -78,6 +81,19 @@ class SessionObserver {
virtual void DidStop() = 0;
};
class AudioStreamCreatorClient {
public:
virtual ~AudioStreamCreatorClient() {}
// Called by ResourceProvider when an audio input stream is created as
// requested.
virtual void StreamCreated(
media::mojom::AudioInputStreamPtr stream,
media::mojom::AudioInputStreamClientRequest client_request,
media::mojom::ReadOnlyAudioDataPipePtr data_pipe,
bool initially_muted) = 0;
};
class ResourceProvider {
public:
virtual ~ResourceProvider() {}
......@@ -86,7 +102,9 @@ class ResourceProvider {
media::mojom::VideoCaptureHostRequest request) = 0;
virtual void GetNetworkContext(
network::mojom::NetworkContextRequest request) = 0;
// TODO(xjz): Add interface to get AudioCaptureHost.
virtual void CreateAudioStream(AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t total_segments) = 0;
// TODO(xjz): Add interface for HW encoder profiles query and VEA create
// support.
};
......
......@@ -6,6 +6,8 @@
#include <algorithm>
#include "media/base/audio_parameters.h"
using media::cast::FrameSenderConfig;
using media::cast::Codec;
using media::cast::RtpPayloadType;
......@@ -118,6 +120,14 @@ media::VideoCaptureParams MirrorSettings::GetVideoCaptureParams() {
return params;
}
media::AudioParameters MirrorSettings::GetAudioCaptureParams() {
media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, kAudioTimebase,
kAudioTimebase / 100);
DCHECK(params.IsValid());
return params;
}
base::Value MirrorSettings::ToDictionaryValue() {
base::Value settings(base::Value::Type::DICTIONARY);
settings.SetKey("maxWidth", base::Value(max_width_));
......
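A quick check on the buffer sizing in GetAudioCaptureParams() above: frames_per_buffer is kAudioTimebase / 100, i.e. 10 ms of audio per buffer. A standalone sketch of that arithmetic (48000 Hz is an assumed value for kAudioTimebase, which is defined elsewhere in mirror_settings.cc):

#include <cassert>

int main() {
  const int kAudioTimebase = 48000;                    // Assumed sample rate in Hz.
  const int frames_per_buffer = kAudioTimebase / 100;  // 480 frames.
  // 480 frames at 48 kHz is a 10 ms capture buffer, a typical low-latency
  // interval for AUDIO_PCM_LOW_LATENCY capture.
  assert(frames_per_buffer * 1000 / kAudioTimebase == 10);
  return 0;
}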
......@@ -10,6 +10,10 @@
#include "media/capture/video_capture_types.h"
#include "media/cast/cast_config.h"
namespace media {
class AudioParameters;
} // namespace media
namespace mirroring {
// Holds the default settings for a mirroring session. This class provides the
......@@ -37,6 +41,9 @@ class MirrorSettings {
// Get video capture constraints with the current settings.
media::VideoCaptureParams GetVideoCaptureParams();
// Get audio capture constraints with the current settings.
media::AudioParameters GetAudioCaptureParams();
int max_width() const { return max_width_; }
int max_height() const { return max_height_; }
......
......@@ -37,14 +37,13 @@ VideoRtpStream::VideoRtpStream(
: video_sender_(std::move(video_sender)),
client_(client),
consecutive_refresh_count_(0),
expecting_a_refresh_frame_(false),
weak_factory_(this) {
expecting_a_refresh_frame_(false) {
DCHECK(video_sender_);
DCHECK(client);
refresh_timer_.Start(FROM_HERE, kRefreshInterval,
base::BindRepeating(&VideoRtpStream::OnRefreshTimerFired,
weak_factory_.GetWeakPtr()));
this->AsWeakPtr()));
}
VideoRtpStream::~VideoRtpStream() {}
......@@ -112,7 +111,7 @@ AudioRtpStream::AudioRtpStream(
AudioRtpStream::~AudioRtpStream() {}
void AudioRtpStream::InsertAudio(std::unique_ptr<media::AudioBus> audio_bus,
base::TimeTicks capture_time) {
const base::TimeTicks& capture_time) {
audio_sender_->InsertAudio(std::move(audio_bus), capture_time);
}
......
......@@ -61,7 +61,7 @@ class RtpStreamClient {
// regular intervals for a short period of time. This provides the video
// encoder, downstream, several copies of the last frame so that it may clear up
// lossy encoding artifacts.
class VideoRtpStream {
class VideoRtpStream : public base::SupportsWeakPtr<VideoRtpStream> {
public:
VideoRtpStream(std::unique_ptr<media::cast::VideoSender> video_sender,
base::WeakPtr<RtpStreamClient> client);
......@@ -71,10 +71,6 @@ class VideoRtpStream {
// |video_frame| is required to provide REFERENCE_TIME in the metadata.
void InsertVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
base::WeakPtr<VideoRtpStream> AsWeakPtr() {
return weak_factory_.GetWeakPtr();
}
void SetTargetPlayoutDelay(base::TimeDelta playout_delay);
private:
......@@ -94,15 +90,11 @@ class VideoRtpStream {
// cleared once the next frame is received.
bool expecting_a_refresh_frame_;
base::WeakPtrFactory<VideoRtpStream> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoRtpStream);
};
// Receives audio data and submits the data to media::cast::AudioSender.
// TODO(xjz): Complete implementation after Audio Service mirroring refactoring
// is landed.
class AudioRtpStream {
class AudioRtpStream : public base::SupportsWeakPtr<AudioRtpStream> {
public:
AudioRtpStream(std::unique_ptr<media::cast::AudioSender> audio_sender,
base::WeakPtr<RtpStreamClient> client);
......@@ -110,7 +102,7 @@ class AudioRtpStream {
// Called by AudioCaptureClient when new audio data is available.
void InsertAudio(std::unique_ptr<media::AudioBus> audio_bus,
base::TimeTicks estimated_capture_time);
const base::TimeTicks& estimated_capture_time);
void SetTargetPlayoutDelay(base::TimeDelta playout_delay);
......
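For reference, a small sketch of what deriving from base::SupportsWeakPtr buys here: other components bind into the streams, as session.cc does later in this change (MakeAudioDataCallback is illustrative only):

#include <memory>

#include "base/bind.h"
#include "base/callback.h"
#include "components/mirroring/service/rtp_stream.h"
#include "media/base/bind_to_current_loop.h"

namespace mirroring {

// Sketch: hands captured audio to |stream| through the weak pointer that
// base::SupportsWeakPtr now provides. If |stream| is destroyed before a
// capture callback fires, the bound call becomes a no-op instead of a
// use-after-free.
base::RepeatingCallback<void(std::unique_ptr<media::AudioBus>,
                             const base::TimeTicks&)>
MakeAudioDataCallback(AudioRtpStream* stream) {
  return media::BindToCurrentLoop(
      base::BindRepeating(&AudioRtpStream::InsertAudio, stream->AsWeakPtr()));
}

}  // namespace mirroring

Compared with the removed member WeakPtrFactory, SupportsWeakPtr lets callers obtain weak pointers without the stream exposing its factory, at the cost of no longer being able to invalidate all outstanding pointers explicitly.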
......@@ -17,9 +17,13 @@
#include "base/time/time.h"
#include "base/values.h"
#include "build/build_config.h"
#include "components/mirroring/service/captured_audio_input.h"
#include "components/mirroring/service/udp_socket_client.h"
#include "components/mirroring/service/video_capture_client.h"
#include "crypto/random.h"
#include "media/audio/audio_input_device.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/bind_to_current_loop.h"
#include "media/cast/net/cast_transport.h"
#include "media/cast/sender/audio_sender.h"
#include "media/cast/sender/video_sender.h"
......@@ -217,6 +221,54 @@ void AddStreamObject(int stream_index,
} // namespace
class Session::AudioCapturingCallback final
: public media::AudioCapturerSource::CaptureCallback {
public:
using AudioDataCallback =
base::RepeatingCallback<void(std::unique_ptr<media::AudioBus> audio_bus,
const base::TimeTicks& recorded_time)>;
AudioCapturingCallback(AudioDataCallback audio_data_callback,
base::OnceClosure error_callback)
: audio_data_callback_(std::move(audio_data_callback)),
error_callback_(std::move(error_callback)) {
DCHECK(!audio_data_callback_.is_null());
}
~AudioCapturingCallback() override {}
private:
// media::AudioCapturerSource::CaptureCallback implementation.
void OnCaptureStarted() override {}
// Called on audio thread.
void Capture(const media::AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume,
bool key_pressed) override {
// TODO(xjz): Don't copy the audio data. Instead, send |audio_bus| directly
// to the encoder.
std::unique_ptr<media::AudioBus> captured_audio =
media::AudioBus::Create(audio_bus->channels(), audio_bus->frames());
audio_bus->CopyTo(captured_audio.get());
const base::TimeTicks recorded_time =
base::TimeTicks::Now() -
base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
audio_data_callback_.Run(std::move(captured_audio), recorded_time);
}
void OnCaptureError(const std::string& message) override {
if (!error_callback_.is_null())
std::move(error_callback_).Run();
}
void OnCaptureMuted(bool is_muted) override {}
const AudioDataCallback audio_data_callback_;
base::OnceClosure error_callback_;
DISALLOW_COPY_AND_ASSIGN(AudioCapturingCallback);
};
Session::Session(int32_t session_id,
const CastSinkInfo& sink_info,
const gfx::Size& max_resolution,
......@@ -281,6 +333,11 @@ void Session::StopSession() {
session_monitor_->StopStreamingSession();
session_monitor_.reset();
weak_factory_.InvalidateWeakPtrs();
if (audio_input_device_) {
audio_input_device_->Stop();
audio_input_device_ = nullptr;
}
audio_capturing_callback_.reset();
audio_encode_thread_ = nullptr;
video_encode_thread_ = nullptr;
video_capture_client_.reset();
......@@ -489,8 +546,22 @@ void Session::OnAnswer(const std::string& cast_mode,
cast_transport_.get());
audio_stream_ = std::make_unique<AudioRtpStream>(
std::move(audio_sender), weak_factory_.GetWeakPtr());
// TODO(xjz): Start audio capturing.
NOTIMPLEMENTED();
DCHECK(!audio_capturing_callback_);
// TODO(xjz): Eliminate the thread hops. The audio data is thread-hopped
// from the audio thread, and later thread-hopped again to the encoding
// thread.
audio_capturing_callback_ = std::make_unique<AudioCapturingCallback>(
media::BindToCurrentLoop(base::BindRepeating(
&AudioRtpStream::InsertAudio, audio_stream_->AsWeakPtr())),
base::BindOnce(&Session::ReportError, weak_factory_.GetWeakPtr(),
SessionError::AUDIO_CAPTURE_ERROR));
audio_input_device_ = new media::AudioInputDevice(
std::make_unique<CapturedAudioInput>(base::BindRepeating(
&Session::CreateAudioStream, base::Unretained(this))),
base::ThreadPriority::NORMAL);
audio_input_device_->Initialize(mirror_settings_.GetAudioCaptureParams(),
audio_capturing_callback_.get());
audio_input_device_->Start();
}
if (has_video) {
......@@ -533,6 +604,12 @@ void Session::OnResponseParsingError(const std::string& error_message) {
// TODO(xjz): Log the |error_message| in the mirroring logs.
}
void Session::CreateAudioStream(AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t shared_memory_count) {
resource_provider_->CreateAudioStream(client, params, shared_memory_count);
}
void Session::SetTargetPlayoutDelay(base::TimeDelta playout_delay) {
if (audio_stream_)
audio_stream_->SetTargetPlayoutDelay(playout_delay);
......
......@@ -19,6 +19,8 @@
namespace media {
class AudioInputDevice;
namespace cast {
class CastTransport;
} // namespace cast
......@@ -79,7 +81,15 @@ class Session final : public RtpStreamClient {
// responses.
void OnResponseParsingError(const std::string& error_message);
// Creates an audio input stream through the Audio Service. |client| is
// notified when the stream is created.
void CreateAudioStream(AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t shared_memory_count);
private:
class AudioCapturingCallback;
void StopSession();
// Notify |observer_| that an error occurred and close the session.
......@@ -118,6 +128,8 @@ class Session final : public RtpStreamClient {
std::unique_ptr<media::cast::CastTransport> cast_transport_;
scoped_refptr<base::SingleThreadTaskRunner> audio_encode_thread_ = nullptr;
scoped_refptr<base::SingleThreadTaskRunner> video_encode_thread_ = nullptr;
std::unique_ptr<AudioCapturingCallback> audio_capturing_callback_;
scoped_refptr<media::AudioInputDevice> audio_input_device_;
base::WeakPtrFactory<Session> weak_factory_;
};
......
......@@ -10,7 +10,7 @@
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
#include "base/values.h"
#include "components/mirroring/service/fake_network_service.h"
#include "components/mirroring/service/fake_video_capture_host.h"
......@@ -39,12 +39,11 @@ class SessionTest : public ResourceProvider,
public CastMessageChannel,
public ::testing::Test {
public:
SessionTest() : receiver_endpoint_(media::cast::test::GetFreeLocalPort()) {
testing_clock_.Advance(base::TimeTicks::Now() - base::TimeTicks());
}
SessionTest() : receiver_endpoint_(media::cast::test::GetFreeLocalPort()) {}
~SessionTest() override { scoped_task_environment_.RunUntilIdle(); }
protected:
// SessionObserver implementation.
MOCK_METHOD1(OnError, void(SessionError));
MOCK_METHOD0(DidStart, void());
......@@ -53,6 +52,7 @@ class SessionTest : public ResourceProvider,
// ResourceProvider implementation.
MOCK_METHOD0(OnGetVideoCaptureHost, void());
MOCK_METHOD0(OnGetNetworkContext, void());
MOCK_METHOD0(OnCreateAudioStream, void());
// Called when the OFFER message is sent.
MOCK_METHOD0(OnOffer, void());
......@@ -84,18 +84,37 @@ class SessionTest : public ResourceProvider,
OnGetNetworkContext();
}
void CreateAudioStream(AudioStreamCreatorClient* client,
const media::AudioParameters& params,
uint32_t total_segments) {
OnCreateAudioStream();
}
void SendAnswer() {
FrameSenderConfig config = MirrorSettings::GetDefaultVideoConfig(
media::cast::RtpPayloadType::VIDEO_VP8,
media::cast::Codec::CODEC_VIDEO_VP8);
ASSERT_TRUE(session_);
std::vector<FrameSenderConfig> audio_configs;
std::vector<FrameSenderConfig> video_configs;
video_configs.emplace_back(config);
if (sink_capability_ != DeviceCapability::VIDEO_ONLY) {
FrameSenderConfig audio_config = MirrorSettings::GetDefaultAudioConfig(
media::cast::RtpPayloadType::AUDIO_OPUS,
media::cast::Codec::CODEC_AUDIO_OPUS);
audio_configs.emplace_back(audio_config);
}
if (sink_capability_ != DeviceCapability::AUDIO_ONLY) {
FrameSenderConfig video_config = MirrorSettings::GetDefaultVideoConfig(
media::cast::RtpPayloadType::VIDEO_VP8,
media::cast::Codec::CODEC_VIDEO_VP8);
video_configs.emplace_back(video_config);
}
auto answer = std::make_unique<Answer>();
answer->udp_port = receiver_endpoint_.port();
answer->send_indexes.push_back(0);
answer->ssrcs.push_back(32);
answer->cast_mode = "mirroring";
const int number_of_configs = audio_configs.size() + video_configs.size();
for (int i = 0; i < number_of_configs; ++i) {
answer->send_indexes.push_back(i);
answer->ssrcs.push_back(31 + i); // Arbitrary receiver SSRCs.
}
ReceiverResponse response;
response.result = "ok";
......@@ -103,15 +122,14 @@ class SessionTest : public ResourceProvider,
response.sequence_number = offer_sequence_number_;
response.answer = std::move(answer);
session_->OnAnswer("mirroring", std::vector<FrameSenderConfig>(),
video_configs, response);
session_->OnAnswer("mirroring", audio_configs, video_configs, response);
}
protected:
void CreateSession() {
void CreateSession(DeviceCapability sink_capability) {
sink_capability_ = sink_capability;
CastSinkInfo sink_info;
sink_info.ip_address = receiver_endpoint_.address();
sink_info.capability = DeviceCapability::AUDIO_AND_VIDEO;
sink_info.capability = sink_capability_;
// Expect to receive the OFFER message when the session is created.
base::RunLoop run_loop;
EXPECT_CALL(*this, OnGetNetworkContext()).Times(1);
......@@ -123,35 +141,36 @@ class SessionTest : public ResourceProvider,
run_loop.Run();
}
base::test::ScopedTaskEnvironment scoped_task_environment_;
const net::IPEndPoint receiver_endpoint_;
base::SimpleTestTickClock testing_clock_;
std::unique_ptr<Session> session_;
std::unique_ptr<FakeVideoCaptureHost> video_host_;
std::unique_ptr<MockNetworkContext> network_context_;
int32_t offer_sequence_number_ = -1;
private:
DISALLOW_COPY_AND_ASSIGN(SessionTest);
};
TEST_F(SessionTest, Mirroring) {
CreateSession();
scoped_task_environment_.RunUntilIdle();
{
void StartSession() {
// Expect the mirroring session to start after receiving the ANSWER message.
base::RunLoop run_loop;
EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(1);
const int num_to_get_video_host =
sink_capability_ == DeviceCapability::AUDIO_ONLY ? 0 : 1;
const int num_to_create_audio_stream =
sink_capability_ == DeviceCapability::VIDEO_ONLY ? 0 : 1;
EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(num_to_get_video_host);
EXPECT_CALL(*this, OnCreateAudioStream()).Times(num_to_create_audio_stream);
EXPECT_CALL(*this, OnError(_)).Times(0);
EXPECT_CALL(*this, DidStart())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
SendAnswer();
run_loop.Run();
scoped_task_environment_.RunUntilIdle();
}
scoped_task_environment_.RunUntilIdle();
{
void StopSession() {
base::RunLoop run_loop;
if (video_host_)
EXPECT_CALL(*video_host_, OnStopped()).Times(1);
EXPECT_CALL(*this, DidStop())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
session_.reset();
run_loop.Run();
scoped_task_environment_.RunUntilIdle();
}
void SendVideoFrame() {
ASSERT_TRUE(video_host_);
base::RunLoop run_loop;
// Expect to send out some UDP packets.
EXPECT_CALL(*network_context_->udp_socket(), OnSend())
......@@ -159,38 +178,58 @@ TEST_F(SessionTest, Mirroring) {
EXPECT_CALL(*video_host_, ReleaseBuffer(_, _, _))
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
// Send one video frame to the consumer.
video_host_->SendOneFrame(gfx::Size(64, 32), testing_clock_.NowTicks());
video_host_->SendOneFrame(gfx::Size(64, 32), base::TimeTicks::Now());
run_loop.Run();
scoped_task_environment_.RunUntilIdle();
}
scoped_task_environment_.RunUntilIdle();
// Stop the session.
{
void SignalAnswerTimeout() {
base::RunLoop run_loop;
EXPECT_CALL(*video_host_, OnStopped()).Times(1);
EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(0);
EXPECT_CALL(*this, OnCreateAudioStream()).Times(0);
EXPECT_CALL(*this, OnError(ANSWER_TIME_OUT)).Times(1);
EXPECT_CALL(*this, DidStop())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
session_.reset();
session_->OnAnswer("mirroring", std::vector<FrameSenderConfig>(),
std::vector<FrameSenderConfig>(), ReceiverResponse());
run_loop.Run();
scoped_task_environment_.RunUntilIdle();
}
scoped_task_environment_.RunUntilIdle();
private:
base::test::ScopedTaskEnvironment scoped_task_environment_;
const net::IPEndPoint receiver_endpoint_;
std::unique_ptr<Session> session_;
std::unique_ptr<FakeVideoCaptureHost> video_host_;
std::unique_ptr<MockNetworkContext> network_context_;
DeviceCapability sink_capability_ = DeviceCapability::AUDIO_ONLY;
int32_t offer_sequence_number_ = -1;
DISALLOW_COPY_AND_ASSIGN(SessionTest);
};
TEST_F(SessionTest, StartAudioOnlyMirroring) {
CreateSession(DeviceCapability::AUDIO_ONLY);
StartSession();
StopSession();
}
TEST_F(SessionTest, StartAudioAndVideoMirroring) {
CreateSession(DeviceCapability::AUDIO_AND_VIDEO);
StartSession();
StopSession();
}
TEST_F(SessionTest, VideoMirroring) {
CreateSession(DeviceCapability::VIDEO_ONLY);
StartSession();
SendVideoFrame();
StopSession();
}
TEST_F(SessionTest, AnswerTimeout) {
CreateSession();
scoped_task_environment_.RunUntilIdle();
{
// Expect error.
base::RunLoop run_loop;
EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(0);
EXPECT_CALL(*this, DidStop()).Times(1);
EXPECT_CALL(*this, OnError(ANSWER_TIME_OUT))
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
session_->OnAnswer("mirroring", std::vector<FrameSenderConfig>(),
std::vector<FrameSenderConfig>(), ReceiverResponse());
run_loop.Run();
}
scoped_task_environment_.RunUntilIdle();
CreateSession(DeviceCapability::AUDIO_AND_VIDEO);
SignalAnswerTimeout();
}
} // namespace mirroring