Commit 73f4320f authored by Xiangjun Zhang's avatar Xiangjun Zhang Committed by Commit Bot

MirroringService: Add OFFER/ANSWER handling and MirrorSettings.

Add implementation to create OFFER message and handle ANSWER response.
Add MirrorSettings, which holds a set of default settings.

Bug: 734672
Change-Id: Ia6b9af5ac66e1f8f9496c0afaa335a22f40bb535
Reviewed-on: https://chromium-review.googlesource.com/1017235
Commit-Queue: Xiangjun Zhang <xjz@chromium.org>
Reviewed-by: default avatarDavid Benjamin <davidben@chromium.org>
Reviewed-by: default avatarYuri Wiitala <miu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555837}
parent be7af108
......@@ -6,6 +6,7 @@ import("//testing/test.gni")
source_set("interface") {
sources = [
"interface.cc",
"interface.h",
]
......@@ -26,6 +27,8 @@ source_set("service") {
sources = [
"message_dispatcher.cc",
"message_dispatcher.h",
"mirror_settings.cc",
"mirror_settings.h",
"receiver_response.cc",
"receiver_response.h",
"rtp_stream.cc",
......@@ -46,6 +49,7 @@ source_set("service") {
deps = [
":interface",
"//crypto",
"//media",
"//media/capture/mojom:video_capture",
"//media/cast:common",
......
include_rules = [
"+crypto",
"+net",
"+services/network/public/mojom",
"+services/network/test",
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/mirroring/service/interface.h"
namespace mirroring {
// All members of CastSinkInfo are default-constructible and copyable, so the
// special members are defaulted out-of-line — consistent with the copy
// constructor, which was already `= default`, and lets the compiler treat
// them as trivial where possible.
CastSinkInfo::CastSinkInfo() = default;

CastSinkInfo::~CastSinkInfo() = default;

CastSinkInfo::CastSinkInfo(const CastSinkInfo& sink_info) = default;
} // namespace mirroring
......@@ -6,7 +6,6 @@
#define COMPONENTS_MIRRORING_SERVICE_INTERFACE_H_
#include <string>
#include <vector>
#include "base/callback.h"
#include "base/values.h"
......@@ -22,14 +21,22 @@ namespace mirroring {
// Errors occurred in a mirroring session.
enum SessionError {
SESSION_START_ERROR, // Error occurred while starting.
AUDIO_CAPTURE_ERROR, // Error occurred in audio capturing.
VIDEO_CAPTURE_ERROR, // Error occurred in video capturing.
CAST_STREAMING_ERROR, // Error occurred in cast streaming.
CAST_TRANSPORT_ERROR, // Error occurred in cast transport.
ANSWER_TIME_OUT, // ANSWER timeout.
ANSWER_NOT_OK, // Not OK answer response.
ANSWER_MISMATCHED_CAST_MODE, // ANSWER cast mode mismatched.
ANSWER_MISMATCHED_SSRC_LENGTH, // ANSWER ssrc length mismatched with indexes.
ANSWER_SELECT_MULTIPLE_AUDIO, // Multiple audio streams selected by ANSWER.
ANSWER_SELECT_MULTIPLE_VIDEO, // Multiple video streams selected by ANSWER.
ANSWER_SELECT_INVALID_INDEX, // Invalid index was selected.
ANSWER_NO_AUDIO_OR_VIDEO, // ANSWER not select audio or video.
AUDIO_CAPTURE_ERROR, // Error occurred in audio capturing.
VIDEO_CAPTURE_ERROR, // Error occurred in video capturing.
RTP_STREAM_ERROR, // Error reported by RtpStream.
ENCODING_ERROR, // Error occurred in encoding.
CAST_TRANSPORT_ERROR, // Error occurred in cast transport.
};
enum SessionType {
enum DeviceCapability {
AUDIO_ONLY,
VIDEO_ONLY,
AUDIO_AND_VIDEO,
......@@ -49,9 +56,20 @@ class CastMessageChannel {
virtual void Send(const CastMessage& message) = 0;
};
class SessionClient {
// Describes a Cast receiver (sink) that a mirroring session connects to.
struct CastSinkInfo {
  CastSinkInfo();
  ~CastSinkInfo();
  CastSinkInfo(const CastSinkInfo& sink_info);

  // Network address of the receiver; used as the UDP streaming destination.
  net::IPAddress ip_address;
  // Device model name — presumably reported by device discovery; confirm.
  std::string model_name;
  // Human-readable device name — presumably shown in UI; confirm.
  std::string friendly_name;
  // Whether the receiver handles audio only, video only, or both.
  DeviceCapability capability;
};
class SessionObserver {
public:
virtual ~SessionClient() {}
virtual ~SessionObserver() {}
// Called when error occurred. The session will be stopped.
virtual void OnError(SessionError error) = 0;
......@@ -61,25 +79,19 @@ class SessionClient {
// Called when the session is stopped.
virtual void DidStop() = 0;
};
class ResourceProvider {
public:
virtual ~ResourceProvider() {}
virtual void GetVideoCaptureHost(
media::mojom::VideoCaptureHostRequest request) = 0;
virtual void GetNetWorkContext(
virtual void GetNetworkContext(
network::mojom::NetworkContextRequest request) = 0;
// TODO(xjz): Add interface to get AudioCaptureHost.
// TODO(xjz): Add interface for HW encoder profiles query and VEA create
// support.
// TODO(xjz): Change this with an interface to send/receive messages to/from
// receiver through cast channel, and generate/parse the OFFER/ANSWER message
// in Mirroring service.
using GetAnswerCallback = base::OnceCallback<void(
const media::cast::FrameSenderConfig& audio_config,
const media::cast::FrameSenderConfig& video_config)>;
virtual void DoOfferAnswerExchange(
const std::vector<media::cast::FrameSenderConfig>& audio_configs,
const std::vector<media::cast::FrameSenderConfig>& video_configs,
GetAnswerCallback callback) = 0;
};
} // namespace mirroring
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/mirroring/service/mirror_settings.h"
#include <algorithm>
using media::cast::FrameSenderConfig;
using media::cast::Codec;
using media::cast::RtpPayloadType;
namespace mirroring {
namespace {
// Starting end-to-end latency for animated content.
constexpr base::TimeDelta kAnimatedPlayoutDelay =
base::TimeDelta::FromMilliseconds(400);
// Minimum end-to-end latency. This allows cast streaming to adaptively lower
// latency in interactive streaming scenarios.
// TODO(miu): This was 120 before stable launch, but we got user feedback that
// this was causing audio drop-outs. So, we need to fix the Cast Streaming
// implementation before lowering this setting.
constexpr base::TimeDelta kMinPlayoutDelay =
base::TimeDelta::FromMilliseconds(400);
// Maximum end-to-end latency.
constexpr base::TimeDelta kMaxPlayoutDelay =
base::TimeDelta::FromMilliseconds(800);
constexpr int kAudioTimebase = 48000;
constexpr int kVidoTimebase = 90000;
constexpr int kAudioChannels = 2;
constexpr int kAudioFramerate = 100; // 100 FPS for 10ms packets.
constexpr int kMinVideoBitrate = 300000;
constexpr int kMaxVideoBitrate = 5000000;
constexpr int kAudioBitrate = 0; // 0 means automatic.
constexpr int kMaxFrameRate = 30; // The maximum frame rate for captures.
constexpr int kMaxWidth = 1920; // Maximum video width in pixels.
constexpr int kMaxHeight = 1080; // Maximum video height in pixels.
constexpr int kMinWidth = 180; // Minimum video frame width in pixels.
constexpr int kMinHeight = 180; // Minimum video frame height in pixels.
} // namespace
// Initializes the resolution constraints to the built-in defaults; callers
// may tighten the maximums later via SetResolutionContraints(). The minimums
// are const and never change.
MirrorSettings::MirrorSettings()
    : min_width_(kMinWidth),
      min_height_(kMinHeight),
      max_width_(kMaxWidth),
      max_height_(kMaxHeight) {}

MirrorSettings::~MirrorSettings() {}
// static
FrameSenderConfig MirrorSettings::GetDefaultAudioConfig(
    RtpPayloadType payload_type,
    Codec codec) {
  // Assemble an audio sender config from the file-local defaults. The SSRC
  // values here are placeholders — session setup overwrites them.
  FrameSenderConfig audio_config;
  audio_config.sender_ssrc = 1;
  audio_config.receiver_ssrc = 2;
  audio_config.rtp_payload_type = payload_type;
  audio_config.codec = codec;
  audio_config.min_playout_delay = kMinPlayoutDelay;
  audio_config.max_playout_delay = kMaxPlayoutDelay;
  audio_config.animated_playout_delay = kAnimatedPlayoutDelay;
  audio_config.rtp_timebase = kAudioTimebase;
  audio_config.channels = kAudioChannels;
  // kAudioBitrate is 0, which selects automatic bitrate.
  audio_config.min_bitrate = kAudioBitrate;
  audio_config.start_bitrate = kAudioBitrate;
  audio_config.max_bitrate = kAudioBitrate;
  audio_config.max_frame_rate = kAudioFramerate;  // 10 ms audio frames.
  return audio_config;
}
// static
FrameSenderConfig MirrorSettings::GetDefaultVideoConfig(
    RtpPayloadType payload_type,
    Codec codec) {
  // Assemble a video sender config from the file-local defaults. The SSRC
  // values here are placeholders — session setup overwrites them.
  FrameSenderConfig video_config;
  video_config.sender_ssrc = 11;
  video_config.receiver_ssrc = 12;
  video_config.rtp_payload_type = payload_type;
  video_config.codec = codec;
  video_config.min_playout_delay = kMinPlayoutDelay;
  video_config.max_playout_delay = kMaxPlayoutDelay;
  video_config.animated_playout_delay = kAnimatedPlayoutDelay;
  video_config.rtp_timebase = kVidoTimebase;
  video_config.channels = 1;
  // Video starts at the minimum bitrate and may ramp up to the maximum.
  video_config.min_bitrate = kMinVideoBitrate;
  video_config.start_bitrate = kMinVideoBitrate;
  video_config.max_bitrate = kMaxVideoBitrate;
  video_config.max_frame_rate = kMaxFrameRate;
  return video_config;
}
// Overrides the maximum capture dimensions. Values below the built-in
// minimums are clamped up so the maximums never drop below the minimums.
// (Name "Contraints" (sic) matches the declaration in mirror_settings.h.)
void MirrorSettings::SetResolutionContraints(int max_width, int max_height) {
  max_width_ = (max_width > min_width_) ? max_width : min_width_;
  max_height_ = (max_height > min_height_) ? max_height : min_height_;
}
} // namespace mirroring
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_MIRRORING_SERVICE_MIRROR_SETTINGS_H_
#define COMPONENTS_MIRRORING_SERVICE_MIRROR_SETTINGS_H_
#include "base/time/time.h"
#include "media/cast/cast_config.h"
namespace mirroring {
// Holds the default settings for a mirroring session. This class provides the
// audio/video configs that this sender supports. And also provides the
// audio/video constraints used for capturing.
// TODO(xjz): Add the function to generate the audio/video constraints for
// capturing.
// TODO(xjz): Add setters to the settings that might be overridden by integration
// tests.
class MirrorSettings {
 public:
  MirrorSettings();
  ~MirrorSettings();

  // Get the audio/video config with given codec.
  // The returned configs carry placeholder SSRCs plus the default
  // bitrate/latency settings defined in mirror_settings.cc.
  static media::cast::FrameSenderConfig GetDefaultAudioConfig(
      media::cast::RtpPayloadType payload_type,
      media::cast::Codec codec);
  static media::cast::FrameSenderConfig GetDefaultVideoConfig(
      media::cast::RtpPayloadType payload_type,
      media::cast::Codec codec);

  // Call to override the default resolution settings. Values below the
  // built-in minimums are clamped up to the minimums.
  void SetResolutionContraints(int max_width, int max_height);

  int max_width() const { return max_width_; }
  int max_height() const { return max_height_; }

 private:
  const int min_width_;
  const int min_height_;
  int max_width_;
  int max_height_;

  // NOTE(review): DISALLOW_COPY_AND_ASSIGN is defined in "base/macros.h",
  // which this header does not include directly — presumably pulled in
  // transitively; confirm and add the include explicitly.
  DISALLOW_COPY_AND_ASSIGN(MirrorSettings);
};
} // namespace mirroring
#endif // COMPONENTS_MIRRORING_SERVICE_MIRROR_SETTINGS_H_
......@@ -8,7 +8,6 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/values.h"
#include "build/build_config.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/sender/audio_sender.h"
......@@ -30,81 +29,6 @@ constexpr base::TimeDelta kRefreshInterval =
// limit (60 * 250ms = 15 seconds), refresh frame requests will stop being made.
constexpr int kMaxConsecutiveRefreshFrames = 60;
FrameSenderConfig DefaultOpusConfig() {
FrameSenderConfig config;
config.rtp_payload_type = RtpPayloadType::AUDIO_OPUS;
config.sender_ssrc = 1;
config.receiver_ssrc = 2;
config.rtp_timebase = media::cast::kDefaultAudioSamplingRate;
config.channels = 2;
config.min_bitrate = config.max_bitrate = config.start_bitrate =
media::cast::kDefaultAudioEncoderBitrate;
config.max_frame_rate = 100; // 10 ms audio frames
config.codec = media::cast::CODEC_AUDIO_OPUS;
return config;
}
FrameSenderConfig DefaultVp8Config() {
FrameSenderConfig config;
config.rtp_payload_type = RtpPayloadType::VIDEO_VP8;
config.sender_ssrc = 11;
config.receiver_ssrc = 12;
config.rtp_timebase = media::cast::kVideoFrequency;
config.channels = 1;
config.max_bitrate = media::cast::kDefaultMaxVideoBitrate;
config.min_bitrate = media::cast::kDefaultMinVideoBitrate;
config.max_frame_rate = media::cast::kDefaultMaxFrameRate;
config.codec = media::cast::CODEC_VIDEO_VP8;
return config;
}
FrameSenderConfig DefaultH264Config() {
FrameSenderConfig config;
config.rtp_payload_type = RtpPayloadType::VIDEO_H264;
config.sender_ssrc = 11;
config.receiver_ssrc = 12;
config.rtp_timebase = media::cast::kVideoFrequency;
config.channels = 1;
config.max_bitrate = media::cast::kDefaultMaxVideoBitrate;
config.min_bitrate = media::cast::kDefaultMinVideoBitrate;
config.max_frame_rate = media::cast::kDefaultMaxFrameRate;
config.codec = media::cast::CODEC_VIDEO_H264;
return config;
}
bool IsHardwareVP8EncodingSupported(RtpStreamClient* client) {
// Query for hardware VP8 encoder support.
const std::vector<media::VideoEncodeAccelerator::SupportedProfile>
vea_profiles = client->GetSupportedVideoEncodeAcceleratorProfiles();
for (const auto& vea_profile : vea_profiles) {
if (vea_profile.profile >= media::VP8PROFILE_MIN &&
vea_profile.profile <= media::VP8PROFILE_MAX) {
return true;
}
}
return false;
}
bool IsHardwareH264EncodingSupported(RtpStreamClient* client) {
// Query for hardware H.264 encoder support.
//
// TODO(miu): Look into why H.264 hardware encoder on MacOS is broken.
// http://crbug.com/596674
// TODO(emircan): Look into HW encoder initialization issues on Win.
// https://crbug.com/636064
#if !defined(OS_MACOSX) && !defined(OS_WIN)
const std::vector<media::VideoEncodeAccelerator::SupportedProfile>
vea_profiles = client->GetSupportedVideoEncodeAcceleratorProfiles();
for (const auto& vea_profile : vea_profiles) {
if (vea_profile.profile >= media::H264PROFILE_MIN &&
vea_profile.profile <= media::H264PROFILE_MAX) {
return true;
}
}
#endif // !defined(OS_MACOSX) && !defined(OS_WIN)
return false;
}
} // namespace
VideoRtpStream::VideoRtpStream(
......@@ -125,24 +49,6 @@ VideoRtpStream::VideoRtpStream(
VideoRtpStream::~VideoRtpStream() {}
// static
std::vector<FrameSenderConfig> VideoRtpStream::GetSupportedConfigs(
RtpStreamClient* client) {
std::vector<FrameSenderConfig> supported_configs;
// Prefer VP8 over H.264 for hardware encoder.
if (IsHardwareVP8EncodingSupported(client))
supported_configs.push_back(DefaultVp8Config());
if (IsHardwareH264EncodingSupported(client))
supported_configs.push_back(DefaultH264Config());
// Propose the default software VP8 encoder, if no hardware encoders are
// available.
if (supported_configs.empty())
supported_configs.push_back(DefaultVp8Config());
return supported_configs;
}
void VideoRtpStream::InsertVideoFrame(
scoped_refptr<media::VideoFrame> video_frame) {
DCHECK(client_);
......@@ -205,11 +111,6 @@ AudioRtpStream::AudioRtpStream(
AudioRtpStream::~AudioRtpStream() {}
// static
std::vector<FrameSenderConfig> AudioRtpStream::GetSupportedConfigs() {
return {DefaultOpusConfig()};
}
void AudioRtpStream::InsertAudio(std::unique_ptr<media::AudioBus> audio_bus,
base::TimeTicks capture_time) {
audio_sender_->InsertAudio(std::move(audio_bus), capture_time);
......
......@@ -44,10 +44,6 @@ class RtpStreamClient {
// The following are for hardware video encoding.
// Query the supported hardware encoding profiles.
virtual media::VideoEncodeAccelerator::SupportedProfiles
GetSupportedVideoEncodeAcceleratorProfiles() = 0;
virtual void CreateVideoEncodeAccelerator(
const media::cast::ReceiveVideoEncodeAcceleratorCallback& callback) = 0;
......@@ -71,9 +67,6 @@ class VideoRtpStream {
base::WeakPtr<RtpStreamClient> client);
~VideoRtpStream();
static std::vector<media::cast::FrameSenderConfig> GetSupportedConfigs(
RtpStreamClient* client);
// Called by VideoCaptureClient when a video frame is received.
// |video_frame| is required to provide REFERENCE_TIME in the metadata.
void InsertVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
......@@ -115,8 +108,6 @@ class AudioRtpStream {
base::WeakPtr<RtpStreamClient> client);
~AudioRtpStream();
static std::vector<media::cast::FrameSenderConfig> GetSupportedConfigs();
// Called by AudioCaptureClient when new audio data is available.
void InsertAudio(std::unique_ptr<media::AudioBus> audio_bus,
base::TimeTicks estimated_capture_time);
......
......@@ -43,10 +43,6 @@ class DummyClient final : public RtpStreamClient {
void CreateVideoEncodeMemory(
size_t size,
const media::cast::ReceiveVideoEncodeMemoryCallback& callback) override {}
media::VideoEncodeAccelerator::SupportedProfiles
GetSupportedVideoEncodeAcceleratorProfiles() override {
return media::VideoEncodeAccelerator::SupportedProfiles();
}
base::WeakPtr<RtpStreamClient> GetWeakPtr() {
return weak_factory_.GetWeakPtr();
......
......@@ -5,12 +5,19 @@
#include "components/mirroring/service/session.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_info.h"
#include "base/task_scheduler/post_task.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
#include "base/values.h"
#include "build/build_config.h"
#include "components/mirroring/service/udp_socket_client.h"
#include "components/mirroring/service/video_capture_client.h"
#include "crypto/random.h"
#include "media/cast/net/cast_transport.h"
#include "media/cast/sender/audio_sender.h"
#include "media/cast/sender/video_sender.h"
......@@ -21,6 +28,7 @@
using media::cast::FrameSenderConfig;
using media::cast::RtpPayloadType;
using media::cast::CastTransportStatus;
using media::cast::Codec;
using media::cast::FrameEvent;
using media::cast::PacketEvent;
using media::cast::OperationalStatus;
......@@ -39,6 +47,16 @@ constexpr base::TimeDelta kSendEventsInterval = base::TimeDelta::FromSeconds(1);
constexpr base::TimeDelta kOfferAnswerExchangeTimeout =
base::TimeDelta::FromSeconds(15);
// Used for OFFER/ANSWER message exchange. Some receivers will error out on
// payloadType values other than the ones hard-coded here.
constexpr int kAudioPayloadType = 127;
constexpr int kVideoPayloadType = 96;

// Disjoint SSRC ranges for the sender-side audio and video RTP streams.
// Plain integer literals are used instead of the previous floating-point
// literals (5e5, 10e5) so no double-to-int conversion is involved.
constexpr int kAudioSsrcMin = 1;
constexpr int kAudioSsrcMax = 500000;
constexpr int kVideoSsrcMin = 500001;
constexpr int kVideoSsrcMax = 1000000;
class TransportClient final : public media::cast::CastTransport::Client {
public:
explicit TransportClient(Session* session) : session_(session) {}
......@@ -67,142 +85,166 @@ class TransportClient final : public media::cast::CastTransport::Client {
DISALLOW_COPY_AND_ASSIGN(TransportClient);
};
} // namespace
Session::Session(SessionType session_type,
const net::IPEndPoint& receiver_endpoint,
SessionClient* client)
: client_(client), weak_factory_(this) {
DCHECK(client_);
std::vector<FrameSenderConfig> audio_configs;
std::vector<FrameSenderConfig> video_configs;
if (session_type != SessionType::VIDEO_ONLY)
audio_configs = AudioRtpStream::GetSupportedConfigs();
if (session_type != SessionType::AUDIO_ONLY)
video_configs = VideoRtpStream::GetSupportedConfigs(this);
start_timeout_timer_.Start(
FROM_HERE, kOfferAnswerExchangeTimeout,
base::BindRepeating(&Session::OnOfferAnswerExchangeTimeout,
weak_factory_.GetWeakPtr()));
client_->DoOfferAnswerExchange(
audio_configs, video_configs,
base::BindOnce(&Session::StartInternal, weak_factory_.GetWeakPtr(),
receiver_endpoint));
// Returns |length| cryptographically secure random bytes as a std::string.
std::string MakeRandomString(size_t length) {
  std::string bytes(length, ' ');
  DCHECK_EQ(length, bytes.size());
  crypto::RandBytes(base::string_as_array(&bytes), length);
  return bytes;
}
Session::~Session() {
StopSession();
// Returns how many threads the encoder may use: roughly half of the cores
// (rounded up), capped at 8, so encoding does not saturate CPU utilization.
// On a lower-end system with only 1 or 2 cores this yields a single thread.
int NumberOfEncodeThreads() {
  const int half_the_cores = (base::SysInfo::NumberOfProcessors() + 1) / 2;
  return std::min(8, half_the_cores);
}
void Session::StartInternal(const net::IPEndPoint& receiver_endpoint,
const FrameSenderConfig& audio_config,
const FrameSenderConfig& video_config) {
DVLOG(1) << __func__;
start_timeout_timer_.Stop();
DCHECK(!video_capture_client_);
DCHECK(!cast_transport_);
DCHECK(!audio_stream_);
DCHECK(!video_stream_);
DCHECK(!cast_environment_);
DCHECK(client_);
if (audio_config.rtp_payload_type == RtpPayloadType::REMOTE_AUDIO ||
video_config.rtp_payload_type == RtpPayloadType::REMOTE_VIDEO) {
NOTIMPLEMENTED(); // TODO(xjz): Add support for media remoting.
return;
// Scan profiles for hardware VP8 encoder support. Returns true if any entry
// in |profiles| falls within the VP8 profile range.
bool IsHardwareVP8EncodingSupported(
    const std::vector<media::VideoEncodeAccelerator::SupportedProfile>&
        profiles) {
  for (const auto& entry : profiles) {
    const bool is_vp8_profile = entry.profile >= media::VP8PROFILE_MIN &&
                                entry.profile <= media::VP8PROFILE_MAX;
    if (is_vp8_profile)
      return true;
  }
  return false;
}
const bool has_audio =
(audio_config.rtp_payload_type < RtpPayloadType::AUDIO_LAST) &&
(audio_config.rtp_payload_type >= RtpPayloadType::FIRST);
const bool has_video =
(video_config.rtp_payload_type > RtpPayloadType::AUDIO_LAST) &&
(video_config.rtp_payload_type < RtpPayloadType::LAST);
if (!has_audio && !has_video) {
VLOG(1) << "Incorrect ANSWER message: No audio or Video.";
client_->OnError(SESSION_START_ERROR);
return;
// Scan profiles for hardware H.264 encoder support. Returns true if any entry
// in |profiles| falls within the H.264 profile range; always false on MacOS
// and Windows (see TODOs below).
bool IsHardwareH264EncodingSupported(
    const std::vector<media::VideoEncodeAccelerator::SupportedProfile>&
        profiles) {
  // TODO(miu): Look into why H.264 hardware encoder on MacOS is broken.
  // http://crbug.com/596674
  // TODO(emircan): Look into HW encoder initialization issues on Win.
  // https://crbug.com/636064
#if !defined(OS_MACOSX) && !defined(OS_WIN)
  for (const auto& entry : profiles) {
    const bool is_h264_profile = entry.profile >= media::H264PROFILE_MIN &&
                                 entry.profile <= media::H264PROFILE_MAX;
    if (is_h264_profile)
      return true;
  }
#endif  // !defined(OS_MACOSX) && !defined(OS_WIN)
  return false;
}
audio_encode_thread_ = base::CreateSingleThreadTaskRunnerWithTraits(
{base::TaskPriority::USER_BLOCKING,
base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
base::SingleThreadTaskRunnerThreadMode::DEDICATED);
video_encode_thread_ = base::CreateSingleThreadTaskRunnerWithTraits(
{base::TaskPriority::USER_BLOCKING,
base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
base::SingleThreadTaskRunnerThreadMode::DEDICATED);
cast_environment_ = new media::cast::CastEnvironment(
base::DefaultTickClock::GetInstance(),
base::ThreadTaskRunnerHandle::Get(), audio_encode_thread_,
video_encode_thread_);
network::mojom::NetworkContextPtr network_context;
client_->GetNetWorkContext(mojo::MakeRequest(&network_context));
auto udp_client = std::make_unique<UdpSocketClient>(
receiver_endpoint, std::move(network_context),
base::BindOnce(&Session::ReportError, weak_factory_.GetWeakPtr(),
SessionError::CAST_TRANSPORT_ERROR));
cast_transport_ = media::cast::CastTransport::Create(
cast_environment_->Clock(), kSendEventsInterval,
std::make_unique<TransportClient>(this), std::move(udp_client),
base::ThreadTaskRunnerHandle::Get());
// Appends a copy of |config| to |config_list| after stamping it with the
// given sender SSRC and AES crypto material.
void AddSenderConfig(int32_t sender_ssrc,
                     FrameSenderConfig config,
                     const std::string& aes_key,
                     const std::string& aes_iv,
                     std::vector<FrameSenderConfig>* config_list) {
  config.sender_ssrc = sender_ssrc;
  config.aes_key = aes_key;
  config.aes_iv_mask = aes_iv;
  config_list->emplace_back(config);
}
if (has_audio) {
auto audio_sender = std::make_unique<media::cast::AudioSender>(
cast_environment_, audio_config,
base::BindRepeating(&Session::OnEncoderStatusChange,
weak_factory_.GetWeakPtr()),
cast_transport_.get());
audio_stream_ = std::make_unique<AudioRtpStream>(
std::move(audio_sender), weak_factory_.GetWeakPtr());
// TODO(xjz): Start audio capturing.
NOTIMPLEMENTED();
// Generate the stream object from |config| and add it to |stream_list|.
// The emitted dictionary is one entry of the "streams" list in the OFFER
// message sent to the receiver; key names must match that schema exactly.
void AddStreamObject(int stream_index,
                     const std::string& codec_name,
                     const FrameSenderConfig& config,
                     const MirrorSettings& mirror_settings,
                     base::Value::ListStorage* stream_list) {
  base::Value stream(base::Value::Type::DICTIONARY);
  stream.SetKey("index", base::Value(stream_index));
  stream.SetKey("codecName", base::Value(codec_name));
  stream.SetKey("rtpProfile", base::Value("cast"));
  const bool is_audio =
      (config.rtp_payload_type <= media::cast::RtpPayloadType::AUDIO_LAST);
  stream.SetKey("rtpPayloadType",
                base::Value(is_audio ? kAudioPayloadType : kVideoPayloadType));
  // Use named casts (not C-style casts) for the narrowing conversions below.
  stream.SetKey("ssrc", base::Value(static_cast<int>(config.sender_ssrc)));
  stream.SetKey(
      "targetDelay",
      base::Value(
          static_cast<int>(config.animated_playout_delay.InMilliseconds())));
  stream.SetKey("aesKey", base::Value(base::HexEncode(config.aes_key.data(),
                                                      config.aes_key.size())));
  stream.SetKey("aesIvMask",
                base::Value(base::HexEncode(config.aes_iv_mask.data(),
                                            config.aes_iv_mask.size())));
  // timeBase is expressed as a rational string, e.g. "1/48000".
  stream.SetKey("timeBase",
                base::Value("1/" + std::to_string(config.rtp_timebase)));
  stream.SetKey("receiverRtcpEventLog", base::Value(true));
  stream.SetKey("rtpExtensions", base::Value("adaptive_playout_delay"));
  if (is_audio) {
    // Note on "AUTO" bitrate calculation: This is based on libopus source
    // at the time of this writing. Internally, it uses the following math:
    //
    //   packet_overhead_bps = 60 bits * num_packets_in_one_second
    //   approx_encoded_signal_bps = frequency * channels
    //   estimated_bps = packet_overhead_bps + approx_encoded_signal_bps
    //
    // For 100 packets/sec at 48 kHz and 2 channels, this is 102kbps.
    const int bitrate = config.max_bitrate > 0
                            ? config.max_bitrate
                            : (60 * config.max_frame_rate +
                               config.rtp_timebase * config.channels);
    stream.SetKey("type", base::Value("audio_source"));
    stream.SetKey("bitRate", base::Value(bitrate));
    stream.SetKey("sampleRate", base::Value(config.rtp_timebase));
    stream.SetKey("channels", base::Value(config.channels));
  } else /* is video */ {
    stream.SetKey("type", base::Value("video_source"));
    stream.SetKey("renderMode", base::Value("video"));
    // maxFrameRate is a rational string, e.g. "30000/1000" for 30 FPS.
    stream.SetKey("maxFrameRate",
                  base::Value(std::to_string(static_cast<int>(
                                  config.max_frame_rate * 1000)) +
                              "/1000"));
    stream.SetKey("maxBitRate", base::Value(config.max_bitrate));
    // Only a single resolution (the session maximum) is advertised.
    base::Value::ListStorage resolutions;
    base::Value resolution(base::Value::Type::DICTIONARY);
    resolution.SetKey("width", base::Value(mirror_settings.max_width()));
    resolution.SetKey("height", base::Value(mirror_settings.max_height()));
    resolutions.emplace_back(std::move(resolution));
    stream.SetKey("resolutions", base::Value(resolutions));
  }
  stream_list->emplace_back(std::move(stream));
}
if (has_video) {
auto video_sender = std::make_unique<media::cast::VideoSender>(
cast_environment_, video_config,
base::BindRepeating(&Session::OnEncoderStatusChange,
weak_factory_.GetWeakPtr()),
base::BindRepeating(&Session::CreateVideoEncodeAccelerator,
weak_factory_.GetWeakPtr()),
base::BindRepeating(&Session::CreateVideoEncodeMemory,
weak_factory_.GetWeakPtr()),
cast_transport_.get(),
base::BindRepeating(&Session::SetTargetPlayoutDelay,
weak_factory_.GetWeakPtr()));
video_stream_ = std::make_unique<VideoRtpStream>(
std::move(video_sender), weak_factory_.GetWeakPtr());
media::mojom::VideoCaptureHostPtr video_host;
client_->GetVideoCaptureHost(mojo::MakeRequest(&video_host));
video_capture_client_ =
std::make_unique<VideoCaptureClient>(std::move(video_host));
video_capture_client_->Start(
base::BindRepeating(&VideoRtpStream::InsertVideoFrame,
video_stream_->AsWeakPtr()),
base::BindOnce(&Session::ReportError, weak_factory_.GetWeakPtr(),
SessionError::VIDEO_CAPTURE_ERROR));
}
} // namespace
client_->DidStart();
// Begins a mirroring session to |sink_info|: stores the injected
// dependencies, applies |max_resolution| to the mirror settings, and
// immediately sends the OFFER message to the receiver.
// NOTE(review): |resource_provider| and |outbound_channel| are raw pointers —
// presumably they must outlive this Session; confirm with the owner.
Session::Session(int32_t session_id,
                 const CastSinkInfo& sink_info,
                 const gfx::Size& max_resolution,
                 SessionObserver* observer,
                 ResourceProvider* resource_provider,
                 CastMessageChannel* outbound_channel)
    : session_id_(session_id),
      sink_info_(sink_info),
      observer_(observer),
      resource_provider_(resource_provider),
      // base::Unretained is safe here: |message_dispatcher_| is a member, so
      // the callback it holds cannot outlive |this|.
      message_dispatcher_(outbound_channel,
                          base::BindRepeating(&Session::OnResponseParsingError,
                                              base::Unretained(this))),
      weak_factory_(this) {
  DCHECK(resource_provider_);
  mirror_settings_.SetResolutionContraints(max_resolution.width(),
                                           max_resolution.height());
  CreateAndSendOffer();
}
// Stops the session (if still running) on destruction.
Session::~Session() {
  StopSession();
}
void Session::ReportError(SessionError error) {
DVLOG(1) << __func__ << ": error=" << error;
if (client_)
client_->OnError(error);
if (observer_)
observer_->OnError(error);
StopSession();
}
void Session::StopSession() {
DVLOG(1) << __func__;
if (!client_)
if (!resource_provider_)
return;
weak_factory_.InvalidateWeakPtrs();
start_timeout_timer_.Stop();
audio_encode_thread_ = nullptr;
video_encode_thread_ = nullptr;
video_capture_client_.reset();
......@@ -210,13 +252,16 @@ void Session::StopSession() {
video_stream_.reset();
cast_transport_.reset();
cast_environment_ = nullptr;
client_->DidStop();
client_ = nullptr;
resource_provider_ = nullptr;
if (observer_) {
observer_->DidStop();
observer_ = nullptr;
}
}
void Session::OnError(const std::string& message) {
VLOG(1) << message;
ReportError(SessionError::CAST_STREAMING_ERROR);
ReportError(SessionError::RTP_STREAM_ERROR);
}
void Session::RequestRefreshFrame() {
......@@ -239,13 +284,13 @@ void Session::OnEncoderStatusChange(OperationalStatus status) {
case OperationalStatus::STATUS_CODEC_INIT_FAILED:
case OperationalStatus::STATUS_CODEC_RUNTIME_ERROR:
DVLOG(1) << "OperationalStatus error.";
ReportError(SessionError::CAST_STREAMING_ERROR);
ReportError(SessionError::ENCODING_ERROR);
break;
}
}
media::VideoEncodeAccelerator::SupportedProfiles
Session::GetSupportedVideoEncodeAcceleratorProfiles() {
Session::GetSupportedVeaProfiles() {
// TODO(xjz): Establish GPU channel and query for the supported profiles.
return media::VideoEncodeAccelerator::SupportedProfiles();
}
......@@ -310,6 +355,163 @@ void Session::OnLoggingEventsReceived(
std::move(packet_events));
}
// Handles the receiver's ANSWER for the previously-sent OFFER. Validates the
// response against the offered |audio_configs| and |video_configs|, then on
// success creates the encode threads, cast transport, and RTP streams, and
// notifies the observer. Any validation failure reports a specific
// SessionError (which stops the session) and returns early.
void Session::OnAnswer(const std::string& cast_mode,
                       const std::vector<FrameSenderConfig>& audio_configs,
                       const std::vector<FrameSenderConfig>& video_configs,
                       const ReceiverResponse& response) {
  // A null/unknown response is what gets delivered when no ANSWER arrived.
  if (!response.answer || response.type == ResponseType::UNKNOWN) {
    VLOG(1) << "Received a null ANSWER response.";
    ReportError(ANSWER_TIME_OUT);
    return;
  }
  DCHECK_EQ(ResponseType::ANSWER, response.type);

  if (response.result != "ok") {
    VLOG(1) << "Received an error ANSWER response.";
    ReportError(ANSWER_NOT_OK);
    return;
  }

  const Answer& answer = *response.answer;
  if (answer.cast_mode != cast_mode) {
    VLOG(1) << "Unexpected cast mode=" << answer.cast_mode
            << " while expected mode=" << cast_mode;
    ReportError(ANSWER_MISMATCHED_CAST_MODE);
    return;
  }
  // Each selected send index must pair with exactly one receiver SSRC.
  if (answer.send_indexes.size() != answer.ssrcs.size()) {
    VLOG(1) << "sendIndexes.length != ssrcs.length in ANSWER"
            << " sendIndexes.length=" << answer.send_indexes.size()
            << " ssrcs.length=" << answer.ssrcs.size();
    ReportError(ANSWER_MISMATCHED_SSRC_LENGTH);
    return;
  }

  // Select Audio/Video config from ANSWER. The OFFER laid out streams as
  // [audio_configs..., video_configs...], so an index below |video_start_idx|
  // refers to an audio stream and anything at or above it to a video stream.
  bool has_audio = false;
  bool has_video = false;
  FrameSenderConfig audio_config;
  FrameSenderConfig video_config;
  const int video_start_idx = audio_configs.size();
  const int video_idx_bound = video_configs.size() + video_start_idx;
  for (size_t i = 0; i < answer.send_indexes.size(); ++i) {
    if (answer.send_indexes[i] < 0 ||
        answer.send_indexes[i] >= video_idx_bound) {
      VLOG(1) << "Invalid indexes selected in ANSWER: Select index="
              << answer.send_indexes[i] << " allowed index<" << video_idx_bound;
      ReportError(ANSWER_SELECT_INVALID_INDEX);
      return;
    }
    if (answer.send_indexes[i] < video_start_idx) {
      // Audio
      if (has_audio) {
        VLOG(1) << "Receiver selected audio RTP stream twice in ANSWER";
        ReportError(ANSWER_SELECT_MULTIPLE_AUDIO);
        return;
      }
      audio_config = audio_configs[answer.send_indexes[i]];
      audio_config.receiver_ssrc = answer.ssrcs[i];
      has_audio = true;
    } else {
      // Video
      if (has_video) {
        VLOG(1) << "Receiver selected video RTP stream twice in ANSWER";
        ReportError(ANSWER_SELECT_MULTIPLE_VIDEO);
        return;
      }
      video_config = video_configs[answer.send_indexes[i] - video_start_idx];
      video_config.receiver_ssrc = answer.ssrcs[i];
      video_config.video_codec_params.number_of_encode_threads =
          NumberOfEncodeThreads();
      has_video = true;
    }
  }
  if (!has_audio && !has_video) {
    VLOG(1) << "Incorrect ANSWER message: No audio or Video.";
    ReportError(ANSWER_NO_AUDIO_OR_VIDEO);
    return;
  }

  // Remoting payload types are not supported yet.
  if ((has_audio &&
       audio_config.rtp_payload_type == RtpPayloadType::REMOTE_AUDIO) ||
      (has_video &&
       video_config.rtp_payload_type == RtpPayloadType::REMOTE_VIDEO)) {
    NOTIMPLEMENTED();  // TODO(xjz): Add support for media remoting.
    return;
  }

  // Start streaming.
  // Dedicated task runners keep audio/video encoding off this thread.
  audio_encode_thread_ = base::CreateSingleThreadTaskRunnerWithTraits(
      {base::TaskPriority::USER_BLOCKING,
       base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
      base::SingleThreadTaskRunnerThreadMode::DEDICATED);
  video_encode_thread_ = base::CreateSingleThreadTaskRunnerWithTraits(
      {base::TaskPriority::USER_BLOCKING,
       base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
      base::SingleThreadTaskRunnerThreadMode::DEDICATED);
  cast_environment_ = new media::cast::CastEnvironment(
      base::DefaultTickClock::GetInstance(),
      base::ThreadTaskRunnerHandle::Get(), audio_encode_thread_,
      video_encode_thread_);
  network::mojom::NetworkContextPtr network_context;
  resource_provider_->GetNetworkContext(mojo::MakeRequest(&network_context));
  // The UDP socket targets the receiver at the port chosen in the ANSWER.
  auto udp_client = std::make_unique<UdpSocketClient>(
      net::IPEndPoint(sink_info_.ip_address, answer.udp_port),
      std::move(network_context),
      base::BindOnce(&Session::ReportError, weak_factory_.GetWeakPtr(),
                     SessionError::CAST_TRANSPORT_ERROR));
  cast_transport_ = media::cast::CastTransport::Create(
      cast_environment_->Clock(), kSendEventsInterval,
      std::make_unique<TransportClient>(this), std::move(udp_client),
      base::ThreadTaskRunnerHandle::Get());

  if (has_audio) {
    auto audio_sender = std::make_unique<media::cast::AudioSender>(
        cast_environment_, audio_config,
        base::BindRepeating(&Session::OnEncoderStatusChange,
                            weak_factory_.GetWeakPtr()),
        cast_transport_.get());
    audio_stream_ = std::make_unique<AudioRtpStream>(
        std::move(audio_sender), weak_factory_.GetWeakPtr());
    // TODO(xjz): Start audio capturing.
    NOTIMPLEMENTED();
  }

  if (has_video) {
    auto video_sender = std::make_unique<media::cast::VideoSender>(
        cast_environment_, video_config,
        base::BindRepeating(&Session::OnEncoderStatusChange,
                            weak_factory_.GetWeakPtr()),
        base::BindRepeating(&Session::CreateVideoEncodeAccelerator,
                            weak_factory_.GetWeakPtr()),
        base::BindRepeating(&Session::CreateVideoEncodeMemory,
                            weak_factory_.GetWeakPtr()),
        cast_transport_.get(),
        base::BindRepeating(&Session::SetTargetPlayoutDelay,
                            weak_factory_.GetWeakPtr()));
    video_stream_ = std::make_unique<VideoRtpStream>(
        std::move(video_sender), weak_factory_.GetWeakPtr());
    // Wire captured frames from the VideoCaptureHost into the RTP stream.
    media::mojom::VideoCaptureHostPtr video_host;
    resource_provider_->GetVideoCaptureHost(mojo::MakeRequest(&video_host));
    video_capture_client_ =
        std::make_unique<VideoCaptureClient>(std::move(video_host));
    video_capture_client_->Start(
        base::BindRepeating(&VideoRtpStream::InsertVideoFrame,
                            video_stream_->AsWeakPtr()),
        base::BindOnce(&Session::ReportError, weak_factory_.GetWeakPtr(),
                       SessionError::VIDEO_CAPTURE_ERROR));
  }

  if (observer_)
    observer_->DidStart();
}
// Invoked by |message_dispatcher_| when an inbound receiver message fails to
// parse. The parse failure is non-fatal for the session: the error is
// currently dropped on the floor, and |error_message| is only destined for
// the (not yet implemented) mirroring event logs.
void Session::OnResponseParsingError(const std::string& error_message) {
  // TODO(xjz): Log the |error_message| in the mirroring logs.
}
void Session::SetTargetPlayoutDelay(base::TimeDelta playout_delay) {
if (audio_stream_)
audio_stream_->SetTargetPlayoutDelay(playout_delay);
......@@ -317,10 +519,76 @@ void Session::SetTargetPlayoutDelay(base::TimeDelta playout_delay) {
video_stream_->SetTargetPlayoutDelay(playout_delay);
}
void Session::OnOfferAnswerExchangeTimeout() {
VLOG(1) << "OFFER/ANSWER exchange timed out.";
DCHECK(client_);
client_->OnError(SESSION_START_ERROR);
void Session::CreateAndSendOffer() {
// The random AES key and initialization vector pair used by all streams in
// this session.
const std::string aes_key = MakeRandomString(16); // AES-128.
const std::string aes_iv = MakeRandomString(16); // AES has 128-bit blocks.
std::vector<FrameSenderConfig> audio_configs;
std::vector<FrameSenderConfig> video_configs;
// Generate stream list with supported audio / video configs.
base::Value::ListStorage stream_list;
int stream_index = 0;
if (sink_info_.capability != DeviceCapability::VIDEO_ONLY) {
FrameSenderConfig config = MirrorSettings::GetDefaultAudioConfig(
RtpPayloadType::AUDIO_OPUS, Codec::CODEC_AUDIO_OPUS);
AddSenderConfig(base::RandInt(kAudioSsrcMin, kAudioSsrcMax), config,
aes_key, aes_iv, &audio_configs);
AddStreamObject(stream_index++, "OPUS", audio_configs.back(),
mirror_settings_, &stream_list);
}
if (sink_info_.capability != DeviceCapability::AUDIO_ONLY) {
const int32_t video_ssrc = base::RandInt(kVideoSsrcMin, kVideoSsrcMax);
if (IsHardwareVP8EncodingSupported(GetSupportedVeaProfiles())) {
FrameSenderConfig config = MirrorSettings::GetDefaultVideoConfig(
RtpPayloadType::VIDEO_VP8, Codec::CODEC_VIDEO_VP8);
config.use_external_encoder = true;
AddSenderConfig(video_ssrc, config, aes_key, aes_iv, &video_configs);
AddStreamObject(stream_index++, "VP8", video_configs.back(),
mirror_settings_, &stream_list);
}
if (IsHardwareH264EncodingSupported(GetSupportedVeaProfiles())) {
FrameSenderConfig config = MirrorSettings::GetDefaultVideoConfig(
RtpPayloadType::VIDEO_H264, Codec::CODEC_VIDEO_H264);
config.use_external_encoder = true;
AddSenderConfig(video_ssrc, config, aes_key, aes_iv, &video_configs);
AddStreamObject(stream_index++, "H264", video_configs.back(),
mirror_settings_, &stream_list);
}
if (video_configs.empty()) {
FrameSenderConfig config = MirrorSettings::GetDefaultVideoConfig(
RtpPayloadType::VIDEO_VP8, Codec::CODEC_VIDEO_VP8);
AddSenderConfig(video_ssrc, config, aes_key, aes_iv, &video_configs);
AddStreamObject(stream_index++, "VP8", video_configs.back(),
mirror_settings_, &stream_list);
}
}
DCHECK(!audio_configs.empty() || !video_configs.empty());
// Assemble the OFFER message.
const std::string cast_mode = "mirroring";
base::Value offer(base::Value::Type::DICTIONARY);
offer.SetKey("castMode", base::Value(cast_mode));
offer.SetKey("receiverGetStatus", base::Value("true"));
offer.SetKey("supportedStreams", base::Value(stream_list));
const int32_t sequence_number = message_dispatcher_.GetNextSeqNumber();
base::Value offer_message(base::Value::Type::DICTIONARY);
offer_message.SetKey("type", base::Value("OFFER"));
offer_message.SetKey("sessionId", base::Value(session_id_));
offer_message.SetKey("seqNum", base::Value(sequence_number));
offer_message.SetKey("offer", std::move(offer));
CastMessage message_to_receiver;
message_to_receiver.message_namespace = kWebRtcNamespace;
message_to_receiver.data = std::move(offer_message);
message_dispatcher_.RequestReply(
message_to_receiver, ResponseType::ANSWER, sequence_number,
kOfferAnswerExchangeTimeout,
base::BindOnce(&Session::OnAnswer, base::Unretained(this), cast_mode,
audio_configs, video_configs));
}
} // namespace mirroring
......@@ -8,6 +8,8 @@
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "components/mirroring/service/interface.h"
#include "components/mirroring/service/message_dispatcher.h"
#include "components/mirroring/service/mirror_settings.h"
#include "components/mirroring/service/rtp_stream.h"
#include "media/cast/cast_environment.h"
#include "media/cast/net/cast_transport_defines.h"
......@@ -22,20 +24,31 @@ class CastTransport;
namespace mirroring {
struct ReceiverResponse;
class VideoCaptureClient;
// Controls a mirroring session, including audio/video capturing and Cast
// Streaming. When constructed, it does OFFER/ANSWER exchange with the mirroring
// receiver. Mirroring starts when the exchange succeeds and stops when this
// class is destructed or error occurs. |observer| will get notified when status
// changes. |outbound_channel| is responsible for sending messages to the
// mirroring receiver through Cast Channel.
class Session final : public RtpStreamClient {
public:
Session(SessionType session_type,
const net::IPEndPoint& receiver_endpoint,
SessionClient* client);
Session(int32_t session_id,
const CastSinkInfo& sink_info,
const gfx::Size& max_resolution,
SessionObserver* observer,
ResourceProvider* resource_provider,
CastMessageChannel* outbound_channel);
// TODO(xjz): Add mojom::CastMessageChannelRequest |inbound_channel| to
// receive inbound messages.
~Session() override;
  // RtpStreamClient implementation.
void OnError(const std::string& message) override;
void RequestRefreshFrame() override;
media::VideoEncodeAccelerator::SupportedProfiles
GetSupportedVideoEncodeAcceleratorProfiles() override;
void CreateVideoEncodeAccelerator(
const media::cast::ReceiveVideoEncodeAcceleratorCallback& callback)
override;
......@@ -49,16 +62,23 @@ class Session final : public RtpStreamClient {
std::unique_ptr<std::vector<media::cast::FrameEvent>> frame_events,
std::unique_ptr<std::vector<media::cast::PacketEvent>> packet_events);
private:
  // Callback when OFFER/ANSWER message exchange finishes. Starts a mirroring
  // session.
void StartInternal(const net::IPEndPoint& receiver_endpoint,
const media::cast::FrameSenderConfig& audio_config,
const media::cast::FrameSenderConfig& video_config);
// Callback for ANSWER response. If the ANSWER is invalid, |observer_| will
// get notified with error, and session is stopped. Otherwise, capturing and
// streaming are started with the selected configs.
void OnAnswer(
const std::string& cast_mode,
const std::vector<media::cast::FrameSenderConfig>& audio_configs,
const std::vector<media::cast::FrameSenderConfig>& video_configs,
const ReceiverResponse& response);
// Called by |message_dispatcher_| when error occurs while parsing the
// responses.
void OnResponseParsingError(const std::string& error_message);
private:
void StopSession();
// Notify |client_| that error occurred and close the session.
// Notify |observer_| that error occurred and close the session.
void ReportError(SessionError error);
// Callback by Audio/VideoSender to indicate encoder status change.
......@@ -67,12 +87,22 @@ class Session final : public RtpStreamClient {
// Callback by media::cast::VideoSender to set a new target playout delay.
void SetTargetPlayoutDelay(base::TimeDelta playout_delay);
// Callback by |start_timeout_timer_|.
void OnOfferAnswerExchangeTimeout();
media::VideoEncodeAccelerator::SupportedProfiles GetSupportedVeaProfiles();
SessionClient* client_ = nullptr;
// Create and send OFFER message.
void CreateAndSendOffer();
// Create on StartInternal().
// Provided by Cast Media Route Provider (MRP).
const int32_t session_id_;
const CastSinkInfo sink_info_;
SessionObserver* observer_ = nullptr;
ResourceProvider* resource_provider_ = nullptr;
MirrorSettings mirror_settings_;
MessageDispatcher message_dispatcher_;
// Created after OFFER/ANSWER exchange succeeds.
std::unique_ptr<AudioRtpStream> audio_stream_;
std::unique_ptr<VideoRtpStream> video_stream_;
std::unique_ptr<VideoCaptureClient> video_capture_client_;
......@@ -81,9 +111,6 @@ class Session final : public RtpStreamClient {
scoped_refptr<base::SingleThreadTaskRunner> audio_encode_thread_ = nullptr;
scoped_refptr<base::SingleThreadTaskRunner> video_encode_thread_ = nullptr;
// Fire if the OFFER/ANSWER exchange times out.
base::OneShotTimer start_timeout_timer_;
base::WeakPtrFactory<Session> weak_factory_;
};
......
......@@ -7,13 +7,14 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "base/test/simple_test_tick_clock.h"
#include "components/mirroring/service/fake_network_service.h"
#include "components/mirroring/service/fake_video_capture_host.h"
#include "components/mirroring/service/interface.h"
#include "components/mirroring/service/mirror_settings.h"
#include "components/mirroring/service/receiver_response.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/net_utility.h"
#include "mojo/public/cpp/bindings/binding.h"
......@@ -27,74 +28,126 @@ using media::cast::Packet;
namespace mirroring {
class SessionTest : public SessionClient, public ::testing::Test {
const int kSessionId = 5;
class SessionTest : public ResourceProvider,
public SessionObserver,
public CastMessageChannel,
public ::testing::Test {
public:
SessionTest() : weak_factory_(this) {
SessionTest() : receiver_endpoint_(media::cast::test::GetFreeLocalPort()) {
testing_clock_.Advance(base::TimeTicks::Now() - base::TimeTicks());
}
~SessionTest() override { scoped_task_environment_.RunUntilIdle(); }
// SessionClient implemenation.
  // SessionObserver implementation.
MOCK_METHOD1(OnError, void(SessionError));
MOCK_METHOD0(DidStart, void());
MOCK_METHOD0(DidStop, void());
MOCK_METHOD0(OnOfferAnswerExchange, void());
  // ResourceProvider implementation.
MOCK_METHOD0(OnGetVideoCaptureHost, void());
MOCK_METHOD0(OnGetNetworkContext, void());
// Called when sends OFFER message.
MOCK_METHOD0(OnOffer, void());
// CastMessageHandler implementation. For outbound messages.
void Send(const CastMessage& message) {
EXPECT_TRUE(message.message_namespace == kWebRtcNamespace ||
message.message_namespace == kRemotingNamespace);
std::string message_type;
auto* found = message.data.FindKey("type");
if (found && found->is_string())
message_type = found->GetString();
if (message_type == "OFFER") {
auto* found = message.data.FindKey("seqNum");
if (found && found->is_int())
offer_sequence_number_ = found->GetInt();
OnOffer();
}
}
void GetVideoCaptureHost(
media::mojom::VideoCaptureHostRequest request) override {
video_host_ = std::make_unique<FakeVideoCaptureHost>(std::move(request));
OnGetVideoCaptureHost();
}
void GetNetWorkContext(
void GetNetworkContext(
network::mojom::NetworkContextRequest request) override {
network_context_ = std::make_unique<MockNetworkContext>(std::move(request));
OnGetNetworkContext();
}
void DoOfferAnswerExchange(
const std::vector<FrameSenderConfig>& audio_configs,
const std::vector<FrameSenderConfig>& video_configs,
GetAnswerCallback callback) override {
OnOfferAnswerExchange();
std::move(callback).Run(FrameSenderConfig(),
media::cast::GetDefaultVideoSenderConfig());
void SendAnswer() {
FrameSenderConfig config = MirrorSettings::GetDefaultVideoConfig(
media::cast::RtpPayloadType::VIDEO_VP8,
media::cast::Codec::CODEC_VIDEO_VP8);
std::vector<FrameSenderConfig> video_configs;
video_configs.emplace_back(config);
auto answer = std::make_unique<Answer>();
answer->udp_port = receiver_endpoint_.port();
answer->send_indexes.push_back(0);
answer->ssrcs.push_back(32);
answer->cast_mode = "mirroring";
ReceiverResponse response;
response.result = "ok";
response.type = ResponseType::ANSWER;
response.sequence_number = offer_sequence_number_;
response.answer = std::move(answer);
session_->OnAnswer("mirroring", std::vector<FrameSenderConfig>(),
video_configs, response);
}
protected:
void CreateSession() {
CastSinkInfo sink_info;
sink_info.ip_address = receiver_endpoint_.address();
sink_info.capability = DeviceCapability::AUDIO_AND_VIDEO;
// Expect to receive OFFER message when session is created.
base::RunLoop run_loop;
EXPECT_CALL(*this, OnError(_)).Times(0);
EXPECT_CALL(*this, OnOffer())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
session_ = std::make_unique<Session>(
kSessionId, sink_info, gfx::Size(1920, 1080), this, this, this);
run_loop.Run();
}
base::test::ScopedTaskEnvironment scoped_task_environment_;
const net::IPEndPoint receiver_endpoint_;
base::SimpleTestTickClock testing_clock_;
std::unique_ptr<Session> session_;
std::unique_ptr<FakeVideoCaptureHost> video_host_;
std::unique_ptr<MockNetworkContext> network_context_;
base::WeakPtrFactory<SessionTest> weak_factory_;
int32_t offer_sequence_number_ = -1;
private:
DISALLOW_COPY_AND_ASSIGN(SessionTest);
};
TEST_F(SessionTest, Mirroring) {
// Start a mirroring session.
CreateSession();
scoped_task_environment_.RunUntilIdle();
{
    // Expect the mirroring session to start after receiving the ANSWER
    // message.
base::RunLoop run_loop;
EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(1);
EXPECT_CALL(*this, OnGetNetworkContext()).Times(1);
EXPECT_CALL(*this, OnOfferAnswerExchange()).Times(1);
EXPECT_CALL(*this, OnError(_)).Times(0);
EXPECT_CALL(*this, DidStart())
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
session_ =
std::make_unique<Session>(SessionType::AUDIO_AND_VIDEO,
media::cast::test::GetFreeLocalPort(), this);
SendAnswer();
run_loop.Run();
}
scoped_task_environment_.RunUntilIdle();
{
base::RunLoop run_loop;
// Expect to send out some UDP packets.
......@@ -106,7 +159,6 @@ TEST_F(SessionTest, Mirroring) {
video_host_->SendOneFrame(gfx::Size(64, 32), testing_clock_.NowTicks());
run_loop.Run();
}
scoped_task_environment_.RunUntilIdle();
// Stop the session.
......@@ -121,4 +173,22 @@ TEST_F(SessionTest, Mirroring) {
scoped_task_environment_.RunUntilIdle();
}
// Feeding an empty ReceiverResponse (as on exchange timeout) into OnAnswer()
// must surface ANSWER_TIME_OUT and stop the session without ever acquiring
// capture or network resources.
TEST_F(SessionTest, AnswerTimeout) {
  CreateSession();
  scoped_task_environment_.RunUntilIdle();
  {
    base::RunLoop loop;
    // The session reports the timeout error and stops...
    EXPECT_CALL(*this, OnError(ANSWER_TIME_OUT))
        .WillOnce(InvokeWithoutArgs(&loop, &base::RunLoop::Quit));
    EXPECT_CALL(*this, DidStop()).Times(1);
    // ...without requesting any capture host or network context.
    EXPECT_CALL(*this, OnGetVideoCaptureHost()).Times(0);
    EXPECT_CALL(*this, OnGetNetworkContext()).Times(0);
    session_->OnAnswer("mirroring", std::vector<FrameSenderConfig>(),
                       std::vector<FrameSenderConfig>(), ReceiverResponse());
    loop.Run();
  }
  scoped_task_environment_.RunUntilIdle();
}
} // namespace mirroring
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment