Commit 47581b1f authored by hubbe, committed by Commit bot

Cast: Merge common functionality from audio/video sender into frame_sender.

To make things simpler, also make congestion control into an interface with
fixed and adaptive implementations.

BUG=405622

Review URL: https://codereview.chromium.org/542883004

Cr-Commit-Position: refs/heads/master@{#293630}
parent 41dbfb1a
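
Reviewer note, for orientation: after this change each sender hands FrameSender a CongestionControl object at construction time instead of computing bitrates itself. Below is a minimal sketch of the intended wiring, assuming the factory functions declared in congestion_control.h later in this diff; SenderConfig is a made-up aggregate, and the video sender's use of the adaptive variant is inferred (its .cc diff is collapsed in this view).

// Sketch only. The factory signatures match media/cast/sender/congestion_control.h
// in this change; SenderConfig is a hypothetical stand-in for the audio/video configs.
#include "base/basictypes.h"
#include "base/time/tick_clock.h"
#include "media/cast/sender/congestion_control.h"

namespace media {
namespace cast {

struct SenderConfig {  // Hypothetical.
  uint32 bitrate;
  uint32 min_bitrate;
  uint32 max_bitrate;
  size_t max_unacked_frames;
};

CongestionControl* ChooseCongestionControl(bool is_audio,
                                           base::TickClock* clock,
                                           const SenderConfig& config) {
  if (is_audio) {
    // Audio: the encoder bitrate is fixed, so GetBitrate() just echoes it.
    return NewFixedCongestionControl(config.bitrate);
  }
  // Video (assumed): adapt between the configured min and max bitrates.
  return NewAdaptiveCongestionControl(
      clock, config.max_bitrate, config.min_bitrate, config.max_unacked_frames);
}

}  // namespace cast
}  // namespace media
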
......@@ -15,8 +15,6 @@ namespace media {
namespace cast {
namespace {
const int kNumAggressiveReportsSentAtStart = 100;
// TODO(miu): This should be specified in AudioSenderConfig, but currently it is
// fixed to 100 FPS (i.e., 10 ms per frame), and AudioEncoder assumes this as
// well.
......@@ -29,13 +27,15 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
CastTransportSender* const transport_sender)
: FrameSender(
cast_environment,
true,
transport_sender,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
audio_config.frequency,
audio_config.ssrc,
kAudioFrameRate * 2.0, // We lie to increase max outstanding frames.
audio_config.target_playout_delay),
configured_encoder_bitrate_(audio_config.bitrate),
audio_config.target_playout_delay,
NewFixedCongestionControl(audio_config.bitrate)),
samples_sent_to_encoder_(0),
weak_factory_(this) {
cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
VLOG(1) << "max_unacked_frames " << max_unacked_frames_;
......@@ -48,8 +48,9 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
audio_config.frequency,
audio_config.bitrate,
audio_config.codec,
base::Bind(&AudioSender::SendEncodedAudioFrame,
weak_factory_.GetWeakPtr())));
base::Bind(&FrameSender::SendEncodedFrame,
weak_factory_.GetWeakPtr(),
audio_config.bitrate)));
cast_initialization_status_ = audio_encoder_->InitializationResult();
} else {
NOTREACHED(); // No support for external audio encoding.
......@@ -89,142 +90,17 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
return;
}
audio_encoder_->InsertAudio(audio_bus.Pass(), recorded_time);
}
void AudioSender::SendEncodedAudioFrame(
scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
const uint32 frame_id = encoded_frame->frame_id;
const bool is_first_frame_to_be_sent = last_send_time_.is_null();
last_send_time_ = cast_environment_->Clock()->NowTicks();
last_sent_frame_id_ = frame_id;
// If this is the first frame about to be sent, fake the value of
// |latest_acked_frame_id_| to indicate the receiver starts out all caught up.
// Also, schedule the periodic frame re-send checks.
if (is_first_frame_to_be_sent) {
latest_acked_frame_id_ = frame_id - 1;
ScheduleNextResendCheck();
}
cast_environment_->Logging()->InsertEncodedFrameEvent(
last_send_time_, FRAME_ENCODED, AUDIO_EVENT, encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
encoded_frame->dependency == EncodedFrame::KEY,
configured_encoder_bitrate_);
RecordLatestFrameTimestamps(frame_id,
encoded_frame->reference_time,
encoded_frame->rtp_timestamp);
// At the start of the session, it's important to send reports before each
// frame so that the receiver can properly compute playout times. The reason
// more than one report is sent is because transmission is not guaranteed,
// only best effort, so we send enough that one should almost certainly get
// through.
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
// SendRtcpReport() will schedule future reports to be made if this is the
// last "aggressive report."
++num_aggressive_rtcp_reports_sent_;
const bool is_last_aggressive_report =
(num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
SendRtcpReport(is_last_aggressive_report);
}
if (send_target_playout_delay_) {
encoded_frame->new_playout_delay_ms =
target_playout_delay_.InMilliseconds();
}
transport_sender_->InsertFrame(ssrc_, *encoded_frame);
}
int64 old_frames_sent =
samples_sent_to_encoder_ * kAudioFrameRate / rtp_timebase_;
samples_sent_to_encoder_ += audio_bus->frames();
int64 new_frames_sent =
samples_sent_to_encoder_ * kAudioFrameRate / rtp_timebase_;
frames_in_encoder_ += new_frames_sent - old_frames_sent;
void AudioSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (is_rtt_available()) {
// Having the RTT values implies the receiver sent back a receiver report
// based on it having received a report from here. Therefore, ensure this
// sender stops aggressively sending reports.
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
VLOG(1) << "No longer a need to send reports aggressively (sent "
<< num_aggressive_rtcp_reports_sent_ << ").";
num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
ScheduleNextRtcpReport();
}
}
if (last_send_time_.is_null())
return; // Cannot get an ACK without having first sent a frame.
if (cast_feedback.missing_frames_and_packets.empty()) {
// We only count duplicate ACKs when we have sent newer frames.
if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
latest_acked_frame_id_ != last_sent_frame_id_) {
duplicate_ack_counter_++;
} else {
duplicate_ack_counter_ = 0;
}
// TODO(miu): The values "2" and "3" should be derived from configuration.
if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
VLOG(1) << "Received duplicate ACK for frame " << latest_acked_frame_id_;
ResendForKickstart();
}
} else {
// Only count duplicated ACKs if there is no NACK request in between.
// This is to avoid aggressive resends.
duplicate_ack_counter_ = 0;
}
cast_environment_->Logging()->InsertFrameEvent(
cast_environment_->Clock()->NowTicks(),
FRAME_ACK_RECEIVED,
AUDIO_EVENT,
GetRecordedRtpTimestamp(cast_feedback.ack_frame_id),
cast_feedback.ack_frame_id);
const bool is_acked_out_of_order =
static_cast<int32>(cast_feedback.ack_frame_id -
latest_acked_frame_id_) < 0;
VLOG(2) << "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
<< " for frame " << cast_feedback.ack_frame_id;
if (!is_acked_out_of_order) {
// Cancel resends of acked frames.
std::vector<uint32> cancel_sending_frames;
while (latest_acked_frame_id_ != cast_feedback.ack_frame_id) {
latest_acked_frame_id_++;
cancel_sending_frames.push_back(latest_acked_frame_id_);
}
transport_sender_->CancelSendingFrames(ssrc_, cancel_sending_frames);
latest_acked_frame_id_ = cast_feedback.ack_frame_id;
}
audio_encoder_->InsertAudio(audio_bus.Pass(), recorded_time);
}
bool AudioSender::ShouldDropNextFrame(base::TimeTicks capture_time) const {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
int frames_in_flight = 0;
base::TimeDelta duration_in_flight;
if (!last_send_time_.is_null()) {
frames_in_flight =
static_cast<int32>(last_sent_frame_id_ - latest_acked_frame_id_);
if (frames_in_flight > 0) {
const uint32 oldest_unacked_frame_id = latest_acked_frame_id_ + 1;
duration_in_flight =
capture_time - GetRecordedReferenceTime(oldest_unacked_frame_id);
}
}
VLOG(2) << frames_in_flight
<< " frames in flight; last sent: " << last_sent_frame_id_
<< "; latest acked: " << latest_acked_frame_id_
<< "; duration in flight: "
<< duration_in_flight.InMicroseconds() << " usec ("
<< (target_playout_delay_ > base::TimeDelta() ?
100 * duration_in_flight / target_playout_delay_ :
kint64max) << "%)";
return frames_in_flight >= max_unacked_frames_ ||
duration_in_flight >= target_playout_delay_;
void AudioSender::OnAck(uint32 frame_id) {
}
} // namespace cast
......
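
A note on the new frames_in_encoder_ accounting in InsertAudio() above: raw sample counts are converted to 10 ms audio-frame units with integer division against the RTP timebase, so only whole frames are counted as being in the encoder. A standalone illustration with assumed numbers (48 kHz timebase, kAudioFrameRate = 100, per the comment at the top of audio_sender.cc):

// Standalone illustration, not part of the patch. Assumes a 48000 Hz RTP
// timebase and 100 audio frames per second (480 samples per frame).
#include <cassert>
#include <cstdint>

int main() {
  const int64_t kAudioFrameRate = 100;
  const int64_t rtp_timebase = 48000;
  int64_t samples_sent_to_encoder = 0;
  int frames_in_encoder = 0;

  // Insert three audio buses of 512 samples each (deliberately not
  // frame-aligned).
  for (int i = 0; i < 3; ++i) {
    const int64_t old_frames =
        samples_sent_to_encoder * kAudioFrameRate / rtp_timebase;
    samples_sent_to_encoder += 512;
    const int64_t new_frames =
        samples_sent_to_encoder * kAudioFrameRate / rtp_timebase;
    frames_in_encoder += static_cast<int>(new_frames - old_frames);
  }

  // 1536 samples is 3.2 frames of 480 samples; the fractional frame is not
  // counted until enough samples arrive to complete it.
  assert(frames_in_encoder == 3);
  return 0;
}
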
......@@ -51,22 +51,17 @@ class AudioSender : public FrameSender,
const base::TimeTicks& recorded_time);
protected:
// Protected for testability.
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
virtual void OnAck(uint32 frame_id) OVERRIDE;
private:
// Returns true if there are too many frames in flight, or if the media
// duration of the frames in flight would be too high by sending the next
// frame. The latter metric is determined from the given |capture_time|
// for the next frame to be encoded and sent.
bool ShouldDropNextFrame(base::TimeTicks capture_time) const;
// Called by the |audio_encoder_| with the next EncodedFrame to send.
void SendEncodedAudioFrame(scoped_ptr<EncodedFrame> audio_frame);
void SendEncodedAudioFrame(int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> audio_frame);
// Encodes AudioBuses into EncodedFrames.
scoped_ptr<AudioEncoder> audio_encoder_;
const int configured_encoder_bitrate_;
uint64 samples_sent_to_encoder_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<AudioSender> weak_factory_;
......
......@@ -22,6 +22,117 @@
namespace media {
namespace cast {
class AdaptiveCongestionControl : public CongestionControl {
public:
AdaptiveCongestionControl(base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames);
virtual ~AdaptiveCongestionControl() OVERRIDE;
virtual void UpdateRtt(base::TimeDelta rtt) OVERRIDE;
// Called when an encoded frame is sent to the transport.
virtual void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when) OVERRIDE;
// Called when we receive an ACK for a frame.
virtual void AckFrame(uint32 frame_id, base::TimeTicks when) OVERRIDE;
// Returns the bitrate we should use for the next frame.
virtual uint32 GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay) OVERRIDE;
private:
struct FrameStats {
FrameStats();
// Time this frame was sent to the transport.
base::TimeTicks sent_time;
// Time this frame was acked.
base::TimeTicks ack_time;
// Size of encoded frame in bits.
size_t frame_size;
};
// Calculate how much "dead air" (idle time) there is between two frames.
static base::TimeDelta DeadTime(const FrameStats& a, const FrameStats& b);
// Get the FrameStats for a given |frame_id|.
// Note: Older FrameStats will be removed automatically.
FrameStats* GetFrameStats(uint32 frame_id);
// Calculate a safe bitrate. This is based on how much we've been
// sending in the past.
double CalculateSafeBitrate();
// For a given frame, calculate when it might be acked.
// (Or return the time it was acked, if it was.)
base::TimeTicks EstimatedAckTime(uint32 frame_id, double bitrate);
// Calculate when we start sending the data for a given frame.
// This is done by calculating when we were done sending the previous
// frame, but obviously can't be less than |sent_time| (if known).
base::TimeTicks EstimatedSendingTime(uint32 frame_id, double bitrate);
base::TickClock* const clock_; // Not owned by this class.
const uint32 max_bitrate_configured_;
const uint32 min_bitrate_configured_;
std::deque<FrameStats> frame_stats_;
uint32 last_frame_stats_;
uint32 last_acked_frame_;
uint32 last_encoded_frame_;
base::TimeDelta rtt_;
size_t history_size_;
size_t acked_bits_in_history_;
base::TimeDelta dead_time_in_history_;
DISALLOW_COPY_AND_ASSIGN(AdaptiveCongestionControl);
};
class FixedCongestionControl : public CongestionControl {
public:
FixedCongestionControl(uint32 bitrate) : bitrate_(bitrate) {}
virtual ~FixedCongestionControl() OVERRIDE {}
virtual void UpdateRtt(base::TimeDelta rtt) OVERRIDE {
}
// Called when an encoded frame is sent to the transport.
virtual void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when) OVERRIDE {
}
// Called when we receive an ACK for a frame.
virtual void AckFrame(uint32 frame_id, base::TimeTicks when) OVERRIDE {
}
// Returns the bitrate we should use for the next frame.
virtual uint32 GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay) OVERRIDE {
return bitrate_;
}
private:
uint32 bitrate_;
DISALLOW_COPY_AND_ASSIGN(FixedCongestionControl);
};
CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames) {
return new AdaptiveCongestionControl(clock,
max_bitrate_configured,
min_bitrate_configured,
max_unacked_frames);
}
CongestionControl* NewFixedCongestionControl(uint32 bitrate) {
return new FixedCongestionControl(bitrate);
}
// This means that we *try* to keep our buffer 90% empty.
// If it is less full, we increase the bandwidth; if it is more full,
// we decrease the bandwidth. Making this smaller makes the
......@@ -32,13 +143,14 @@ static const double kTargetEmptyBufferFraction = 0.9;
// congestion control adapt slower.
static const size_t kHistorySize = 100;
CongestionControl::FrameStats::FrameStats() : frame_size(0) {
AdaptiveCongestionControl::FrameStats::FrameStats() : frame_size(0) {
}
CongestionControl::CongestionControl(base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames)
AdaptiveCongestionControl::AdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames)
: clock_(clock),
max_bitrate_configured_(max_bitrate_configured),
min_bitrate_configured_(min_bitrate_configured),
......@@ -57,14 +169,15 @@ CongestionControl::CongestionControl(base::TickClock* clock,
}
CongestionControl::~CongestionControl() {}
AdaptiveCongestionControl::~AdaptiveCongestionControl() {}
void CongestionControl::UpdateRtt(base::TimeDelta rtt) {
void AdaptiveCongestionControl::UpdateRtt(base::TimeDelta rtt) {
rtt_ = (7 * rtt_ + rtt) / 8;
}
// Calculate how much "dead air" there is between two frames.
base::TimeDelta CongestionControl::DeadTime(const FrameStats& a,
const FrameStats& b) {
base::TimeDelta AdaptiveCongestionControl::DeadTime(const FrameStats& a,
const FrameStats& b) {
if (b.sent_time > a.ack_time) {
return b.sent_time - a.ack_time;
} else {
......@@ -72,7 +185,7 @@ base::TimeDelta CongestionControl::DeadTime(const FrameStats& a,
}
}
double CongestionControl::CalculateSafeBitrate() {
double AdaptiveCongestionControl::CalculateSafeBitrate() {
double transmit_time =
(GetFrameStats(last_acked_frame_)->ack_time -
frame_stats_.front().sent_time - dead_time_in_history_).InSecondsF();
......@@ -83,8 +196,8 @@ double CongestionControl::CalculateSafeBitrate() {
return acked_bits_in_history_ / std::max(transmit_time, 1E-3);
}
CongestionControl::FrameStats* CongestionControl::GetFrameStats(
uint32 frame_id) {
AdaptiveCongestionControl::FrameStats*
AdaptiveCongestionControl::GetFrameStats(uint32 frame_id) {
int32 offset = static_cast<int32>(frame_id - last_frame_stats_);
DCHECK_LT(offset, static_cast<int32>(kHistorySize));
if (offset > 0) {
......@@ -109,7 +222,8 @@ CongestionControl::FrameStats* CongestionControl::GetFrameStats(
return &frame_stats_[offset];
}
void CongestionControl::AckFrame(uint32 frame_id, base::TimeTicks when) {
void AdaptiveCongestionControl::AckFrame(uint32 frame_id,
base::TimeTicks when) {
FrameStats* frame_stats = GetFrameStats(last_acked_frame_);
while (IsNewerFrameId(frame_id, last_acked_frame_)) {
FrameStats* last_frame_stats = frame_stats;
......@@ -129,9 +243,9 @@ void CongestionControl::AckFrame(uint32 frame_id, base::TimeTicks when) {
}
}
void CongestionControl::SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when) {
void AdaptiveCongestionControl::SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when) {
last_encoded_frame_ = frame_id;
FrameStats* frame_stats = GetFrameStats(frame_id);
DCHECK(frame_stats);
......@@ -139,8 +253,8 @@ void CongestionControl::SendFrameToTransport(uint32 frame_id,
frame_stats->sent_time = when;
}
base::TimeTicks CongestionControl::EstimatedAckTime(uint32 frame_id,
double bitrate) {
base::TimeTicks AdaptiveCongestionControl::EstimatedAckTime(uint32 frame_id,
double bitrate) {
FrameStats* frame_stats = GetFrameStats(frame_id);
DCHECK(frame_stats);
if (frame_stats->ack_time.is_null()) {
......@@ -164,8 +278,9 @@ base::TimeTicks CongestionControl::EstimatedAckTime(uint32 frame_id,
}
}
base::TimeTicks CongestionControl::EstimatedSendingTime(uint32 frame_id,
double bitrate) {
base::TimeTicks AdaptiveCongestionControl::EstimatedSendingTime(
uint32 frame_id,
double bitrate) {
FrameStats* frame_stats = GetFrameStats(frame_id);
DCHECK(frame_stats);
base::TimeTicks ret = EstimatedAckTime(frame_id - 1, bitrate) - rtt_;
......@@ -177,8 +292,8 @@ base::TimeTicks CongestionControl::EstimatedSendingTime(uint32 frame_id,
}
}
uint32 CongestionControl::GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay) {
uint32 AdaptiveCongestionControl::GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay) {
double safe_bitrate = CalculateSafeBitrate();
// Estimate when we might start sending the next frame.
base::TimeDelta time_to_catch_up =
......
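
The heart of the adaptive implementation is CalculateSafeBitrate(): it divides the bits ACKed within the history window by the time actually spent transmitting (wall time minus the "dead air" measured by DeadTime()), with the divisor floored at 1 ms. The GetBitrate() body is truncated in this view, but the estimate itself is simple; a worked example with invented numbers:

// Illustration of the safe-bitrate estimate; all values are hypothetical.
#include <algorithm>
#include <cstdio>

int main() {
  const double acked_bits_in_history = 400000.0;  // 50 KB ACKed recently.
  const double wall_time_s = 0.250;               // First send to last ACK.
  const double dead_time_s = 0.050;               // Idle gaps between frames.
  const double transmit_time_s = wall_time_s - dead_time_s;

  const double safe_bitrate =
      acked_bits_in_history / std::max(transmit_time_s, 1e-3);
  std::printf("safe bitrate ~= %.0f bps\n", safe_bitrate);  // 2000000 bps.
  return 0;
}
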
......@@ -17,69 +17,30 @@ namespace cast {
class CongestionControl {
public:
CongestionControl(base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames);
virtual ~CongestionControl();
void UpdateRtt(base::TimeDelta rtt);
// Called with latest measured rtt value.
virtual void UpdateRtt(base::TimeDelta rtt) = 0;
// Called when an encoded frame is sent to the transport.
void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when);
virtual void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
base::TimeTicks when) = 0;
// Called when we receive an ACK for a frame.
void AckFrame(uint32 frame_id, base::TimeTicks when);
virtual void AckFrame(uint32 frame_id, base::TimeTicks when) = 0;
// Returns the bitrate we should use for the next frame.
uint32 GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay);
private:
struct FrameStats {
FrameStats();
// Time this frame was sent to the transport.
base::TimeTicks sent_time;
// Time this frame was acked.
base::TimeTicks ack_time;
// Size of encoded frame in bits.
size_t frame_size;
};
// Calculate how much "dead air" (idle time) there is between two frames.
static base::TimeDelta DeadTime(const FrameStats& a, const FrameStats& b);
// Get the FrameStats for a given |frame_id|.
// Note: Older FrameStats will be removed automatically.
FrameStats* GetFrameStats(uint32 frame_id);
// Calculate a safe bitrate. This is based on how much we've been
// sending in the past.
double CalculateSafeBitrate();
// For a given frame, calculate when it might be acked.
// (Or return the time it was acked, if it was.)
base::TimeTicks EstimatedAckTime(uint32 frame_id, double bitrate);
// Calculate when we start sending the data for a given frame.
// This is done by calculating when we were done sending the previous
// frame, but obviously can't be less than |sent_time| (if known).
base::TimeTicks EstimatedSendingTime(uint32 frame_id, double bitrate);
virtual uint32 GetBitrate(base::TimeTicks playout_time,
base::TimeDelta playout_delay) = 0;
};
base::TickClock* const clock_; // Not owned by this class.
const uint32 max_bitrate_configured_;
const uint32 min_bitrate_configured_;
std::deque<FrameStats> frame_stats_;
uint32 last_frame_stats_;
uint32 last_acked_frame_;
uint32 last_encoded_frame_;
base::TimeDelta rtt_;
size_t history_size_;
size_t acked_bits_in_history_;
base::TimeDelta dead_time_in_history_;
CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
size_t max_unacked_frames);
DISALLOW_COPY_AND_ASSIGN(CongestionControl);
};
CongestionControl* NewFixedCongestionControl(uint32 bitrate);
} // namespace cast
} // namespace media
......
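
Taken together with the frame_sender.cc changes below, the interface implies a simple per-frame protocol: report each sent frame's size and send time, feed back RTT and ACKs from RTCP, and query the bitrate before encoding the next frame. A sketch of that call sequence follows; the free functions are hypothetical glue, only the CongestionControl methods are real, and the GetBitrate() call site is presumed to be the video encoder path (not visible in this view).

// Hypothetical driver code; method names match congestion_control.h above.
#include "base/basictypes.h"
#include "base/time/time.h"
#include "media/cast/sender/congestion_control.h"

namespace media {
namespace cast {

void OnFrameSent(CongestionControl* cc, uint32 frame_id, size_t frame_bytes,
                 base::TimeTicks now) {
  // FrameSender::SendEncodedFrame() reports the frame size in bits.
  cc->SendFrameToTransport(frame_id, frame_bytes * 8, now);
}

void OnRtcpFeedback(CongestionControl* cc, base::TimeDelta rtt,
                    uint32 acked_frame_id, base::TimeTicks now) {
  cc->UpdateRtt(rtt);  // The adaptive implementation smooths this internally.
  cc->AckFrame(acked_frame_id, now);
}

uint32 BitrateForNextFrame(CongestionControl* cc,
                           base::TimeTicks playout_time,
                           base::TimeDelta playout_delay) {
  // FixedCongestionControl returns the configured bitrate unconditionally;
  // AdaptiveCongestionControl derives it from the ACK history and RTT.
  return cc->GetBitrate(playout_time, playout_delay);
}

}  // namespace cast
}  // namespace media
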
......@@ -25,7 +25,7 @@ class CongestionControlTest : public ::testing::Test {
: task_runner_(new test::FakeSingleThreadTaskRunner(&testing_clock_)) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
congestion_control_.reset(new CongestionControl(
congestion_control_.reset(NewAdaptiveCongestionControl(
&testing_clock_, kMaxBitrateConfigured, kMinBitrateConfigured, 10));
}
......
......@@ -4,30 +4,40 @@
#include "media/cast/sender/frame_sender.h"
#include "base/debug/trace_event.h"
namespace media {
namespace cast {
namespace {
const int kMinSchedulingDelayMs = 1;
const int kNumAggressiveReportsSentAtStart = 100;
} // namespace
FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
bool is_audio,
CastTransportSender* const transport_sender,
base::TimeDelta rtcp_interval,
int rtp_timebase,
uint32 ssrc,
double max_frame_rate,
base::TimeDelta playout_delay)
base::TimeDelta playout_delay,
CongestionControl* congestion_control)
: cast_environment_(cast_environment),
transport_sender_(transport_sender),
ssrc_(ssrc),
rtt_available_(false),
rtcp_interval_(rtcp_interval),
max_frame_rate_(max_frame_rate),
frames_in_encoder_(0),
num_aggressive_rtcp_reports_sent_(0),
last_sent_frame_id_(0),
latest_acked_frame_id_(0),
duplicate_ack_counter_(0),
rtp_timebase_(rtp_timebase),
congestion_control_(congestion_control),
is_audio_(is_audio),
weak_factory_(this) {
DCHECK_GT(rtp_timebase_, 0);
SetTargetPlayoutDelay(playout_delay);
......@@ -158,5 +168,185 @@ RtpTimestamp FrameSender::GetRecordedRtpTimestamp(uint32 frame_id) const {
return frame_rtp_timestamps_[frame_id % arraysize(frame_rtp_timestamps_)];
}
void FrameSender::SendEncodedFrame(
int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK_GT(frames_in_encoder_, 0) << " is_audio: " << is_audio_;
frames_in_encoder_--;
const uint32 frame_id = encoded_frame->frame_id;
const bool is_first_frame_to_be_sent = last_send_time_.is_null();
last_send_time_ = cast_environment_->Clock()->NowTicks();
last_sent_frame_id_ = frame_id;
// If this is the first frame about to be sent, fake the value of
// |latest_acked_frame_id_| to indicate the receiver starts out all caught up.
// Also, schedule the periodic frame re-send checks.
if (is_first_frame_to_be_sent) {
latest_acked_frame_id_ = frame_id - 1;
ScheduleNextResendCheck();
}
VLOG_IF(1, encoded_frame->dependency == EncodedFrame::KEY)
<< "Send encoded key frame; frame_id: " << frame_id;
cast_environment_->Logging()->InsertEncodedFrameEvent(
last_send_time_, FRAME_ENCODED,
is_audio_ ? AUDIO_EVENT : VIDEO_EVENT,
encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
encoded_frame->dependency == EncodedFrame::KEY,
requested_bitrate_before_encode);
RecordLatestFrameTimestamps(frame_id,
encoded_frame->reference_time,
encoded_frame->rtp_timestamp);
if (!is_audio_) {
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT1(
"cast_perf_test", "VideoFrameEncoded",
TRACE_EVENT_SCOPE_THREAD,
"rtp_timestamp", encoded_frame->rtp_timestamp);
}
// At the start of the session, it's important to send reports before each
// frame so that the receiver can properly compute playout times. The reason
// more than one report is sent is because transmission is not guaranteed,
// only best effort, so send enough that one should almost certainly get
// through.
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
// SendRtcpReport() will schedule future reports to be made if this is the
// last "aggressive report."
++num_aggressive_rtcp_reports_sent_;
const bool is_last_aggressive_report =
(num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
SendRtcpReport(is_last_aggressive_report);
}
congestion_control_->SendFrameToTransport(
frame_id, encoded_frame->data.size() * 8, last_send_time_);
if (send_target_playout_delay_) {
encoded_frame->new_playout_delay_ms =
target_playout_delay_.InMilliseconds();
}
transport_sender_->InsertFrame(ssrc_, *encoded_frame);
}
void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
if (is_rtt_available()) {
rtt = rtt_;
avg_rtt = avg_rtt_;
min_rtt = min_rtt_;
max_rtt = max_rtt_;
congestion_control_->UpdateRtt(rtt);
// Don't use an RTT lower than our average.
rtt = std::max(rtt, avg_rtt);
// Having the RTT values implies the receiver sent back a receiver report
// based on it having received a report from here. Therefore, ensure this
// sender stops aggressively sending reports.
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
VLOG(1) << "No longer a need to send reports aggressively (sent "
<< num_aggressive_rtcp_reports_sent_ << ").";
num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
ScheduleNextRtcpReport();
}
} else {
// We have no measured value; use the default.
rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
}
if (last_send_time_.is_null())
return; // Cannot get an ACK without having first sent a frame.
if (cast_feedback.missing_frames_and_packets.empty()) {
OnAck(cast_feedback.ack_frame_id);
// We only count duplicate ACKs when we have sent newer frames.
if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
latest_acked_frame_id_ != last_sent_frame_id_) {
duplicate_ack_counter_++;
} else {
duplicate_ack_counter_ = 0;
}
// TODO(miu): The values "2" and "3" should be derived from configuration.
if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
VLOG(1) << "Received duplicate ACK for frame " << latest_acked_frame_id_;
ResendForKickstart();
}
} else {
// Only count duplicated ACKs if there is no NACK request in between.
// This is to avoid aggressive resends.
duplicate_ack_counter_ = 0;
}
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
congestion_control_->AckFrame(cast_feedback.ack_frame_id, now);
cast_environment_->Logging()->InsertFrameEvent(
now,
FRAME_ACK_RECEIVED,
is_audio_ ? AUDIO_EVENT : VIDEO_EVENT,
GetRecordedRtpTimestamp(cast_feedback.ack_frame_id),
cast_feedback.ack_frame_id);
const bool is_acked_out_of_order =
static_cast<int32>(cast_feedback.ack_frame_id -
latest_acked_frame_id_) < 0;
VLOG(2) << "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
<< " for frame " << cast_feedback.ack_frame_id;
if (!is_acked_out_of_order) {
// Cancel resends of acked frames.
std::vector<uint32> cancel_sending_frames;
while (latest_acked_frame_id_ != cast_feedback.ack_frame_id) {
latest_acked_frame_id_++;
cancel_sending_frames.push_back(latest_acked_frame_id_);
}
transport_sender_->CancelSendingFrames(ssrc_, cancel_sending_frames);
latest_acked_frame_id_ = cast_feedback.ack_frame_id;
}
}
bool FrameSender::ShouldDropNextFrame(base::TimeTicks capture_time) const {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
int frames_in_flight = 0;
base::TimeDelta duration_in_flight;
if (!last_send_time_.is_null()) {
frames_in_flight =
static_cast<int32>(last_sent_frame_id_ - latest_acked_frame_id_);
if (frames_in_flight > 0) {
const uint32 oldest_unacked_frame_id = latest_acked_frame_id_ + 1;
duration_in_flight =
capture_time - GetRecordedReferenceTime(oldest_unacked_frame_id);
}
}
frames_in_flight += frames_in_encoder_;
VLOG(2) << frames_in_flight
<< " frames in flight; last sent: " << last_sent_frame_id_
<< "; latest acked: " << latest_acked_frame_id_
<< "; frames in encoder: " << frames_in_encoder_
<< "; duration in flight: "
<< duration_in_flight.InMicroseconds() << " usec ("
<< (target_playout_delay_ > base::TimeDelta() ?
100 * duration_in_flight / target_playout_delay_ :
kint64max) << "%)";
return frames_in_flight >= max_unacked_frames_ ||
duration_in_flight >= target_playout_delay_;
}
} // namespace cast
} // namespace media
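
For reference, the drop decision in ShouldDropNextFrame() combines a frame-count limit with a media-duration limit, and the in-flight count now also includes frames still in the encoder. A standalone numeric illustration with invented values:

// Numeric illustration of ShouldDropNextFrame(); all values are invented.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t last_sent = 210, latest_acked = 205;
  // Wraparound-safe: the uint32 difference is reinterpreted as signed.
  int frames_in_flight = static_cast<int32_t>(last_sent - latest_acked);  // 5
  const int frames_in_encoder = 2;
  frames_in_flight += frames_in_encoder;                                  // 7

  const int64_t duration_in_flight_us = 260000;  // capture - oldest unacked.
  const int64_t target_playout_delay_us = 400000;
  const int max_unacked_frames = 10;

  const bool drop = frames_in_flight >= max_unacked_frames ||
                    duration_in_flight_us >= target_playout_delay_us;
  std::printf("in flight: %d frames, %lld%% of playout delay, drop=%d\n",
              frames_in_flight,
              static_cast<long long>(100 * duration_in_flight_us /
                                     target_playout_delay_us),
              drop);  // 7 frames, 65%, drop=0
  return 0;
}
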
......@@ -15,6 +15,7 @@
#include "base/time/time.h"
#include "media/cast/cast_environment.h"
#include "media/cast/net/rtcp/rtcp.h"
#include "media/cast/sender/congestion_control.h"
namespace media {
namespace cast {
......@@ -22,12 +23,14 @@ namespace cast {
class FrameSender {
public:
FrameSender(scoped_refptr<CastEnvironment> cast_environment,
bool is_audio,
CastTransportSender* const transport_sender,
base::TimeDelta rtcp_interval,
int rtp_timebase,
uint32 ssrc,
double max_frame_rate,
base::TimeDelta playout_delay);
base::TimeDelta playout_delay,
CongestionControl* congestion_control);
virtual ~FrameSender();
// Calling this function is only valid if the receiver supports the
......@@ -38,6 +41,10 @@ class FrameSender {
return target_playout_delay_;
}
// Called by the encoder with the next EncodedFrame to send.
void SendEncodedFrame(int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> encoded_frame);
protected:
// Schedule and execute periodic sending of RTCP report.
void ScheduleNextRtcpReport();
......@@ -78,6 +85,15 @@ class FrameSender {
void ResendCheck();
void ResendForKickstart();
// Protected for testability.
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
// Returns true if there are too many frames in flight, or if the media
// duration of the frames in flight would be too high by sending the next
// frame. The latter metric is determined from the given |capture_time|
// for the next frame to be encoded and sent.
bool ShouldDropNextFrame(base::TimeTicks capture_time) const;
// Record or retrieve a recent history of each frame's timestamps.
// Warning: If a frame ID too far in the past is requested, the getters will
// silently succeed but return incorrect values. Be sure to respect
......@@ -88,6 +104,9 @@ class FrameSender {
base::TimeTicks GetRecordedReferenceTime(uint32 frame_id) const;
RtpTimestamp GetRecordedRtpTimestamp(uint32 frame_id) const;
// Called when we get an ACK for a frame.
virtual void OnAck(uint32 frame_id) = 0;
const base::TimeDelta rtcp_interval_;
// The total amount of time between a frame's capture/recording on the sender
......@@ -108,6 +127,9 @@ class FrameSender {
// new frames shall halt.
int max_unacked_frames_;
// The number of frames currently being processed by the encoder.
int frames_in_encoder_;
// Counts how many RTCP reports are being "aggressively" sent (i.e., one per
// frame) at the start of the session. Once a threshold is reached, RTCP
// reports are instead sent at the configured interval + random drift.
......@@ -137,10 +159,16 @@ class FrameSender {
// STATUS_VIDEO_INITIALIZED.
CastInitializationStatus cast_initialization_status_;
private:
// RTP timestamp increment representing one second.
const int rtp_timebase_;
// This object controls how we change the bitrate to make sure the
// buffer doesn't overflow.
scoped_ptr<CongestionControl> congestion_control_;
private:
const bool is_audio_;
// Ring buffers to keep track of recent frame timestamps (both in terms of
// local reference time and RTP media time). These should only be accessed
// through the Record/GetXXX() methods.
......
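
The refactored FrameSender leaves subclasses with a small surface: pass a CongestionControl to the base constructor and implement OnAck(). A minimal hypothetical subclass to show the shape of that contract; ExampleSender and SenderConfig do not exist in the tree, and the constructor argument order follows the declaration above.

// Hypothetical subclass, illustration only.
#include "base/basictypes.h"
#include "base/time/time.h"
#include "media/cast/cast_environment.h"
#include "media/cast/sender/congestion_control.h"
#include "media/cast/sender/frame_sender.h"

namespace media {
namespace cast {

struct SenderConfig {  // Made-up stand-in for the audio/video config structs.
  int rtcp_interval;   // In milliseconds.
  int rtp_timebase;
  uint32 ssrc;
  double max_frame_rate;
  base::TimeDelta target_playout_delay;
  uint32 max_bitrate;
  uint32 min_bitrate;
  size_t max_unacked_frames;
};

class ExampleSender : public FrameSender {
 public:
  ExampleSender(scoped_refptr<CastEnvironment> cast_environment,
                CastTransportSender* const transport_sender,
                const SenderConfig& config)
      : FrameSender(cast_environment,
                    false,  // is_audio
                    transport_sender,
                    base::TimeDelta::FromMilliseconds(config.rtcp_interval),
                    config.rtp_timebase,
                    config.ssrc,
                    config.max_frame_rate,
                    config.target_playout_delay,
                    NewAdaptiveCongestionControl(cast_environment->Clock(),
                                                 config.max_bitrate,
                                                 config.min_bitrate,
                                                 config.max_unacked_frames)) {}

 protected:
  virtual void OnAck(uint32 frame_id) OVERRIDE {
    // React to the ACKed frame, e.g. release per-frame encoder state.
  }
};

}  // namespace cast
}  // namespace media
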
This diff is collapsed.
......@@ -57,36 +57,17 @@ class VideoSender : public FrameSender,
const base::TimeTicks& capture_time);
protected:
// Protected for testability.
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
virtual void OnAck(uint32 frame_id) OVERRIDE;
private:
// Returns true if there are too many frames in flight, or if the media
// duration of the frames in flight would be too high by sending the next
// frame. The latter metric is determined from the given |capture_time|
// for the next frame to be encoded and sent.
bool ShouldDropNextFrame(base::TimeTicks capture_time) const;
// Called by the |video_encoder_| with the next EncodedFrame to send.
void SendEncodedVideoFrame(int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> encoded_frame);
// If this value is non zero then a fixed value is used for bitrate.
// If external video encoder is used then bitrate will be fixed to
// (min_bitrate + max_bitrate) / 2.
const size_t fixed_bitrate_;
// Encodes media::VideoFrame images into EncodedFrames. Per configuration,
// this will point to either the internal software-based encoder or a proxy to
// a hardware-based encoder.
scoped_ptr<VideoEncoder> video_encoder_;
// The number of frames currently being processed in |video_encoder_|.
int frames_in_encoder_;
// When we get close to the max number of un-acked frames, we lower the
// bitrate drastically to ensure that we catch up. Without this we
// risk getting stuck in a catch-up state forever.
CongestionControl congestion_control_;
// Remember what we set the bitrate to before, no need to set it again if
// we get the same value.
uint32 last_bitrate_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<VideoSender> weak_factory_;
......