Commit 182520b8 authored by miu, committed by Commit bot

[Cast] Track audio queued in encoder; account for it in ShouldDropNextFrame().

This change is a prerequisite for transitioning the "max outstanding
frames" decision logic over to a time-based heuristic.  It adds tracking
of the number of audio samples enqueued in the encoder, from which the
needed backlog stats can be determined.
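
(Illustrative sketch only, not part of this change: once the queued-sample
count is tracked, the backlog duration a time-based heuristic would need can
be derived roughly as below. The function and variable names here are
hypothetical.)

#include "base/time/time.h"

// Converts the audio samples currently sitting in the encoder into a
// playback duration, which a time-based "max outstanding frames" heuristic
// could compare against the target playout delay. Assumes sample_rate > 0.
base::TimeDelta AudioBacklogDuration(int64_t samples_in_encoder,
                                     int sample_rate) {
  return base::TimeDelta::FromMicroseconds(
      samples_in_encoder * base::Time::kMicrosecondsPerSecond / sample_rate);
}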

Also, this introduces an abstract method in FrameSender,
GetNumberOfFramesInEncoder(), since the count is computed differently for
audio versus video.
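
(For quick orientation, the two overrides added in the diff below boil down
to the following; the bodies are condensed from the actual change.)

// Audio: a partial frame may remain in the encoder, so the floor of the
// queued-sample count divided by samples-per-frame is good enough for the
// "design limit" check in FrameSender.
int AudioSender::GetNumberOfFramesInEncoder() const {
  return samples_in_encoder_ / audio_encoder_->GetSamplesPerFrame();
}

// Video: whole frames are counted as they enter and leave the encoder.
int VideoSender::GetNumberOfFramesInEncoder() const {
  return frames_in_encoder_;
}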

BUG=404813

Review URL: https://codereview.chromium.org/545593002

Cr-Commit-Position: refs/heads/master@{#293987}
parent 8d5adf5b
......@@ -56,7 +56,8 @@ class AudioEncoder::ImplBase
cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
buffer_fill_end_(0),
frame_id_(0),
frame_rtp_timestamp_(0) {
frame_rtp_timestamp_(0),
samples_dropped_from_buffer_(0) {
// Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
......@@ -70,6 +71,10 @@ class AudioEncoder::ImplBase
return cast_initialization_status_;
}
int samples_per_frame() const {
return samples_per_frame_;
}
void EncodeAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
......@@ -90,6 +95,7 @@ class AudioEncoder::ImplBase
recorded_time - (frame_capture_time_ + buffer_fill_duration);
if (amount_ahead_by >
base::TimeDelta::FromMilliseconds(kUnderrunThresholdMillis)) {
samples_dropped_from_buffer_ += buffer_fill_end_;
buffer_fill_end_ = 0;
buffer_fill_duration = base::TimeDelta();
const int64 num_frames_missed = amount_ahead_by /
......@@ -129,7 +135,10 @@ class AudioEncoder::ImplBase
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(callback_, base::Passed(&audio_frame)));
base::Bind(callback_,
base::Passed(&audio_frame),
samples_dropped_from_buffer_));
samples_dropped_from_buffer_ = 0;
}
// Reset the internal buffer, frame ID, and timestamps for the next frame.
......@@ -182,6 +191,10 @@ class AudioEncoder::ImplBase
// the RTP timestamps.
base::TimeTicks frame_capture_time_;
// Set to non-zero to indicate the next output frame skipped over audio
// samples in order to recover from an input underrun.
int samples_dropped_from_buffer_;
DISALLOW_COPY_AND_ASSIGN(ImplBase);
};
......@@ -365,11 +378,20 @@ CastInitializationStatus AudioEncoder::InitializationResult() const {
return STATUS_UNSUPPORTED_AUDIO_CODEC;
}
int AudioEncoder::GetSamplesPerFrame() const {
DCHECK(insert_thread_checker_.CalledOnValidThread());
if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
NOTREACHED();
return std::numeric_limits<int>::max();
}
return impl_->samples_per_frame();
}
void AudioEncoder::InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK(insert_thread_checker_.CalledOnValidThread());
DCHECK(audio_bus.get());
if (!impl_.get()) {
if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
NOTREACHED();
return;
}
......
......@@ -20,7 +20,9 @@ namespace cast {
class AudioEncoder {
public:
typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
// Callback to deliver each EncodedFrame, plus the number of audio samples
// skipped since the last frame.
typedef base::Callback<void(scoped_ptr<EncodedFrame>, int)>
FrameEncodedCallback;
AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
......@@ -33,6 +35,8 @@ class AudioEncoder {
CastInitializationStatus InitializationResult() const;
int GetSamplesPerFrame() const;
void InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time);
......
......@@ -39,7 +39,8 @@ class TestEncodedAudioFrameReceiver {
upper_bound_ = upper_bound;
}
void FrameEncoded(scoped_ptr<EncodedFrame> encoded_frame) {
void FrameEncoded(scoped_ptr<EncodedFrame> encoded_frame,
int samples_skipped) {
EXPECT_EQ(encoded_frame->dependency, EncodedFrame::KEY);
EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
encoded_frame->frame_id);
......
......@@ -35,7 +35,7 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
kAudioFrameRate * 2.0, // We lie to increase max outstanding frames.
audio_config.target_playout_delay,
NewFixedCongestionControl(audio_config.bitrate)),
samples_sent_to_encoder_(0),
samples_in_encoder_(0),
weak_factory_(this) {
cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
VLOG(1) << "max_unacked_frames " << max_unacked_frames_;
......@@ -48,7 +48,7 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
audio_config.frequency,
audio_config.bitrate,
audio_config.codec,
base::Bind(&FrameSender::SendEncodedFrame,
base::Bind(&AudioSender::OnEncodedAudioFrame,
weak_factory_.GetWeakPtr(),
audio_config.bitrate)));
cast_initialization_status_ = audio_encoder_->InitializationResult();
......@@ -86,23 +86,39 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
}
DCHECK(audio_encoder_.get()) << "Invalid internal state";
// TODO(miu): An |audio_bus| that represents more duration than a single
// frame's duration can defeat our logic here, causing too much data to become
// enqueued. This will be addressed in a soon-upcoming change.
if (ShouldDropNextFrame(recorded_time)) {
VLOG(1) << "Dropping frame due to too many frames currently in-flight.";
return;
}
int64 old_frames_sent =
samples_sent_to_encoder_ * kAudioFrameRate / rtp_timebase_;
samples_sent_to_encoder_ += audio_bus->frames();
int64 new_frames_sent =
samples_sent_to_encoder_ * kAudioFrameRate / rtp_timebase_;
frames_in_encoder_ += new_frames_sent - old_frames_sent;
samples_in_encoder_ += audio_bus->frames();
audio_encoder_->InsertAudio(audio_bus.Pass(), recorded_time);
}
int AudioSender::GetNumberOfFramesInEncoder() const {
// Note: It's possible for a partial frame to be in the encoder, but returning
// the floor() is good enough for the "design limit" check in FrameSender.
return samples_in_encoder_ / audio_encoder_->GetSamplesPerFrame();
}
void AudioSender::OnAck(uint32 frame_id) {
}
void AudioSender::OnEncodedAudioFrame(
int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame,
int samples_skipped) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
samples_in_encoder_ -= audio_encoder_->GetSamplesPerFrame() + samples_skipped;
DCHECK_GE(samples_in_encoder_, 0);
SendEncodedFrame(encoder_bitrate, encoded_frame.Pass());
}
} // namespace cast
} // namespace media
......@@ -51,17 +51,20 @@ class AudioSender : public FrameSender,
const base::TimeTicks& recorded_time);
protected:
virtual int GetNumberOfFramesInEncoder() const OVERRIDE;
virtual void OnAck(uint32 frame_id) OVERRIDE;
private:
// Called by the |audio_encoder_| with the next EncodedFrame to send.
void SendEncodedAudioFrame(int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> audio_frame);
void OnEncodedAudioFrame(int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame,
int samples_skipped);
// Encodes AudioBuses into EncodedFrames.
scoped_ptr<AudioEncoder> audio_encoder_;
uint64 samples_sent_to_encoder_;
// The number of audio samples enqueued in |audio_encoder_|.
int samples_in_encoder_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<AudioSender> weak_factory_;
......
......@@ -29,7 +29,6 @@ FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
ssrc_(ssrc),
rtcp_interval_(rtcp_interval),
max_frame_rate_(max_frame_rate),
frames_in_encoder_(0),
num_aggressive_rtcp_reports_sent_(0),
last_sent_frame_id_(0),
latest_acked_frame_id_(0),
......@@ -163,15 +162,11 @@ RtpTimestamp FrameSender::GetRecordedRtpTimestamp(uint32 frame_id) const {
return frame_rtp_timestamps_[frame_id % arraysize(frame_rtp_timestamps_)];
}
void FrameSender::SendEncodedFrame(
int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK_GT(frames_in_encoder_, 0) << " is_audio: " << is_audio_;
frames_in_encoder_--;
const uint32 frame_id = encoded_frame->frame_id;
const bool is_first_frame_to_be_sent = last_send_time_.is_null();
......@@ -315,11 +310,11 @@ bool FrameSender::ShouldDropNextFrame(base::TimeTicks capture_time) const {
capture_time - GetRecordedReferenceTime(oldest_unacked_frame_id);
}
}
frames_in_flight += frames_in_encoder_;
frames_in_flight += GetNumberOfFramesInEncoder();
VLOG(2) << frames_in_flight
<< " frames in flight; last sent: " << last_sent_frame_id_
<< "; latest acked: " << latest_acked_frame_id_
<< "; frames in encoder: " << frames_in_encoder_
<< "; frames in encoder: " << GetNumberOfFramesInEncoder()
<< "; duration in flight: "
<< duration_in_flight.InMicroseconds() << " usec ("
<< (target_playout_delay_ > base::TimeDelta() ?
......
......@@ -45,6 +45,13 @@ class FrameSender {
void SendEncodedFrame(int requested_bitrate_before_encode,
scoped_ptr<EncodedFrame> encoded_frame);
protected:
// Returns the number of frames in the encoder's backlog.
virtual int GetNumberOfFramesInEncoder() const = 0;
// Called when we get an ACK for a frame.
virtual void OnAck(uint32 frame_id) = 0;
protected:
// Schedule and execute periodic sending of RTCP report.
void ScheduleNextRtcpReport();
......@@ -92,9 +99,6 @@ class FrameSender {
base::TimeTicks GetRecordedReferenceTime(uint32 frame_id) const;
RtpTimestamp GetRecordedRtpTimestamp(uint32 frame_id) const;
// Called when we get an ACK for a frame.
virtual void OnAck(uint32 frame_id) = 0;
const base::TimeDelta rtcp_interval_;
// The total amount of time between a frame's capture/recording on the sender
......@@ -115,9 +119,6 @@ class FrameSender {
// new frames shall halt.
int max_unacked_frames_;
// The number of frames currently being processed in |video_encoder_|.
int frames_in_encoder_;
// Counts how many RTCP reports are being "aggressively" sent (i.e., one per
// frame) at the start of the session. Once a threshold is reached, RTCP
// reports are instead sent at the configured interval + random drift.
......
......@@ -40,6 +40,7 @@ VideoSender::VideoSender(
video_config.target_playout_delay,
NewFixedCongestionControl(
(video_config.min_bitrate + video_config.max_bitrate) / 2)),
frames_in_encoder_(0),
last_bitrate_(0),
weak_factory_(this) {
cast_initialization_status_ = STATUS_VIDEO_UNINITIALIZED;
......@@ -126,7 +127,7 @@ void VideoSender::InsertRawVideoFrame(
if (video_encoder_->EncodeVideoFrame(
video_frame,
capture_time,
base::Bind(&FrameSender::SendEncodedFrame,
base::Bind(&VideoSender::OnEncodedVideoFrame,
weak_factory_.GetWeakPtr(),
bitrate))) {
frames_in_encoder_++;
......@@ -135,9 +136,24 @@ void VideoSender::InsertRawVideoFrame(
}
}
int VideoSender::GetNumberOfFramesInEncoder() const {
return frames_in_encoder_;
}
void VideoSender::OnAck(uint32 frame_id) {
video_encoder_->LatestFrameIdToReference(frame_id);
}
void VideoSender::OnEncodedVideoFrame(
int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frames_in_encoder_--;
DCHECK_GE(frames_in_encoder_, 0);
SendEncodedFrame(encoder_bitrate, encoded_frame.Pass());
}
} // namespace cast
} // namespace media
......@@ -57,14 +57,22 @@ class VideoSender : public FrameSender,
const base::TimeTicks& capture_time);
protected:
virtual int GetNumberOfFramesInEncoder() const OVERRIDE;
virtual void OnAck(uint32 frame_id) OVERRIDE;
private:
// Called by the |video_encoder_| with the next EncodedFrame to send.
void OnEncodedVideoFrame(int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame);
// Encodes media::VideoFrame images into EncodedFrames. Per configuration,
// this will point to either the internal software-based encoder or a proxy to
// a hardware-based encoder.
scoped_ptr<VideoEncoder> video_encoder_;
// The number of frames queued for encoding, but not yet sent.
int frames_in_encoder_;
// Remember what we set the bitrate to before, no need to set it again if
// we get the same value.
uint32 last_bitrate_;
......