Commit a08b3446 authored by Peter Kasting, committed by Commit Bot

Use TimeDelta::operator/() more, media/ edition.

Bug: 1104532
Change-Id: I39b9cd3b82b80bcdca324f4ee798c3c280fa6aa2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2354901
Commit-Queue: Peter Kasting <pkasting@chromium.org>
Commit-Queue: Ted Meyer <tmathmeyer@chromium.org>
Auto-Submit: Peter Kasting <pkasting@chromium.org>
Reviewed-by: Ted Meyer <tmathmeyer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#797920}
parent 4e15f003
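
For reference, the conversion in this CL relies on two base::TimeDelta division overloads: dividing a TimeDelta by another TimeDelta yields a unitless double, and dividing a TimeDelta by a number yields a scaled TimeDelta, so the old InSecondsF()/InMillisecondsF()/InMicrosecondsF() round-trips can be dropped without changing the math. A minimal sketch (not part of the diff; function names are illustrative):

#include "base/time/time.h"

// TimeDelta / TimeDelta -> double: replaces
//   frame_duration.InSecondsF() / render_interval.InSecondsF()
double Cadence(base::TimeDelta frame_duration, base::TimeDelta render_interval) {
  return frame_duration / render_interval;
}

// TimeDelta / double -> TimeDelta: replaces
//   base::TimeDelta::FromSecondsD(timestamp.InSecondsF() / playback_rate)
base::TimeDelta ScaleTimestamp(base::TimeDelta timestamp, double playback_rate) {
  return timestamp / playback_rate;
}
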
@@ -32,10 +32,6 @@ constexpr base::TimeDelta kTimeUntilCrashReset =
 constexpr base::TimeDelta kTimeUntilScheduleReset =
     base::TimeDelta::FromMinutes(1);
-// Decay rate of server crashes, corresponding to a tolerable 'normal' crash
-// rate. This means that we will decrement our crash rate by ~1 crash/minute.
-const uint32_t kCrashDecayPeriodInMs = 60000;
 // Rate at which client creations will be exponentially throttled based on the
 // number of media server crashes.
 // NOTE: Since our exponential delay formula is 2^(server crashes), 0 server
@@ -181,8 +177,8 @@ void MediaServiceThrottler::UpdateServerCrashes() {
     current_crashes_ = 0.0;
   } else {
     // Decay at the rate of 1 crash/minute otherwise.
-    double decay = (now - last_current_crash_update_time_).InMillisecondsF() /
-                   kCrashDecayPeriodInMs;
+    const double decay = (now - last_current_crash_update_time_) /
+                         base::TimeDelta::FromMinutes(1);
     current_crashes_ = std::max(0.0, current_crashes_ - decay);
   }
...
@@ -48,8 +48,7 @@ class ClockSmoother {
     }
     // 0.01 means 1% faster than regular clock.
     // -0.02 means 2% slower than regular clock.
-    double fraction_off = inaccuracy_sum_.InSecondsF() /
-                          inaccuracy_delta_.InSecondsF();
+    double fraction_off = inaccuracy_sum_ / inaccuracy_delta_;
     double delta_seconds = delta.InSecondsF();
     delta_seconds += delta_seconds * fraction_off;
@@ -66,10 +65,7 @@ class ClockSmoother {
   // 1.01 means 1% faster than regular clock.
   // -0.98 means 2% slower than regular clock.
-  double Rate() const {
-    return 1.0 + inaccuracy_sum_.InSecondsF() /
-                     inaccuracy_delta_.InSecondsF();
-  }
+  double Rate() const { return 1.0 + inaccuracy_sum_ / inaccuracy_delta_; }
  private:
   base::TimeDelta clock_accuracy_;
@@ -214,13 +210,13 @@ void AudioShifter::Pull(AudioBus* output,
   running_ = true;
   double steady_ratio = output_clock_smoother_->Rate() /
                         input_clock_smoother_->Rate();
-  double time_difference = (playout_time - stream_time).InSecondsF();
-  double adjustment_time = adjustment_time_.InSecondsF();
+  const base::TimeDelta time_difference = playout_time - stream_time;
   // This is the ratio we would need to get perfect sync after
-  // |adjustment_time| has passed.
-  double slow_ratio = steady_ratio + time_difference / adjustment_time;
+  // |adjustment_time_| has passed.
+  double slow_ratio = steady_ratio + time_difference / adjustment_time_;
   slow_ratio = base::ClampToRange(slow_ratio, 0.9, 1.1);
-  adjustment_time = output->frames() / static_cast<double>(rate_);
+  const base::TimeDelta adjustment_time = base::TimeDelta::FromSecondsD(
+      output->frames() / static_cast<double>(rate_));
   // This is ratio we we'd need get perfect sync at the end of the
   // current output audiobus.
   double fast_ratio = steady_ratio + time_difference / adjustment_time;
...
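
The ratio arithmetic above is unchanged; only the unit bookkeeping moves into TimeDelta. A sketch with hypothetical values (steady_ratio taken as 1.0, which is not from the diff): at 48 kHz a 480-frame output bus spans 10 ms, so a +1 ms playout/stream gap adds 0.1 to the ratio, the same double the old InSecondsF() quotient produced.

double FastRatioExample() {
  const base::TimeDelta time_difference = base::TimeDelta::FromMilliseconds(1);
  const base::TimeDelta adjustment_time =
      base::TimeDelta::FromSecondsD(480 / 48000.0);      // 10 ms
  return 1.0 + time_difference / adjustment_time;        // 1.1
}
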
@@ -160,7 +160,7 @@ bool BufferedDataSourceHostImpl::CanPlayThrough(
   }
   if (current_position > media_duration)
     return true;
-  double fraction = current_position.InSecondsF() / media_duration.InSecondsF();
+  double fraction = current_position / media_duration;
   int64_t byte_pos = total_bytes_ * fraction;
   if (byte_pos < 0)
     byte_pos = 0;
...
@@ -407,8 +407,7 @@ class AnimatedContentSamplerParameterizedTest
       return;
     }
     const double expected_sampling_ratio =
-        GetParam().content_period.InSecondsF() /
-        ComputeExpectedSamplingPeriod().InSecondsF();
+        GetParam().content_period / ComputeExpectedSamplingPeriod();
     const int total_frames = count_dropped_frames_ + count_sampled_frames_;
     EXPECT_NEAR(total_frames * expected_sampling_ratio, count_sampled_frames_,
                 1.5);
...
@@ -61,8 +61,7 @@ double FractionFromExpectedFrameRate(base::TimeDelta delta, int frame_rate) {
   DCHECK_GT(frame_rate, 0);
   const base::TimeDelta expected_delta =
       base::TimeDelta::FromSeconds(1) / frame_rate;
-  return (delta - expected_delta).InMillisecondsF() /
-         expected_delta.InMillisecondsF();
+  return (delta - expected_delta) / expected_delta;
 }
 // Returns the next-higher TimeTicks value.
...
@@ -43,11 +43,9 @@ void ClockDriftSmoother::Update(base::TimeTicks now,
     // |now| is not monotonically non-decreasing.
     NOTREACHED();
   } else {
-    const double elapsed_us =
-        static_cast<double>((now - last_update_time_).InMicroseconds());
+    const base::TimeDelta elapsed = now - last_update_time_;
     last_update_time_ = now;
-    const double weight =
-        elapsed_us / (elapsed_us + time_constant_.InMicroseconds());
+    const double weight = elapsed / (elapsed + time_constant_);
     estimate_us_ = weight * measured_offset.InMicroseconds() +
                    (1.0 - weight) * estimate_us_;
   }
...
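
The exponential-smoothing weight above is elapsed / (elapsed + time_constant_), now computed directly on TimeDeltas. A sketch with made-up durations: when the elapsed gap equals the time constant, the new measurement gets weight 0.5; shorter gaps weight it less, longer gaps more.

double WeightExample() {
  const base::TimeDelta elapsed = base::TimeDelta::FromMilliseconds(10);
  const base::TimeDelta time_constant = base::TimeDelta::FromMilliseconds(10);
  return elapsed / (elapsed + time_constant);  // 0.5
}
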
@@ -383,16 +383,14 @@ void StatsEventSubscriber::GetStatsInternal(StatsMap* stats_map) const {
   if (capture_latency_datapoints_ > 0) {
     double avg_capture_latency_ms =
-        total_capture_latency_.InMillisecondsF() /
-        capture_latency_datapoints_;
+        total_capture_latency_.InMillisecondsF() / capture_latency_datapoints_;
     stats_map->insert(
         std::make_pair(AVG_CAPTURE_LATENCY_MS, avg_capture_latency_ms));
   }
   if (encode_time_datapoints_ > 0) {
     double avg_encode_time_ms =
-        total_encode_time_.InMillisecondsF() /
-        encode_time_datapoints_;
+        total_encode_time_.InMillisecondsF() / encode_time_datapoints_;
     stats_map->insert(
         std::make_pair(AVG_ENCODE_TIME_MS, avg_encode_time_ms));
   }
@@ -407,16 +405,14 @@ void StatsEventSubscriber::GetStatsInternal(StatsMap* stats_map) const {
   if (network_latency_datapoints_ > 0) {
     double avg_network_latency_ms =
-        total_network_latency_.InMillisecondsF() /
-        network_latency_datapoints_;
+        total_network_latency_.InMillisecondsF() / network_latency_datapoints_;
     stats_map->insert(
         std::make_pair(AVG_NETWORK_LATENCY_MS, avg_network_latency_ms));
   }
   if (packet_latency_datapoints_ > 0) {
     double avg_packet_latency_ms =
-        total_packet_latency_.InMillisecondsF() /
-        packet_latency_datapoints_;
+        total_packet_latency_.InMillisecondsF() / packet_latency_datapoints_;
     stats_map->insert(
         std::make_pair(AVG_PACKET_LATENCY_MS, avg_packet_latency_ms));
   }
...
@@ -156,8 +156,7 @@ class AudioEncoder::ImplBase
     // Compute encoder utilization as the real-world time elapsed divided
     // by the signal duration.
     audio_frame->encoder_utilization =
-        (base::TimeTicks::Now() - start_time).InSecondsF() /
-        frame_duration_.InSecondsF();
+        (base::TimeTicks::Now() - start_time) / frame_duration_;
     TRACE_EVENT_ASYNC_END1("cast.stream", "Audio Encode", audio_frame.get(),
                            "encoder_utilization",
...
@@ -410,8 +410,7 @@ int AdaptiveCongestionControl::GetBitrate(base::TimeTicks playout_time,
       playout_time -
       EstimatedSendingTime(last_enqueued_frame_ + 1, safe_bitrate);
-  double empty_buffer_fraction =
-      time_to_catch_up.InSecondsF() / playout_delay.InSecondsF();
+  double empty_buffer_fraction = time_to_catch_up / playout_delay;
   empty_buffer_fraction = std::min(empty_buffer_fraction, 1.0);
   empty_buffer_fraction = std::max(empty_buffer_fraction, 0.0);
...
@@ -288,7 +288,7 @@ void Vp8Encoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
   // frame duration.
   const base::TimeDelta processing_time = base::TimeTicks::Now() - start_time;
   encoded_frame->encoder_utilization =
-      processing_time.InSecondsF() / predicted_frame_duration.InSecondsF();
+      processing_time / predicted_frame_duration;
   // Compute lossy utilization. The VP8 encoder took an estimated guess at what
   // quantizer value would produce an encoded frame size as close to the target
...
@@ -419,7 +419,7 @@ base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
 }
 base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
-  return base::TimeDelta::FromSecondsD(timestamp.InSecondsF() / playback_rate_);
+  return timestamp / playback_rate_;
 }
 base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
...
@@ -249,8 +249,8 @@ bool AudioFileReader::OnNewFrame(
       frames_read / static_cast<double>(sample_rate_));
   if (pkt_duration < frame_duration && pkt_duration > base::TimeDelta()) {
-    const int new_frames_read = frames_read * (pkt_duration.InSecondsF() /
-                                               frame_duration.InSecondsF());
+    const int new_frames_read =
+        base::ClampFloor(frames_read * (pkt_duration / frame_duration));
     DVLOG(2) << "Shrinking AAC frame from " << frames_read << " to "
              << new_frames_read << " based on packet duration.";
     frames_read = new_frames_read;
...
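
Beyond the TimeDelta division, this hunk switches from an implicit double-to-int conversion to an explicit base::ClampFloor (base/numerics/safe_conversions.h), which floors and saturates; since the duration ratio here is between 0 and 1 and frames_read is non-negative, truncation and floor give the same count. A sketch with hypothetical 48 kHz values:

int ShrinkExample() {
  const int frames_read = 1024;
  // Packet covers only ~60% of the nominal 1024-frame (21333 us) duration.
  const base::TimeDelta pkt_duration = base::TimeDelta::FromMicroseconds(12800);
  const base::TimeDelta frame_duration =
      base::TimeDelta::FromMicroseconds(21333);
  // floor(1024 * 0.6) == 614 frames.
  return base::ClampFloor(frames_read * (pkt_duration / frame_duration));
}
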
@@ -229,8 +229,7 @@ bool VideoCadenceEstimator::UpdateBresenhamCadenceEstimate(
   }
   double current_cadence = bm_.perfect_cadence_.value_or(0.0);
-  double new_cadence =
-      frame_duration.InMicrosecondsF() / render_interval.InMicrosecondsF();
+  double new_cadence = frame_duration / render_interval;
   DCHECK(new_cadence >= 0.0);
   double cadence_relative_diff = std::abs(current_cadence - new_cadence) /
@@ -275,8 +274,7 @@ VideoCadenceEstimator::Cadence VideoCadenceEstimator::CalculateCadence(
     base::TimeDelta max_acceptable_drift,
     base::TimeDelta* time_until_max_drift) const {
   // The perfect cadence is the number of render intervals per frame.
-  const double perfect_cadence =
-      frame_duration.InSecondsF() / render_interval.InSecondsF();
+  const double perfect_cadence = frame_duration / render_interval;
   // This case is very simple, just return a single frame cadence, because it
   // is impossible for us to accumulate drift as large as max_acceptable_drift
@@ -295,8 +293,8 @@ VideoCadenceEstimator::Cadence VideoCadenceEstimator::CalculateCadence(
   // We want to construct a cadence pattern to approximate the perfect cadence
   // while ensuring error doesn't accumulate too quickly.
-  const double drift_ratio = max_acceptable_drift.InSecondsF() /
-                             minimum_time_until_max_drift_.InSecondsF();
+  const double drift_ratio =
+      max_acceptable_drift / minimum_time_until_max_drift_;
   const double minimum_acceptable_cadence =
       perfect_cadence / (1.0 + drift_ratio);
   const double maximum_acceptable_cadence =
...
@@ -518,8 +518,7 @@ void VideoRendererImpl::UpdateLatencyHintBufferingCaps_Locked(
     return;
   int latency_hint_frames =
-      base::ClampRound(latency_hint_->InMicrosecondsF() /
-                       average_frame_duration.InMicrosecondsF());
+      base::ClampRound(*latency_hint_ / average_frame_duration);
   std::string clamp_string;
   if (latency_hint_frames > kAbsoluteMaxFrames) {
...