Commit dc22e80e authored by Mina Almasry, committed by Commit Bot

Reland "[Chromecast] Add support for mixer compatible CMA backend"

This is a reland of ca8bb6e7

Original change's description:
> [Chromecast] Add support for mixer compatible CMA backend
>
> enable_video_with_mixed_audio gn flag has been added to turn on the new
> CMA backend.
>
> Created libcast_media_1.0_avsync, a target that implements this new CMA
> backend.
>
> AudioDecoderForMixer now owns an instance of a new interface, AvSync.
> AudioDecoderForMixer will notify AvSync of its current status, and
> AvSync will call into a new interface, VideoDecoderForMixer.
> VideoDecoderForMixer will give AvSync enough control over the video
> clock.
>
> BUG=internal 73746352
>
> Change-Id: Id7f991ec9606b6f04dbb68f819ad5bf1c3da9cce
> Reviewed-on: https://chromium-review.googlesource.com/947258
> Reviewed-by: Sergey Volk <servolk@chromium.org>
> Reviewed-by: Kenneth MacKay <kmackay@chromium.org>
> Commit-Queue: Mina Almasry <almasrymina@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#541293}

Bug: internal 73746352
Change-Id: I7bca4c61ec5ef0338116f57d235eb74fa88133b6
Reviewed-on: https://chromium-review.googlesource.com/952167
Reviewed-by: Sergey Volk <servolk@chromium.org>
Commit-Queue: Mina Almasry <almasrymina@chromium.org>
Cr-Commit-Position: refs/heads/master@{#541527}
parent c8718204
......@@ -75,6 +75,10 @@ declare_args() {
device_logs_provider_package = ""
device_logs_provider_class = ""
}
# Set to true to enable a CMA media backend that allows mixed audio to be
# output with sync'd video.
enable_video_with_mixed_audio = false
}
declare_args() {
......
......@@ -34,6 +34,7 @@ cast_source_set("test_support") {
deps = [
"//base",
"//chromecast/media/cma/backend",
"//chromecast/media/cma/backend:null_video",
"//chromecast/media/cma/base",
"//media:test_support",
"//testing/gmock",
......
......@@ -93,7 +93,7 @@ buildflag_header("audio_buildflags") {
# Implementation of video decoder that discards decoder buffers.
# Used on audio platforms for media streams containing video.
cast_source_set("null") {
cast_source_set("null_video") {
sources = [
"video_decoder_null.cc",
"video_decoder_null.h",
......@@ -129,6 +129,28 @@ cast_source_set("audio_helpers") {
]
}
cast_source_set("av_sync_dummy") {
sources = [
"av_sync_dummy.cc",
]
deps = [
"//base",
"//chromecast/public",
]
}
cast_source_set("audio_codec_support") {
sources = [
"media_codec_support_cast_audio.cc",
]
deps = [
"//base",
"//chromecast/public",
"//chromecast/public/media:media",
]
}
cast_source_set("for_mixer_audio") {
sources = [
"audio_decoder_for_mixer.cc",
......@@ -141,7 +163,6 @@ cast_source_set("for_mixer_audio") {
"direct_mixer_source.h",
"filter_group.cc",
"filter_group.h",
"media_codec_support_cast_audio.cc",
"media_pipeline_backend_for_mixer.cc",
"media_pipeline_backend_for_mixer.h",
"mixer_input.cc",
......@@ -163,7 +184,6 @@ cast_source_set("for_mixer_audio") {
deps = [
":audio_buildflags",
":audio_helpers",
":null",
":public",
"//base",
"//chromecast/base",
......@@ -197,7 +217,9 @@ test("cast_audio_backend_unittests") {
deps = [
":audio_helpers",
":av_sync_dummy",
":for_mixer_audio",
":null_video",
":public",
"//base",
"//base/test:run_all_unittests",
......
......@@ -18,7 +18,10 @@ cast_shared_library("libcast_media_1.0_audio") {
deps = [
"//base",
"//chromecast/base",
"//chromecast/media/cma/backend:audio_codec_support",
"//chromecast/media/cma/backend:av_sync_dummy",
"//chromecast/media/cma/backend:for_mixer_audio",
"//chromecast/media/cma/backend:null_video",
"//chromecast/public",
"//chromecast/public/media",
"//media",
......
......@@ -30,7 +30,7 @@ cast_source_set("cast_media_android") {
"//base",
"//chromecast:chromecast_buildflags",
"//chromecast/base",
"//chromecast/media/cma/backend:null",
"//chromecast/media/cma/backend:null_video",
"//chromecast/media/cma/base",
"//chromecast/media/cma/decoder",
"//chromecast/public/media",
......
......@@ -4,8 +4,6 @@
#include "chromecast/media/cma/backend/audio_decoder_for_mixer.h"
#include <time.h>
#include <algorithm>
#include <limits>
......@@ -16,6 +14,7 @@
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "chromecast/base/task_runner_impl.h"
#include "chromecast/media/cma/backend/av_sync.h"
#include "chromecast/media/cma/backend/media_pipeline_backend_for_mixer.h"
#include "chromecast/media/cma/base/decoder_buffer_adapter.h"
#include "chromecast/media/cma/base/decoder_buffer_base.h"
......@@ -26,14 +25,6 @@
#include "media/base/sample_format.h"
#include "media/filters/audio_renderer_algorithm.h"
#if defined(OS_LINUX)
#include "chromecast/media/cma/backend/audio_buildflags.h"
#endif // defined(OS_LINUX)
#if defined(OS_FUCHSIA)
#include <zircon/syscalls.h>
#endif // defined(OS_FUCHSIA)
#define TRACE_FUNCTION_ENTRY0() TRACE_EVENT0("cma", __FUNCTION__)
#define TRACE_FUNCTION_ENTRY1(arg1) \
......@@ -64,22 +55,6 @@ const int64_t kInvalidTimestamp = std::numeric_limits<int64_t>::min();
const int64_t kNoPendingOutput = -1;
#if defined(OS_LINUX)
int64_t MonotonicClockNow() {
timespec now = {0, 0};
#if BUILDFLAG(ALSA_MONOTONIC_RAW_TSTAMPS)
clock_gettime(CLOCK_MONOTONIC_RAW, &now);
#else
clock_gettime(CLOCK_MONOTONIC, &now);
#endif
return static_cast<int64_t>(now.tv_sec) * 1000000 + now.tv_nsec / 1000;
}
#else
int64_t MonotonicClockNow() {
return zx_clock_get(ZX_CLOCK_MONOTONIC) / 1000;
}
#endif
} // namespace
AudioDecoderForMixer::RateShifterInfo::RateShifterInfo(float playback_rate)
......@@ -104,6 +79,7 @@ AudioDecoderForMixer::AudioDecoderForMixer(
pending_output_frames_(kNoPendingOutput),
volume_multiplier_(1.0f),
pool_(new ::media::AudioBufferMemoryPool()),
av_sync_(AvSync::Create(backend->GetTaskRunner(), backend)),
weak_factory_(this) {
TRACE_FUNCTION_ENTRY0();
DCHECK(backend_);
......@@ -215,7 +191,7 @@ int64_t AudioDecoderForMixer::GetCurrentPts() const {
return kInvalidTimestamp;
DCHECK(!rate_shifter_info_.empty());
int64_t now = MonotonicClockNow();
int64_t now = backend_->MonotonicClockNow();
int64_t estimate =
last_push_pts_ +
std::min(static_cast<int64_t>((now - last_push_timestamp_) *
......@@ -437,7 +413,7 @@ void AudioDecoderForMixer::OnBufferDecoded(
DCHECK(!pushed_eos_);
pushed_eos_ = true;
}
mixer_input_->WritePcm(decoded);
WritePcmWrapper(decoded);
return;
}
......@@ -508,7 +484,7 @@ void AudioDecoderForMixer::PushRateShifted() {
scoped_refptr<DecoderBufferBase> eos_buffer(
new DecoderBufferAdapter(::media::DecoderBuffer::CreateEOSBuffer()));
mixer_input_->WritePcm(eos_buffer);
WritePcmWrapper(eos_buffer);
}
return;
}
......@@ -539,7 +515,7 @@ void AudioDecoderForMixer::PushRateShifted() {
rate_shifter_output_->channel(c), channel_data_size);
}
pending_output_frames_ = out_frames;
mixer_input_->WritePcm(output_buffer);
WritePcmWrapper(output_buffer);
if (rate_shifter_info_.size() > 1 &&
rate_info->output_frames == possible_output_frames) {
......@@ -611,5 +587,13 @@ void AudioDecoderForMixer::OnEos() {
delegate_->OnEndOfStream();
}
void AudioDecoderForMixer::WritePcmWrapper(
const scoped_refptr<DecoderBufferBase>& buffer) {
av_sync_->NotifyAudioBufferPushed(
buffer->end_of_stream() ? INT64_MAX : buffer->timestamp(),
GetRenderingDelay());
mixer_input_->WritePcm(buffer);
}
} // namespace media
} // namespace chromecast
......@@ -28,6 +28,7 @@ class AudioRendererAlgorithm;
namespace chromecast {
namespace media {
class AvSync;
class DecoderBufferBase;
class MediaPipelineBackendForMixer;
......@@ -85,6 +86,7 @@ class AudioDecoderForMixer : public MediaPipelineBackend::AudioDecoder,
bool BypassDecoder() const;
bool ShouldStartClock() const;
void UpdateStatistics(Statistics delta);
void WritePcmWrapper(const scoped_refptr<DecoderBufferBase>& buffer);
MediaPipelineBackendForMixer* const backend_;
const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
......@@ -118,6 +120,8 @@ class AudioDecoderForMixer : public MediaPipelineBackend::AudioDecoder,
scoped_refptr<::media::AudioBufferMemoryPool> pool_;
std::unique_ptr<AvSync> av_sync_;
base::WeakPtrFactory<AudioDecoderForMixer> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderForMixer);
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_AV_SYNC_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_AV_SYNC_H_
#include <stdint.h>

#include <memory>

#include "base/memory/scoped_refptr.h"
#include "chromecast/public/media/media_pipeline_backend.h"
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace chromecast {
namespace media {
class MediaPipelineBackendForMixer;
// Interface through which AudioDecoderForMixer reports audio playback
// progress so that an implementation can keep video playback in sync with
// the audio. The concrete implementation is selected at link time: the
// dummy (no-op) version on audio-only platforms, AvSyncVideo on platforms
// with video.
class AvSync {
 public:
  // Creates the linked-in AvSync implementation. |backend| must outlive the
  // returned object; |task_runner| is the media task runner.
  static std::unique_ptr<AvSync> Create(
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      MediaPipelineBackendForMixer* backend);
  virtual ~AvSync() = default;
  // Called whenever an audio buffer is pushed to the mixer.
  // |buffer_timestamp| is the PTS of the pushed buffer, or INT64_MAX for
  // the end-of-stream buffer; |delay| is the mixer rendering delay reported
  // at push time.
  virtual void NotifyAudioBufferPushed(
      int64_t buffer_timestamp,
      MediaPipelineBackend::AudioDecoder::RenderingDelay delay) = 0;
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_AV_SYNC_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/av_sync.h"
#include "base/single_thread_task_runner.h"
namespace chromecast {
namespace media {
// No-op AvSync implementation, linked in on platforms that do not need
// audio/video synchronization. All notifications are ignored.
class AvSyncDummy : public AvSync {
 public:
  AvSyncDummy();
  // AvSync implementation:
  void NotifyAudioBufferPushed(
      int64_t buffer_timestamp,
      MediaPipelineBackend::AudioDecoder::RenderingDelay delay) override;
};
// Link-time factory selection: this translation unit provides the dummy
// implementation of AvSync::Create(). Both arguments are intentionally
// unused here.
std::unique_ptr<AvSync> AvSync::Create(
    const scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    MediaPipelineBackendForMixer* const backend) {
  return std::make_unique<AvSyncDummy>();
}
// Trivial constructor; "= default" is preferred over an empty body
// (clang-tidy modernize-use-equals-default).
AvSyncDummy::AvSyncDummy() = default;
// Intentionally a no-op: the dummy backend performs no AV synchronization.
void AvSyncDummy::NotifyAudioBufferPushed(
    int64_t buffer_timestamp,
    MediaPipelineBackend::AudioDecoder::RenderingDelay delay) {}
} // namespace media
} // namespace chromecast
......@@ -16,7 +16,10 @@ cast_source_set("media_backend") {
deps = [
"//base",
"//chromecast/base",
"//chromecast/media/cma/backend:audio_codec_support",
"//chromecast/media/cma/backend:av_sync_dummy",
"//chromecast/media/cma/backend:for_mixer_audio",
"//chromecast/media/cma/backend:null_video",
"//chromecast/public",
"//chromecast/public/media",
"//media",
......
......@@ -4,11 +4,21 @@
#include "chromecast/media/cma/backend/media_pipeline_backend_for_mixer.h"
#include <time.h>
#include <limits>
#include "build/build_config.h"
#include "chromecast/base/task_runner_impl.h"
#include "chromecast/media/cma/backend/audio_decoder_for_mixer.h"
#include "chromecast/media/cma/backend/video_decoder_null.h"
#include "chromecast/media/cma/backend/video_decoder_for_mixer.h"
#if defined(OS_LINUX)
#include "chromecast/media/cma/backend/audio_buildflags.h"
#endif // defined(OS_LINUX)
#if defined(OS_FUCHSIA)
#include <zircon/syscalls.h>
#endif // defined(OS_FUCHSIA)
namespace chromecast {
namespace media {
......@@ -33,7 +43,8 @@ MediaPipelineBackendForMixer::CreateVideoDecoder() {
DCHECK_EQ(kStateUninitialized, state_);
if (video_decoder_)
return nullptr;
video_decoder_ = std::make_unique<VideoDecoderNull>();
video_decoder_ = VideoDecoderForMixer::Create(params_);
DCHECK(video_decoder_.get());
return video_decoder_.get();
}
......@@ -47,8 +58,13 @@ bool MediaPipelineBackendForMixer::Initialize() {
bool MediaPipelineBackendForMixer::Start(int64_t start_pts) {
DCHECK_EQ(kStateInitialized, state_);
if (audio_decoder_ && !audio_decoder_->Start(start_pts))
return false;
if (video_decoder_ && !video_decoder_->Start(start_pts, true))
return false;
state_ = kStatePlaying;
return true;
}
......@@ -58,6 +74,8 @@ void MediaPipelineBackendForMixer::Stop() {
<< "Invalid state " << state_;
if (audio_decoder_)
audio_decoder_->Stop();
if (video_decoder_)
video_decoder_->Stop();
state_ = kStateInitialized;
}
......@@ -66,6 +84,8 @@ bool MediaPipelineBackendForMixer::Pause() {
DCHECK_EQ(kStatePlaying, state_);
if (audio_decoder_ && !audio_decoder_->Pause())
return false;
// TODO(almasrymina): Implement pause/resume.
state_ = kStatePaused;
return true;
}
......@@ -74,6 +94,8 @@ bool MediaPipelineBackendForMixer::Resume() {
DCHECK_EQ(kStatePaused, state_);
if (audio_decoder_ && !audio_decoder_->Resume())
return false;
// TODO(almasrymina): Implement pause/resume.
state_ = kStatePlaying;
return true;
}
......@@ -109,5 +131,21 @@ MediaPipelineBackendForMixer::GetTaskRunner() const {
return static_cast<TaskRunnerImpl*>(params_.task_runner)->runner();
}
#if defined(OS_LINUX)
int64_t MediaPipelineBackendForMixer::MonotonicClockNow() const {
timespec now = {0, 0};
#if BUILDFLAG(ALSA_MONOTONIC_RAW_TSTAMPS)
clock_gettime(CLOCK_MONOTONIC_RAW, &now);
#else
clock_gettime(CLOCK_MONOTONIC, &now);
#endif
return static_cast<int64_t>(now.tv_sec) * 1000000 + now.tv_nsec / 1000;
}
#elif defined(OS_FUCHSIA)
int64_t MediaPipelineBackendForMixer::MonotonicClockNow() const {
return zx_clock_get(ZX_CLOCK_MONOTONIC) / 1000;
}
#endif
} // namespace media
} // namespace chromecast
......@@ -10,7 +10,6 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "chromecast/public/media/media_pipeline_backend.h"
#include "chromecast/public/media/media_pipeline_device_params.h"
#include "chromecast/public/volume_control.h"
......@@ -23,7 +22,7 @@ namespace chromecast {
namespace media {
class AudioDecoderForMixer;
class VideoDecoderNull;
class VideoDecoderForMixer;
// CMA Backend implementation for audio devices.
class MediaPipelineBackendForMixer : public MediaPipelineBackend {
......@@ -47,6 +46,11 @@ class MediaPipelineBackendForMixer : public MediaPipelineBackend {
std::string DeviceId() const;
AudioContentType ContentType() const;
const scoped_refptr<base::SingleThreadTaskRunner>& GetTaskRunner() const;
VideoDecoderForMixer* video_decoder() const { return video_decoder_.get(); }
AudioDecoderForMixer* audio_decoder() const { return audio_decoder_.get(); }
// Gets current time on the same clock as the rendering delay timestamp.
int64_t MonotonicClockNow() const;
private:
// State variable for DCHECKing caller correctness.
......@@ -59,7 +63,7 @@ class MediaPipelineBackendForMixer : public MediaPipelineBackend {
State state_;
const MediaPipelineDeviceParams params_;
std::unique_ptr<VideoDecoderNull> video_decoder_;
std::unique_ptr<VideoDecoderForMixer> video_decoder_;
std::unique_ptr<AudioDecoderForMixer> audio_decoder_;
DISALLOW_COPY_AND_ASSIGN(MediaPipelineBackendForMixer);
......
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/chromecast_build.gni")
import("//chromecast/chromecast.gni")
import("//media/media_options.gni")
# Shared library implementing the mixer-compatible CMA backend with AV sync.
# The platform supplies the actual video decoder via the
# "videodecoderformixer" library (it provides VideoDecoderForMixer::Create).
cast_shared_library("libcast_media_1.0_avsync") {
  sources = [
    "av_sync_video.cc",
    "cast_media_shlib.cc",
  ]
  deps = [
    "//chromecast/media/cma/backend:for_mixer_audio",
    "//chromecast/media/cma/backend/alsa:cma_backend_support",
    "//chromecast/public",
    "//chromecast/public/media",
    "//media",
  ]
  libs = [ "videodecoderformixer" ]
}
cast_source_set("unit_tests") {
# TODO(almasrymina): b/73746352
}
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/media/cma/backend/video/av_sync_video.h"
#include <iomanip>
#include "base/bind.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "chromecast/media/cma/backend/media_pipeline_backend_for_mixer.h"
#include "chromecast/media/cma/backend/video_decoder_for_mixer.h"
namespace chromecast {
namespace media {
namespace {

// All tuning constants below are compile-time values; use constexpr
// consistently (kAvSyncUpkeepInterval already was).

// Threshold where the audio and video pts are far enough apart such that we
// want to do a small correction.
constexpr int kSoftCorrectionThresholdUs = 16000;

// Threshold where the audio and video pts are far enough apart such that we
// want to do a hard correction.
constexpr int kHardCorrectionThresholdUs = 200000;

// When doing a soft correction, we will do so by changing the rate of video
// playback. These constants define the multiplier in either direction.
constexpr double kRateReduceMultiplier = 0.9;
constexpr double kRateIncreaseMultiplier = 1.1;

// Length of time after which data is forgotten from our linear regression
// models.
constexpr int kLinearRegressionDataLifetimeUs = 500000;

// Time interval between AV sync upkeeps.
constexpr base::TimeDelta kAvSyncUpkeepInterval =
    base::TimeDelta::FromMilliseconds(10);

}  // namespace
// Link-time factory selection: this translation unit provides the
// video-capable implementation of AvSync::Create().
std::unique_ptr<AvSync> AvSync::Create(
    const scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    MediaPipelineBackendForMixer* const backend) {
  return std::make_unique<AvSyncVideo>(task_runner, backend);
}
// Constructs the AV sync engine. |backend| must outlive this object.
// Regression windows are bounded by kLinearRegressionDataLifetimeUs so stale
// samples age out. Uses std::make_unique instead of raw new, matching the
// rest of this file.
AvSyncVideo::AvSyncVideo(
    const scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    MediaPipelineBackendForMixer* const backend)
    : audio_pts_(std::make_unique<WeightedMovingLinearRegression>(
          kLinearRegressionDataLifetimeUs)),
      video_pts_(std::make_unique<WeightedMovingLinearRegression>(
          kLinearRegressionDataLifetimeUs)),
      error_(std::make_unique<WeightedMovingLinearRegression>(
          kLinearRegressionDataLifetimeUs)),
      task_runner_(task_runner),
      backend_(backend) {
  DCHECK(backend_);
}
// Feeds each pushed audio buffer into the audio PTS regression and, on the
// first buffer that arrives with a valid rendering delay, seeds the video
// clock and starts the periodic AV sync upkeep timer.
void AvSyncVideo::NotifyAudioBufferPushed(
    int64_t buffer_timestamp,
    MediaPipelineBackend::AudioDecoder::RenderingDelay delay) {
  // Ignore buffers with no valid delay estimate, and the end-of-stream
  // marker (AudioDecoderForMixer passes INT64_MAX for EOS).
  if (delay.timestamp_microseconds == INT64_MIN ||
      buffer_timestamp == INT64_MAX) {
    return;
  }

  // Wall-clock time at which this buffer will actually be rendered.
  int64_t absolute_ts = delay.delay_microseconds + delay.timestamp_microseconds;
  audio_pts_->AddSample(delay.timestamp_microseconds,
                        buffer_timestamp - delay.delay_microseconds, 1.0);

  if (!setup_video_clock_ && backend_->video_decoder()) {
    // TODO(almasrymina): If we don't have a valid delay at the start of
    // playback, we should push silence to the mixer to get a valid delay
    // before we start content playback.
    LOG(INFO) << "Got valid delay. buffer_timestamp=" << buffer_timestamp
              << " delay.delay_microseconds=" << delay.delay_microseconds
              << " delay.timestamp_microseconds="
              << delay.timestamp_microseconds;
    // Seed the video clock with the audio PTS that should be playing right
    // now: buffer PTS minus how far in the future this buffer will render.
    // (buffer_timestamp is already int64_t; no cast needed.)
    backend_->video_decoder()->SetCurrentPts(
        buffer_timestamp - (absolute_ts - backend_->MonotonicClockNow()));
    setup_video_clock_ = true;
    timer_.Start(FROM_HERE, kAvSyncUpkeepInterval, this,
                 &AvSyncVideo::UpkeepAvSync);
  }
}
// TODO(almasrymina): this code is the core of the av sync logic, and the
// current state is that it seems to work very well in local testing under very
// extreme conditions. Nevertheless, much of the constants here are arbitrary,
// and should be optimized:
// - It's arbitrary to move the rate of the video clock by 0.1 for corrections.
// This value should probably depend on the current error.
// - Hard correction value of 200ms is arbitrary.
// - Current requirements for number of samples in the linear regression is
// arbitrary.
void AvSyncVideo::UpkeepAvSync() {
DCHECK(setup_video_clock_);
if (!backend_->video_decoder()) {
VLOG(4) << "No video decoder available.";
return;
}
int64_t now = backend_->MonotonicClockNow(); // 'now'...
video_pts_->AddSample(now, backend_->video_decoder()->GetCurrentPts(), 1.0);
if (video_pts_->num_samples() < 5 || audio_pts_->num_samples() < 20) {
VLOG(4) << "Linear regression samples too little."
<< " video_pts_->num_samples()=" << video_pts_->num_samples()
<< " audio_pts_->num_samples()=" << audio_pts_->num_samples();
return;
}
int64_t current_vpts;
int64_t current_apts;
double error;
double vpts_slope;
double apts_slope;
video_pts_->EstimateY(now, &current_vpts, &error);
audio_pts_->EstimateY(now, &current_apts, &error);
video_pts_->EstimateSlope(&vpts_slope, &error);
audio_pts_->EstimateSlope(&apts_slope, &error);
error_->AddSample(now, current_apts - current_vpts, 1.0);
if (error_->num_samples() < 5) {
VLOG(4)
<< "Error linear regression samples too little. error_->num_samples()="
<< error_->num_samples();
return;
}
int64_t difference;
error_->EstimateY(now, &difference, &error);
VLOG(4) << "Pts_monitor."
<< " current_apts=" << current_apts / 1000
<< " current_vpts=" << std::setw(5) << current_vpts / 1000
<< " difference=" << std::setw(5) << difference / 1000
<< " wall_time=" << std::setw(5) << now / 1000
<< " apts_slope=" << std::setw(10) << apts_slope
<< " vpts_slope=" << std::setw(10) << vpts_slope;
// Seems the ideal value here depends on the frame rate.
if (abs(difference) > kSoftCorrectionThresholdUs) {
VLOG(2) << "Correction."
<< " current_apts=" << current_apts / 1000
<< " current_vpts=" << std::setw(5) << current_vpts / 1000
<< " difference=" << std::setw(5) << difference / 1000
<< " wall_time=" << std::setw(5) << now / 1000
<< " apts_slope=" << std::setw(10) << apts_slope
<< " vpts_slope=" << std::setw(10) << vpts_slope;
if (abs(difference) > kHardCorrectionThresholdUs) {
// Do a hard correction.
audio_pts_->EstimateY(backend_->MonotonicClockNow(), &current_apts,
&error);
backend_->video_decoder()->SetCurrentPts(current_apts);
backend_->video_decoder()->SetPlaybackRate(apts_slope);
current_video_playback_rate_ = apts_slope;
} else {
// Do a soft correction.
double factor = current_vpts > current_apts ? kRateReduceMultiplier
: kRateIncreaseMultiplier;
current_video_playback_rate_ *= factor;
backend_->video_decoder()->SetPlaybackRate(current_video_playback_rate_);
}
video_pts_.reset(
new WeightedMovingLinearRegression(kLinearRegressionDataLifetimeUs));
error_.reset(
new WeightedMovingLinearRegression(kLinearRegressionDataLifetimeUs));
} else {
// We're in sync. Reset rate.
// TODO(almasrymina): is this call correct? Probably not for extreme cases
// where the video clock drifts significantly relative to monotonic_raw.
// Instead of setting the playback rate to apts_slope, we should aim to
// find the video playback rate at which vtps_slope == apts_slope. These
// are slightly different values since the video playback rate is probably
// not phase locked at all with monotonic_raw.
backend_->video_decoder()->SetPlaybackRate(apts_slope);
current_video_playback_rate_ = apts_slope;
}
}
// Default member cleanup is sufficient; nothing extra to tear down here.
AvSyncVideo::~AvSyncVideo() = default;
} // namespace media
} // namespace chromecast
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_AV_SYNC_VIDEO_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_AV_SYNC_VIDEO_H_
#include <cstdint>
#include <memory>
#include "base/memory/scoped_refptr.h"
#include "base/timer/timer.h"
#include "chromecast/base/statistics/weighted_moving_linear_regression.h"
#include "chromecast/media/cma/backend/av_sync.h"
namespace base {
class SingleThreadTaskRunner;
}
namespace chromecast {
namespace media {
class MediaPipelineBackendForMixer;
// AvSync implementation that keeps video in sync with audio. It tracks
// audio and video PTS progression with weighted linear regressions and
// periodically corrects the video clock through the backend's
// VideoDecoderForMixer.
class AvSyncVideo : public AvSync {
 public:
  // |backend| must outlive this object; |task_runner| is the media task
  // runner.
  AvSyncVideo(const scoped_refptr<base::SingleThreadTaskRunner> task_runner,
              MediaPipelineBackendForMixer* const backend);
  ~AvSyncVideo() override;
  // AvSync implementation:
  void NotifyAudioBufferPushed(
      int64_t buffer_timestamp,
      MediaPipelineBackend::AudioDecoder::RenderingDelay delay) override;
 private:
  // Timer callback: estimates apts/vpts drift and corrects the video clock.
  void UpkeepAvSync();
  // Fires UpkeepAvSync() periodically once the video clock has been seeded.
  base::RepeatingTimer timer_;
  // True once the video clock has been seeded from the first valid audio
  // rendering delay.
  bool setup_video_clock_ = false;
  // TODO(almasrymina): having a linear regression for the audio pts is
  // dangerous, because glitches in the audio or intentional changes in the
  // playback rate will propagate to the regression at a delay. Consider
  // reducing the lifetime of data or firing an event to the av sync module
  // that will reset the linear regression model.
  std::unique_ptr<WeightedMovingLinearRegression> audio_pts_;
  std::unique_ptr<WeightedMovingLinearRegression> video_pts_;
  // Regression of (apts - vpts) over time; smooths measured drift before
  // corrections are applied.
  std::unique_ptr<WeightedMovingLinearRegression> error_;
  // Playback rate most recently requested from the video decoder.
  double current_video_playback_rate_ = 1.0;
  const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  MediaPipelineBackendForMixer* const backend_;
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_AV_SYNC_VIDEO_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/public/cast_media_shlib.h"
#include "base/at_exit.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "chromecast/base/task_runner_impl.h"
#include "chromecast/public/graphics_types.h"
#include "chromecast/public/media/media_capabilities_shlib.h"
#include "chromecast/media/cma/backend/media_pipeline_backend_for_mixer.h"
namespace chromecast {
namespace media {
// Process-wide state for this shared library.
base::AtExitManager g_at_exit_manager;
std::unique_ptr<base::ThreadTaskRunnerHandle> g_thread_task_runner_handle;
// Creates the mixer CMA backend. Ownership of the returned raw pointer
// passes to the caller, per the CastMediaShlib contract.
MediaPipelineBackend* CastMediaShlib::CreateMediaPipelineBackend(
    const MediaPipelineDeviceParams& params) {
  // Set up the static reference in base::ThreadTaskRunnerHandle::Get
  // for the media thread in this shared library. We can extract the
  // SingleThreadTaskRunner passed in from cast_shell for this.
  if (!base::ThreadTaskRunnerHandle::IsSet()) {
    DCHECK(!g_thread_task_runner_handle);
    const scoped_refptr<base::SingleThreadTaskRunner> task_runner =
        static_cast<TaskRunnerImpl*>(params.task_runner)->runner();
    DCHECK(task_runner->BelongsToCurrentThread());
    g_thread_task_runner_handle.reset(
        new base::ThreadTaskRunnerHandle(task_runner));
  }
  return new MediaPipelineBackendForMixer(params);
}
// Releases the task-runner handle installed by CreateMediaPipelineBackend().
void CastMediaShlib::Finalize() {
  g_thread_task_runner_handle.reset();
}
// Media clock rate control is not supported by this backend;
// SupportsMediaClockRateChange() returns false and the remaining functions
// are stubs.
double CastMediaShlib::GetMediaClockRate() {
  return 0.0;
}
double CastMediaShlib::MediaClockRatePrecision() {
  return 0.0;
}
void CastMediaShlib::MediaClockRateRange(double* minimum_rate,
                                         double* maximum_rate) {
  *minimum_rate = 0.0;
  *maximum_rate = 1.0;
}
bool CastMediaShlib::SetMediaClockRate(double new_rate) {
  return false;
}
bool CastMediaShlib::SupportsMediaClockRateChange() {
  return false;
}
// Reports which audio codecs this backend can decode: PCM (LE/BE), AAC,
// MP3 and Vorbis. Everything else is rejected.
bool MediaCapabilitiesShlib::IsSupportedAudioConfig(const AudioConfig& config) {
  return config.codec == kCodecPCM || config.codec == kCodecPCM_S16BE ||
         config.codec == kCodecAAC || config.codec == kCodecMP3 ||
         config.codec == kCodecVorbis;
}
} // namespace media
} // namespace chromecast
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_DECODER_FOR_MIXER_H_
#define CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_DECODER_FOR_MIXER_H_
#include <memory>
#include "chromecast/public/media/media_pipeline_backend.h"
#include "chromecast/public/media/media_pipeline_device_params.h"
namespace chromecast {
namespace media {
// This class represents a video decoder that exposes additional functionality
// that allows a caller to control the rate and state of the video playback
// with enough granularity to be able to sync it to the audio.
//
// The default implementation of this is in VideoDecoderNull. On no-video
// platforms, that implementation is used.
//
// On video platforms that need to use the mixer, you may override this class
// and link in an implementation of VideoDecoderForMixer::Create.
class VideoDecoderForMixer : public MediaPipelineBackend::VideoDecoder {
public:
static std::unique_ptr<VideoDecoderForMixer> Create(
const MediaPipelineDeviceParams& params);
~VideoDecoderForMixer() override {}
// Initializes the VideoDecoderForMixer. Called after allocation and before
// Start is called. Gives the implementation a chance to initialize any
// resources.
virtual void Initialize() = 0;
// When called, playback is expected to start from |start_pts|.
//
// start_pts: the pts to start playing at.
// need_avsync: deprecated. Don't use or implement.
// TODO(almasrymina): remove deprecated.
virtual bool Start(int64_t start_pts, bool need_avsync) = 0;
// Stop playback.
virtual void Stop() = 0;
// Pause playback.
virtual bool Pause() = 0;
// Resume playback.
virtual bool Resume() = 0;
// Get the current video PTS. This will typically be the pts of the last
// video frame displayed.
virtual int64_t GetCurrentPts() const = 0;
// Set the playback rate. This is used to sync the audio to the video. This
// call will change the rate of play of video in the following manner:
//
// rate = 1.0 -> 1 second of video pts is played for each 1 second of
// wallclock time.
// rate = 1.5 -> 1.5 seconds of video pts is played for each 1 second of
// wallclock time.
// etc.
virtual bool SetPlaybackRate(float rate) = 0;
// Sets the current pts to the provided value. If |pts| is greater than the
// current pts, all video frames in between will be dropped. If |pts| is less
// than the current pts, all video frames in this pts range will be repeated.
// Implementation is encouraged to smooth out this transition, such that
// minimal jitter in the video is shown, but that is not necessary.
virtual bool SetCurrentPts(int64_t pts) = 0;
};
} // namespace media
} // namespace chromecast
#endif // CHROMECAST_MEDIA_CMA_BACKEND_VIDEO_DECODER_FOR_MIXER_H_
......@@ -4,6 +4,8 @@
#include "chromecast/media/cma/backend/video_decoder_null.h"
#include <memory>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
......@@ -13,6 +15,11 @@
namespace chromecast {
namespace media {
std::unique_ptr<VideoDecoderForMixer> VideoDecoderForMixer::Create(
const MediaPipelineDeviceParams& params) {
return std::make_unique<VideoDecoderNull>();
}
VideoDecoderNull::VideoDecoderNull()
: delegate_(nullptr), weak_factory_(this) {}
......@@ -45,5 +52,33 @@ void VideoDecoderNull::OnEndOfStream() {
delegate_->OnEndOfStream();
}
void VideoDecoderNull::Initialize() {}
bool VideoDecoderNull::Start(int64_t start_pts, bool need_avsync) {
return true;
}
void VideoDecoderNull::Stop() {}
bool VideoDecoderNull::Pause() {
return true;
}
bool VideoDecoderNull::Resume() {
return true;
}
int64_t VideoDecoderNull::GetCurrentPts() const {
return 0;
}
bool VideoDecoderNull::SetPlaybackRate(float rate) {
return true;
}
bool VideoDecoderNull::SetCurrentPts(int64_t pts) {
return true;
}
} // namespace media
} // namespace chromecast
......@@ -9,12 +9,12 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "chromecast/public/media/media_pipeline_backend.h"
#include "chromecast/media/cma/backend/video_decoder_for_mixer.h"
namespace chromecast {
namespace media {
class VideoDecoderNull : public MediaPipelineBackend::VideoDecoder {
class VideoDecoderNull : public VideoDecoderForMixer {
public:
VideoDecoderNull();
~VideoDecoderNull() override;
......@@ -26,6 +26,15 @@ class VideoDecoderNull : public MediaPipelineBackend::VideoDecoder {
void GetStatistics(Statistics* statistics) override;
bool SetConfig(const VideoConfig& config) override;
void Initialize() override;
bool Start(int64_t start_pts, bool need_avsync) override;
void Stop() override;
bool Pause() override;
bool Resume() override;
int64_t GetCurrentPts() const override;
bool SetPlaybackRate(float rate) override;
bool SetCurrentPts(int64_t pts) override;
private:
void OnEndOfStream();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment