Commit 26bcc610 authored by chcunningham, committed by Commit Bot

(RELAND) media: Disable opus decode phase inversion for mono output.

Encoders may use "phase inversion" to improve stereo compression. But
this creates artifacts if the decoded stereo is later down-mixed to
mono. This CL sets a flag on the decoder to disable the phase inversion
decode step whenever the HW is detected as mono.

Bug:806219
TBR:dalecurtis@chromium.org

Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: Icff0387c7fc0fc0e0e31657176721cee5abf6fd9
Reviewed-on: https://chromium-review.googlesource.com/1038617
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Chrome Cunningham <chcunningham@chromium.org>
Commit-Queue: Chrome Cunningham <chcunningham@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555257}
parent c0f34fbc
......@@ -558,7 +558,7 @@ deps = {
},
'src/third_party/ffmpeg':
Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + '156e91a4f377b985b6455155a8d4ba0f7608a96a',
Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + '90210b5e10d3917567a3025e4853704bfefd8384',
'src/third_party/flac':
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '7d0f5b3a173ffe98db08057d1f52b7787569e0a6',
......
......@@ -10,15 +10,7 @@
namespace media {
AudioDecoderConfig::AudioDecoderConfig()
: codec_(kUnknownAudioCodec),
sample_format_(kUnknownSampleFormat),
bytes_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
samples_per_second_(0),
bytes_per_frame_(0),
codec_delay_(0),
should_discard_decoder_delay_(true) {}
AudioDecoderConfig::AudioDecoderConfig() {}
AudioDecoderConfig::AudioDecoderConfig(
AudioCodec codec,
......
......@@ -102,17 +102,32 @@ class MEDIA_EXPORT AudioDecoderConfig {
should_discard_decoder_delay_ = false;
}
// Optionally set by renderer to provide hardware layout when playback
// starts. Intentionally not part of IsValid(). Layout is not updated for
// device changes - use with care!
void set_target_output_channel_layout(ChannelLayout output_layout) {
target_output_channel_layout_ = output_layout;
}
ChannelLayout target_output_channel_layout() const {
return target_output_channel_layout_;
}
private:
AudioCodec codec_;
SampleFormat sample_format_;
int bytes_per_channel_;
ChannelLayout channel_layout_;
int channels_;
int samples_per_second_;
int bytes_per_frame_;
AudioCodec codec_ = kUnknownAudioCodec;
SampleFormat sample_format_ = kUnknownSampleFormat;
int bytes_per_channel_ = 0;
int samples_per_second_ = 0;
int bytes_per_frame_ = 0;
std::vector<uint8_t> extra_data_;
EncryptionScheme encryption_scheme_;
// Layout and count of the *stream* being decoded.
ChannelLayout channel_layout_ = CHANNEL_LAYOUT_UNSUPPORTED;
int channels_ = 0;
// Layout of the output hardware. Optionally set. See setter comments.
ChannelLayout target_output_channel_layout_ = CHANNEL_LAYOUT_NONE;
// |seek_preroll_| is the duration of the data that the decoder must decode
// before the decoded data is valid.
base::TimeDelta seek_preroll_;
......@@ -120,11 +135,11 @@ class MEDIA_EXPORT AudioDecoderConfig {
// |codec_delay_| is the number of frames the decoder should discard before
// returning decoded data. This value can include both decoder delay as well
// as padding added during encoding.
int codec_delay_;
int codec_delay_ = 0;
// Indicates if a decoder should implicitly discard decoder delay without it
// being explicitly marked in discard padding.
bool should_discard_decoder_delay_;
bool should_discard_decoder_delay_ = true;
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
......
......@@ -11,6 +11,7 @@
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "build/build_config.h"
#include "media/base/channel_layout.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
......@@ -59,7 +60,7 @@ class AudioDecoderSelectorTest : public ::testing::Test {
};
AudioDecoderSelectorTest()
: traits_(&media_log_),
: traits_(&media_log_, CHANNEL_LAYOUT_STEREO),
demuxer_stream_(
new StrictMock<MockDemuxerStream>(DemuxerStream::AUDIO)) {
// |cdm_context_| and |decryptor_| are conditionally created in
......
......@@ -90,7 +90,7 @@ void DecoderSelector<StreamType>::SelectDecoder(
waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
decoders_ = create_decoders_cb_.Run();
config_ = StreamTraits::GetDecoderConfig(input_stream_);
config_ = traits_->GetDecoderConfig(input_stream_);
InitializeDecoder();
}
......@@ -197,7 +197,7 @@ void DecoderSelector<StreamType>::DecryptingDemuxerStreamInitDone(
// try to see whether any decoder can decrypt-and-decode the encrypted stream
// directly. So in both cases, we'll initialize the decoders.
input_stream_ = decrypted_stream_.get();
config_ = StreamTraits::GetDecoderConfig(input_stream_);
config_ = traits_->GetDecoderConfig(input_stream_);
DCHECK(!config_.is_encrypted());
// If we're here we tried all the decoders w/ is_encrypted=true, try again
......
......@@ -42,10 +42,11 @@ const char* GetTraceString<DemuxerStream::AUDIO>() {
template <DemuxerStream::Type StreamType>
DecoderStream<StreamType>::DecoderStream(
std::unique_ptr<DecoderStreamTraits<StreamType>> traits,
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
CreateDecodersCB create_decoders_cb,
MediaLog* media_log)
: traits_(media_log),
: traits_(std::move(traits)),
task_runner_(task_runner),
create_decoders_cb_(std::move(create_decoders_cb)),
media_log_(media_log),
......@@ -111,7 +112,7 @@ void DecoderStream<StreamType>::Initialize(
statistics_cb_ = statistics_cb;
waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
traits_.OnStreamReset(stream_);
traits_->OnStreamReset(stream_);
state_ = STATE_INITIALIZING;
SelectDecoder();
......@@ -171,7 +172,7 @@ void DecoderStream<StreamType>::Reset(const base::Closure& closure) {
}
ClearOutputs();
traits_.OnStreamReset(stream_);
traits_->OnStreamReset(stream_);
// It's possible to have received a DECODE_ERROR and entered STATE_ERROR right
// before a Reset() is executed. If we are still waiting for a demuxer read,
......@@ -275,7 +276,7 @@ void DecoderStream<StreamType>::SelectDecoder() {
task_runner_, create_decoders_cb_, media_log_);
decoder_selector_->SelectDecoder(
&traits_, stream_, cdm_context, blacklisted_decoder,
traits_.get(), stream_, cdm_context, blacklisted_decoder,
base::BindRepeating(&DecoderStream<StreamType>::OnDecoderSelected,
weak_factory_.GetWeakPtr()),
base::BindRepeating(&DecoderStream<StreamType>::OnDecodeOutputReady,
......@@ -335,7 +336,7 @@ void DecoderStream<StreamType>::OnDecoderSelected(
}
// Send logs and statistics updates including the decoder name.
traits_.ReportStatistics(statistics_cb_, 0);
traits_->ReportStatistics(statistics_cb_, 0);
media_log_->SetBooleanProperty(GetStreamTypeString() + "_dds",
!!decrypting_demuxer_stream_);
media_log_->SetStringProperty(GetStreamTypeString() + "_decoder",
......@@ -344,7 +345,7 @@ void DecoderStream<StreamType>::OnDecoderSelected(
MEDIA_LOG(INFO, media_log_)
<< "Selected " << decoder_->GetDisplayName() << " for "
<< GetStreamTypeString() << " decoding, config: "
<< StreamTraits::GetDecoderConfig(stream_).AsHumanReadableString();
<< traits_->GetDecoderConfig(stream_).AsHumanReadableString();
if (state_ == STATE_REINITIALIZING_DECODER) {
CompleteDecoderReinitialization(true);
......@@ -399,7 +400,7 @@ void DecoderStream<StreamType>::DecodeInternal(
DCHECK(!reset_cb_);
DCHECK(buffer);
traits_.OnDecode(*buffer);
traits_->OnDecode(*buffer);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
......@@ -492,7 +493,7 @@ void DecoderStream<StreamType>::OnDecodeDone(int buffer_size,
case DecodeStatus::OK:
// Any successful decode counts!
if (buffer_size > 0)
traits_.ReportStatistics(statistics_cb_, buffer_size);
traits_->ReportStatistics(statistics_cb_, buffer_size);
if (state_ == STATE_NORMAL) {
if (end_of_stream) {
......@@ -540,7 +541,7 @@ void DecoderStream<StreamType>::OnDecodeOutputReady(
// If the frame should be dropped, exit early and decode another frame.
decoder_produced_a_frame_ = true;
if (traits_.OnDecodeDone(output) == PostDecodeAction::DROP)
if (traits_->OnDecodeDone(output) == PostDecodeAction::DROP)
return;
if (prepare_cb_ && output->timestamp() + AverageDuration() >=
......@@ -682,8 +683,8 @@ void DecoderStream<StreamType>::OnBufferReady(
// lost frames if we were to fallback then).
pending_buffers_.clear();
const DecoderConfig& config = StreamTraits::GetDecoderConfig(stream_);
traits_.OnConfigChanged(config);
const DecoderConfig& config = traits_->GetDecoderConfig(stream_);
traits_->OnConfigChanged(config);
MEDIA_LOG(INFO, media_log_)
<< GetStreamTypeString()
......@@ -737,8 +738,8 @@ void DecoderStream<StreamType>::ReinitializeDecoder() {
state_ = STATE_REINITIALIZING_DECODER;
// Decoders should not need a new CDM during reinitialization.
traits_.InitializeDecoder(
decoder_.get(), StreamTraits::GetDecoderConfig(stream_),
traits_->InitializeDecoder(
decoder_.get(), traits_->GetDecoderConfig(stream_),
stream_->liveness() == DemuxerStream::LIVENESS_LIVE, cdm_context_,
base::BindRepeating(&DecoderStream<StreamType>::OnDecoderReinitialized,
weak_factory_.GetWeakPtr()),
......
......@@ -62,7 +62,8 @@ class MEDIA_EXPORT DecoderStream {
using ReadCB =
base::RepeatingCallback<void(Status, const scoped_refptr<Output>&)>;
DecoderStream(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
DecoderStream(std::unique_ptr<DecoderStreamTraits<StreamType>> traits,
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
CreateDecodersCB create_decoders_cb,
MediaLog* media_log);
virtual ~DecoderStream();
......@@ -213,7 +214,7 @@ class MEDIA_EXPORT DecoderStream {
void MaybePrepareAnotherOutput();
void OnPreparedOutputReady(const scoped_refptr<Output>& frame);
DecoderStreamTraits<StreamType> traits_;
std::unique_ptr<DecoderStreamTraits<StreamType>> traits_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
CreateDecodersCB create_decoders_cb_;
......
......@@ -35,17 +35,20 @@ scoped_refptr<DecoderStreamTraits<DemuxerStream::AUDIO>::OutputType>
return OutputType::CreateEOSBuffer();
}
// static
DecoderStreamTraits<DemuxerStream::AUDIO>::DecoderStreamTraits(
MediaLog* media_log,
ChannelLayout initial_hw_layout)
: media_log_(media_log), initial_hw_layout_(initial_hw_layout) {}
DecoderStreamTraits<DemuxerStream::AUDIO>::DecoderConfigType
DecoderStreamTraits<DemuxerStream::AUDIO>::GetDecoderConfig(
DemuxerStream* stream) {
return stream->audio_decoder_config();
auto config = stream->audio_decoder_config();
// Demuxer is not aware of hw layout, so we set it here.
config.set_target_output_channel_layout(initial_hw_layout_);
return config;
}
DecoderStreamTraits<DemuxerStream::AUDIO>::DecoderStreamTraits(
MediaLog* media_log)
: media_log_(media_log) {}
void DecoderStreamTraits<DemuxerStream::AUDIO>::ReportStatistics(
const StatisticsCB& statistics_cb,
int bytes_decoded) {
......@@ -114,18 +117,17 @@ DecoderStreamTraits<DemuxerStream::VIDEO>::CreateEOSOutput() {
return OutputType::CreateEOSFrame();
}
// static
DecoderStreamTraits<DemuxerStream::VIDEO>::DecoderStreamTraits(
MediaLog* media_log)
// Randomly selected number of samples to keep.
: keyframe_distance_average_(16) {}
DecoderStreamTraits<DemuxerStream::VIDEO>::DecoderConfigType
DecoderStreamTraits<DemuxerStream::VIDEO>::GetDecoderConfig(
DemuxerStream* stream) {
return stream->video_decoder_config();
}
DecoderStreamTraits<DemuxerStream::VIDEO>::DecoderStreamTraits(
MediaLog* media_log)
// Randomly selected number of samples to keep.
: keyframe_distance_average_(16) {}
void DecoderStreamTraits<DemuxerStream::VIDEO>::ReportStatistics(
const StatisticsCB& statistics_cb,
int bytes_decoded) {
......
......@@ -9,6 +9,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
#include "media/base/cdm_context.h"
#include "media/base/channel_layout.h"
#include "media/base/demuxer_stream.h"
#include "media/base/moving_average.h"
#include "media/base/pipeline_status.h"
......@@ -43,9 +44,8 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
static std::string ToString();
static bool NeedsBitstreamConversion(DecoderType* decoder);
static scoped_refptr<OutputType> CreateEOSOutput();
static DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
explicit DecoderStreamTraits(MediaLog* media_log);
DecoderStreamTraits(MediaLog* media_log, ChannelLayout initial_hw_layout);
void ReportStatistics(const StatisticsCB& statistics_cb, int bytes_decoded);
void InitializeDecoder(
......@@ -56,6 +56,7 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
const InitCB& init_cb,
const OutputCB& output_cb,
const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb);
DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
void OnDecode(const DecoderBuffer& buffer);
PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
void OnStreamReset(DemuxerStream* stream);
......@@ -67,6 +68,9 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
// drift.
std::unique_ptr<AudioTimestampValidator> audio_ts_validator_;
MediaLog* media_log_;
// HW layout at the time pipeline was started. Will not reflect possible
// device changes.
ChannelLayout initial_hw_layout_;
PipelineStatistics stats_;
};
......@@ -83,10 +87,10 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::VIDEO> {
static std::string ToString();
static bool NeedsBitstreamConversion(DecoderType* decoder);
static scoped_refptr<OutputType> CreateEOSOutput();
static DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
explicit DecoderStreamTraits(MediaLog* media_log);
DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
void ReportStatistics(const StatisticsCB& statistics_cb, int bytes_decoded);
void InitializeDecoder(
DecoderType* decoder,
......@@ -97,7 +101,6 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::VIDEO> {
const OutputCB& output_cb,
const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb);
void OnDecode(const DecoderBuffer& buffer);
PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
void OnStreamReset(DemuxerStream* stream);
void OnConfigChanged(const DecoderConfigType& config) {}
......
......@@ -302,17 +302,29 @@ bool FFmpegAudioDecoder::ConfigureDecoder(const AudioDecoderConfig& config) {
if (!config.should_discard_decoder_delay())
codec_context_->flags2 |= AV_CODEC_FLAG2_SKIP_MANUAL;
if (config.codec() == kCodecOpus)
AVDictionary* codec_options = NULL;
if (config.codec() == kCodecOpus) {
codec_context_->request_sample_fmt = AV_SAMPLE_FMT_FLT;
// Disable phase inversion to avoid artifacts in mono downmix. See
// http://crbug.com/806219
if (config.target_output_channel_layout() == CHANNEL_LAYOUT_MONO) {
int result = av_dict_set(&codec_options, "apply_phase_inv", "0", 0);
DCHECK_GE(result, 0);
}
}
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
if (!codec ||
avcodec_open2(codec_context_.get(), codec, &codec_options) < 0) {
DLOG(ERROR) << "Could not initialize audio decoder: "
<< codec_context_->codec_id;
ReleaseFFmpegResources();
state_ = kUninitialized;
return false;
}
// Verify avcodec_open2() used all given options.
DCHECK_EQ(0, av_dict_count(codec_options));
// Success!
av_sample_format_ = codec_context_->sample_fmt;
......
......@@ -83,6 +83,7 @@ class VideoFrameStreamTest
num_decoded_bytes_unreported_(0),
has_no_key_(false) {
video_frame_stream_.reset(new VideoFrameStream(
std::make_unique<VideoFrameStream::StreamTraits>(&media_log_),
message_loop_.task_runner(),
base::Bind(&VideoFrameStreamTest::CreateVideoDecodersForTest,
base::Unretained(this)),
......
......@@ -375,7 +375,14 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
current_decoder_config_ = stream->audio_decoder_config();
DCHECK(current_decoder_config_.IsValidConfig());
auto output_device_info = sink_->GetOutputDeviceInfo();
const AudioParameters& hw_params = output_device_info.output_params();
ChannelLayout hw_channel_layout =
hw_params.IsValid() ? hw_params.channel_layout() : CHANNEL_LAYOUT_NONE;
audio_buffer_stream_ = std::make_unique<AudioBufferStream>(
std::make_unique<AudioBufferStream::StreamTraits>(media_log_,
hw_channel_layout),
task_runner_, create_audio_decoders_cb_, media_log_);
audio_buffer_stream_->set_config_change_observer(base::Bind(
......@@ -385,8 +392,6 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
// failed.
init_cb_ = BindToCurrentLoop(init_cb);
auto output_device_info = sink_->GetOutputDeviceInfo();
const AudioParameters& hw_params = output_device_info.output_params();
AudioCodec codec = stream->audio_decoder_config().codec();
if (auto* mc = GetMediaClient())
is_passthrough_ = mc->IsSupportedBitstreamAudioCodec(codec);
......
......@@ -223,6 +223,7 @@ void VideoRendererImpl::Initialize(
DCHECK(!time_progressing_);
video_frame_stream_.reset(new VideoFrameStream(
std::make_unique<VideoFrameStream::StreamTraits>(media_log_),
task_runner_, create_video_decoders_cb_, media_log_));
video_frame_stream_->set_config_change_observer(base::Bind(
&VideoRendererImpl::OnConfigChange, weak_factory_.GetWeakPtr()));
......
......@@ -992,6 +992,26 @@ TEST_P(MSEPipelineIntegrationTest, BasicPlaybackOpusWebmTrimmingHashed) {
EXPECT_HASH_EQ(kOpusEndTrimmingHash_3, GetAudioHash());
}
TEST_F(PipelineIntegrationTest, BasicPlaybackOpusWebmHashed_MonoOutput) {
ASSERT_EQ(PIPELINE_OK,
Start("bunny-opus-intensity-stereo.webm", kHashed | kMonoOutput));
// File should have stereo output, which we know to be encoded using
// "phase intensity". Downmixing such files to MONO produces artifacts unless
// the decoder performs the downmix, which disables "phase inversion".
// See http://crbug.com/806219
AudioDecoderConfig config =
demuxer_->GetFirstStream(DemuxerStream::AUDIO)->audio_decoder_config();
ASSERT_EQ(config.channel_layout(), CHANNEL_LAYOUT_STEREO);
Play();
ASSERT_TRUE(WaitUntilOnEnded());
// Hash has very slight differences when phase inversion is enabled.
EXPECT_HASH_EQ("-2.36,-1.64,0.84,1.55,1.51,-0.90,", GetAudioHash());
}
TEST_F(PipelineIntegrationTest, BasicPlaybackOpusPrerollExceedsCodecDelay) {
ASSERT_EQ(PIPELINE_OK, Start("bear-opus.webm", kHashed));
......
......@@ -147,6 +147,13 @@ PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
base::RunLoop().RunUntilIdle();
}
void PipelineIntegrationTestBase::ParseTestTypeFlags(uint8_t flags) {
hashing_enabled_ = flags & kHashed;
clockless_playback_ = !(flags & kNoClockless);
webaudio_attached_ = flags & kWebAudio;
mono_output_ = flags & kMonoOutput;
}
// TODO(xhwang): Method definitions in this file needs to be reordered.
void PipelineIntegrationTestBase::OnSeeked(base::TimeDelta seek_time,
......@@ -230,9 +237,7 @@ PipelineStatus PipelineIntegrationTestBase::StartInternal(
uint8_t test_type,
CreateVideoDecodersCB prepend_video_decoders_cb,
CreateAudioDecodersCB prepend_audio_decoders_cb) {
hashing_enabled_ = test_type & kHashed;
clockless_playback_ = !(test_type & kNoClockless);
webaudio_attached_ = test_type & kWebAudio;
ParseTestTypeFlags(test_type);
EXPECT_CALL(*this, OnMetadata(_))
.Times(AtMost(1))
......@@ -464,18 +469,23 @@ std::unique_ptr<Renderer> PipelineIntegrationTestBase::CreateRenderer(
false, &media_log_, nullptr));
if (!clockless_playback_) {
DCHECK(!mono_output_) << " NullAudioSink doesn't specify output parameters";
audio_sink_ =
new NullAudioSink(scoped_task_environment_.GetMainThreadTaskRunner());
} else {
clockless_audio_sink_ = new ClocklessAudioSink(OutputDeviceInfo(
"", OUTPUT_DEVICE_STATUS_OK,
// Don't allow the audio renderer to resample buffers if hashing is
// enabled:
hashing_enabled_
? AudioParameters()
: AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_STEREO, 44100, 16, 512)));
if (webaudio_attached_) {
ChannelLayout output_layout =
mono_output_ ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
clockless_audio_sink_ = new ClocklessAudioSink(
OutputDeviceInfo("", OUTPUT_DEVICE_STATUS_OK,
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
output_layout, 44100, 16, 512)));
// Say "not optimized for hardware parameters" to disallow renderer
// resampling. Hashed tests need this to avoid platform-dependent floating
// point precision differences.
if (webaudio_attached_ || hashing_enabled_) {
clockless_audio_sink_->SetIsOptimizedForHardwareParametersForTesting(
false);
}
......@@ -579,8 +589,7 @@ PipelineStatus PipelineIntegrationTestBase::StartPipelineWithMediaSource(
MockMediaSource* source,
uint8_t test_type,
FakeEncryptedMedia* encrypted_media) {
hashing_enabled_ = test_type & kHashed;
clockless_playback_ = !(test_type & kNoClockless);
ParseTestTypeFlags(test_type);
if (!(test_type & kExpectDemuxerFailure))
EXPECT_CALL(*source, InitSegmentReceivedMock(_)).Times(AtLeast(1));
......
......@@ -88,8 +88,12 @@ class PipelineIntegrationTestBase : public Pipeline::Client {
kExpectDemuxerFailure = 4,
kUnreliableDuration = 8,
kWebAudio = 16,
kMonoOutput = 32,
};
// Setup method to initialize various state according to flags.
void ParseTestTypeFlags(uint8_t flags);
// Starts the pipeline with a file specified by |filename|, optionally with a
// CdmContext or a |test_type|, returning the final status code after it has
// started. |filename| points at a test file located under media/test/data/.
......@@ -163,6 +167,7 @@ class PipelineIntegrationTestBase : public Pipeline::Client {
bool hashing_enabled_;
bool clockless_playback_;
bool webaudio_attached_;
bool mono_output_;
std::unique_ptr<Demuxer> demuxer_;
std::unique_ptr<DataSource> data_source_;
std::unique_ptr<PipelineImpl> pipeline_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment