Guard against midstream audio configuration changes.

In WebAudio this can lead to out-of-bounds reads.  In HTML5 it will
cause garbled audio or out-of-bounds reads, depending on the sample
format.  The guards put in place simply ensure that neither the channel
count nor the sample format changes between demux and decode.

Video already has similar guards in place via the checks done in
VideoFrame::IsValidConfig during buffer allocation.
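
Conceptually the new guards reduce to the sketch below (a hypothetical
AudioConfigGuard helper written for illustration only, not code from this
patch; it assumes an FFmpeg vintage where AVCodecContext and AVFrame still
expose a plain channels field rather than ch_layout): remember the
parameters negotiated when the codec was opened, then refuse any decoded
frame whose parameters no longer match, since downstream buffers were
sized for the original configuration.

  extern "C" {
  #include <libavcodec/avcodec.h>
  }

  // Hypothetical helper, illustration only.
  struct AudioConfigGuard {
    int channels = 0;
    int sample_rate = 0;
    int av_sample_format = 0;  // AVSampleFormat stored as an int.

    // Call once after avcodec_open2() succeeds.
    void Capture(const AVCodecContext* context) {
      channels = context->channels;
      sample_rate = context->sample_rate;
      av_sample_format = context->sample_fmt;
    }

    // Call for every decoded frame; false means the stream changed
    // configuration midstream and decoding should be aborted.
    bool Matches(const AVFrame* frame) const {
      return frame->channels == channels &&
             frame->sample_rate == sample_rate &&
             frame->format == av_sample_format;
    }
  };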

BUG=166565
TEST=unit tests pass under ASAN.


Review URL: https://chromiumcodereview.appspot.com/12224114

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@182027 0039d316-1c4b-4281-b951-d872f2087c98
@@ -15,21 +15,16 @@ namespace media {
 AudioFileReader::AudioFileReader(FFmpegURLProtocol* protocol)
     : codec_context_(NULL),
       stream_index_(0),
-      protocol_(protocol) {
+      protocol_(protocol),
+      channels_(0),
+      sample_rate_(0),
+      av_sample_format_(0) {
 }
 
 AudioFileReader::~AudioFileReader() {
   Close();
 }
 
-int AudioFileReader::channels() const {
-  return codec_context_->channels;
-}
-
-int AudioFileReader::sample_rate() const {
-  return codec_context_->sample_rate;
-}
-
 base::TimeDelta AudioFileReader::duration() const {
   const AVRational av_time_base = {1, AV_TIME_BASE};
@@ -110,6 +105,11 @@ bool AudioFileReader::Open() {
     return false;
   }
 
+  // Store initial values to guard against midstream configuration changes.
+  channels_ = codec_context_->channels;
+  sample_rate_ = codec_context_->sample_rate;
+  av_sample_format_ = codec_context_->sample_fmt;
+
   return true;
 }
@@ -179,6 +179,22 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
       break;
     }
 
+    if (av_frame->sample_rate != sample_rate_ ||
+        av_frame->channels != channels_ ||
+        av_frame->format != av_sample_format_) {
+      DLOG(ERROR) << "Unsupported midstream configuration change!"
+                  << " Sample Rate: " << av_frame->sample_rate << " vs "
+                  << sample_rate_
+                  << ", Channels: " << av_frame->channels << " vs "
+                  << channels_
+                  << ", Sample Format: " << av_frame->format << " vs "
+                  << av_sample_format_;
+      // This is an unrecoverable error, so bail out.
+      continue_decoding = false;
+      break;
+    }
+
     // Truncate, if necessary, if the destination isn't big enough.
     if (current_frame + frames_read > audio_bus->frames())
       frames_read = audio_bus->frames() - current_frame;
@@ -43,8 +43,8 @@ class MEDIA_EXPORT AudioFileReader {
   int Read(AudioBus* audio_bus);
 
   // These methods can be called once Open() has been called.
-  int channels() const;
-  int sample_rate() const;
+  int channels() const { return channels_; }
+  int sample_rate() const { return sample_rate_; }
 
   // Please note that duration() and number_of_frames() attempt to be accurate,
   // but are only estimates.  For some encoded formats, the actual duration
@@ -58,6 +58,11 @@ class MEDIA_EXPORT AudioFileReader {
   AVCodecContext* codec_context_;
   int stream_index_;
   FFmpegURLProtocol* protocol_;
+  int channels_;
+  int sample_rate_;
+
+  // AVSampleFormat initially requested; not Chrome's SampleFormat.
+  int av_sample_format_;
 
   DISALLOW_COPY_AND_ASSIGN(AudioFileReader);
 };
@@ -73,11 +73,19 @@ class AudioFileReaderTest : public testing::Test {
     ReadAndVerify(hash, trimmed_frames);
   }
 
-  void RunFailingTest(const char* fn) {
+  void RunTestFailingDemux(const char* fn) {
     Initialize(fn);
     EXPECT_FALSE(reader_->Open());
   }
 
+  void RunTestFailingDecode(const char* fn) {
+    Initialize(fn);
+    EXPECT_TRUE(reader_->Open());
+    scoped_ptr<AudioBus> decoded_audio_data = AudioBus::Create(
+        reader_->channels(), reader_->number_of_frames());
+    EXPECT_EQ(reader_->Read(decoded_audio_data.get()), 0);
+  }
+
  protected:
   scoped_refptr<DecoderBuffer> data_;
   scoped_ptr<InMemoryUrlProtocol> protocol_;
@@ -91,7 +99,7 @@ TEST_F(AudioFileReaderTest, WithoutOpen) {
 }
 
 TEST_F(AudioFileReaderTest, InvalidFile) {
-  RunFailingTest("ten_byte_file");
+  RunTestFailingDemux("ten_byte_file");
 }
 
 TEST_F(AudioFileReaderTest, WithVideo) {
@@ -134,10 +142,14 @@ TEST_F(AudioFileReaderTest, AAC) {
   RunTest("sfx.m4a", NULL, 1, 44100,
           base::TimeDelta::FromMicroseconds(312001), 13759, 13312);
 }
+
+TEST_F(AudioFileReaderTest, MidStreamConfigChangesFail) {
+  RunTestFailingDecode("midstream_config_change.mp3");
+}
 #endif
 
 TEST_F(AudioFileReaderTest, VorbisInvalidChannelLayout) {
-  RunFailingTest("9ch.ogg");
+  RunTestFailingDemux("9ch.ogg");
 }
 
 TEST_F(AudioFileReaderTest, WaveValidFourChannelLayout) {
@@ -218,7 +218,7 @@ class ChunkDemuxerStream : public DemuxerStream {
   // Append() belong to a media segment that starts at |start_timestamp|.
   void OnNewMediaSegment(TimeDelta start_timestamp);
 
-  // Called when mid-stream config updates occur.
+  // Called when midstream config updates occur.
   // Returns true if the new config is accepted.
   // Returns false if the new config should trigger an error.
   bool UpdateAudioConfig(const AudioDecoderConfig& config);
@@ -43,7 +43,9 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
       codec_context_(NULL),
       bits_per_channel_(0),
       channel_layout_(CHANNEL_LAYOUT_NONE),
+      channels_(0),
       samples_per_second_(0),
+      av_sample_format_(0),
       bytes_per_frame_(0),
       last_input_timestamp_(kNoTimestamp()),
       output_bytes_to_drop_(0),
@@ -303,6 +305,11 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
   output_timestamp_helper_.reset(new AudioTimestampHelper(
       config.bytes_per_frame(), config.samples_per_second()));
   bytes_per_frame_ = config.bytes_per_frame();
+
+  // Store initial values to guard against midstream configuration changes.
+  channels_ = codec_context_->channels;
+  av_sample_format_ = codec_context_->sample_fmt;
+
   return true;
 }
@@ -387,10 +394,16 @@ void FFmpegAudioDecoder::RunDecodeLoop(
     int decoded_audio_size = 0;
     if (frame_decoded) {
-      int output_sample_rate = av_frame_->sample_rate;
-      if (output_sample_rate != samples_per_second_) {
-        DLOG(ERROR) << "Output sample rate (" << output_sample_rate
-                    << ") doesn't match expected rate " << samples_per_second_;
+      if (av_frame_->sample_rate != samples_per_second_ ||
+          av_frame_->channels != channels_ ||
+          av_frame_->format != av_sample_format_) {
+        DLOG(ERROR) << "Unsupported midstream configuration change!"
+                    << " Sample Rate: " << av_frame_->sample_rate << " vs "
+                    << samples_per_second_
+                    << ", Channels: " << av_frame_->channels << " vs "
+                    << channels_
+                    << ", Sample Format: " << av_frame_->format << " vs "
+                    << av_sample_format_;
         // This is an unrecoverable error, so bail out.
         QueuedAudioBuffer queue_entry = { kDecodeError, NULL };
@@ -65,8 +65,12 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
   // Decoded audio format.
   int bits_per_channel_;
   ChannelLayout channel_layout_;
+  int channels_;
   int samples_per_second_;
 
+  // AVSampleFormat initially requested; not Chrome's SampleFormat.
+  int av_sample_format_;
+
   // Used for computing output timestamps.
   scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
   int bytes_per_frame_;
@@ -660,6 +660,15 @@ TEST_F(PipelineIntegrationTest,
   EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
   source.Abort();
 }
+
+// Verify files which change configuration midstream fail gracefully.
+TEST_F(PipelineIntegrationTest, MidStreamConfigChangesFail) {
+  ASSERT_TRUE(Start(
+      GetTestDataFilePath("midstream_config_change.mp3"), PIPELINE_OK));
+  Play();
+  ASSERT_EQ(WaitUntilEndedOrError(), PIPELINE_ERROR_DECODE);
+}
 #endif
 
 TEST_F(PipelineIntegrationTest, BasicPlayback_16x9AspectRatio) {
@@ -87,6 +87,8 @@ FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Host* host)
       av_frame_(NULL),
       bits_per_channel_(0),
       samples_per_second_(0),
+      channels_(0),
+      av_sample_format_(0),
       bytes_per_frame_(0),
       last_input_timestamp_(media::kNoTimestamp()),
       output_bytes_to_drop_(0) {
@@ -154,6 +156,10 @@ bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) {
   serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_);
   is_initialized_ = true;
 
+  // Store initial values to guard against midstream configuration changes.
+  channels_ = codec_context_->channels;
+  av_sample_format_ = codec_context_->sample_fmt;
+
   return true;
 }
@@ -269,10 +275,16 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
     int decoded_audio_size = 0;
     if (frame_decoded) {
-      int output_sample_rate = av_frame_->sample_rate;
-      if (output_sample_rate != samples_per_second_) {
-        DLOG(ERROR) << "Output sample rate (" << output_sample_rate
-                    << ") doesn't match expected rate " << samples_per_second_;
+      if (av_frame_->sample_rate != samples_per_second_ ||
+          av_frame_->channels != channels_ ||
+          av_frame_->format != av_sample_format_) {
+        DLOG(ERROR) << "Unsupported midstream configuration change!"
+                    << " Sample Rate: " << av_frame_->sample_rate << " vs "
+                    << samples_per_second_
+                    << ", Channels: " << av_frame_->channels << " vs "
+                    << channels_
+                    << ", Sample Format: " << av_frame_->format << " vs "
+                    << av_sample_format_;
         return cdm::kDecodeError;
       }
@@ -69,6 +69,10 @@ class FFmpegCdmAudioDecoder {
   // Audio format.
   int bits_per_channel_;
   int samples_per_second_;
+  int channels_;
+
+  // AVSampleFormat initially requested; not Chrome's SampleFormat.
+  int av_sample_format_;
 
   // Used for computing output timestamps.
   scoped_ptr<media::AudioTimestampHelper> output_timestamp_helper_;