Commit cde6f1bc authored by wolenetz@chromium.org's avatar wolenetz@chromium.org

MSE: Populate WebM missing duration with DefaultDuration, derived, or default

This change modifies the WebM stream parser to estimate missing WebM frame
durations using the following logic:
1) If the frame was a BlockGroup, it should already have a duration. Use
it. (This is previous behavior; no other WebM frames previously had
duration.)
2) If the frame was a SimpleBlock, derive its duration as follows:
2a) If the frame's TrackEntry had a DefaultDuration, use that value
capped at a precision no greater than TimeCodes with TimeCodeScale
applied.
2b) Otherwise, if there is a subsequent frame in the cluster, set the
duration to the difference in timestamps.
2c) Otherwise, use the maximum frame duration for the track encountered
so far, if any.
2d) Otherwise, use a hardcoded value.
Note that for WebM audio, cases 2b-2c would ideally be calculated based on
the track's codebook. This is left for a future CL.

Adds related WebM track parser unit tests. Adjusts
existing ChunkDemuxerTests, PipelineIntegrationTests, and
WebMClusterParserTests based on new duration logic.

R=acolwell@chromium.org
TEST=All media_unittests and http/tests/media layout tests pass locally on Linux
BUG=351166

Review URL: https://codereview.chromium.org/213253006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260247 0039d316-1c4b-4281-b951-d872f2087c98
parent cb71bf9e
......@@ -542,14 +542,14 @@ class ChunkDemuxerTest : public testing::Test {
// bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
// The resulting video stream returns data from each file for the following
// time ranges.
// bear-320x240.webm : [0-501) [801-2737)
// bear-320x240.webm : [0-501) [801-2736)
// bear-640x360.webm : [527-793)
//
// bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
// bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
// The resulting audio stream returns data from each file for the following
// time ranges.
// bear-320x240.webm : [0-524) [779-2737)
// bear-320x240.webm : [0-524) [779-2736)
// bear-640x360.webm : [527-759)
bool InitDemuxerWithConfigChangeData() {
scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
......@@ -565,7 +565,11 @@ class ChunkDemuxerTest : public testing::Test {
// Append the whole bear1 file.
AppendData(bear1->data(), bear1->data_size());
CheckExpectedRanges(kSourceId, "{ [0,2737) }");
// Last audio frame has timestamp 2721 and duration 24 (estimated from max
// seen so far for audio track).
// Last video frame has timestamp 2703 and duration 33 (from TrackEntry
// DefaultDuration for video track).
CheckExpectedRanges(kSourceId, "{ [0,2736) }");
// Append initialization segment for bear2.
// Note: Offsets here and below are derived from
......@@ -577,13 +581,13 @@ class ChunkDemuxerTest : public testing::Test {
// Append a media segment that goes from [0.527000, 1.014000).
AppendData(bear2->data() + 55290, 18785);
CheckExpectedRanges(kSourceId, "{ [0,1028) [1201,2737) }");
CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
// Append initialization segment for bear1 & fill gap with [779-1197)
// segment.
AppendData(bear1->data(), 4370);
AppendData(bear1->data() + 72737, 28183);
CheckExpectedRanges(kSourceId, "{ [0,2737) }");
CheckExpectedRanges(kSourceId, "{ [0,2736) }");
MarkEndOfStream(PIPELINE_OK);
return true;
......
......@@ -59,8 +59,8 @@ const int kAppendWholeFile = -1;
// Constants for the Media Source config change tests.
const int kAppendTimeSec = 1;
const int kAppendTimeMs = kAppendTimeSec * 1000;
const int k320WebMFileDurationMs = 2737;
const int k640WebMFileDurationMs = 2763;
const int k320WebMFileDurationMs = 2736;
const int k640WebMFileDurationMs = 2762;
const int kOpusEndTrimmingWebMFileDurationMs = 2771;
const int kVP9WebMFileDurationMs = 2703;
const int kVP8AWebMFileDurationMs = 2700;
......@@ -655,7 +655,10 @@ TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_Encrypted_WebM) {
EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
EXPECT_EQ(kAppendTimeMs + k640WebMFileDurationMs,
// The "+ 1" is due to estimated audio and video frame durations on the last
// frames appended. The unencrypted file has a TrackEntry DefaultDuration
// field for the video track, but the encrypted file does not.
EXPECT_EQ(kAppendTimeMs + k640WebMFileDurationMs + 1,
pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
Play();
......@@ -715,7 +718,10 @@ TEST_F(PipelineIntegrationTest,
EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
// The second video was not added, so its time has not been added.
EXPECT_EQ(k320WebMFileDurationMs,
// The "+ 1" is due to estimated audio and video frame durations on the last
// frames appended. The unencrypted file has a TrackEntry DefaultDuration
// field for the video track, but the encrypted file does not.
EXPECT_EQ(k320WebMFileDurationMs + 1,
pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
Play();
......@@ -1126,7 +1132,7 @@ TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_AudioOnly) {
TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_VideoOnly) {
ASSERT_TRUE(TestSeekDuringRead("bear-320x240-video-only.webm", kVideoOnlyWebM,
32768,
base::TimeDelta::FromMilliseconds(200),
base::TimeDelta::FromMilliseconds(167),
base::TimeDelta::FromMilliseconds(1668),
0x1C896, 65536));
}
......
......@@ -15,10 +15,21 @@
#include "media/formats/webm/webm_crypto_helpers.h"
#include "media/formats/webm/webm_webvtt_parser.h"
// Arbitrarily-chosen numbers to estimate the duration of a buffer if none is
// set and there is not enough information to get a better estimate.
// TODO(wolenetz/acolwell): Parse audio codebook to determine missing audio
// frame durations. See http://crbug.com/351166.
static int kDefaultAudioBufferDurationInMs = 23; // Common 1k samples @44.1kHz
static int kDefaultVideoBufferDurationInMs = 42; // Low 24fps to reduce stalls
namespace media {
WebMClusterParser::WebMClusterParser(
int64 timecode_scale, int audio_track_num, int video_track_num,
int64 timecode_scale,
int audio_track_num,
base::TimeDelta audio_default_duration,
int video_track_num,
base::TimeDelta video_default_duration,
const WebMTracksParser::TextTracks& text_tracks,
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
......@@ -38,13 +49,14 @@ WebMClusterParser::WebMClusterParser(
cluster_timecode_(-1),
cluster_start_time_(kNoTimestamp()),
cluster_ended_(false),
audio_(audio_track_num, false),
video_(video_track_num, true),
audio_(audio_track_num, false, audio_default_duration),
video_(video_track_num, true, video_default_duration),
log_cb_(log_cb) {
for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
it != text_tracks.end();
++it) {
text_track_map_.insert(std::make_pair(it->first, Track(it->first, false)));
text_track_map_.insert(std::make_pair(
it->first, Track(it->first, false, kNoTimestamp())));
}
}
......@@ -62,8 +74,8 @@ void WebMClusterParser::Reset() {
}
int WebMClusterParser::Parse(const uint8* buf, int size) {
audio_.Reset();
video_.Reset();
audio_.ClearBuffersButKeepLastIfMissingDuration();
video_.ClearBuffersButKeepLastIfMissingDuration();
ResetTextTracks();
int result = parser_.Parse(buf, size);
......@@ -99,6 +111,18 @@ int WebMClusterParser::Parse(const uint8* buf, int size) {
return result;
}
// Returns the queue of audio buffers produced by the last Parse().
// If the cluster has ended, any final audio buffer that was held back because
// it was missing a duration is given a default/estimated duration first, so
// that it is included in the returned queue.
const WebMClusterParser::BufferQueue& WebMClusterParser::GetAudioBuffers() {
  if (cluster_ended_)
    audio_.ApplyDurationDefaultOrEstimateIfNeeded();
  return audio_.buffers();
}
// Returns the queue of video buffers produced by the last Parse().
// If the cluster has ended, any final video buffer that was held back because
// it was missing a duration is given a default/estimated duration first, so
// that it is included in the returned queue.
const WebMClusterParser::BufferQueue& WebMClusterParser::GetVideoBuffers() {
  if (cluster_ended_)
    video_.ApplyDurationDefaultOrEstimateIfNeeded();
  return video_.buffers();
}
const WebMClusterParser::TextBufferQueueMap&
WebMClusterParser::GetTextBuffers() {
// Translate our |text_track_map_| into |text_buffers_map_|, inserting rows in
......@@ -107,6 +131,9 @@ WebMClusterParser::GetTextBuffers() {
for (TextTrackMap::const_iterator itr = text_track_map_.begin();
itr != text_track_map_.end();
++itr) {
// Per OnBlock(), all text buffers should already have valid durations, so
// there is no need to call
// itr->second.ApplyDurationDefaultOrEstimateIfNeeded() here.
const BufferQueue& text_buffers = itr->second.buffers();
if (!text_buffers.empty())
text_buffers_map_.insert(std::make_pair(itr->first, text_buffers));
......@@ -390,9 +417,14 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
return track->AddBuffer(buffer);
}
WebMClusterParser::Track::Track(int track_num, bool is_video)
// Constructs per-track state. |default_duration| is the TrackEntry
// DefaultDuration (already scaled by the caller) to apply to frames that
// lack an explicit duration, or kNoTimestamp() if the track has none, in
// which case durations are derived or estimated later.
WebMClusterParser::Track::Track(int track_num, bool is_video,
                                base::TimeDelta default_duration)
    : track_num_(track_num),
      is_video_(is_video),
      default_duration_(default_duration),
      estimated_next_frame_duration_(kNoTimestamp()) {
  // A DefaultDuration, when present, must be strictly positive.
  DCHECK(default_duration_ == kNoTimestamp() ||
         default_duration_ > base::TimeDelta());
}
WebMClusterParser::Track::~Track() {}
......@@ -405,14 +437,66 @@ bool WebMClusterParser::Track::AddBuffer(
<< " kf " << buffer->IsKeyframe()
<< " size " << buffer->data_size();
buffers_.push_back(buffer);
return true;
if (last_added_buffer_missing_duration_) {
base::TimeDelta derived_duration =
buffer->timestamp() - last_added_buffer_missing_duration_->timestamp();
last_added_buffer_missing_duration_->set_duration(derived_duration);
DVLOG(2) << "AddBuffer() : applied derived duration to held-back buffer : "
<< " ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
<< " kf " << last_added_buffer_missing_duration_->IsKeyframe()
<< " size " << last_added_buffer_missing_duration_->data_size();
scoped_refptr<StreamParserBuffer> updated_buffer =
last_added_buffer_missing_duration_;
last_added_buffer_missing_duration_ = NULL;
if (!QueueBuffer(updated_buffer))
return false;
}
if (buffer->duration() == kNoTimestamp()) {
last_added_buffer_missing_duration_ = buffer;
DVLOG(2) << "AddBuffer() : holding back buffer that is missing duration";
return true;
}
return QueueBuffer(buffer);
}
void WebMClusterParser::Track::Reset() {
// If a buffer is being held back because it was missing a duration, assigns
// it the track's DefaultDuration, the estimated duration, or a hardcoded
// fallback (see GetDurationDefaultOrEstimate()), appends it to |buffers_|,
// and clears the held-back slot. No-op when nothing is held back.
void WebMClusterParser::Track::ApplyDurationDefaultOrEstimateIfNeeded() {
  if (!last_added_buffer_missing_duration_)
    return;

  last_added_buffer_missing_duration_->set_duration(
      GetDurationDefaultOrEstimate());

  DVLOG(2) << "ApplyDurationDefaultOrEstimateIfNeeded() : new dur : "
           << " ts "
           << last_added_buffer_missing_duration_->timestamp().InSecondsF()
           << " dur "
           << last_added_buffer_missing_duration_->duration().InSecondsF()
           << " kf " << last_added_buffer_missing_duration_->IsKeyframe()
           << " size " << last_added_buffer_missing_duration_->data_size();

  // Don't use the applied duration as a future estimation (don't use
  // QueueBuffer() here.)
  buffers_.push_back(last_added_buffer_missing_duration_);
  last_added_buffer_missing_duration_ = NULL;
}
// Clears emitted buffers while preserving |last_added_buffer_missing_duration_|
// so that a buffer held back across a Parse() call can still receive a derived
// duration from the next frame in the cluster.
void WebMClusterParser::Track::ClearBuffersButKeepLastIfMissingDuration() {
  // Note that |estimated_next_frame_duration_| is not reset, so it can be
  // reused on subsequent buffers added to this instance.
  buffers_.clear();
}
// Fully resets buffer state, dropping any held-back buffer that was awaiting
// a duration. Used when parsing restarts and no derived duration can apply.
void WebMClusterParser::Track::Reset() {
  ClearBuffersButKeepLastIfMissingDuration();
  last_added_buffer_missing_duration_ = NULL;
}
bool WebMClusterParser::Track::IsKeyframe(const uint8* data, int size) const {
// For now, assume that all blocks are keyframes for datatypes other than
// video. This is a valid assumption for Vorbis, WebVTT, & Opus.
......@@ -436,6 +520,45 @@ bool WebMClusterParser::Track::IsKeyframe(const uint8* data, int size) const {
return true;
}
// Sanity-checks |buffer|'s duration, folds it into the running maximum
// |estimated_next_frame_duration_|, and appends |buffer| to |buffers_|.
// Returns false (without queueing) if the duration is negative or missing.
bool WebMClusterParser::Track::QueueBuffer(
    const scoped_refptr<StreamParserBuffer>& buffer) {
  // Callers must have already resolved any held-back buffer.
  DCHECK(!last_added_buffer_missing_duration_);
  base::TimeDelta duration = buffer->duration();
  if (duration < base::TimeDelta() || duration == kNoTimestamp()) {
    DVLOG(2) << "QueueBuffer() : Invalid buffer duration: "
             << duration.InSecondsF();
    return false;
  }

  // Track the maximum duration seen so far as the estimate for any future
  // frame that lacks a duration.
  estimated_next_frame_duration_ = std::max(duration,
                                            estimated_next_frame_duration_);
  buffers_.push_back(buffer);
  return true;
}
// Returns the duration to apply to a buffer that is missing one, preferring:
// 1) the track's TrackEntry DefaultDuration, if set;
// 2) the maximum duration seen or derived so far for this track;
// 3) a hardcoded per-type fallback (kDefaultVideoBufferDurationInMs or
//    kDefaultAudioBufferDurationInMs).
// The result is always a positive, valid duration.
base::TimeDelta WebMClusterParser::Track::GetDurationDefaultOrEstimate() {
  base::TimeDelta duration = default_duration_;
  if (duration != kNoTimestamp()) {
    DVLOG(3) << __FUNCTION__ << " : using TrackEntry DefaultDuration";
  } else if (estimated_next_frame_duration_ != kNoTimestamp()) {
    DVLOG(3) << __FUNCTION__ << " : using estimated duration";
    duration = estimated_next_frame_duration_;
  } else {
    DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration";
    if (is_video_) {
      duration = base::TimeDelta::FromMilliseconds(
          kDefaultVideoBufferDurationInMs);
    } else {
      duration = base::TimeDelta::FromMilliseconds(
          kDefaultAudioBufferDurationInMs);
    }
  }

  DCHECK(duration > base::TimeDelta());
  DCHECK(duration != kNoTimestamp());
  return duration;
}
void WebMClusterParser::ResetTextTracks() {
text_buffers_map_.clear();
for (TextTrackMap::iterator it = text_track_map_.begin();
......
......@@ -28,7 +28,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// Helper class that manages per-track state.
class Track {
public:
Track(int track_num, bool is_video);
Track(int track_num, bool is_video, base::TimeDelta default_duration);
~Track();
int track_num() const { return track_num_; }
......@@ -36,9 +36,27 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
return buffers_;
}
// If |last_added_buffer_missing_duration_| is set, updates its duration
// relative to |buffer|'s timestamp, and adds it to |buffers_| and unsets
// |last_added_buffer_missing_duration_|. Then, if |buffer| is missing
// duration, saves |buffer| into |last_added_buffer_missing_duration_|, or
// otherwise adds |buffer| to |buffers_|.
bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
// Clears all buffer state.
// If |last_added_buffer_missing_duration_| is set, updates its duration
// to be the first non-kNoTimestamp() value of |default_duration_|,
// |estimated_next_frame_duration_|, or an arbitrary default, then adds it
// to |buffers_| and unsets |last_added_buffer_missing_duration_|. (This
// method helps stream parser emit all buffers in a media segment before
// signaling end of segment.)
void ApplyDurationDefaultOrEstimateIfNeeded();
// Clears all buffer state, except a possibly held-aside buffer that is
// missing duration.
void ClearBuffersButKeepLastIfMissingDuration();
// Clears all buffer state, including any possibly held-aside buffer that
// was missing duration.
void Reset();
// Helper function used to inspect block data to determine if the
......@@ -48,9 +66,27 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
bool IsKeyframe(const uint8* data, int size) const;
private:
// Helper that sanity-checks |buffer| duration, updates
// |estimated_next_frame_duration_|, and adds |buffer| to |buffers_|.
// Returns false if |buffer| failed sanity check and therefore was not added
// to |buffers_|. Returns true otherwise.
bool QueueBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
// Helper that calculates the buffer duration to use in
// ApplyDurationDefaultOrEstimateIfNeeded().
base::TimeDelta GetDurationDefaultOrEstimate();
int track_num_;
std::deque<scoped_refptr<StreamParserBuffer> > buffers_;
bool is_video_;
scoped_refptr<StreamParserBuffer> last_added_buffer_missing_duration_;
// If kNoTimestamp(), then |estimated_next_frame_duration_| will be used.
base::TimeDelta default_duration_;
// If kNoTimestamp(), then a default value will be used. This estimate is
// the maximum duration seen or derived so far for this track, and is valid
// only if |default_duration_| is kNoTimestamp().
base::TimeDelta estimated_next_frame_duration_;
};
typedef std::map<int, Track> TextTrackMap;
......@@ -61,7 +97,9 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
WebMClusterParser(int64 timecode_scale,
int audio_track_num,
base::TimeDelta audio_default_duration,
int video_track_num,
base::TimeDelta video_default_duration,
const WebMTracksParser::TextTracks& text_tracks,
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
......@@ -80,8 +118,13 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
int Parse(const uint8* buf, int size);
base::TimeDelta cluster_start_time() const { return cluster_start_time_; }
const BufferQueue& audio_buffers() const { return audio_.buffers(); }
const BufferQueue& video_buffers() const { return video_.buffers(); }
// Get the buffers resulting from Parse().
// If the parse reached the end of cluster and the last buffer was held aside
// due to missing duration, the buffer is given an estimated duration and
// included in the result.
const BufferQueue& GetAudioBuffers();
const BufferQueue& GetVideoBuffers();
// Constructs and returns a subset of |text_track_map_| containing only
// tracks with non-empty buffer queues produced by the last Parse().
......
......@@ -36,7 +36,7 @@ struct BlockInfo {
static const BlockInfo kDefaultBlockInfo[] = {
{ kAudioTrackNum, 0, 23, true },
{ kAudioTrackNum, 23, 23, true },
{ kVideoTrackNum, 33, 34, true },
{ kVideoTrackNum, 33, 34, true }, // Assumes not using DefaultDuration
{ kAudioTrackNum, 46, 23, true },
{ kVideoTrackNum, 67, 33, false },
{ kAudioTrackNum, 69, 23, false },
......@@ -121,13 +121,7 @@ static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
scoped_refptr<StreamParserBuffer> buffer = (*buffers)[(*offset)++];
EXPECT_EQ(block_info[i].timestamp, buffer->timestamp().InMilliseconds());
if (!block_info[i].use_simple_block)
EXPECT_NE(kNoTimestamp(), buffer->duration());
if (buffer->duration() != kNoTimestamp())
EXPECT_EQ(block_info[i].duration, buffer->duration().InMilliseconds());
EXPECT_EQ(block_info[i].duration, buffer->duration().InMilliseconds());
EXPECT_EQ(expected_type, buffer->type());
EXPECT_EQ(block_info[i].track_num, buffer->track_id());
}
......@@ -147,8 +141,8 @@ static bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
else
text_buffers = &no_text_buffers;
return VerifyBuffers(parser->audio_buffers(),
parser->video_buffers(),
return VerifyBuffers(parser->GetAudioBuffers(),
parser->GetVideoBuffers(),
*text_buffers,
block_info,
block_count);
......@@ -206,7 +200,9 @@ class WebMClusterParserTest : public testing::Test {
WebMClusterParserTest()
: parser_(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
WebMTracksParser::TextTracks(),
std::set<int64>(),
std::string(),
......@@ -272,8 +268,8 @@ TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
continue;
}
AppendToEnd(parser_->audio_buffers(), &audio_buffers);
AppendToEnd(parser_->video_buffers(), &video_buffers);
AppendToEnd(parser_->GetAudioBuffers(), &audio_buffers);
AppendToEnd(parser_->GetVideoBuffers(), &video_buffers);
parse_size = default_parse_size;
......@@ -338,7 +334,9 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
WebMTracksParser::TextTracks(),
ignored_tracks,
std::string(),
......@@ -348,19 +346,19 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
const BlockInfo kInputBlockInfo[] = {
{ kAudioTrackNum, 0, 23, true },
{ kAudioTrackNum, 23, 23, true },
{ kVideoTrackNum, 33, 33, true },
{ kVideoTrackNum, 33, 34, true },
{ kTextTrackNum, 33, 99, true },
{ kAudioTrackNum, 46, 23, true },
{ kVideoTrackNum, 67, 33, true },
{ kVideoTrackNum, 67, 34, true },
};
int input_block_count = arraysize(kInputBlockInfo);
const BlockInfo kOutputBlockInfo[] = {
{ kAudioTrackNum, 0, 23, true },
{ kAudioTrackNum, 23, 23, true },
{ kVideoTrackNum, 33, 33, true },
{ kVideoTrackNum, 33, 34, true },
{ kAudioTrackNum, 46, 23, true },
{ kVideoTrackNum, 67, 33, true },
{ kVideoTrackNum, 67, 34, true },
};
int output_block_count = arraysize(kOutputBlockInfo);
......@@ -382,7 +380,9 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
text_tracks,
std::set<int64>(),
std::string(),
......@@ -392,11 +392,11 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
const BlockInfo kInputBlockInfo[] = {
{ kAudioTrackNum, 0, 23, true },
{ kAudioTrackNum, 23, 23, true },
{ kVideoTrackNum, 33, 33, true },
{ kVideoTrackNum, 33, 34, true },
{ kTextTrackNum, 33, 42, false },
{ kAudioTrackNum, 46, 23, true },
{ kTextTrackNum, 55, 44, false },
{ kVideoTrackNum, 67, 33, true },
{ kVideoTrackNum, 67, 34, true },
};
int input_block_count = arraysize(kInputBlockInfo);
......@@ -418,7 +418,9 @@ TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
text_tracks,
std::set<int64>(),
std::string(),
......@@ -454,7 +456,9 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
text_tracks,
std::set<int64>(),
std::string(),
......@@ -464,11 +468,11 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
const BlockInfo kInputBlockInfo[] = {
{ kAudioTrackNum, 0, 23, true },
{ kAudioTrackNum, 23, 23, true },
{ kVideoTrackNum, 33, 33, true },
{ kVideoTrackNum, 33, 34, true },
{ kSubtitleTextTrackNum, 33, 42, false },
{ kAudioTrackNum, 46, 23, true },
{ kCaptionTextTrackNum, 55, 44, false },
{ kVideoTrackNum, 67, 33, true },
{ kVideoTrackNum, 67, 34, true },
{ kSubtitleTextTrackNum, 67, 33, false },
};
int input_block_count = arraysize(kInputBlockInfo);
......@@ -498,7 +502,9 @@ TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
WebMTracksParser::TextTracks(),
std::set<int64>(),
std::string(),
......@@ -506,8 +512,8 @@ TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
LogCB()));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_EQ(1UL, parser_->video_buffers().size());
scoped_refptr<StreamParserBuffer> buffer = parser_->video_buffers()[0];
ASSERT_EQ(1UL, parser_->GetVideoBuffers().size());
scoped_refptr<StreamParserBuffer> buffer = parser_->GetVideoBuffers()[0];
VerifyEncryptedBuffer(buffer);
}
......@@ -517,7 +523,9 @@ TEST_F(WebMClusterParserTest, ParseBadEncryptedBlock) {
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
kNoTimestamp(),
kVideoTrackNum,
kNoTimestamp(),
WebMTracksParser::TextTracks(),
std::set<int64>(),
std::string(),
......
......@@ -180,11 +180,11 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
bytes_parsed += result;
double timecode_scale_in_us = info_parser.timecode_scale() / 1000.0;
base::TimeDelta duration = kInfiniteDuration();
if (info_parser.duration() > 0) {
double mult = info_parser.timecode_scale() / 1000.0;
int64 duration_in_us = info_parser.duration() * mult;
int64 duration_in_us = info_parser.duration() * timecode_scale_in_us;
duration = base::TimeDelta::FromMicroseconds(duration_in_us);
}
......@@ -203,10 +203,13 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
return -1;
}
cluster_parser_.reset(new WebMClusterParser(
info_parser.timecode_scale(),
tracks_parser.audio_track_num(),
tracks_parser.GetAudioDefaultDuration(timecode_scale_in_us),
tracks_parser.video_track_num(),
tracks_parser.GetVideoDefaultDuration(timecode_scale_in_us),
tracks_parser.text_tracks(),
tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
......@@ -263,9 +266,10 @@ int WebMStreamParser::ParseCluster(const uint8* data, int size) {
new_segment_cb_.Run();
}
const BufferQueue& audio_buffers = cluster_parser_->audio_buffers();
const BufferQueue& video_buffers = cluster_parser_->video_buffers();
const BufferQueue& audio_buffers = cluster_parser_->GetAudioBuffers();
const BufferQueue& video_buffers = cluster_parser_->GetVideoBuffers();
const TextBufferQueueMap& text_map = cluster_parser_->GetTextBuffers();
bool cluster_ended = cluster_parser_->cluster_ended();
if ((!audio_buffers.empty() || !video_buffers.empty() || !text_map.empty()) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment