Consider text tracks in the frame processor for new media segments.

Text tracks were not considered along with audio and video tracks
when determining the media segment start time. As a result, later
Append()s could arrive with timestamps before the segment start
time.

The issue is now fixed, and a DCHECK() has been added to
SourceBufferStream::Append() to prevent this from happening in the future.

BUG=356805
TEST=media_unittests passes after DCHECK() added to Append().
NOTRY=true

Review URL: https://codereview.chromium.org/222783007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@266636 0039d316-1c4b-4281-b951-d872f2087c98
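
For context, the sketch below (simplified, standalone code; the type alias and helper name are illustrative stand-ins, not the Chromium API) shows the idea behind the fix: the media segment start time is taken as the earliest decode timestamp across all filtered buffer queues, including text tracks, so no later Append() can carry a timestamp that precedes it.

// Minimal sketch of the segment-start computation; not the Chromium code.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

using DecodeTimestampUs = int64_t;  // stand-in for base::TimeDelta

// Hypothetical helper: earliest front timestamp over every non-empty
// filtered queue (audio, video, and each text track).
DecodeTimestampUs SegmentStartTimestamp(
    const std::vector<std::vector<DecodeTimestampUs>>& filtered_queues) {
  // Stand-in for kInfiniteDuration(); caller ensures some queue is non-empty.
  DecodeTimestampUs start = std::numeric_limits<DecodeTimestampUs>::max();
  for (const auto& queue : filtered_queues) {
    if (!queue.empty())
      start = std::min(start, queue.front());
  }
  return start;
}

int main() {
  // Audio starts at 40us, video at 33us, a text cue at 10us. Ignoring the
  // text queue (the old behavior) would pick 33us, so the later text
  // Append() at 10us would land before the segment start.
  std::vector<std::vector<DecodeTimestampUs>> queues = {{40}, {33}, {10}};
  std::cout << SegmentStartTimestamp(queues) << "\n";  // prints 10
}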
@@ -56,6 +56,7 @@ bool LegacyFrameProcessor::ProcessFrames(
   StreamParser::BufferQueue filtered_audio;
   StreamParser::BufferQueue filtered_video;
+  StreamParser::TextBufferQueueMap filtered_text;
 
   if (audio_track) {
     AdjustBufferTimestamps(audio_buffers, offset);
@@ -71,25 +72,49 @@ bool LegacyFrameProcessor::ProcessFrames(
                             new_media_segment, &filtered_video);
   }
 
-  if ((!filtered_audio.empty() || !filtered_video.empty()) &&
-      *new_media_segment) {
-    // Find the earliest timestamp in the filtered buffers and use that for the
-    // segment start timestamp.
-    base::TimeDelta segment_timestamp = kNoTimestamp();
-
-    if (!filtered_audio.empty())
-      segment_timestamp = filtered_audio.front()->GetDecodeTimestamp();
-
-    if (!filtered_video.empty() &&
-        (segment_timestamp == kNoTimestamp() ||
-         filtered_video.front()->GetDecodeTimestamp() < segment_timestamp)) {
-      segment_timestamp = filtered_video.front()->GetDecodeTimestamp();
+  // The earliest timestamp in the filtered buffers will be used for the segment
+  // start timestamp.
+  base::TimeDelta segment_timestamp = kInfiniteDuration();
+
+  // Process any buffers for each of the text tracks in the map.
+  for (StreamParser::TextBufferQueueMap::const_iterator itr = text_map.begin();
+       itr != text_map.end();
+       ++itr) {
+    const StreamParser::BufferQueue& text_buffers = itr->second;
+    if (text_buffers.empty())
+      continue;
+
+    if (!FilterTextBuffers(itr->first,
+                           append_window_start,
+                           append_window_end,
+                           offset,
+                           text_buffers,
+                           new_media_segment,
+                           &segment_timestamp,
+                           &filtered_text)) {
+      return false;
+    }
+  }
+
+  if ((!filtered_audio.empty() || !filtered_video.empty() ||
+       !filtered_text.empty()) &&
+      *new_media_segment) {
+    if (!filtered_audio.empty()) {
+      segment_timestamp = std::min(filtered_audio.front()->GetDecodeTimestamp(),
+                                   segment_timestamp);
+    }
+
+    if (!filtered_video.empty()) {
+      segment_timestamp = std::min(filtered_video.front()->GetDecodeTimestamp(),
+                                   segment_timestamp);
     }
 
     *new_media_segment = false;
+    DCHECK(segment_timestamp != kInfiniteDuration());
 
     for (TrackBufferMap::iterator itr = track_buffers_.begin();
-         itr != track_buffers_.end(); ++itr) {
+         itr != track_buffers_.end();
+         ++itr) {
       itr->second->stream()->OnNewMediaSegment(segment_timestamp);
     }
   }
@@ -104,26 +129,17 @@ bool LegacyFrameProcessor::ProcessFrames(
     return false;
   }
 
-  if (text_map.empty())
-    return true;
-
-  // Process any buffers for each of the text tracks in the map.
-  bool all_text_buffers_empty = true;
-  for (StreamParser::TextBufferQueueMap::const_iterator itr = text_map.begin();
-       itr != text_map.end();
-       ++itr) {
-    const StreamParser::BufferQueue text_buffers = itr->second;
-    if (text_buffers.empty())
-      continue;
-
-    all_text_buffers_empty = false;
-    if (!OnTextBuffers(itr->first, append_window_start, append_window_end,
-                       offset, text_buffers, new_media_segment)) {
-      return false;
+  if (!filtered_text.empty()) {
+    for (StreamParser::TextBufferQueueMap::const_iterator itr =
+             filtered_text.begin();
+         itr != filtered_text.end();
+         ++itr) {
+      MseTrackBuffer* track = FindTrack(itr->first);
+      if (!track || !AppendAndUpdateDuration(track->stream(), itr->second))
+        return false;
     }
   }
 
-  DCHECK(!all_text_buffers_empty);
   return true;
 }
@@ -221,13 +237,15 @@ bool LegacyFrameProcessor::AppendAndUpdateDuration(
   return true;
 }
 
-bool LegacyFrameProcessor::OnTextBuffers(
+bool LegacyFrameProcessor::FilterTextBuffers(
     StreamParser::TrackId text_track_id,
     base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
     base::TimeDelta timestamp_offset,
     const StreamParser::BufferQueue& buffers,
-    bool* new_media_segment) {
+    bool* new_media_segment,
+    base::TimeDelta* lowest_segment_timestamp,
+    StreamParser::TextBufferQueueMap* filtered_text) {
   DCHECK(!buffers.empty());
   DCHECK(text_track_id != kAudioTrackId && text_track_id != kVideoTrackId);
   DCHECK(new_media_segment);
@@ -239,14 +257,22 @@ bool LegacyFrameProcessor::OnTextBuffers(
   AdjustBufferTimestamps(buffers, timestamp_offset);
 
   StreamParser::BufferQueue filtered_buffers;
-  track->set_needs_random_access_point(false);
-  FilterWithAppendWindow(append_window_start, append_window_end,
-                         buffers, track, new_media_segment, &filtered_buffers);
+  FilterWithAppendWindow(append_window_start,
+                         append_window_end,
+                         buffers,
+                         track,
+                         new_media_segment,
+                         &filtered_buffers);
 
-  if (filtered_buffers.empty())
-    return true;
+  if (!filtered_buffers.empty()) {
+    *lowest_segment_timestamp =
+        std::min(*lowest_segment_timestamp,
+                 filtered_buffers.front()->GetDecodeTimestamp());
+    DCHECK(filtered_text->find(text_track_id) == filtered_text->end());
+    filtered_text->insert(std::make_pair(text_track_id, filtered_buffers));
+  }
 
-  return AppendAndUpdateDuration(track->stream(), filtered_buffers);
+  return true;
 }
 
 }  // namespace media
@@ -63,18 +63,21 @@ class MEDIA_EXPORT LegacyFrameProcessor : public FrameProcessorBase {
   // Helper function for Legacy ProcessFrames() when new text buffers have been
   // parsed.
   // Applies |timestamp_offset| to all buffers in |buffers|, filters |buffers|
-  // with append window, and then appends the modified and filtered buffers to
-  // the stream associated with the track having |text_track_id|. If any of
-  // |buffers| are filtered out by append window, then |*new_media_segment| is
-  // set true.
+  // with append window, and stores those filtered buffers into |filtered_text|
+  // based on |text_track_id|. If any of |buffers| are filtered out by append
+  // window, then |*new_media_segment| is set true.
+  // Updates |lowest_segment_timestamp| to be the earliest decode timestamp of
+  // all buffers in |filtered_text|.
   // Returns true on a successful call. Returns false if an error occurred while
   // processing the buffers.
-  bool OnTextBuffers(StreamParser::TrackId text_track_id,
-                     base::TimeDelta append_window_start,
-                     base::TimeDelta append_window_end,
-                     base::TimeDelta timestamp_offset,
-                     const StreamParser::BufferQueue& buffers,
-                     bool* new_media_segment);
+  bool FilterTextBuffers(StreamParser::TrackId text_track_id,
+                         base::TimeDelta append_window_start,
+                         base::TimeDelta append_window_end,
+                         base::TimeDelta timestamp_offset,
+                         const StreamParser::BufferQueue& buffers,
+                         bool* new_media_segment,
+                         base::TimeDelta* lowest_segment_timestamp,
+                         StreamParser::TextBufferQueueMap* filtered_text);
 
   IncreaseDurationCB increase_duration_cb_;
@@ -447,6 +447,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
   DCHECK(!buffers.empty());
   DCHECK(media_segment_start_time_ != kNoTimestamp());
+  DCHECK(media_segment_start_time_ <= buffers.front()->GetDecodeTimestamp());
   DCHECK(!end_of_stream_);
 
   // New media segments must begin with a keyframe.
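
As a toy illustration (simplified, standalone code; not the real SourceBufferStream), the invariant enforced by the new DCHECK() in Append() can be sketched like this: every appended buffer queue must start at or after the media segment start time announced via OnNewMediaSegment().

#include <cassert>
#include <cstdint>
#include <deque>

// Toy model of the guarded invariant; real code buffers and merges ranges.
struct ToySourceBufferStream {
  int64_t media_segment_start_time_us = -1;  // set by OnNewMediaSegment()

  void OnNewMediaSegment(int64_t start_us) {
    media_segment_start_time_us = start_us;
  }

  void Append(const std::deque<int64_t>& decode_timestamps_us) {
    assert(!decode_timestamps_us.empty());
    assert(media_segment_start_time_us >= 0);
    // Mirrors the new DCHECK: no buffer may precede the segment start time.
    assert(media_segment_start_time_us <= decode_timestamps_us.front());
  }
};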