Commit efae0344 authored by servolk, committed by Commit bot

Implement InitSegmentReceived algorithm in blink

This CL begins moving the MSE 'initialization segment received' algorithm
implementation to the Blink level, so that it can be shared across
different implementations (e.g. between Chrome and Opera). The old init
segment algorithm in MediaSourceState::OnNewConfigs needs to be kept for
now, since it does other important work that currently must happen at the
Chromium media pipeline level (e.g. creating track buffers and demuxer
streams).

BUG=620881

Review-Url: https://codereview.chromium.org/1678523003
Cr-Commit-Position: refs/heads/master@{#403286}
parent 7be6ed95
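Note for readers: the control flow introduced here is that the Chromium media pipeline parses an init segment, WebSourceBufferImpl::InitSegmentReceived packages the parsed track metadata into WebSourceBufferClient::MediaTrackInfo entries, and blink::SourceBuffer::initializationSegmentReceived runs the spec algorithm and reports success or failure. The following is a minimal sketch of that contract with simplified stand-in types (std::string/std::vector instead of WebString/WebVector) and hypothetical names (Client, NoopClient, onInitSegmentParsed); it is illustrative only, not code from this CL.

// Sketch of the revised WebSourceBufferClient contract (simplified types).
#include <iostream>
#include <string>
#include <vector>

struct MediaTrackInfo {
    enum Type { Audio, Video } trackType;
    std::string id;
    std::string byteStreamTrackID;  // new field added by this CL
    std::string kind;
    std::string label;
    std::string language;
};

class Client {
public:
    virtual ~Client() = default;
    // Previously returned the vector of track ids picked by Blink; it now
    // returns only success/failure, and false triggers the append error path.
    virtual bool initializationSegmentReceived(const std::vector<MediaTrackInfo>& tracks) = 0;
};

// Hypothetical caller side: a false return is propagated so the MSE append
// error algorithm (with the decode error parameter set to true) runs at the
// top level.
bool onInitSegmentParsed(Client* client, const std::vector<MediaTrackInfo>& tracks) {
    return client->initializationSegmentReceived(tracks);
}

struct NoopClient : Client {
    bool initializationSegmentReceived(const std::vector<MediaTrackInfo>& tracks) override {
        return !tracks.empty();  // an empty init segment is a decode error
    }
};

int main() {
    NoopClient client;
    std::cout << onInitSegmentParsed(&client, {}) << "\n";  // prints 0 => append error path
    return 0;
}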
@@ -185,6 +185,8 @@ void WebSourceBufferImpl::InitSegmentReceived(
     blink::WebSourceBufferClient::MediaTrackInfo trackInfo;
     trackInfo.trackType = mediaTrackTypeToBlink(track->type());
     trackInfo.id = blink::WebString::fromUTF8(track->id());
+    trackInfo.byteStreamTrackID = blink::WebString::fromUTF8(
+        base::UintToString(track->bytestream_track_id()));
     trackInfo.kind = blink::WebString::fromUTF8(track->kind());
     trackInfo.label = blink::WebString::fromUTF8(track->label());
     trackInfo.language = blink::WebString::fromUTF8(track->language());
...
@@ -22,7 +22,7 @@
     assert_equals(sourceBuffer.videoTracks.length, 1, "videoTracks.length");
     assert_equals(sourceBuffer.videoTracks[0].kind, "main", "videoTrack.kind");
     assert_equals(sourceBuffer.videoTracks[0].label, "", "videoTrack.label");
-    assert_equals(sourceBuffer.videoTracks[0].language, "eng", "videoTrack.language");
+    assert_equals(sourceBuffer.videoTracks[0].language, "", "videoTrack.language");
     assert_equals(sourceBuffer.videoTracks[0].sourceBuffer, sourceBuffer, "videoTrack.sourceBuffer");
     // The first video track is selected by default.
     assert_true(sourceBuffer.videoTracks[0].selected, "sourceBuffer.videoTracks[0].selected");
@@ -30,7 +30,7 @@
     assert_equals(sourceBuffer.audioTracks.length, 1, "audioTracks.length");
     assert_equals(sourceBuffer.audioTracks[0].kind, "main", "audioTrack.kind");
     assert_equals(sourceBuffer.audioTracks[0].label, "", "audioTrack.label");
-    assert_equals(sourceBuffer.audioTracks[0].language, "eng", "audioTrack.language");
+    assert_equals(sourceBuffer.audioTracks[0].language, "", "audioTrack.language");
    assert_equals(sourceBuffer.audioTracks[0].sourceBuffer, sourceBuffer, "audioTrack.sourceBuffer");
     // The first audio track is enabled by default.
     assert_true(sourceBuffer.audioTracks[0].enabled, "sourceBuffer.audioTracks[0].enabled");
...
<!DOCTYPE html>
<html>
<head>
    <script src="/w3c/resources/testharness.js"></script>
    <script src="/w3c/resources/testharnessreport.js"></script>
    <script src="mediasource-util.js"></script>
</head>
<body>
<script>
function verifyTrackProperties(trackType, track, expectedTrackInfo)
{
    assert_equals(track.kind, expectedTrackInfo.kind, trackType + ".kind");
    assert_equals(track.label, expectedTrackInfo.label, trackType + ".label");
    assert_equals(track.language, expectedTrackInfo.language, trackType + ".language");
}

function loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, successCallback)
{
    var initSegment = MediaSourceUtil.extractSegmentData(mediaData, segmentInfo.init);

    test.expectEvent(sourceBuffer.audioTracks, "addtrack", "sourceBuffer.audioTracks addtrack event");
    test.expectEvent(sourceBuffer.videoTracks, "addtrack", "sourceBuffer.videoTracks addtrack event");
    test.expectEvent(mediaElement.audioTracks, "addtrack", "mediaElement.audioTracks addtrack event");
    test.expectEvent(mediaElement.videoTracks, "addtrack", "mediaElement.videoTracks addtrack event");
    test.expectEvent(mediaElement, "loadedmetadata", "loadedmetadata done.");
    test.expectEvent(sourceBuffer, "updateend", "initSegment append ended.");
    sourceBuffer.appendBuffer(initSegment);

    test.waitForExpectedEvents(function()
    {
        assert_equals(sourceBuffer.audioTracks.length, 1, "audioTracks.length");
        verifyTrackProperties("audio", sourceBuffer.audioTracks[0], expectedAudioTrackInfo);
        assert_equals(sourceBuffer.audioTracks[0].sourceBuffer, sourceBuffer, "audioTrack.sourceBuffer");
        // The first audio track is enabled by default.
        assert_true(sourceBuffer.audioTracks[0].enabled, "sourceBuffer.audioTracks[0].enabled");

        assert_equals(sourceBuffer.videoTracks.length, 1, "videoTracks.length");
        verifyTrackProperties("video", sourceBuffer.videoTracks[0], expectedVideoTrackInfo);
        assert_equals(sourceBuffer.videoTracks[0].sourceBuffer, sourceBuffer, "videoTrack.sourceBuffer");
        // The first video track is selected by default.
        assert_true(sourceBuffer.videoTracks[0].selected, "sourceBuffer.videoTracks[0].selected");

        assert_not_equals(sourceBuffer.audioTracks[0].id, sourceBuffer.videoTracks[0].id, "track ids must be unique");

        assert_equals(mediaElement.videoTracks.length, 1, "videoTracks.length");
        assert_equals(mediaElement.videoTracks[0], sourceBuffer.videoTracks[0], "mediaElement.videoTrack == sourceBuffer.videoTrack");
        assert_equals(mediaElement.audioTracks.length, 1, "audioTracks.length");
        assert_equals(mediaElement.audioTracks[0], sourceBuffer.audioTracks[0], "mediaElement.audioTrack == sourceBuffer.audioTrack");
        successCallback();
    });
}

mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
{
    var expectedAudioTrackInfo = { kind: "main", label: "", language: "" };
    var expectedVideoTrackInfo = { kind: "main", label: "", language: "" };
    loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func(function ()
    {
        // Now append the same exact init segment again. This should succeed, but no new tracks should
        // be added to the SourceBuffer or MediaElement.
        test.failOnEvent(sourceBuffer.audioTracks, "addtrack", "Unexpected addtrack event on sourceBuffer.audioTracks for second init segment");
        test.failOnEvent(sourceBuffer.videoTracks, "addtrack", "Unexpected addtrack event on sourceBuffer.videoTracks for second init segment");
        test.failOnEvent(mediaElement.audioTracks, "addtrack", "Unexpected addtrack event on mediaElement.audioTracks for second init segment");
        test.failOnEvent(mediaElement.videoTracks, "addtrack", "Unexpected addtrack event on mediaElement.videoTracks for second init segment");
        test.expectEvent(sourceBuffer, "updateend", "initSegment append ended.");
        var initSegment = MediaSourceUtil.extractSegmentData(mediaData, segmentInfo.init);
        sourceBuffer.appendBuffer(initSegment);

        test.waitForExpectedEvents(function()
        {
            assert_equals(mediaElement.audioTracks.length, 1, "mediaElement.audioTracks.length");
            assert_equals(mediaElement.videoTracks.length, 1, "mediaElement.videoTracks.length");
            assert_equals(sourceBuffer.audioTracks.length, 1, "sourceBuffer.audioTracks.length");
            assert_equals(sourceBuffer.videoTracks.length, 1, "sourceBuffer.videoTracks.length");
            test.done();
        });
    }));
}, "Same init segment appended twice");

mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
{
    var originalTrackDefaults = [
        new TrackDefault("audio", "audio-language", "audio-label", ["main"], "2"),
        new TrackDefault("video", "video-language", "video-label", ["main"], "1")
    ];
    var trackDefaultList = new TrackDefaultList(originalTrackDefaults.slice());
    sourceBuffer.trackDefaults = trackDefaultList;

    var expectedAudioTrackInfo = { kind: "main", label: "audio-label", language: "audio-language" };
    var expectedVideoTrackInfo = { kind: "main", label: "video-label", language: "video-language" };
    loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func_done());
}, "Track defaults processing in the init segment algorithm (explicit bytestream ids)");

mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
{
    var originalTrackDefaults = [
        new TrackDefault("audio", "audio-language", "audio-label", ["main"], ""),
        new TrackDefault("video", "video-language", "video-label", ["main"], "")
    ];
    var trackDefaultList = new TrackDefaultList(originalTrackDefaults.slice());
    sourceBuffer.trackDefaults = trackDefaultList;

    var expectedAudioTrackInfo = { kind: "main", label: "audio-label", language: "audio-language" };
    var expectedVideoTrackInfo = { kind: "main", label: "video-label", language: "video-language" };
    loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func_done());
}, "Track defaults processing in the init segment algorithm (no bytestream ids)");
</script>
</body>
</html>
@@ -577,78 +577,244 @@ T* findExistingTrackById(const TrackListBase<T>& trackList, const String& id)
     return trackList.getTrackById(id);
 }

-WebVector<WebMediaPlayer::TrackId> SourceBuffer::initializationSegmentReceived(const WebVector<MediaTrackInfo>& newTracks)
+const TrackDefault* SourceBuffer::getTrackDefault(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const
+{
+    // This is a helper for the implementation of the default track label and default track language algorithms.
+    // defaultTrackLabel spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-label
+    // defaultTrackLanguage spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-language
+    // 1. If trackDefaults contains a TrackDefault object with a type attribute equal to type and a byteStreamTrackID attribute equal to byteStreamTrackID,
+    // then return the value of the label/language attribute on this matching object and abort these steps.
+    // 2. If trackDefaults contains a TrackDefault object with a type attribute equal to type and a byteStreamTrackID attribute equal to an empty string,
+    // then return the value of the label/language attribute on this matching object and abort these steps.
+    // 3. Return an empty string to the caller.
+    const TrackDefault* trackDefaultWithEmptyBytestreamId = nullptr;
+    for (unsigned i = 0; i < m_trackDefaults->length(); ++i) {
+        const TrackDefault* trackDefault = m_trackDefaults->item(i);
+        if (trackDefault->type() != trackType)
+            continue;
+        if (trackDefault->byteStreamTrackID() == byteStreamTrackID)
+            return trackDefault;
+        if (!trackDefaultWithEmptyBytestreamId && trackDefault->byteStreamTrackID() == "")
+            trackDefaultWithEmptyBytestreamId = trackDefault;
+    }
+    return trackDefaultWithEmptyBytestreamId;
+}
+
+AtomicString SourceBuffer::defaultTrackLabel(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const
+{
+    // Spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-label
+    const TrackDefault* trackDefault = getTrackDefault(trackType, byteStreamTrackID);
+    return trackDefault ? AtomicString(trackDefault->label()) : "";
+}
+
+AtomicString SourceBuffer::defaultTrackLanguage(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const
+{
+    // Spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-language
+    const TrackDefault* trackDefault = getTrackDefault(trackType, byteStreamTrackID);
+    return trackDefault ? AtomicString(trackDefault->language()) : "";
+}
+
+bool SourceBuffer::initializationSegmentReceived(const WebVector<MediaTrackInfo>& newTracks)
 {
     SBLOG << __FUNCTION__ << " this=" << this << " tracks=" << newTracks.size();
     DCHECK(m_source);
     DCHECK(m_source->mediaElement());
     DCHECK(m_updating);

-    // TODO(servolk): Implement proper 'initialization segment received' algorithm according to MSE spec:
-    // https://w3c.github.io/media-source/#sourcebuffer-init-segment-received
-    WebVector<WebMediaPlayer::TrackId> result(newTracks.size());
-    unsigned resultIdx = 0;
-    for (const auto& trackInfo : newTracks) {
-        if (!RuntimeEnabledFeatures::audioVideoTracksEnabled()) {
-            static unsigned nextTrackId = 0;
-            StringBuilder stringBuilder;
-            stringBuilder.appendNumber(++nextTrackId);
-            result[resultIdx++] = stringBuilder.toString();
-            continue;
+    if (!RuntimeEnabledFeatures::audioVideoTracksEnabled()) {
+        if (!m_firstInitializationSegmentReceived) {
+            m_source->setSourceBufferActive(this);
+            m_firstInitializationSegmentReceived = true;
         }
+        return true;
+    }

-        const TrackBase* trackBase = nullptr;
+    // Implementation of the Initialization Segment Received algorithm, see
+    // https://w3c.github.io/media-source/#sourcebuffer-init-segment-received
+
+    // Sort newTracks into audio and video tracks to facilitate implementation
+    // of subsequent steps of this algorithm.
+    Vector<MediaTrackInfo> newAudioTracks;
+    Vector<MediaTrackInfo> newVideoTracks;
+    for (const MediaTrackInfo& trackInfo : newTracks) {
+        const TrackBase* track = nullptr;
         if (trackInfo.trackType == WebMediaPlayer::AudioTrack) {
-            AudioTrack* audioTrack = nullptr;
-            if (!m_firstInitializationSegmentReceived) {
-                audioTrack = AudioTrack::create(trackInfo.id, trackInfo.kind, trackInfo.label, trackInfo.language, false);
-                SourceBufferTrackBaseSupplement::setSourceBuffer(*audioTrack, this);
-                audioTracks().add(audioTrack);
-                m_source->mediaElement()->audioTracks().add(audioTrack);
-            } else {
-                audioTrack = findExistingTrackById(audioTracks(), trackInfo.id);
-                DCHECK(audioTrack);
-            }
-            trackBase = audioTrack;
-            result[resultIdx++] = audioTrack->id();
+            newAudioTracks.append(trackInfo);
+            if (m_firstInitializationSegmentReceived)
+                track = findExistingTrackById(audioTracks(), trackInfo.id);
         } else if (trackInfo.trackType == WebMediaPlayer::VideoTrack) {
-            VideoTrack* videoTrack = nullptr;
-            if (!m_firstInitializationSegmentReceived) {
-                videoTrack = VideoTrack::create(trackInfo.id, trackInfo.kind, trackInfo.label, trackInfo.language, false);
-                SourceBufferTrackBaseSupplement::setSourceBuffer(*videoTrack, this);
-                videoTracks().add(videoTrack);
-                m_source->mediaElement()->videoTracks().add(videoTrack);
-            } else {
-                videoTrack = findExistingTrackById(videoTracks(), trackInfo.id);
-                DCHECK(videoTrack);
-            }
-            trackBase = videoTrack;
-            result[resultIdx++] = videoTrack->id();
+            newVideoTracks.append(trackInfo);
+            if (m_firstInitializationSegmentReceived)
+                track = findExistingTrackById(videoTracks(), trackInfo.id);
         } else {
+            SBLOG << __FUNCTION__ << " this=" << this << " failed: unsupported track type " << trackInfo.trackType;
+            // TODO(servolk): Add handling of text tracks.
             NOTREACHED();
         }
-        (void)trackBase;
+        if (m_firstInitializationSegmentReceived && !track) {
+            SBLOG << __FUNCTION__ << " this=" << this << " failed: tracks mismatch the first init segment.";
+            return false;
+        }
 #if !LOG_DISABLED
-        const char* logActionStr = m_firstInitializationSegmentReceived ? "using existing" : "added";
         const char* logTrackTypeStr = (trackInfo.trackType == WebMediaPlayer::AudioTrack) ? "audio" : "video";
-        SBLOG << __FUNCTION__ << "(" << this << ") " << logActionStr << " "
-            << logTrackTypeStr << " Track " << trackBase << " id=" << String(trackBase->id())
-            << " label=" << trackBase->label() << " lang=" << trackBase->language();
+        SBLOG << __FUNCTION__ << " this=" << this << " : " << logTrackTypeStr << " track "
+            << " id=" << String(trackInfo.id) << " byteStreamTrackID=" << String(trackInfo.byteStreamTrackID)
+            << " kind=" << String(trackInfo.kind) << " label=" << String(trackInfo.label) << " language=" << String(trackInfo.language);
 #endif
     }

+    // 1. Update the duration attribute if it currently equals NaN:
+    // TODO(servolk): Pass the stream duration into initSegmentReceived as well.
+
+    // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm with the decode error parameter set to true and abort these steps.
+    if (newTracks.size() == 0) {
+        SBLOG << __FUNCTION__ << " this=" << this << " failed: no tracks found in the init segment.";
+        // The append error algorithm will be called at the top level after we return false here to indicate failure.
+        return false;
+    }
+
+    // 3. If the first initialization segment received flag is true, then run the following steps:
+    if (m_firstInitializationSegmentReceived) {
+        // 3.1 Verify the following properties. If any of the checks fail, then run the append error algorithm with the decode error parameter set to true and abort these steps.
+        bool tracksMatchFirstInitSegment = true;
+        // - The number of audio, video, and text tracks match what was in the first initialization segment.
+        if (newAudioTracks.size() != audioTracks().length() || newVideoTracks.size() != videoTracks().length()) {
+            tracksMatchFirstInitSegment = false;
+        }
+        // - The codecs for each track match what was specified in the first initialization segment.
+        // This is currently done in MediaSourceState::OnNewConfigs.
+        // - If more than one track for a single type is present (e.g. 2 audio tracks), then the Track IDs match the ones in the first initialization segment.
+        if (tracksMatchFirstInitSegment && newAudioTracks.size() > 1) {
+            for (size_t i = 0; i < newAudioTracks.size(); ++i) {
+                const String& newTrackId = newAudioTracks[i].id;
+                if (newTrackId != String(audioTracks().anonymousIndexedGetter(i)->id())) {
+                    tracksMatchFirstInitSegment = false;
+                    break;
+                }
+            }
+        }
+        if (tracksMatchFirstInitSegment && newVideoTracks.size() > 1) {
+            for (size_t i = 0; i < newVideoTracks.size(); ++i) {
+                const String& newTrackId = newVideoTracks[i].id;
+                if (newTrackId != String(videoTracks().anonymousIndexedGetter(i)->id())) {
+                    tracksMatchFirstInitSegment = false;
+                    break;
+                }
+            }
+        }
+        if (!tracksMatchFirstInitSegment) {
+            SBLOG << __FUNCTION__ << " this=" << this << " failed: tracks mismatch the first init segment.";
+            // The append error algorithm will be called at the top level after we return false here to indicate failure.
+            return false;
+        }
+        // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers.
+        // This is done in Chromium code in the stream parsers and demuxer implementations.
+        // 3.3 Set the need random access point flag on all track buffers to true.
+        // This is done in Chromium code, see MediaSourceState::OnNewConfigs.
+    }
+
+    // 4. Let active track flag equal false.
+    m_activeTrack = false;
+
+    // 5. If the first initialization segment received flag is false, then run the following steps:
     if (!m_firstInitializationSegmentReceived) {
-        // 5. If active track flag equals true, then run the following steps:
-        // 5.1. Add this SourceBuffer to activeSourceBuffers.
-        // 5.2. Queue a task to fire a simple event named addsourcebuffer at
-        // activesourcebuffers.
-        m_source->setSourceBufferActive(this);
+        // 5.1 If the initialization segment contains tracks with codecs the user agent does not support, then run the append error algorithm with the decode error parameter set to true and abort these steps.
+        // This is done in Chromium code, see MediaSourceState::OnNewConfigs.
+
+        // 5.2 For each audio track in the initialization segment, run the following steps:
+        for (const MediaTrackInfo& trackInfo : newAudioTracks) {
+            // 5.2.1 Let audio byte stream track ID be the Track ID for the current track being processed.
+            const auto& byteStreamTrackID = trackInfo.byteStreamTrackID;
+            // 5.2.2 Let audio language be a BCP 47 language tag for the language specified in the initialization segment for this track or an empty string if no language info is present.
+            WebString language = trackInfo.language;
+            // 5.2.3 If audio language equals an empty string or the 'und' BCP 47 value, then run the default track language algorithm with byteStreamTrackID set to
+            // audio byte stream track ID and type set to "audio" and assign the value returned by the algorithm to audio language.
+            if (language.isEmpty() || language == "und")
+                language = defaultTrackLanguage(TrackDefault::audioKeyword(), byteStreamTrackID);
+            // 5.2.4 Let audio label be a label specified in the initialization segment for this track or an empty string if no label info is present.
+            WebString label = trackInfo.label;
+            // 5.2.5 If audio label equals an empty string, then run the default track label algorithm with byteStreamTrackID set to audio byte stream track ID and
+            // type set to "audio" and assign the value returned by the algorithm to audio label.
+            if (label.isEmpty())
+                label = defaultTrackLabel(TrackDefault::audioKeyword(), byteStreamTrackID);
+            // 5.2.6 Let audio kinds be an array of kind strings specified in the initialization segment for this track or an empty array if no kind information is provided.
+            const auto& kind = trackInfo.kind;
+            // 5.2.7 TODO(servolk): Implement track kind processing.
+            // 5.2.8.2 Let new audio track be a new AudioTrack object.
+            AudioTrack* audioTrack = AudioTrack::create(byteStreamTrackID, kind, label, language, false);
+            SourceBufferTrackBaseSupplement::setSourceBuffer(*audioTrack, this);
+            // 5.2.8.7 If audioTracks.length equals 0, then run the following steps:
+            if (audioTracks().length() == 0) {
+                // 5.2.8.7.1 Set the enabled property on new audio track to true.
+                audioTrack->setEnabled(true);
+                // 5.2.8.7.2 Set active track flag to true.
+                m_activeTrack = true;
+            }
+            // 5.2.8.8 Add new audio track to the audioTracks attribute on this SourceBuffer object.
+            // 5.2.8.9 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on this SourceBuffer object.
+            audioTracks().add(audioTrack);
+            // 5.2.8.10 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
+            // 5.2.8.11 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on the HTMLMediaElement.
+            m_source->mediaElement()->audioTracks().add(audioTrack);
+        }
+
+        // 5.3 For each video track in the initialization segment, run the following steps:
+        for (const MediaTrackInfo& trackInfo : newVideoTracks) {
+            // 5.3.1 Let video byte stream track ID be the Track ID for the current track being processed.
+            const auto& byteStreamTrackID = trackInfo.byteStreamTrackID;
+            // 5.3.2 Let video language be a BCP 47 language tag for the language specified in the initialization segment for this track or an empty string if no language info is present.
+            WebString language = trackInfo.language;
+            // 5.3.3 If video language equals an empty string or the 'und' BCP 47 value, then run the default track language algorithm with byteStreamTrackID set to
+            // video byte stream track ID and type set to "video" and assign the value returned by the algorithm to video language.
+            if (language.isEmpty() || language == "und")
+                language = defaultTrackLanguage(TrackDefault::videoKeyword(), byteStreamTrackID);
+            // 5.3.4 Let video label be a label specified in the initialization segment for this track or an empty string if no label info is present.
+            WebString label = trackInfo.label;
+            // 5.3.5 If video label equals an empty string, then run the default track label algorithm with byteStreamTrackID set to video byte stream track ID and
+            // type set to "video" and assign the value returned by the algorithm to video label.
+            if (label.isEmpty())
+                label = defaultTrackLabel(TrackDefault::videoKeyword(), byteStreamTrackID);
+            // 5.3.6 Let video kinds be an array of kind strings specified in the initialization segment for this track or an empty array if no kind information is provided.
+            const auto& kind = trackInfo.kind;
+            // 5.3.7 TODO(servolk): Implement track kind processing.
+            // 5.3.8.2 Let new video track be a new VideoTrack object.
+            VideoTrack* videoTrack = VideoTrack::create(byteStreamTrackID, kind, label, language, false);
+            SourceBufferTrackBaseSupplement::setSourceBuffer(*videoTrack, this);
+            // 5.3.8.7 If videoTracks.length equals 0, then run the following steps:
+            if (videoTracks().length() == 0) {
+                // 5.3.8.7.1 Set the selected property on new video track to true.
+                videoTrack->setSelected(true);
+                // 5.3.8.7.2 Set active track flag to true.
+                m_activeTrack = true;
+            }
+            // 5.3.8.8 Add new video track to the videoTracks attribute on this SourceBuffer object.
+            // 5.3.8.9 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on this SourceBuffer object.
+            videoTracks().add(videoTrack);
+            // 5.3.8.10 Add new video track to the videoTracks attribute on the HTMLMediaElement.
+            // 5.3.8.11 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on the HTMLMediaElement.
+            m_source->mediaElement()->videoTracks().add(videoTrack);
+        }
+
+        // 5.4 TODO(servolk): Add text track processing here.
+
+        // 5.5 If active track flag equals true, then run the following steps:
+        if (m_activeTrack) {
+            // 5.5.1 Add this SourceBuffer to activeSourceBuffers.
+            // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers.
+            m_source->setSourceBufferActive(this);
+        }

-        // 6. Set first initialization segment received flag to true.
+        // 5.6 Set first initialization segment received flag to true.
         m_firstInitializationSegmentReceived = true;
     }

-    return result;
+    return true;
 }

 bool SourceBuffer::hasPendingActivity() const
...
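Note for readers: the getTrackDefault precedence above (an exact byteStreamTrackID match wins, a TrackDefault with an empty byteStreamTrackID serves as a type-wide fallback, and otherwise the default label/language is the empty string) can be exercised in isolation. The following standalone sketch uses simplified stand-in types, not the Blink classes:

// Standalone illustration of the spec's default-track-label/language lookup.
// Simplified types; not the Blink implementation.
#include <cassert>
#include <string>
#include <vector>

struct TrackDefault {
    std::string type;               // "audio", "video", "text"
    std::string byteStreamTrackID;  // "" == applies to any track of this type
    std::string label;
    std::string language;
};

const TrackDefault* getTrackDefault(const std::vector<TrackDefault>& defaults,
                                    const std::string& type,
                                    const std::string& byteStreamTrackID) {
    const TrackDefault* emptyIdMatch = nullptr;
    for (const TrackDefault& d : defaults) {
        if (d.type != type)
            continue;
        if (d.byteStreamTrackID == byteStreamTrackID)
            return &d;  // step 1: exact byteStreamTrackID match
        if (!emptyIdMatch && d.byteStreamTrackID.empty())
            emptyIdMatch = &d;  // step 2: remember the wildcard entry
    }
    return emptyIdMatch;  // may be null => caller returns "" (step 3)
}

int main() {
    std::vector<TrackDefault> defaults = {
        {"audio", "2", "audio-label", "audio-language"},
        {"audio", "", "fallback-label", "fallback-language"},
    };
    assert(getTrackDefault(defaults, "audio", "2")->label == "audio-label");
    assert(getTrackDefault(defaults, "audio", "7")->label == "fallback-label");
    assert(getTrackDefault(defaults, "video", "1") == nullptr);
    return 0;
}

This mirrors the two layout tests above: with explicit bytestream ids ("1"/"2") the exact-match branch fires, and with empty ids the fallback branch produces the same labels and languages.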
@@ -116,7 +116,7 @@ public:
     const AtomicString& interfaceName() const override;

     // WebSourceBufferClient interface
-    WebVector<WebMediaPlayer::TrackId> initializationSegmentReceived(const WebVector<MediaTrackInfo>&) override;
+    bool initializationSegmentReceived(const WebVector<MediaTrackInfo>&) override;

     DECLARE_VIRTUAL_TRACE();
@@ -153,6 +153,10 @@ private:
     void removeMediaTracks();

+    const TrackDefault* getTrackDefault(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
+    AtomicString defaultTrackLabel(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
+    AtomicString defaultTrackLanguage(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
+
     // FileReaderLoaderClient interface
     void didStartLoading() override;
     void didReceiveDataForClient(const char* data, unsigned dataLength) override;
@@ -169,6 +173,7 @@ private:
     double m_timestampOffset;
     Member<AudioTrackList> m_audioTracks;
     Member<VideoTrackList> m_videoTracks;
+    bool m_activeTrack = false;
     double m_appendWindowStart;
     double m_appendWindowEnd;
     bool m_firstInitializationSegmentReceived;
...
@@ -11,19 +11,19 @@
 namespace blink {

-static const AtomicString& audioKeyword()
+const AtomicString& TrackDefault::audioKeyword()
 {
     DEFINE_STATIC_LOCAL(const AtomicString, audio, ("audio"));
     return audio;
 }

-static const AtomicString& videoKeyword()
+const AtomicString& TrackDefault::videoKeyword()
 {
     DEFINE_STATIC_LOCAL(const AtomicString, video, ("video"));
     return video;
 }

-static const AtomicString& textKeyword()
+const AtomicString& TrackDefault::textKeyword()
 {
     DEFINE_STATIC_LOCAL(const AtomicString, text, ("text"));
     return text;
...
@@ -15,6 +15,10 @@ class ExceptionState;
 class TrackDefault final : public GarbageCollectedFinalized<TrackDefault>, public ScriptWrappable {
     DEFINE_WRAPPERTYPEINFO();
 public:
+    static const AtomicString& audioKeyword();
+    static const AtomicString& videoKeyword();
+    static const AtomicString& textKeyword();
+
     static TrackDefault* create(const AtomicString& type, const String& language, const String& label, const Vector<String>& kinds, const String& byteStreamTrackID, ExceptionState&);

     virtual ~TrackDefault();
...
@@ -21,15 +21,15 @@ public:
     struct MediaTrackInfo {
         WebMediaPlayer::TrackType trackType;
         WebMediaPlayer::TrackId id;
+        WebString byteStreamTrackID;
         WebString kind;
         WebString label;
         WebString language;
     };

     // Notifies SourceBuffer that parsing of a new init segment has been completed successfully. The input parameter is a collection
-    // of information about media tracks found in the new init segment. The return value is a vector of blink WebMediaPlayer track ids
-    // assigned to each track of the input collection (the order of output track ids must match the input track information).
-    virtual WebVector<WebMediaPlayer::TrackId> initializationSegmentReceived(const WebVector<MediaTrackInfo>& tracks) = 0;
+    // of information about media tracks found in the new init segment. The return value is true in case of success.
+    virtual bool initializationSegmentReceived(const WebVector<MediaTrackInfo>& tracks) = 0;
 };

 } // namespace blink
...
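Note for readers: on the implementation side, the only thing a WebSourceBufferClient now owes its caller is a success/failure verdict. A toy implementer, again with simplified stand-in types and hypothetical names (TrackInfo, MinimalSourceBufferClient) rather than the real Blink SourceBuffer:

// Hypothetical minimal client: enforces two of the checks that live at the
// Blink level in this CL (a non-empty track list, and later init segments
// carrying the same number of tracks as the first one).
#include <cstddef>
#include <string>
#include <vector>

struct TrackInfo {
    std::string id;
    std::string byteStreamTrackID;
};

class MinimalSourceBufferClient {
public:
    // Returns false to request the append error algorithm (decode error).
    bool initializationSegmentReceived(const std::vector<TrackInfo>& tracks) {
        if (tracks.empty())
            return false;  // spec step 2: no tracks => append error
        if (!m_firstInitSegmentReceived) {
            m_trackCount = tracks.size();
            m_firstInitSegmentReceived = true;
            return true;
        }
        // Spec step 3.1 (partial): track counts must match the first segment.
        return tracks.size() == m_trackCount;
    }

private:
    bool m_firstInitSegmentReceived = false;
    size_t m_trackCount = 0;
};

int main() {
    MinimalSourceBufferClient client;
    std::vector<TrackInfo> tracks = {{"1", "1"}, {"2", "2"}};
    bool first = client.initializationSegmentReceived(tracks);      // true
    bool second = client.initializationSegmentReceived(tracks);     // true, same track count
    bool bad = client.initializationSegmentReceived({{"1", "1"}});  // false, count mismatch
    return (first && second && !bad) ? 0 : 1;
}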