Commit efae0344 authored by servolk, committed by Commit bot

Implement InitSegmentReceived algorithm in blink

This CL begins moving the MSE init segment received algorithm to the
Blink level, so that it can be shared across different implementations
(e.g. between Chrome and Opera). The old implementation in
MediaSourceState::OnNewConfigs needs to be kept for now, since it also
does important work that currently must happen at the Chromium media
pipeline level (e.g. creating track buffers and demuxer streams).

BUG=620881

Review-Url: https://codereview.chromium.org/1678523003
Cr-Commit-Position: refs/heads/master@{#403286}
parent 7be6ed95
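
Before the diffs, a self-contained sketch of the TrackDefaults handling that the blink-level init segment received algorithm performs may be useful. This is a simplified model for illustration only, using stand-in types; it is not the Blink code from this CL:

// Standalone sketch (not the actual Blink code): models how the blink-level
// "initialization segment received" algorithm applies TrackDefaults to the
// track info reported through WebSourceBufferClient. All types here are
// simplified stand-ins for the real MediaTrackInfo / TrackDefault classes.
#include <iostream>
#include <string>
#include <vector>

struct MediaTrackInfo {
    std::string trackType;          // "audio" or "video"
    std::string byteStreamTrackID;  // id found in the byte stream, may be ""
    std::string kind;
    std::string label;              // empty unless the byte stream had one
    std::string language;           // empty unless the byte stream had one
};

struct TrackDefault {
    std::string type;               // "audio" or "video"
    std::string byteStreamTrackID;  // "" applies to any track of this type
    std::string label;
    std::string language;
};

// Mirrors the spec's TrackDefault lookup: prefer an entry whose byte stream
// track id matches exactly, else fall back to the type-wide "" entry.
const TrackDefault* getTrackDefault(const std::vector<TrackDefault>& defaults,
                                    const std::string& type,
                                    const std::string& byteStreamTrackID) {
    for (const auto& d : defaults) {
        if (d.type == type && d.byteStreamTrackID == byteStreamTrackID)
            return &d;
    }
    for (const auto& d : defaults) {
        if (d.type == type && d.byteStreamTrackID.empty())
            return &d;
    }
    return nullptr;
}

// Sketch of initializationSegmentReceived(): fill in label/language from the
// TrackDefaults when the byte stream did not provide them, then report the
// tracks. The real method also creates AudioTrack/VideoTrack objects, fires
// addtrack events, and returns false when validation fails.
bool initializationSegmentReceived(std::vector<MediaTrackInfo> tracks,
                                   const std::vector<TrackDefault>& defaults) {
    for (auto& track : tracks) {
        const TrackDefault* td =
            getTrackDefault(defaults, track.trackType, track.byteStreamTrackID);
        if (td && track.label.empty()) track.label = td->label;
        if (td && track.language.empty()) track.language = td->language;
        std::cout << track.trackType << " track: label='" << track.label
                  << "' language='" << track.language << "'\n";
    }
    return true;
}

int main() {
    std::vector<TrackDefault> defaults = {
        {"audio", "", "audio-label", "audio-language"},
        {"video", "", "video-label", "video-language"},
    };
    std::vector<MediaTrackInfo> tracks = {
        {"audio", "1", "main", "", ""},
        {"video", "2", "main", "", ""},
    };
    return initializationSegmentReceived(tracks, defaults) ? 0 : 1;
}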
......@@ -185,6 +185,8 @@ void WebSourceBufferImpl::InitSegmentReceived(
blink::WebSourceBufferClient::MediaTrackInfo trackInfo;
trackInfo.trackType = mediaTrackTypeToBlink(track->type());
trackInfo.id = blink::WebString::fromUTF8(track->id());
+trackInfo.byteStreamTrackID = blink::WebString::fromUTF8(
+    base::UintToString(track->bytestream_track_id()));
trackInfo.kind = blink::WebString::fromUTF8(track->kind());
trackInfo.label = blink::WebString::fromUTF8(track->label());
trackInfo.language = blink::WebString::fromUTF8(track->language());
......
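The mediaTrackTypeToBlink() helper used in the hunk above is not shown in this diff. A hypothetical sketch, assuming the obvious one-to-one mapping between media::MediaTrack::Type and blink::WebMediaPlayer::TrackType (the actual helper in websourcebuffer_impl.cc may differ):

// Hypothetical helper, for illustration only.
static blink::WebMediaPlayer::TrackType mediaTrackTypeToBlink(
    media::MediaTrack::Type type) {
  switch (type) {
    case media::MediaTrack::Audio:
      return blink::WebMediaPlayer::AudioTrack;
    case media::MediaTrack::Video:
      return blink::WebMediaPlayer::VideoTrack;
    case media::MediaTrack::Text:
      return blink::WebMediaPlayer::TextTrack;
  }
  NOTREACHED();
  return blink::WebMediaPlayer::AudioTrack;
}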
......@@ -22,7 +22,7 @@
assert_equals(sourceBuffer.videoTracks.length, 1, "videoTracks.length");
assert_equals(sourceBuffer.videoTracks[0].kind, "main", "videoTrack.kind");
assert_equals(sourceBuffer.videoTracks[0].label, "", "videoTrack.label");
assert_equals(sourceBuffer.videoTracks[0].language, "eng", "videoTrack.language");
assert_equals(sourceBuffer.videoTracks[0].language, "", "videoTrack.language");
assert_equals(sourceBuffer.videoTracks[0].sourceBuffer, sourceBuffer, "videoTrack.sourceBuffer");
// The first video track is selected by default.
assert_true(sourceBuffer.videoTracks[0].selected, "sourceBuffer.videoTracks[0].selected");
......@@ -30,7 +30,7 @@
assert_equals(sourceBuffer.audioTracks.length, 1, "audioTracks.length");
assert_equals(sourceBuffer.audioTracks[0].kind, "main", "audioTrack.kind");
assert_equals(sourceBuffer.audioTracks[0].label, "", "audioTrack.label");
assert_equals(sourceBuffer.audioTracks[0].language, "eng", "audioTrack.language");
assert_equals(sourceBuffer.audioTracks[0].language, "", "audioTrack.language");
assert_equals(sourceBuffer.audioTracks[0].sourceBuffer, sourceBuffer, "audioTrack.sourceBuffer");
// The first audio track is enabled by default.
assert_true(sourceBuffer.audioTracks[0].enabled, "sourceBuffer.audioTracks[0].enabled");
......
<!DOCTYPE html>
<html>
    <head>
        <script src="/w3c/resources/testharness.js"></script>
        <script src="/w3c/resources/testharnessreport.js"></script>
        <script src="mediasource-util.js"></script>
    </head>
    <body>
        <script>
          function verifyTrackProperties(trackType, track, expectedTrackInfo)
          {
              assert_equals(track.kind, expectedTrackInfo.kind, trackType + ".kind");
              assert_equals(track.label, expectedTrackInfo.label, trackType + ".label");
              assert_equals(track.language, expectedTrackInfo.language, trackType + ".language");
          }

          function loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, successCallback)
          {
              var initSegment = MediaSourceUtil.extractSegmentData(mediaData, segmentInfo.init);

              test.expectEvent(sourceBuffer.audioTracks, "addtrack", "sourceBuffer.audioTracks addtrack event");
              test.expectEvent(sourceBuffer.videoTracks, "addtrack", "sourceBuffer.videoTracks addtrack event");
              test.expectEvent(mediaElement.audioTracks, "addtrack", "mediaElement.audioTracks addtrack event");
              test.expectEvent(mediaElement.videoTracks, "addtrack", "mediaElement.videoTracks addtrack event");
              test.expectEvent(mediaElement, "loadedmetadata", "loadedmetadata done.");
              test.expectEvent(sourceBuffer, "updateend", "initSegment append ended.");
              sourceBuffer.appendBuffer(initSegment);

              test.waitForExpectedEvents(function()
              {
                  assert_equals(sourceBuffer.audioTracks.length, 1, "audioTracks.length");
                  verifyTrackProperties("audio", sourceBuffer.audioTracks[0], expectedAudioTrackInfo);
                  assert_equals(sourceBuffer.audioTracks[0].sourceBuffer, sourceBuffer, "audioTrack.sourceBuffer");
                  // The first audio track is enabled by default.
                  assert_true(sourceBuffer.audioTracks[0].enabled, "sourceBuffer.audioTracks[0].enabled");

                  assert_equals(sourceBuffer.videoTracks.length, 1, "videoTracks.length");
                  verifyTrackProperties("video", sourceBuffer.videoTracks[0], expectedVideoTrackInfo);
                  assert_equals(sourceBuffer.videoTracks[0].sourceBuffer, sourceBuffer, "videoTrack.sourceBuffer");
                  // The first video track is selected by default.
                  assert_true(sourceBuffer.videoTracks[0].selected, "sourceBuffer.videoTracks[0].selected");

                  assert_not_equals(sourceBuffer.audioTracks[0].id, sourceBuffer.videoTracks[0].id, "track ids must be unique");

                  assert_equals(mediaElement.videoTracks.length, 1, "videoTracks.length");
                  assert_equals(mediaElement.videoTracks[0], sourceBuffer.videoTracks[0], "mediaElement.videoTrack == sourceBuffer.videoTrack");
                  assert_equals(mediaElement.audioTracks.length, 1, "audioTracks.length");
                  assert_equals(mediaElement.audioTracks[0], sourceBuffer.audioTracks[0], "mediaElement.audioTrack == sourceBuffer.audioTrack");

                  successCallback();
              });
          }

          mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
          {
              var expectedAudioTrackInfo = { kind: "main", label: "", language: "" };
              var expectedVideoTrackInfo = { kind: "main", label: "", language: "" };
              loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func(function ()
              {
                  // Now append the same exact init segment again. This should succeed,
                  // but no new tracks should be added to the SourceBuffer or MediaElement.
                  test.failOnEvent(sourceBuffer.audioTracks, "addtrack", "Unexpected addtrack event on sourceBuffer.audioTracks for second init segment");
                  test.failOnEvent(sourceBuffer.videoTracks, "addtrack", "Unexpected addtrack event on sourceBuffer.videoTracks for second init segment");
                  test.failOnEvent(mediaElement.audioTracks, "addtrack", "Unexpected addtrack event on mediaElement.audioTracks for second init segment");
                  test.failOnEvent(mediaElement.videoTracks, "addtrack", "Unexpected addtrack event on mediaElement.videoTracks for second init segment");
                  test.expectEvent(sourceBuffer, "updateend", "InitSegment append ended.");

                  var initSegment = MediaSourceUtil.extractSegmentData(mediaData, segmentInfo.init);
                  sourceBuffer.appendBuffer(initSegment);

                  test.waitForExpectedEvents(function()
                  {
                      assert_equals(mediaElement.audioTracks.length, 1, "mediaElement.audioTracks.length");
                      assert_equals(mediaElement.videoTracks.length, 1, "mediaElement.videoTracks.length");
                      assert_equals(sourceBuffer.audioTracks.length, 1, "sourceBuffer.audioTracks.length");
                      assert_equals(sourceBuffer.videoTracks.length, 1, "sourceBuffer.videoTracks.length");
                      test.done();
                  });
              }));
          }, "Same init segment appended twice");

          mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
          {
              var originalTrackDefaults = [
                  new TrackDefault("audio", "audio-language", "audio-label", ["main"], "2"),
                  new TrackDefault("video", "video-language", "video-label", ["main"], "1")
              ];
              var trackDefaultList = new TrackDefaultList(originalTrackDefaults.slice());
              sourceBuffer.trackDefaults = trackDefaultList;

              var expectedAudioTrackInfo = { kind: "main", label: "audio-label", language: "audio-language" };
              var expectedVideoTrackInfo = { kind: "main", label: "video-label", language: "video-language" };
              loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func_done());
          }, "Track defaults processing in the init segment algorithm (explicit bytestream ids)");

          mediasource_testafterdataloaded(function(test, mediaElement, mediaSource, segmentInfo, sourceBuffer, mediaData)
          {
              var originalTrackDefaults = [
                  new TrackDefault("audio", "audio-language", "audio-label", ["main"], ""),
                  new TrackDefault("video", "video-language", "video-label", ["main"], "")
              ];
              var trackDefaultList = new TrackDefaultList(originalTrackDefaults.slice());
              sourceBuffer.trackDefaults = trackDefaultList;

              var expectedAudioTrackInfo = { kind: "main", label: "audio-label", language: "audio-language" };
              var expectedVideoTrackInfo = { kind: "main", label: "video-label", language: "video-language" };
              loadMediaAndVerifyAddedTracks(test, mediaElement, segmentInfo, sourceBuffer, mediaData, expectedAudioTrackInfo, expectedVideoTrackInfo, test.step_func_done());
          }, "Track defaults processing in the init segment algorithm (no bytestream ids)");
        </script>
    </body>
</html>
......@@ -116,7 +116,7 @@ public:
const AtomicString& interfaceName() const override;
// WebSourceBufferClient interface
-WebVector<WebMediaPlayer::TrackId> initializationSegmentReceived(const WebVector<MediaTrackInfo>&) override;
+bool initializationSegmentReceived(const WebVector<MediaTrackInfo>&) override;
DECLARE_VIRTUAL_TRACE();
......@@ -153,6 +153,10 @@ private:
void removeMediaTracks();
+const TrackDefault* getTrackDefault(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
+AtomicString defaultTrackLabel(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
+AtomicString defaultTrackLanguage(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const;
// FileReaderLoaderClient interface
void didStartLoading() override;
void didReceiveDataForClient(const char* data, unsigned dataLength) override;
......@@ -169,6 +173,7 @@ private:
double m_timestampOffset;
Member<AudioTrackList> m_audioTracks;
Member<VideoTrackList> m_videoTracks;
+bool m_activeTrack = false;
double m_appendWindowStart;
double m_appendWindowEnd;
bool m_firstInitializationSegmentReceived;
......
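The new getTrackDefault/defaultTrackLabel/defaultTrackLanguage helpers implement the TrackDefaults lookup from the MSE spec. A hedged sketch of how defaultTrackLanguage might be written on top of getTrackDefault (the actual implementation in SourceBuffer.cpp may differ):

// Sketch only: returns the TrackDefault language for the track, or the
// empty atom when no applicable TrackDefault exists, in which case the
// caller falls back to whatever the byte stream reported.
AtomicString SourceBuffer::defaultTrackLanguage(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const
{
    const TrackDefault* trackDefault = getTrackDefault(trackType, byteStreamTrackID);
    return trackDefault ? AtomicString(trackDefault->language()) : emptyAtom;
}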
......@@ -11,19 +11,19 @@
namespace blink {
-static const AtomicString& audioKeyword()
+const AtomicString& TrackDefault::audioKeyword()
{
DEFINE_STATIC_LOCAL(const AtomicString, audio, ("audio"));
return audio;
}
-static const AtomicString& videoKeyword()
+const AtomicString& TrackDefault::videoKeyword()
{
DEFINE_STATIC_LOCAL(const AtomicString, video, ("video"));
return video;
}
-static const AtomicString& textKeyword()
+const AtomicString& TrackDefault::textKeyword()
{
DEFINE_STATIC_LOCAL(const AtomicString, text, ("text"));
return text;
......
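Promoting the keyword accessors from file-local statics to public members of TrackDefault lets other classes reuse the same AtomicStrings. An illustrative fragment of the kind of call site this enables (hypothetical, not from this CL):

// Illustrative only: SourceBuffer-side code can now branch on track type
// without re-declaring the literal strings.
static bool isSupportedTrackType(const AtomicString& trackType)
{
    return trackType == TrackDefault::audioKeyword()
        || trackType == TrackDefault::videoKeyword()
        || trackType == TrackDefault::textKeyword();
}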
......@@ -15,6 +15,10 @@ class ExceptionState;
class TrackDefault final : public GarbageCollectedFinalized<TrackDefault>, public ScriptWrappable {
DEFINE_WRAPPERTYPEINFO();
public:
+static const AtomicString& audioKeyword();
+static const AtomicString& videoKeyword();
+static const AtomicString& textKeyword();
static TrackDefault* create(const AtomicString& type, const String& language, const String& label, const Vector<String>& kinds, const String& byteStreamTrackID, ExceptionState&);
virtual ~TrackDefault();
......
......@@ -21,15 +21,15 @@ public:
struct MediaTrackInfo {
WebMediaPlayer::TrackType trackType;
WebMediaPlayer::TrackId id;
+WebString byteStreamTrackID;
WebString kind;
WebString label;
WebString language;
};
// Notifies SourceBuffer that parsing of a new init segment has been completed successfully. The input parameter is a collection
-// of information about media tracks found in the new init segment. The return value is a vector of blink WebMediaPlayer track ids
-// assigned to each track of the input collection (the order of output track ids must match the input track information).
-virtual WebVector<WebMediaPlayer::TrackId> initializationSegmentReceived(const WebVector<MediaTrackInfo>& tracks) = 0;
+// of information about media tracks found in the new init segment. The return value is true in case of success.
+virtual bool initializationSegmentReceived(const WebVector<MediaTrackInfo>& tracks) = 0;
};
} // namespace blink
......
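With the interface now returning bool, the Chromium-side caller only has to check for success. A hedged sketch of the call site (assuming a |client_| pointer to the blink::WebSourceBufferClient; names outside the diff are assumptions, and the real WebSourceBufferImpl may differ):

// Sketch of the Chromium-side call, for illustration only.
blink::WebVector<blink::WebSourceBufferClient::MediaTrackInfo>
    trackInfoVector(tracks.size());
for (size_t i = 0; i < tracks.size(); ++i) {
  // ... fill trackInfoVector[i] as in the WebSourceBufferImpl hunk above ...
}
if (!client_->initializationSegmentReceived(trackInfoVector)) {
  DVLOG(1) << "Init segment rejected by the blink-side SourceBuffer";
  // A full implementation would run the append error algorithm here.
}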