Commit b8baab92 authored by Matt Wolenetz, committed by Chromium LUCI CQ

[MSE][WebCodecs] Plumb appendEncodedChunks to WebSourceBuffer::AppendChunks

This change:

1) Implements SourceBuffer::appendEncodedChunks(). This is the first
   promise-based MSE API. Like async appendBuffer(), once the
   synchronous prepareAppend steps are complete, an async task is
   scheduled to complete the chunks' append. Unlike async
   appendBuffer(), the 'update', 'updateend', 'abort', and 'error'
   events involved in an async chunk append are not enqueued, because
   the same information is conveyed by the promise's resolution or
   rejection (see the usage sketch after this list). Also, mixing
   event notification with promise resolution/rejection is confusing
   and redundant, and w3ctag guidelines for promise-based APIs require
   such events originating in a promise's async execution to be
   dispatched (not just enqueued) before the promise is resolved or
   rejected.

2) Converts the chunks, using new local helpers in SourceBuffer,
   directly into the type used by the underlying MSE buffering
   implementation (StreamParserBuffers in a circular_deque) during the
   synchronous portion of appendEncodedChunks().

3) Cancels the async chunk-append task during context destruction and
   uses it to gate hasPendingActivity. Essentially, it is a third async
   operation the SourceBuffer can perform (in addition to the
   pre-existing async appendBuffer and async remove). At most one of
   these three async operations may be pending at a time, which keeps
   the behavior unsurprising and consistent with the existing async MSE
   operations.

4) Adds a new WebSourceBuffer AppendChunks method and a stubbed
   implementation of it in WebSourceBufferImpl.
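
For illustration only (not part of this change), here is a rough sketch of
how a page might use the promise-based API described in (1). The setup is
assumed and abbreviated: sb is a SourceBuffer created from a WebCodecs
config, and chunks is a single EncodedAudioChunk/EncodedVideoChunk or a
sequence of them (video chunks need a duration in this prototype):

    // Hypothetical usage sketch, not code from this CL.
    async function bufferChunks(sb, chunks) {
      try {
        // Accepts a single chunk or a sequence of audio/video chunks.
        await sb.appendEncodedChunks(chunks);
        // Resolution stands in for appendBuffer()'s 'update'/'updateend'.
      } catch (e) {
        // Rejection stands in for 'error'/'abort', e.g. an AbortError
        // DOMException if abort() was called during the append.
        console.error('appendEncodedChunks failed:', e);
      }
    }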

Later changes will update WSBI::AppendChunks() to send the buffers
through ChunkDemuxer to the WebCodecsEncodedChunkStreamParser, and will
add tests for promise rejection/abort/success scenarios and for basic
end-to-end buffering and playback of encoded chunks with MSE.
Refinements may also come later, such as supporting H.264 chunk
buffering, using a non-hardcoded audio chunk duration (from a new
EncodedAudioChunkInit duration attribute that would be optional in
WebCodecs but required for MSE), and letting the app provide a decode
timestamp in EncodedVideoChunkInit.

BUG=1144908

Change-Id: Ieb5d0942e68f48156bee9290dcb99dad2e280e85
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2574534
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Commit-Queue: Matthew Wolenetz <wolenetz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#836201}
parent 4a110138
@@ -146,6 +146,15 @@ bool WebSourceBufferImpl::Append(const unsigned char* data,
   return success;
 }
 
+bool WebSourceBufferImpl::AppendChunks(
+    std::unique_ptr<media::StreamParser::BufferQueue> buffer_queue,
+    double* timestamp_offset) {
+  // TODO(crbug.com/1144908): Continue MSE-for-WebCodecs encoded chunk buffering
+  // implementation from here through ChunkDemuxer/SourceBufferState/etc.
+  NOTIMPLEMENTED();
+  return false;
+}
+
 void WebSourceBufferImpl::ResetParserState() {
   demuxer_->ResetParserState(id_,
                              append_window_start_, append_window_end_,
...
@@ -36,6 +36,9 @@ class WebSourceBufferImpl : public blink::WebSourceBuffer {
   bool Append(const unsigned char* data,
               unsigned length,
               double* timestamp_offset) override;
+  bool AppendChunks(
+      std::unique_ptr<media::StreamParser::BufferQueue> buffer_queue,
+      double* timestamp_offset) override;
   void ResetParserState() override;
   void Remove(double start, double end) override;
   bool CanChangeType(const blink::WebString& content_type,
...
@@ -30,6 +30,7 @@ include_rules = [
   "+media/base/audio_renderer_sink.h",
   "+media/base/eme_constants.h",
   "+media/base/media_log.h",
+  "+media/base/stream_parser.h",
   "+media/base/video_frame_metadata.h",
   "+media/base/video_transformation.h",
   "+mojo/public",
...
@@ -31,6 +31,7 @@
 #ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_SOURCE_BUFFER_H_
 #define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_SOURCE_BUFFER_H_
 
+#include "media/base/stream_parser.h"
 #include "third_party/blink/public/platform/web_string.h"
 #include "third_party/blink/public/platform/web_time_range.h"
@@ -68,14 +69,18 @@ class WebSourceBuffer {
   virtual bool EvictCodedFrames(double current_playback_time,
                                 size_t new_data_size) = 0;
 
-  // Appends data and runs the segment parser loop algorithm.
-  // The algorithm may update |*timestamp_offset| if |timestamp_offset| is not
-  // null.
+  // Appends data and runs the segment parser loop algorithm (or more simply
+  // appends and processes caller-provided media::StreamParserBuffers in the
+  // AppendChunks version). The algorithm and associated frame processing may
+  // update |*timestamp_offset| if |timestamp_offset| is not null.
   // Returns true on success, otherwise the append error algorithm needs to
   // run with the decode error parameter set to true.
   virtual bool Append(const unsigned char* data,
                       unsigned length,
                       double* timestamp_offset) = 0;
+  virtual bool AppendChunks(
+      std::unique_ptr<media::StreamParser::BufferQueue> buffer_queue,
+      double* timestamp_offset) = 0;
 
   virtual void ResetParserState() = 0;
   virtual void Remove(double start, double end) = 0;
...
@@ -2,6 +2,8 @@ include_rules = [
   "-third_party/blink/renderer/modules",
   "+media/base/audio_decoder_config.h",
   "+media/base/logging_override_if_enabled.h",
+  "+media/base/stream_parser.h",
+  "+media/base/stream_parser_buffer.h",
   "+media/base/video_decoder_config.h",
   "+media/filters",
   "+media/formats/mp4/box_definitions.h",
...
@@ -264,6 +264,19 @@ SourceBuffer* MediaSource::AddSourceBufferUsingConfig(
   String console_message;
   CodecConfigEval eval;
 
+#if BUILDFLAG(USE_PROPRIETARY_CODECS)
+  // TODO(crbug.com/1144908): The SourceBuffer needs these for converting h264
+  // EncodedVideoChunks. Probably best if these details are put into a new
+  // WebCodecs VideoDecoderHelper abstraction (or similar), since this top-level
+  // MediaSource impl shouldn't need to worry about the details of specific
+  // codec bitstream conversions (nor should the underlying implementation be
+  // depended upon to redo work done already in WebCodecs decoder configuration
+  // validation.) In initial prototype, we do not support h264 buffering, so
+  // will fail if these become populated by MakeMediaVideoDecoderConfig, below.
+  std::unique_ptr<media::H264ToAnnexBBitstreamConverter> h264_converter;
+  std::unique_ptr<media::mp4::AVCDecoderConfigurationRecord> h264_avcc;
+#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
+
   if (config->hasAudioConfig()) {
     audio_config = std::make_unique<media::AudioDecoderConfig>();
     eval = AudioDecoder::MakeMediaAudioDecoderConfig(*(config->audioConfig()),
@@ -272,23 +285,23 @@ SourceBuffer* MediaSource::AddSourceBufferUsingConfig(
   } else {
     DCHECK(config->hasVideoConfig());
     video_config = std::make_unique<media::VideoDecoderConfig>();
-#if BUILDFLAG(USE_PROPRIETARY_CODECS)
-    // TODO(crbug.com/1144908): Give these to the resulting SourceBuffer for use
-    // in converting h264 EncodedVideoChunks. Probably best if these details are
-    // put into a new WebCodecs VideoDecoderHelper abstraction (or similar),
-    // since this top-level MediaSource impl shouldn't need to worry about the
-    // details of specific codec bitstream conversions (nor should the
-    // underlying implementation be depended upon to redo work done already
-    // in WebCodecs decoder configuration validation.)
-    std::unique_ptr<media::H264ToAnnexBBitstreamConverter> h264_converter;
-    std::unique_ptr<media::mp4::AVCDecoderConfigurationRecord> h264_avcc;
-#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
     eval = VideoDecoder::MakeMediaVideoDecoderConfig(
         *(config->videoConfig()), *video_config /* out */,
 #if BUILDFLAG(USE_PROPRIETARY_CODECS)
         h264_converter /* out */, h264_avcc /* out */,
 #endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
         console_message /* out */);
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS)
+    // TODO(crbug.com/1144908): Initial prototype does not support h264
+    // buffering. See above.
+    if (eval == CodecConfigEval::kSupported && (h264_converter || h264_avcc)) {
+      eval = CodecConfigEval::kUnsupported;
+      console_message =
+          "H.264 EncodedVideoChunk buffering is not yet supported in MSE. See "
+          "https://crbug.com/1144908.";
+      video_config.reset();
+    }
+#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
   }
 
   switch (eval) {
...
@@ -35,14 +35,21 @@
 #include <sstream>
 #include <utility>
 
+#include "base/numerics/checked_math.h"
 #include "media/base/logging_override_if_enabled.h"
+#include "media/base/stream_parser_buffer.h"
 #include "third_party/blink/public/platform/task_type.h"
 #include "third_party/blink/public/platform/web_source_buffer.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
+#include "third_party/blink/renderer/bindings/modules/v8/encoded_audio_chunk_or_encoded_video_chunk.h"
 #include "third_party/blink/renderer/bindings/modules/v8/encoded_av_chunk_sequence_or_encoded_av_chunk.h"
 #include "third_party/blink/renderer/bindings/modules/v8/v8_audio_decoder_config.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_encoded_audio_chunk.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_encoded_video_chunk.h"
 #include "third_party/blink/renderer/bindings/modules/v8/v8_source_buffer_config.h"
 #include "third_party/blink/renderer/bindings/modules/v8/v8_video_decoder_config.h"
 #include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/dom/dom_exception.h"
 #include "third_party/blink/renderer/core/dom/events/event.h"
 #include "third_party/blink/renderer/core/dom/events/event_queue.h"
 #include "third_party/blink/renderer/core/execution_context/execution_context.h"
@@ -109,6 +116,82 @@ WTF::String WebTimeRangesToString(const WebTimeRanges& ranges) {
   return string_builder.ToString();
 }
 
+// These track IDs are used as to differentiate tracks within a SourceBuffer.
+// They can be duplicated across SourceBuffers, since these are not the
+// TrackList identifiers exposed to the web app; these are instead equivalents
+// of bytestream format's in-band track identifiers.
+// TODO(crbug.com/1144908): Consider standardizing these especially if
+// TrackDefaults makes a return to MSE spec, so that apps can provide
+// name/label/kind/etc metadata for tracks originating from appended WebCodecs
+// chunks.
+constexpr media::StreamParser::TrackId kWebCodecsAudioTrackId = 1;
+constexpr media::StreamParser::TrackId kWebCodecsVideoTrackId = 2;
+
+// TODO(crbug.com/1144908): Move these converters into a WebCodecs decoder
+// helper abstraction. Beyond reuse (instead of copying the various
+// MakeDecoderBuffer methods), that will also help enable buffering h264 where
+// bitstream conversion might be necessary during conversion.
+// Note, caller updates results further as necessary (e.g. duration, DTS, etc).
+scoped_refptr<media::StreamParserBuffer> MakeAudioStreamParserBuffer(
+    const EncodedAudioChunk& audio_chunk) {
+  // TODO(crbug.com/1144908): DecoderBuffer takes size_t size, but
+  // StreamParserBuffer takes int. Fix this. For now, checked_cast is used.
+  auto stream_parser_buffer = media::StreamParserBuffer::CopyFrom(
+      static_cast<uint8_t*>(audio_chunk.data()->Data()),
+      base::checked_cast<int>(audio_chunk.data()->ByteLength()),
+      audio_chunk.type() == "key", media::DemuxerStream::AUDIO,
+      kWebCodecsAudioTrackId);
+
+  // TODO(crbug.com/1144908): Remove or change the following to DCHECK once
+  // StreamParserBuffer::CopyFrom takes size_t, not int.
+  CHECK_EQ(audio_chunk.data()->ByteLength(), stream_parser_buffer->data_size());
+
+  // Currently, we do not populate any side_data in these converters.
+  DCHECK_EQ(0U, stream_parser_buffer->side_data_size());
+  DCHECK_EQ(nullptr, stream_parser_buffer->side_data());
+
+  stream_parser_buffer->set_timestamp(
+      base::TimeDelta::FromMicroseconds(audio_chunk.timestamp()));
+
+  // TODO(crbug.com/1144908): Get EncodedAudioChunk to have an optional duration
+  // attribute, and require it to be populated for use by MSE-for-WebCodecs,
+  // here. For initial prototype, hard-coded 22ms is used as estimated duration.
+  stream_parser_buffer->set_duration(base::TimeDelta::FromMilliseconds(22));
+  stream_parser_buffer->set_is_duration_estimated(true);
+
+  return stream_parser_buffer;
+}
+
+// Caller must verify that video_chunk.duration().has_value().
+scoped_refptr<media::StreamParserBuffer> MakeVideoStreamParserBuffer(
+    const EncodedVideoChunk& video_chunk) {
+  // TODO(crbug.com/1144908): DecoderBuffer takes size_t size, but
+  // StreamParserBuffer takes int. Fix this. For now, checked_cast is used.
+  auto stream_parser_buffer = media::StreamParserBuffer::CopyFrom(
+      static_cast<uint8_t*>(video_chunk.data()->Data()),
+      base::checked_cast<int>(video_chunk.data()->ByteLength()),
+      video_chunk.type() == "key", media::DemuxerStream::VIDEO,
+      kWebCodecsVideoTrackId);
+
+  // TODO(crbug.com/1144908): Remove or change the following to DCHECK once
+  // StreamParserBuffer::CopyFrom takes size_t, not int.
+  CHECK_EQ(video_chunk.data()->ByteLength(), stream_parser_buffer->data_size());
+
+  // Currently, we do not populate any side_data in these converters.
+  DCHECK_EQ(0U, stream_parser_buffer->side_data_size());
+  DCHECK_EQ(nullptr, stream_parser_buffer->side_data());
+
+  stream_parser_buffer->set_timestamp(
+      base::TimeDelta::FromMicroseconds(video_chunk.timestamp()));
+
+  // TODO(crbug.com/1144908): Get EncodedVideoChunk to have an optional decode
+  // timestamp attribute. If it is populated, use it for the DTS of the
+  // StreamParserBuffer, here. For initial prototype, only in-order PTS==DTS
+  // chunks are supported. Out-of-order chunks may result in buffered range gaps
+  // or decode errors.
+  DCHECK(video_chunk.duration().has_value());
+  stream_parser_buffer->set_duration(
+      base::TimeDelta::FromMicroseconds(video_chunk.duration().value()));
+
+  return stream_parser_buffer;
+}
+
 }  // namespace
 
 SourceBuffer::SourceBuffer(std::unique_ptr<WebSourceBuffer> web_source_buffer,
@@ -535,29 +618,155 @@ void SourceBuffer::appendBuffer(NotShared<DOMArrayBufferView> data,
                        data.View()->byteLength(), exception_state);
 }
 
-// Note that |chunks| may be a sequence of mixed audio and video encoded chunks
-// (which should cause underlying buffering validation to emit error akin to
-// appending video to an audio track or vice-versa). It was impossible to get
-// the bindings generator to disambiguate sequence<audio> vs sequence<video>,
-// hence we could not use simple overloading in the IDL for these two. Neither
-// could the IDL union attempt similar. We must enforce that semantic in
-// implementation. Further note, |chunks| may instead be a single audio or a
-// single video chunk as a helpful additional overload for one-chunk-at-a-time
-// append use-cases.
 ScriptPromise SourceBuffer::appendEncodedChunks(
     ScriptState* script_state,
     const EncodedChunks& chunks,
     ExceptionState& exception_state) {
+  // Note that |chunks| may be a sequence of mixed audio and video encoded
+  // chunks (which should cause underlying buffering validation to emit error
+  // akin to appending video to an audio track or vice-versa). It was impossible
+  // to get the bindings generator to disambiguate sequence<audio> vs
+  // sequence<video>, hence we could not use simple overloading in the IDL for
+  // these two. Neither could the IDL union attempt similar. We must enforce
+  // that semantic in implementation. Further note, |chunks| may instead be a
+  // single audio or a single video chunk as a helpful additional overload for
+  // one-chunk-at-a-time append use-cases.
   DVLOG(2) << __func__ << " this=" << this;
-  // TODO(crbug.com/1144908): Validate allowed in current state (and take lock
-  // at appropriate point), unwrap the chunks, get a promise and its resolver,
-  // give the resolver to the async validation and buffering of the chunks,
-  // return the promise.
-  exception_state.ThrowTypeError(
-      "unimplemented - see https://crbug.com/1144908");
-  return ScriptPromise();
+
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+      "media", "SourceBuffer::appendEncodedChunks", TRACE_ID_LOCAL(this));
+
+  if (ThrowExceptionIfRemovedOrUpdating(IsRemoved(), updating_,
+                                        exception_state)) {
+    TRACE_EVENT_NESTABLE_ASYNC_END0(
+        "media", "SourceBuffer::appendEncodedChunks", TRACE_ID_LOCAL(this));
+    return ScriptPromise();
+  }
+
+  // Convert |chunks| to a StreamParser::BufferQueue.
+  // TODO(crbug.com/1144908): Support out-of-order DTS vs PTS sequences. For
+  // now, PTS is assumed to be DTS (as is common in some formats like WebM).
+  // TODO(crbug.com/1144908): Add optional EncodedAudioChunk duration attribute
+  // and require it to be populated for use with MSE. For now, all audio chunks
+  // are estimated.
+  DCHECK(!pending_chunks_to_buffer_);
+  auto buffer_queue = std::make_unique<media::StreamParser::BufferQueue>();
+  size_t size = 0;
+
+  if (chunks.IsEncodedAudioChunk()) {
+    buffer_queue->emplace_back(
+        MakeAudioStreamParserBuffer(*(chunks.GetAsEncodedAudioChunk())));
+    size += buffer_queue->back()->data_size() +
+            buffer_queue->back()->side_data_size();
+  } else if (chunks.IsEncodedVideoChunk()) {
+    const auto& video_chunk = *(chunks.GetAsEncodedVideoChunk());
+    if (!video_chunk.duration().has_value()) {
+      MediaSource::LogAndThrowTypeError(
+          exception_state,
+          "EncodedVideoChunk is missing duration, required for use with "
+          "SourceBuffer.");
+      return ScriptPromise();
+    }
+    buffer_queue->emplace_back(MakeVideoStreamParserBuffer(video_chunk));
+    size += buffer_queue->back()->data_size() +
+            buffer_queue->back()->side_data_size();
+  } else if (chunks.IsEncodedAudioChunkOrEncodedVideoChunkSequence()) {
+    for (const auto& av_chunk :
+         chunks.GetAsEncodedAudioChunkOrEncodedVideoChunkSequence()) {
+      // TODO(crbug.com/1144908): Can null entries occur in the sequence, and
+      // should they be ignored or should they cause exception? Ignoring for
+      // now, if they occur.
+      if (av_chunk.IsNull())
+        continue;
+
+      if (av_chunk.IsEncodedAudioChunk()) {
+        buffer_queue->emplace_back(
+            MakeAudioStreamParserBuffer(*(av_chunk.GetAsEncodedAudioChunk())));
+        size += buffer_queue->back()->data_size() +
+                buffer_queue->back()->side_data_size();
+      } else if (av_chunk.IsEncodedVideoChunk()) {
+        const auto& video_chunk = *(av_chunk.GetAsEncodedVideoChunk());
+        if (!video_chunk.duration().has_value()) {
+          MediaSource::LogAndThrowTypeError(
+              exception_state,
+              "EncodedVideoChunk is missing duration, required for use with "
+              "SourceBuffer.");
+          return ScriptPromise();
+        }
+        buffer_queue->emplace_back(MakeVideoStreamParserBuffer(video_chunk));
+        size += buffer_queue->back()->data_size() +
+                buffer_queue->back()->side_data_size();
+      }
+    }
+  }
+
+  DCHECK(!append_encoded_chunks_resolver_);
+  append_encoded_chunks_resolver_ =
+      MakeGarbageCollected<ScriptPromiseResolver>(script_state);
+  auto promise = append_encoded_chunks_resolver_->Promise();
+
+  // Do remainder of steps of analogue of prepare append algorithm and sending
+  // the |buffer_queue| to be buffered by |web_source_buffer_| asynchronously
+  // only if attachment is usable and underlying demuxer is protected from
+  // destruction (applicable especially for MSE-in-Worker case). Note, we must
+  // have |source_| and |source_| must have an attachment because !IsRemoved().
+  if (!source_->RunUnlessElementGoneOrClosingUs(WTF::Bind(
+          &SourceBuffer::AppendEncodedChunks_Locked, WrapPersistent(this),
+          std::move(buffer_queue), size, WTF::Unretained(&exception_state)))) {
+    // TODO(crbug.com/878133): Determine in specification what the specific,
+    // app-visible, exception should be for this case.
+    MediaSource::LogAndThrowDOMException(
+        exception_state, DOMExceptionCode::kInvalidStateError,
+        "Worker MediaSource attachment is closing");
+    append_encoded_chunks_resolver_ = nullptr;
+    return ScriptPromise();
+  }
+
+  return promise;
+}
+
+void SourceBuffer::AppendEncodedChunks_Locked(
+    std::unique_ptr<media::StreamParser::BufferQueue> buffer_queue,
+    size_t size,
+    ExceptionState* exception_state,
+    MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */) {
+  DVLOG(2) << __func__ << " this=" << this << ", size=" << size;
+  DCHECK(source_);
+  DCHECK(!updating_);
+  source_->AssertAttachmentsMutexHeldIfCrossThreadForDebugging();
+  DCHECK(append_encoded_chunks_resolver_);
+  DCHECK(buffer_queue);
+  DCHECK(!pending_chunks_to_buffer_);
+
+  double media_time = GetMediaTime();
+  if (!PrepareAppend(media_time, size, *exception_state)) {
+    TRACE_EVENT_NESTABLE_ASYNC_END0(
+        "media", "SourceBuffer::appendEncodedChunks", TRACE_ID_LOCAL(this));
+    append_encoded_chunks_resolver_ = nullptr;
+    return;
+  }
+
+  pending_chunks_to_buffer_ = std::move(buffer_queue);
+  updating_ = true;
+
+  // Note, this promisified API does not queue for dispatch events like
+  // 'updatestart', 'update', 'error', 'abort', nor 'updateend' during the scope
+  // of synchronous and asynchronous operation, because the promise's resolution
+  // or rejection indicates the same information and lets us not wait until
+  // those events are dispatched before resolving them. See verbose reasons in
+  // AbortIfUpdating().
+
+  // Asynchronously run the analogue of the buffer append algorithm.
+  append_encoded_chunks_async_task_handle_ = PostCancellableTask(
+      *GetExecutionContext()->GetTaskRunner(TaskType::kMediaElementEvent),
+      FROM_HERE,
+      WTF::Bind(&SourceBuffer::AppendEncodedChunksAsyncPart,
+                WrapPersistent(this)));
+
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1("media", "delay", TRACE_ID_LOCAL(this),
+                                    "type", "initialDelay");
 }
 
 void SourceBuffer::abort(ExceptionState& exception_state) {
@@ -909,17 +1118,41 @@ void SourceBuffer::AbortIfUpdating() {
   DCHECK_EQ(pending_remove_start_, -1);
 
-  const char* trace_event_name = "SourceBuffer::appendBuffer";
-
   // 4.1. Abort the buffer append and stream append loop algorithms if they are
   //      running.
-  append_buffer_async_task_handle_.Cancel();
-  pending_append_data_.clear();
-  pending_append_data_offset_ = 0;
 
   // 4.2. Set the updating attribute to false.
   updating_ = false;
 
+  if (pending_chunks_to_buffer_) {
+    append_encoded_chunks_async_task_handle_.Cancel();
+    pending_chunks_to_buffer_.reset();
+
+    // For async Promise resolution/rejection, we do not use events to notify
+    // the app, since event dispatch could occur after the promise callback
+    // microtask dispatch and violate the design principle, "Events should fire
+    // before Promises resolve", unless we introduced unnecessary further
+    // latency to enqueue a task to resolve/reject the promise. In this case,
+    // the elision of the "abort" and "updateend" events is synonymous with
+    // rejection with an AbortError DOMException, enabling faster abort
+    // notification. See
+    // https://w3ctag.github.io/design-principles/#promises-and-events
+    // TODO(crbug.com/1144908): Consider moving this verbosity to eventual
+    // specification.
+    DCHECK(append_encoded_chunks_resolver_);
+    append_encoded_chunks_resolver_->Reject(MakeGarbageCollected<DOMException>(
+        DOMExceptionCode::kAbortError, "Aborted by explicit abort()"));
+    append_encoded_chunks_resolver_ = nullptr;
+    TRACE_EVENT_NESTABLE_ASYNC_END0(
+        "media", "SourceBuffer::appendEncodedChunks", TRACE_ID_LOCAL(this));
+    return;
+  }
+
+  DCHECK(!append_encoded_chunks_resolver_);
+  append_buffer_async_task_handle_.Cancel();
+  pending_append_data_.clear();
+  pending_append_data_offset_ = 0;
+
+  // For the regular, non-promisified appendBuffer abort, use events to notify
+  // result.
   // 4.3. Queue a task to fire a simple event named abort at this SourceBuffer
   //      object.
   ScheduleEvent(event_type_names::kAbort);
@@ -928,7 +1161,7 @@ void SourceBuffer::AbortIfUpdating() {
   //      SourceBuffer object.
   ScheduleEvent(event_type_names::kUpdateend);
 
-  TRACE_EVENT_NESTABLE_ASYNC_END0("media", trace_event_name,
+  TRACE_EVENT_NESTABLE_ASYNC_END0("media", "SourceBuffer::appendBuffer",
                                   TRACE_ID_LOCAL(this));
 }
@@ -1600,6 +1833,7 @@ void SourceBuffer::NotifyParseWarning(const ParseWarning warning) {
 bool SourceBuffer::HasPendingActivity() const {
   return updating_ || append_buffer_async_task_handle_.IsActive() ||
+         append_encoded_chunks_async_task_handle_.IsActive() ||
          remove_async_task_handle_.IsActive() ||
          (async_event_queue_ && async_event_queue_->HasPendingEvents());
 }
@@ -1609,6 +1843,10 @@ void SourceBuffer::ContextDestroyed() {
   pending_append_data_.clear();
   pending_append_data_offset_ = 0;
 
+  append_encoded_chunks_async_task_handle_.Cancel();
+  pending_chunks_to_buffer_.reset();
+  append_encoded_chunks_resolver_ = nullptr;
+
   remove_async_task_handle_.Cancel();
   pending_remove_start_ = -1;
   pending_remove_end_ = -1;
@@ -1798,6 +2036,23 @@ void SourceBuffer::AppendBufferInternal_Locked(
                                     "type", "initialDelay");
 }
 
+void SourceBuffer::AppendEncodedChunksAsyncPart() {
+  // Do the async append operation only if attachment is usable and underlying
+  // demuxer is protected from destruction (applicable especially for
+  // MSE-in-Worker case).
+  DCHECK(!IsRemoved());  // So must have |source_| and it must have attachment.
+  if (!source_->RunUnlessElementGoneOrClosingUs(
+          WTF::Bind(&SourceBuffer::AppendEncodedChunksAsyncPart_Locked,
+                    WrapPersistent(this)))) {
+    // TODO(crbug.com/878133): Determine in specification what the specific,
+    // app-visible, behavior should be for this case. In this implementation,
+    // the safest thing to do is nothing here now. See more verbose reason in
+    // similar AppendBufferAsyncPart() implementation.
+    DVLOG(1) << __func__ << " this=" << this
+             << ": Worker MediaSource attachment is closing";
+  }
+}
+
 void SourceBuffer::AppendBufferAsyncPart() {
   // Do the async append operation only if attachment is usable and underlying
   // demuxer is protected from destruction (applicable especially for
@@ -1820,6 +2075,51 @@ void SourceBuffer::AppendBufferAsyncPart() {
   }
 }
 
+void SourceBuffer::AppendEncodedChunksAsyncPart_Locked(
+    MediaSourceAttachmentSupplement::ExclusiveKey pass_key) {
+  DCHECK(source_);
+  source_->AssertAttachmentsMutexHeldIfCrossThreadForDebugging();
+  DCHECK(updating_);
+  DCHECK(append_encoded_chunks_resolver_);
+  DCHECK(pending_chunks_to_buffer_);
+
+  // Run the analogue to the segment parser loop.
+  // TODO(crbug.com/1144908): Consider buffering |pending_chunks_to_buffer_| in
+  // multiple async iterations if it contains many buffers. It is unclear if
+  // this is necessary when buffering encoded chunks.
+  TRACE_EVENT_NESTABLE_ASYNC_END0("media", "delay", TRACE_ID_LOCAL(this));
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1("media", "appending", TRACE_ID_LOCAL(this),
+                                    "chunkCount",
+                                    pending_chunks_to_buffer_->size());
+
+  bool append_success = web_source_buffer_->AppendChunks(
+      std::move(pending_chunks_to_buffer_), &timestamp_offset_);
+
+  if (!append_success) {
+    AppendError(pass_key);
+    append_encoded_chunks_resolver_->Reject(MakeGarbageCollected<DOMException>(
+        DOMExceptionCode::kSyntaxError,
+        "Parsing or frame processing error while buffering encoded chunks."));
+    append_encoded_chunks_resolver_ = nullptr;
+  } else {
+    updating_ = false;
+
+    // Don't schedule 'update' or 'updateend' for this promisified async
+    // method's completion. Promise resolution/rejection will signal same,
+    // faster.
+    append_encoded_chunks_resolver_->Resolve();
+    append_encoded_chunks_resolver_ = nullptr;
+  }
+
+  TRACE_EVENT_NESTABLE_ASYNC_END0("media", "appending", TRACE_ID_LOCAL(this));
+  TRACE_EVENT_NESTABLE_ASYNC_END0("media", "SourceBuffer::appendEncodedChunks",
+                                  TRACE_ID_LOCAL(this));
+
+  DVLOG(3) << __func__ << " done. this=" << this
+           << " media_time=" << GetMediaTime() << " buffered="
+           << WebTimeRangesToString(web_source_buffer_->Buffered());
+}
+
 void SourceBuffer::AppendBufferAsyncPart_Locked(
     MediaSourceAttachmentSupplement::ExclusiveKey pass_key) {
   DCHECK(source_);
@@ -1839,7 +2139,7 @@ void SourceBuffer::AppendBufferAsyncPart_Locked(
   // doesn't block the renderer event loop very long. This value was selected
   // by looking at YouTube SourceBuffer usage across a variety of bitrates.
   // This value allows relatively large appends while keeping append() call
   // duration in the ~5-15ms range. Note that even in MSE-in-Worker case, we
   // retain this behavior because some synchronous operations done by the main
   // thread media element on our attachment block until we are finished and have
   // exited the attachment's RunExclusively() callback scope.
@@ -1966,13 +2266,18 @@ void SourceBuffer::AppendError(
   // 2. Set the updating attribute to false.
   updating_ = false;
 
-  // 3. Queue a task to fire a simple event named error at this SourceBuffer
-  //    object.
-  ScheduleEvent(event_type_names::kError);
+  // Only schedule 'error' and 'updateend' here for the non-promisified regular
+  // appendBuffer asynchronous operation error. The promisified
+  // appendEncodedChunks rejection will be handled by caller.
+  if (!append_encoded_chunks_resolver_) {
+    // 3. Queue a task to fire a simple event named error at this SourceBuffer
+    //    object.
+    ScheduleEvent(event_type_names::kError);
 
-  // 4. Queue a task to fire a simple event named updateend at this SourceBuffer
-  //    object.
-  ScheduleEvent(event_type_names::kUpdateend);
+    // 4. Queue a task to fire a simple event named updateend at this
+    //    SourceBuffer object.
+    ScheduleEvent(event_type_names::kUpdateend);
+  }
 
   // 5. If decode error is true, then run the end of stream algorithm with the
   //    error parameter set to "decode".
@@ -1984,6 +2289,7 @@ void SourceBuffer::Trace(Visitor* visitor) const {
   visitor->Trace(source_);
   visitor->Trace(track_defaults_);
   visitor->Trace(async_event_queue_);
+  visitor->Trace(append_encoded_chunks_resolver_);
   visitor->Trace(audio_tracks_);
   visitor->Trace(video_tracks_);
   EventTargetWithInlineData::Trace(visitor);
...
@@ -34,6 +34,7 @@
 #include <memory>
 
 #include "base/memory/scoped_refptr.h"
+#include "media/base/stream_parser.h"
 #include "third_party/blink/public/platform/web_source_buffer_client.h"
 #include "third_party/blink/renderer/bindings/core/v8/active_script_wrappable.h"
 #include "third_party/blink/renderer/bindings/core/v8/script_promise.h"
@@ -58,6 +59,7 @@ class ExceptionState;
 class MediaSource;
 class MediaSourceTracer;
 class MediaSourceAttachmentSupplement;
+class ScriptPromiseResolver;
 class ScriptState;
 class SourceBufferConfig;
 class TimeRanges;
@@ -153,6 +155,7 @@ class SourceBuffer final : public EventTargetWithInlineData,
   bool PrepareAppend(double media_time, size_t new_data_size, ExceptionState&);
   bool EvictCodedFrames(double media_time, size_t new_data_size);
   void AppendBufferInternal(const unsigned char*, size_t, ExceptionState&);
+  void AppendEncodedChunksAsyncPart();
   void AppendBufferAsyncPart();
   void AppendError(MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
@@ -188,11 +191,18 @@ class SourceBuffer final : public EventTargetWithInlineData,
       const String& type,
       ExceptionState*,
       MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
+  void AppendEncodedChunks_Locked(
+      std::unique_ptr<media::StreamParser::BufferQueue> buffer_queue,
+      size_t size,
+      ExceptionState* exception_state,
+      MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
   void AppendBufferInternal_Locked(
       const unsigned char*,
       size_t,
      ExceptionState*,
       MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
+  void AppendEncodedChunksAsyncPart_Locked(
+      MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
   void AppendBufferAsyncPart_Locked(
       MediaSourceAttachmentSupplement::ExclusiveKey /* passkey */);
   void RemoveAsyncPart_Locked(
@@ -238,6 +248,7 @@ class SourceBuffer final : public EventTargetWithInlineData,
   AtomicString mode_;
   bool updating_;
   double timestamp_offset_;
 
   Member<AudioTrackList> audio_tracks_;
   Member<VideoTrackList> video_tracks_;
@@ -245,10 +256,25 @@ class SourceBuffer final : public EventTargetWithInlineData,
   double append_window_end_;
   bool first_initialization_segment_received_;
 
+  // |updating_| logic, per spec, allows at most one of the following async
+  // operations to be exclusively pending for this SourceBuffer: appendBuffer(),
+  // appendEncodedChunks(), or remove(). The following three sections
+  // respectively track the async state for these pending operations:
+
+  // These are valid only during the scope of synchronous and asynchronous
+  // follow-up of appendBuffer().
   Vector<unsigned char> pending_append_data_;
   wtf_size_t pending_append_data_offset_;
   TaskHandle append_buffer_async_task_handle_;
 
+  // This resolver is set and valid only during the scope of synchronous and
+  // asynchronous follow-up of appendEncodedChunks().
+  std::unique_ptr<media::StreamParser::BufferQueue> pending_chunks_to_buffer_;
+  Member<ScriptPromiseResolver> append_encoded_chunks_resolver_;
+  TaskHandle append_encoded_chunks_async_task_handle_;
+
+  // These are valid only during the scope of synchronous and asynchronous
+  // follow-up of remove().
   double pending_remove_start_;
   double pending_remove_end_;
   TaskHandle remove_async_task_handle_;
...
@@ -845,6 +845,14 @@ _CONFIG = [
             'media::.+',
         ]
     },
+    {
+        'paths': [
+            'third_party/blink/renderer/modules/mediasource/',
+        ],
+        'allowed': [
+            'media::.+',
+        ]
+    },
     {
         'paths': [
             'third_party/blink/renderer/modules/webcodecs/',
...