Commit 321b9e0f authored by Thomas Guilbert, committed by Chromium LUCI CQ

Add MediaStreamAudioTrackUnderlyingSource

This CL introduces MediaStreamAudioTrackUnderlyingSource. It is an
audio sink (in the MediaStream sense) which receives audio data. It
is also an underlying source (in the Streams API sense), which converts
the data received from an audio track into blink::AudioFrames, which
can then be pulled from a ReadableStream.

The CL also removes PushableAudioData and replaces it with
AudioFrameSerializationData, which will be improved upon (or replaced)
in crbug.com/1168418.

Bug: 1157608, 1168418
Change-Id: I8230ada4331899d8f1ef7373fb3f3eeb9a5fde95
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2640913
Commit-Queue: Thomas Guilbert <tguilbert@chromium.org>
Auto-Submit: Thomas Guilbert <tguilbert@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Guido Urdaneta <guidou@chromium.org>
Reviewed-by: Chrome Cunningham <chcunningham@chromium.org>
Cr-Commit-Position: refs/heads/master@{#846314}
parent 81e1b64b
......@@ -355,6 +355,7 @@ source_set("unit_tests") {
"mediastream/media_devices_test.cc",
"mediastream/media_stream_audio_processor_test.cc",
"mediastream/media_stream_audio_track_underlying_sink_test.cc",
"mediastream/media_stream_audio_track_underlying_source_test.cc",
"mediastream/media_stream_constraints_util_audio_test.cc",
"mediastream/media_stream_constraints_util_sets_test.cc",
"mediastream/media_stream_constraints_util_test.cc",
......
......@@ -33,6 +33,8 @@ blink_modules_sources("mediastream") {
"media_stream_audio_processor.h",
"media_stream_audio_track_underlying_sink.cc",
"media_stream_audio_track_underlying_sink.h",
"media_stream_audio_track_underlying_source.cc",
"media_stream_audio_track_underlying_source.h",
"media_stream_constraints_util.cc",
"media_stream_constraints_util.h",
"media_stream_constraints_util_audio.cc",
......
......@@ -46,6 +46,7 @@ include_rules = [
"+third_party/blink/renderer/modules/modules_export.h",
"+third_party/blink/renderer/modules/peerconnection",
"+third_party/blink/renderer/modules/webcodecs/audio_frame.h",
"+third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h",
"+third_party/blink/renderer/modules/webcodecs/video_frame.h",
"+third_party/blink/renderer/modules/webrtc",
"+ui/gfx/geometry/size.h",
......
......@@ -8,6 +8,7 @@
#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_frame.h"
#include "third_party/blink/renderer/modules/mediastream/pushable_media_stream_audio_source.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
......@@ -51,9 +52,7 @@ ScriptPromise MediaStreamAudioTrackUnderlyingSink::write(
return ScriptPromise();
}
base::TimeTicks estimated_capture_time = base::TimeTicks::Now();
pushable_source->PushAudioData(audio_frame->GetPushableAudioData(),
estimated_capture_time);
pushable_source->PushAudioData(audio_frame->GetSerializationData());
audio_frame->close();
return ScriptPromise::CastUndefined(script_state);
......
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/mediastream/media_stream_audio_track_underlying_source.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_throw_dom_exception.h"
#include "third_party/blink/renderer/core/dom/dom_exception.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/core/streams/readable_stream_default_controller_with_script_scope.h"
#include "third_party/blink/renderer/modules/mediastream/media_stream_track.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
#include "third_party/blink/renderer/platform/bindings/exception_code.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_audio_track.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
#include "third_party/webrtc/api/frame_transformer_interface.h"
namespace blink {
// Builds an underlying source that pulls audio from |track|. Frames are held
// in an internal queue of at most |max_queue_size| entries (clamped to a
// minimum of 1) before being handed to the stream controller.
MediaStreamAudioTrackUnderlyingSource::MediaStreamAudioTrackUnderlyingSource(
    ScriptState* script_state,
    MediaStreamComponent* track,
    wtf_size_t max_queue_size)
    : UnderlyingSourceBase(script_state),
      // All stream interaction happens on this main-thread task runner; audio
      // arriving on other threads is posted here (see OnData()).
      main_task_runner_(ExecutionContext::From(script_state)
                            ->GetTaskRunner(TaskType::kInternalMediaRealTime)),
      track_(track),
      // A capacity of 0 would make the source unable to deliver anything, so
      // force a minimum queue size of one frame.
      max_queue_size_(std::max(1u, max_queue_size)) {
  DCHECK(track_);
}
// Called by the stream machinery when the consumer wants a frame. If one is
// already buffered it is delivered immediately; otherwise the request is
// remembered and satisfied by the next incoming frame (see
// OnDataOnMainThread()).
ScriptPromise MediaStreamAudioTrackUnderlyingSource::pull(
    ScriptState* script_state) {
  DCHECK(main_task_runner_->RunsTasksInCurrentSequence());
  const bool has_buffered_frames = !queue_.empty();
  if (has_buffered_frames)
    ProcessPullRequest();
  else
    is_pending_pull_ = true;
  // Whether we delivered or deferred, there is now room for at least one
  // more frame in the queue.
  DCHECK_LT(queue_.size(), max_queue_size_);
  return ScriptPromise::CastUndefined(script_state);
}
// Invoked when the ReadableStream starts. Registers |this| as an audio sink
// of the track so OnData() begins receiving buffers. Rejects with an
// InvalidStateError when the component has no platform audio track.
ScriptPromise MediaStreamAudioTrackUnderlyingSource::Start(
    ScriptState* script_state) {
  DCHECK(main_task_runner_->RunsTasksInCurrentSequence());
  if (!MediaStreamAudioTrack::From(track_)) {
    return ScriptPromise::RejectWithDOMException(
        script_state,
        DOMException::Create(
            "No input track",
            DOMException::GetErrorName(DOMExceptionCode::kInvalidStateError)));
  }
  WebMediaStreamAudioSink::AddToAudioTrack(this, WebMediaStreamTrack(track_));
  return ScriptPromise::CastUndefined(script_state);
}
// Invoked when the consumer cancels the ReadableStream. Stops receiving audio
// by detaching |this| from the track.
ScriptPromise MediaStreamAudioTrackUnderlyingSource::Cancel(
    ScriptState* script_state,
    ScriptValue reason) {
  // |reason| is intentionally ignored; cancellation always just disconnects.
  DisconnectFromTrack();
  return ScriptPromise::CastUndefined(script_state);
}
// Detaches |this| from the audio track (if still attached) so no further
// OnData() calls are delivered, and drops the reference to the track.
void MediaStreamAudioTrackUnderlyingSource::DisconnectFromTrack() {
  DCHECK(main_task_runner_->RunsTasksInCurrentSequence());
  // Idempotent: a second call (e.g. Cancel() after Close()) is a no-op.
  if (!track_)
    return;
  WebMediaStreamAudioSink::RemoveFromAudioTrack(this,
                                                WebMediaStreamTrack(track_));
  track_.Clear();
}
// GarbageCollected support: traces the track member in addition to the base
// class state.
void MediaStreamAudioTrackUnderlyingSource::Trace(Visitor* visitor) const {
  visitor->Trace(track_);
  UnderlyingSourceBase::Trace(visitor);
}
// Test-only accessor for the stream controller's desired size. Callers must
// ensure a controller exists (i.e. a stream was created for this source);
// this dereferences Controller() unconditionally.
double MediaStreamAudioTrackUnderlyingSource::DesiredSizeForTesting() const {
  return Controller()->DesiredSize();
}
// Tears the source down: disconnects from the track, closes the stream
// controller (when it still exists), and discards any buffered frames.
void MediaStreamAudioTrackUnderlyingSource::Close() {
  DCHECK(main_task_runner_->RunsTasksInCurrentSequence());
  DisconnectFromTrack();
  // Check for Controller(), as the context might have been destroyed.
  if (Controller())
    Controller()->Close();
  queue_.clear();
}
// WebMediaStreamAudioSink implementation. Runs on the audio delivery thread,
// so the bus is copied and the copy is posted to the main thread, where all
// stream state lives.
void MediaStreamAudioTrackUnderlyingSource::OnData(
    const media::AudioBus& audio_bus,
    base::TimeTicks estimated_capture_time) {
  // OnSetFormat() must have provided a valid format before any data arrives.
  DCHECK(audio_parameters_.IsValid());

  // |audio_bus| is only valid for the duration of this call; copy it so the
  // data can safely cross threads.
  auto data_copy =
      media::AudioBus::Create(audio_bus.channels(), audio_bus.frames());
  audio_bus.CopyTo(data_copy.get());

  // Express the capture time as a delta from a null base::TimeTicks so it can
  // be stored in the serialization data's TimeDelta timestamp.
  auto queue_data = AudioFrameSerializationData::Wrap(
      std::move(data_copy), audio_parameters_.sample_rate(),
      estimated_capture_time - base::TimeTicks());

  PostCrossThreadTask(
      *main_task_runner_, FROM_HERE,
      CrossThreadBindOnce(
          &MediaStreamAudioTrackUnderlyingSource::OnDataOnMainThread,
          WrapCrossThreadPersistent(this), std::move(queue_data)));
}
// Main-thread continuation of OnData(): either forwards the frame straight to
// the stream controller (when a pull is pending and nothing is buffered) or
// appends it to the internal ring-buffer-style queue.
void MediaStreamAudioTrackUnderlyingSource::OnDataOnMainThread(
    std::unique_ptr<AudioFrameSerializationData> queue_data) {
  DCHECK(main_task_runner_->RunsTasksInCurrentSequence());
  DCHECK_LE(queue_.size(), max_queue_size_);

  // Fast path: a consumer is waiting and no older frames precede this one, so
  // the queue can be bypassed entirely.
  if (is_pending_pull_ && queue_.empty()) {
    SendFrameToStream(std::move(queue_data));
    return;
  }

  // Ring-buffer behavior: when full, drop the oldest frame to make room for
  // the newest one.
  if (queue_.size() == max_queue_size_)
    queue_.pop_front();
  queue_.emplace_back(std::move(queue_data));

  if (is_pending_pull_)
    ProcessPullRequest();
}
// WebMediaStreamAudioSink implementation. Records the audio format used to
// interpret subsequent OnData() calls.
void MediaStreamAudioTrackUnderlyingSource::OnSetFormat(
    const media::AudioParameters& params) {
  DCHECK(params.IsValid());
  audio_parameters_ = params;
}
// Moves the oldest buffered frame into the stream controller. Must only be
// called when the queue is non-empty.
void MediaStreamAudioTrackUnderlyingSource::ProcessPullRequest() {
  DCHECK(!queue_.empty());
  SendFrameToStream(std::move(queue_.front()));
  queue_.pop_front();
}
// Wraps |queue_data| in a new AudioFrame and enqueues it on the stream
// controller, satisfying any outstanding pull request.
void MediaStreamAudioTrackUnderlyingSource::SendFrameToStream(
    std::unique_ptr<AudioFrameSerializationData> queue_data) {
  DCHECK(Controller());
  AudioFrame* audio_frame =
      MakeGarbageCollected<AudioFrame>(std::move(queue_data));
  Controller()->Enqueue(audio_frame);
  is_pending_pull_ = false;
}
} // namespace blink
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_MEDIA_STREAM_AUDIO_TRACK_UNDERLYING_SOURCE_H_
#define THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_MEDIA_STREAM_AUDIO_TRACK_UNDERLYING_SOURCE_H_
#include "base/threading/thread_checker.h"
#include "media/base/audio_parameters.h"
#include "third_party/blink/public/platform/modules/mediastream/web_media_stream_audio_sink.h"
#include "third_party/blink/renderer/core/streams/underlying_source_base.h"
#include "third_party/blink/renderer/modules/modules_export.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame.h"
#include "third_party/blink/renderer/platform/wtf/deque.h"
namespace blink {
class AudioFrameSerializationData;
class MediaStreamComponent;
// An audio sink (in the MediaStream sense) that is also an underlying source
// (in the Streams API sense): audio delivered by a MediaStream audio track is
// converted to AudioFrameSerializationData and buffered so that it can be
// pulled out of a ReadableStream as blink::AudioFrames.
//
// Audio arrives via OnData() (audio thread) and is posted to the main thread,
// where all stream interaction happens.
class MODULES_EXPORT MediaStreamAudioTrackUnderlyingSource
    : public UnderlyingSourceBase,
      public WebMediaStreamAudioSink {
 public:
  // |max_queue_size| is clamped to a minimum of 1 by the implementation.
  // (Parameter renamed from |queue_size| for consistency with the definition
  // and with MaxQueueSize().)
  explicit MediaStreamAudioTrackUnderlyingSource(ScriptState*,
                                                 MediaStreamComponent*,
                                                 wtf_size_t max_queue_size);
  MediaStreamAudioTrackUnderlyingSource(
      const MediaStreamAudioTrackUnderlyingSource&) = delete;
  MediaStreamAudioTrackUnderlyingSource& operator=(
      const MediaStreamAudioTrackUnderlyingSource&) = delete;

  // UnderlyingSourceBase overrides. All run on the main thread.
  ScriptPromise pull(ScriptState*) override;
  ScriptPromise Start(ScriptState*) override;
  ScriptPromise Cancel(ScriptState*, ScriptValue reason) override;

  // WebMediaStreamAudioSink overrides.
  void OnData(const media::AudioBus& audio_bus,
              base::TimeTicks estimated_capture_time) override;
  void OnSetFormat(const media::AudioParameters& params) override;

  MediaStreamComponent* Track() const { return track_.Get(); }
  wtf_size_t MaxQueueSize() const { return max_queue_size_; }

  const Deque<std::unique_ptr<AudioFrameSerializationData>>& QueueForTesting()
      const {
    return queue_;
  }
  bool IsPendingPullForTesting() const { return is_pending_pull_; }
  double DesiredSizeForTesting() const;

  // Disconnects from the track, closes the stream controller and clears the
  // internal queue.
  void Close();

  void Trace(Visitor*) const override;

 private:
  // Delivers the frame at the head of |queue_| to the stream controller.
  void ProcessPullRequest();
  // Enqueues a frame on the stream controller and resets |is_pending_pull_|.
  void SendFrameToStream(std::unique_ptr<AudioFrameSerializationData>);
  // Stops receiving data from the track. Idempotent.
  void DisconnectFromTrack();
  // Main-thread continuation of OnData().
  void OnDataOnMainThread(std::unique_ptr<AudioFrameSerializationData> data);

  const scoped_refptr<base::SequencedTaskRunner> main_task_runner_;
  Member<MediaStreamComponent> track_;
  // Format of incoming audio, set via OnSetFormat().
  media::AudioParameters audio_parameters_;

  // An internal deque prior to the stream controller's queue. It acts as a
  // ring buffer and allows dropping old frames instead of new ones in case
  // frames accumulate due to slow consumption.
  Deque<std::unique_ptr<AudioFrameSerializationData>> queue_;
  const wtf_size_t max_queue_size_;
  // True when the consumer pulled while |queue_| was empty; the next incoming
  // frame then bypasses the queue.
  bool is_pending_pull_ = false;
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_MEDIA_STREAM_AUDIO_TRACK_UNDERLYING_SOURCE_H_
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/mediastream/media_stream_audio_track_underlying_source.h"
#include "base/run_loop.h"
#include "base/test/gmock_callback_support.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/platform/modules/mediastream/web_media_stream_track.h"
#include "third_party/blink/public/web/web_heap.h"
#include "third_party/blink/renderer/bindings/core/v8/script_promise_tester.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/core/streams/readable_stream.h"
#include "third_party/blink/renderer/core/streams/readable_stream_default_controller_with_script_scope.h"
#include "third_party/blink/renderer/modules/mediastream/media_stream_track.h"
#include "third_party/blink/renderer/modules/mediastream/mock_media_stream_audio_sink.h"
#include "third_party/blink/renderer/modules/mediastream/pushable_media_stream_audio_source.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_audio_track.h"
#include "third_party/blink/renderer/platform/testing/io_task_runner_testing_platform_support.h"
#include "third_party/blink/renderer/platform/testing/testing_platform_support.h"
using testing::_;
using testing::AnyNumber;
namespace blink {
// Test fixture that wires a PushableMediaStreamAudioSource into a
// MediaStreamSource/MediaStreamComponent pair, so tests can push synthetic
// audio frames into the track that the source-under-test sinks.
class MediaStreamAudioTrackUnderlyingSourceTest : public testing::Test {
 public:
  MediaStreamAudioTrackUnderlyingSourceTest()
      : media_stream_source_(MakeGarbageCollected<MediaStreamSource>(
            "dummy_source_id",
            MediaStreamSource::kTypeAudio,
            "dummy_source_name",
            false /* remote */)),
        pushable_audio_source_(new PushableMediaStreamAudioSource(
            Thread::MainThread()->GetTaskRunner(),
            Platform::Current()->GetIOTaskRunner())) {
    // |media_stream_source_| takes ownership of the pushable source; the raw
    // pointer is retained for pushing frames from tests.
    media_stream_source_->SetPlatformSource(
        base::WrapUnique(pushable_audio_source_));
    component_ = MakeGarbageCollected<MediaStreamComponent>(
        String::FromUTF8("audio_track"), media_stream_source_);
    pushable_audio_source_->ConnectToTrack(component_);
  }

  ~MediaStreamAudioTrackUnderlyingSourceTest() override {
    // Drain pending cross-thread tasks before tearing down and collecting
    // garbage.
    platform_->RunUntilIdle();
    component_ = nullptr;
    media_stream_source_ = nullptr;
    WebHeap::CollectAllGarbageForTesting();
  }

  // Returns the component of a new MediaStreamTrack wrapping the fixture's
  // shared component.
  MediaStreamComponent* CreateTrack(ExecutionContext* execution_context) {
    return MakeGarbageCollected<MediaStreamTrack>(execution_context, component_)
        ->Component();
  }

  // Creates the source-under-test with the given internal queue capacity.
  MediaStreamAudioTrackUnderlyingSource* CreateSource(ScriptState* script_state,
                                                      wtf_size_t buffer_size) {
    MediaStreamComponent* track =
        MakeGarbageCollected<MediaStreamTrack>(
            ExecutionContext::From(script_state), component_)
            ->Component();
    return MakeGarbageCollected<MediaStreamAudioTrackUnderlyingSource>(
        script_state, track, buffer_size);
  }

  // Convenience overload using a queue capacity of 1.
  MediaStreamAudioTrackUnderlyingSource* CreateSource(
      ScriptState* script_state) {
    return CreateSource(script_state, 1u);
  }

 protected:
  // Pushes a small stereo frame into the track and spins until delivery.
  void PushFrame(
      const base::Optional<base::TimeDelta>& timestamp = base::nullopt) {
    auto data = AudioFrameSerializationData::Wrap(
        media::AudioBus::Create(/*channels=*/2, /*frames=*/10),
        /*sample_rate=*/8000,
        timestamp.value_or(base::TimeDelta::FromSeconds(1)));
    pushable_audio_source_->PushAudioData(std::move(data));
    platform_->RunUntilIdle();
  }

  ScopedTestingPlatformSupport<IOTaskRunnerTestingPlatformSupport> platform_;
  Persistent<MediaStreamSource> media_stream_source_;
  Persistent<MediaStreamComponent> component_;
  // Owned by |media_stream_source_|.
  PushableMediaStreamAudioSource* const pushable_audio_source_;
};
// Verifies the end-to-end path: a frame pushed into the track resolves a
// pending read() on the ReadableStream built on top of the source.
TEST_F(MediaStreamAudioTrackUnderlyingSourceTest,
       AudioFrameFlowsThroughStreamAndCloses) {
  V8TestingScope v8_scope;
  ScriptState* script_state = v8_scope.GetScriptState();
  auto* source = CreateSource(script_state);
  auto* stream =
      ReadableStream::CreateWithCountQueueingStrategy(script_state, source, 0);

  NonThrowableExceptionState exception_state;
  auto* reader =
      stream->GetDefaultReaderForTesting(script_state, exception_state);

  ScriptPromiseTester read_tester(script_state,
                                  reader->read(script_state, exception_state));
  // Nothing has been pushed yet, so the read must still be pending.
  EXPECT_FALSE(read_tester.IsFulfilled());
  PushFrame();
  read_tester.WaitUntilSettled();
  EXPECT_TRUE(read_tester.IsFulfilled());
  source->Close();
}
// Verifies that canceling the ReadableStream detaches the source from its
// audio track.
TEST_F(MediaStreamAudioTrackUnderlyingSourceTest,
       CancelStreamDisconnectsFromTrack) {
  V8TestingScope v8_scope;
  ScriptState* script_state = v8_scope.GetScriptState();
  auto* source = CreateSource(script_state);
  auto* stream =
      ReadableStream::CreateWithCountQueueingStrategy(script_state, source, 0);

  // The stream is connected to a sink.
  EXPECT_TRUE(source->Track());

  NonThrowableExceptionState exception_state;
  stream->cancel(script_state, exception_state);

  // Canceling the stream disconnects it from the track.
  EXPECT_FALSE(source->Track());
}
// Fills the internal queue beyond capacity and checks the ring-buffer
// behavior (oldest frames dropped), then that pull() drains from the head.
TEST_F(MediaStreamAudioTrackUnderlyingSourceTest,
       DropOldFramesWhenQueueIsFull) {
  V8TestingScope v8_scope;
  ScriptState* script_state = v8_scope.GetScriptState();
  const wtf_size_t buffer_size = 5;
  auto* source = CreateSource(script_state, buffer_size);
  EXPECT_EQ(source->MaxQueueSize(), buffer_size);
  // Create a stream to ensure there is a controller associated to the source.
  ReadableStream::CreateWithCountQueueingStrategy(script_state, source, 0);

  // Add a sink to the track to make it possible to wait until a pushed frame
  // is delivered to sinks, including |source|, which is a sink of the track.
  MockMediaStreamAudioSink mock_sink;
  WebMediaStreamTrack track(source->Track());
  WebMediaStreamAudioSink::AddToAudioTrack(&mock_sink, track);

  // Pushes a frame and blocks until the track has delivered it to its sinks.
  auto push_frame_sync = [&mock_sink, this](const base::TimeDelta timestamp) {
    base::RunLoop sink_loop;
    EXPECT_CALL(mock_sink, OnData(_, _))
        .WillOnce(base::test::RunOnceClosure(sink_loop.QuitClosure()));
    PushFrame(timestamp);
    sink_loop.Run();
  };

  const auto& queue = source->QueueForTesting();
  // Fill the queue exactly to capacity, tagging each frame with a distinct
  // timestamp so ordering can be observed.
  for (wtf_size_t i = 0; i < buffer_size; ++i) {
    EXPECT_EQ(queue.size(), i);
    base::TimeDelta timestamp = base::TimeDelta::FromSeconds(i);
    push_frame_sync(timestamp);
    EXPECT_EQ(queue.back()->timestamp(), timestamp);
    EXPECT_EQ(queue.front()->timestamp(), base::TimeDelta());
  }

  // Push another frame while the queue is full.
  EXPECT_EQ(queue.size(), buffer_size);
  push_frame_sync(base::TimeDelta::FromSeconds(buffer_size));

  // Since the queue was full, the oldest frame from the queue should have been
  // dropped.
  EXPECT_EQ(queue.size(), buffer_size);
  EXPECT_EQ(queue.back()->timestamp(),
            base::TimeDelta::FromSeconds(buffer_size));
  EXPECT_EQ(queue.front()->timestamp(), base::TimeDelta::FromSeconds(1));

  // Pulling with frames in the queue should move the oldest frame in the queue
  // to the stream's controller.
  EXPECT_EQ(source->DesiredSizeForTesting(), 0);
  EXPECT_FALSE(source->IsPendingPullForTesting());
  source->pull(script_state);
  EXPECT_EQ(source->DesiredSizeForTesting(), -1);
  EXPECT_FALSE(source->IsPendingPullForTesting());
  EXPECT_EQ(queue.size(), buffer_size - 1);
  EXPECT_EQ(queue.front()->timestamp(), base::TimeDelta::FromSeconds(2));

  source->Close();
  EXPECT_EQ(queue.size(), 0u);
  WebMediaStreamAudioSink::RemoveFromAudioTrack(&mock_sink, track);
}
// Verifies that a frame arriving while a pull is pending and the internal
// queue is empty is sent straight to the stream controller.
TEST_F(MediaStreamAudioTrackUnderlyingSourceTest,
       BypassQueueAfterPullWithEmptyBuffer) {
  V8TestingScope v8_scope;
  ScriptState* script_state = v8_scope.GetScriptState();
  auto* source = CreateSource(script_state);
  // Create a stream to ensure there is a controller associated to the source.
  ReadableStream::CreateWithCountQueueingStrategy(script_state, source, 0);

  MockMediaStreamAudioSink mock_sink;
  WebMediaStreamTrack track(source->Track());
  WebMediaStreamAudioSink::AddToAudioTrack(&mock_sink, track);

  // Pushes a frame and blocks until the track has delivered it to its sinks.
  auto push_frame_sync = [&mock_sink, this]() {
    base::RunLoop sink_loop;
    EXPECT_CALL(mock_sink, OnData(_, _))
        .WillOnce(base::test::RunOnceClosure(sink_loop.QuitClosure()));
    PushFrame();
    sink_loop.Run();
  };

  // At first, the queue is empty and the desired size is empty as well.
  EXPECT_TRUE(source->QueueForTesting().empty());
  EXPECT_EQ(source->DesiredSizeForTesting(), 0);
  EXPECT_FALSE(source->IsPendingPullForTesting());

  // Pulling with nothing buffered records a pending pull request.
  source->pull(script_state);
  EXPECT_TRUE(source->QueueForTesting().empty());
  EXPECT_EQ(source->DesiredSizeForTesting(), 0);
  EXPECT_TRUE(source->IsPendingPullForTesting());

  push_frame_sync();

  // Since a pull was pending, the frame is put directly in the stream
  // controller, bypassing the source queue.
  EXPECT_TRUE(source->QueueForTesting().empty());
  EXPECT_EQ(source->DesiredSizeForTesting(), -1);
  EXPECT_FALSE(source->IsPendingPullForTesting());

  source->Close();
  WebMediaStreamAudioSink::RemoveFromAudioTrack(&mock_sink, track);
}
// Verifies the constructor clamps a requested queue size of 0 up to 1.
TEST_F(MediaStreamAudioTrackUnderlyingSourceTest, QueueSizeCannotBeZero) {
  V8TestingScope v8_scope;
  ScriptState* script_state = v8_scope.GetScriptState();
  auto* source = CreateSource(script_state, 0u);
  // Queue size is always at least 1, even if 0 is requested.
  EXPECT_EQ(source->MaxQueueSize(), 1u);
  source->Close();
}
} // namespace blink
......@@ -5,6 +5,7 @@
#include "third_party/blink/renderer/modules/mediastream/pushable_media_stream_audio_source.h"
#include "third_party/blink/public/mojom/mediastream/media_stream.mojom-blink.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/wtf/functional.h"
......@@ -21,13 +22,12 @@ void PushableMediaStreamAudioSource::LivenessBroker::
}
void PushableMediaStreamAudioSource::LivenessBroker::PushAudioData(
std::unique_ptr<PushableAudioData> data,
base::TimeTicks reference_time) {
std::unique_ptr<AudioFrameSerializationData> data) {
WTF::MutexLocker locker(mutex_);
if (!source_)
return;
source_->DeliverData(std::move(data), reference_time);
source_->DeliverData(std::move(data));
}
PushableMediaStreamAudioSource::PushableMediaStreamAudioSource(
......@@ -44,10 +44,9 @@ PushableMediaStreamAudioSource::~PushableMediaStreamAudioSource() {
}
void PushableMediaStreamAudioSource::PushAudioData(
std::unique_ptr<PushableAudioData> data,
base::TimeTicks reference_time) {
std::unique_ptr<AudioFrameSerializationData> data) {
if (audio_task_runner_->RunsTasksInCurrentSequence()) {
DeliverData(std::move(data), reference_time);
DeliverData(std::move(data));
return;
}
......@@ -55,16 +54,15 @@ void PushableMediaStreamAudioSource::PushAudioData(
*audio_task_runner_, FROM_HERE,
CrossThreadBindOnce(
&PushableMediaStreamAudioSource::LivenessBroker::PushAudioData,
liveness_broker_, std::move(data), reference_time));
liveness_broker_, std::move(data)));
}
void PushableMediaStreamAudioSource::DeliverData(
std::unique_ptr<PushableAudioData> data,
base::TimeTicks reference_time) {
std::unique_ptr<AudioFrameSerializationData> data) {
DCHECK(audio_task_runner_->RunsTasksInCurrentSequence());
const media::AudioBus& audio_bus = *data->data();
int sample_rate = data->sampleRate();
int sample_rate = data->sample_rate();
media::AudioParameters params = GetAudioParameters();
if (!params.IsValid() ||
......@@ -80,7 +78,7 @@ void PushableMediaStreamAudioSource::DeliverData(
last_frames_ = audio_bus.frames();
}
DeliverDataToTracks(audio_bus, reference_time);
DeliverDataToTracks(audio_bus, base::TimeTicks() + data->timestamp());
}
bool PushableMediaStreamAudioSource::EnsureSourceIsStarted() {
......
......@@ -13,14 +13,7 @@
namespace blink {
// Wrapper that abstracts how audio data is actually backed, to simplify
// lifetime guarantees when jumping threads.
class PushableAudioData {
public:
virtual ~PushableAudioData() = default;
virtual media::AudioBus* data() = 0;
virtual int sampleRate() = 0;
};
class AudioFrameSerializationData;
// Simplifies the creation of audio tracks.
class MODULES_EXPORT PushableMediaStreamAudioSource
......@@ -34,7 +27,7 @@ class MODULES_EXPORT PushableMediaStreamAudioSource
// This can be called from any thread, and will push the data on
// |audio_task_runner_|
void PushAudioData(std::unique_ptr<PushableAudioData> data, base::TimeTicks);
void PushAudioData(std::unique_ptr<AudioFrameSerializationData> data);
bool running() const {
DCHECK(GetTaskRunner()->BelongsToCurrentThread());
......@@ -49,8 +42,7 @@ class MODULES_EXPORT PushableMediaStreamAudioSource
explicit LivenessBroker(PushableMediaStreamAudioSource* source);
void OnSourceDestroyedOrStopped();
void PushAudioData(std::unique_ptr<PushableAudioData> data,
base::TimeTicks reference_time);
void PushAudioData(std::unique_ptr<AudioFrameSerializationData> data);
private:
WTF::Mutex mutex_;
......@@ -59,8 +51,7 @@ class MODULES_EXPORT PushableMediaStreamAudioSource
// Actually push data to the audio tracks. Only called on
// |audio_task_runner_|.
void DeliverData(std::unique_ptr<PushableAudioData> data,
base::TimeTicks reference_time);
void DeliverData(std::unique_ptr<AudioFrameSerializationData> data);
// MediaStreamAudioSource implementation.
bool EnsureSourceIsStarted() final;
......
......@@ -12,6 +12,7 @@
#include "third_party/blink/public/platform/modules/mediastream/web_media_stream_audio_sink.h"
#include "third_party/blink/public/web/web_heap.h"
#include "third_party/blink/renderer/modules/mediastream/mock_media_stream_audio_sink.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_audio_track.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_component.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_source.h"
......@@ -23,25 +24,6 @@ using testing::WithArg;
namespace blink {
namespace {
class FakeAudioData : public PushableAudioData {
public:
FakeAudioData(int channels, int frames, int sample_rate)
: sample_rate_(sample_rate),
audio_bus_(media::AudioBus::Create(channels, frames)) {}
// PushableMediaStreamAudioSource::AudioData implementation.
media::AudioBus* data() override { return audio_bus_.get(); }
int sampleRate() override { return sample_rate_; }
private:
int sample_rate_;
std::unique_ptr<media::AudioBus> audio_bus_;
};
} // namespace
class PushableMediaStreamAudioSourceTest : public testing::Test {
public:
PushableMediaStreamAudioSourceTest() {
......@@ -101,9 +83,9 @@ class PushableMediaStreamAudioSourceTest : public testing::Test {
run_loop.Quit();
}));
pushable_audio_source_->PushAudioData(
std::make_unique<FakeAudioData>(channels, frames, sample_rate),
reference_time);
pushable_audio_source_->PushAudioData(AudioFrameSerializationData::Wrap(
media::AudioBus::Create(channels, frames), sample_rate,
reference_time - base::TimeTicks()));
run_loop.Run();
}
......
......@@ -17,6 +17,8 @@ blink_modules_sources("webcodecs") {
"audio_encoder.h",
"audio_frame.cc",
"audio_frame.h",
"audio_frame_serialization_data.cc",
"audio_frame_serialization_data.h",
"codec_config_eval.h",
"codec_logger.cc",
"codec_logger.h",
......
......@@ -3,39 +3,39 @@
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/webcodecs/audio_frame.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_frame_init.h"
#include "third_party/blink/renderer/modules/mediastream/pushable_media_stream_audio_source.h"
#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
namespace blink {
namespace {
class SharedAudioData final : public PushableAudioData {
class SharedAudioData final : public AudioFrameSerializationData {
public:
~SharedAudioData() override = default;
explicit SharedAudioData(std::unique_ptr<SharedAudioBuffer> shared_buffer)
: shared_buffer_(std::move(shared_buffer)) {
SharedAudioData(std::unique_ptr<SharedAudioBuffer> shared_buffer,
base::TimeDelta timestamp)
: AudioFrameSerializationData(shared_buffer->sampleRate(), timestamp),
backing_buffer_(std::move(shared_buffer)) {
buffer_wrapper_ =
media::AudioBus::CreateWrapper(shared_buffer_->numberOfChannels());
media::AudioBus::CreateWrapper(backing_buffer_->numberOfChannels());
for (int i = 0; i < buffer_wrapper_->channels(); ++i) {
float* channel_data =
static_cast<float*>(shared_buffer_->channels()[i].Data());
static_cast<float*>(backing_buffer_->channels()[i].Data());
buffer_wrapper_->SetChannelData(i, channel_data);
}
buffer_wrapper_->set_frames(shared_buffer_->length());
buffer_wrapper_->set_frames(backing_buffer_->length());
}
~SharedAudioData() override = default;
media::AudioBus* data() override { return buffer_wrapper_.get(); }
int sampleRate() override { return shared_buffer_->sampleRate(); }
private:
std::unique_ptr<media::AudioBus> buffer_wrapper_;
std::unique_ptr<SharedAudioBuffer> shared_buffer_;
std::unique_ptr<SharedAudioBuffer> backing_buffer_;
};
} // namespace
......@@ -54,7 +54,7 @@ AudioFrame::AudioFrame(scoped_refptr<media::AudioBuffer> buffer)
buffer_ = AudioBuffer::CreateUninitialized(
buffer->channel_count(), buffer->frame_count(), buffer->sample_rate());
// Wrap blink buffer a media::AudioBus so we can interface with
// Wrap blink buffer with a media::AudioBus so we can interface with
// media::AudioBuffer to copy the data out.
auto media_bus_wrapper =
media::AudioBus::CreateWrapper(buffer->channel_count());
......@@ -73,9 +73,36 @@ AudioFrame::AudioFrame(scoped_refptr<media::AudioBuffer> buffer)
0 /* dest_frame_offset */, media_bus_wrapper.get());
}
std::unique_ptr<PushableAudioData> AudioFrame::GetPushableAudioData() {
std::unique_ptr<AudioFrameSerializationData>
AudioFrame::GetSerializationData() {
DCHECK(buffer_);
return std::make_unique<SharedAudioData>(buffer_->CreateSharedAudioBuffer());
return std::make_unique<SharedAudioData>(
buffer_->CreateSharedAudioBuffer(),
base::TimeDelta::FromMicroseconds(timestamp_));
}
// Rebuilds an AudioFrame from its thread-portable serialization form. The
// audio samples are copied into a freshly allocated blink AudioBuffer.
AudioFrame::AudioFrame(std::unique_ptr<AudioFrameSerializationData> data)
    : timestamp_(data->timestamp().InMicroseconds()) {
  const media::AudioBus& audio_bus = *data->data();
  buffer_ = AudioBuffer::CreateUninitialized(
      audio_bus.channels(), audio_bus.frames(), data->sample_rate());

  // Wrap blink buffer with a media::AudioBus so we can interface with
  // media::AudioBuffer to copy the data out.
  auto audio_buffer_wrapper =
      media::AudioBus::CreateWrapper(audio_bus.channels());
  for (int i = 0; i < audio_buffer_wrapper->channels(); ++i) {
    // The uninitialized blink buffer must be exactly large enough to receive
    // every frame of the source bus.
    DCHECK_EQ(buffer_->getChannelData(i)->byteLength(),
              audio_bus.frames() * sizeof(float));
    float* channel_data = buffer_->getChannelData(i)->Data();
    audio_buffer_wrapper->SetChannelData(i, channel_data);
  }
  audio_buffer_wrapper->set_frames(audio_bus.frames());

  // Copy the frames.
  // TODO(https://crbug.com/1168418): Avoid this copy by refactoring
  // blink::AudioBuffer to accept a serializable audio data backing object.
  audio_bus.CopyTo(audio_buffer_wrapper.get());
}
void AudioFrame::close() {
......
......@@ -14,7 +14,7 @@ namespace blink {
class ExceptionState;
class AudioFrameInit;
class PushableAudioData;
class AudioFrameSerializationData;
class MODULES_EXPORT AudioFrame final : public ScriptWrappable {
DEFINE_WRAPPERTYPEINFO();
......@@ -24,6 +24,7 @@ class MODULES_EXPORT AudioFrame final : public ScriptWrappable {
// Internal constructor for creating from media::AudioDecoder output.
explicit AudioFrame(scoped_refptr<media::AudioBuffer>);
explicit AudioFrame(std::unique_ptr<AudioFrameSerializationData> data);
// audio_frame.idl implementation.
explicit AudioFrame(AudioFrameInit*);
......@@ -32,7 +33,7 @@ class MODULES_EXPORT AudioFrame final : public ScriptWrappable {
AudioBuffer* buffer() const;
// Returns audio data that will outlive |this| being closed() or destroyed.
std::unique_ptr<PushableAudioData> GetPushableAudioData();
std::unique_ptr<AudioFrameSerializationData> GetSerializationData();
// GarbageCollected override.
void Trace(Visitor*) const override;
......
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/webcodecs/audio_frame_serialization_data.h"
namespace blink {
namespace {
// Trivial AudioFrameSerializationData implementation backed by an owned
// media::AudioBus.
class BasicAudioFrameSerializationData final
    : public AudioFrameSerializationData {
 public:
  BasicAudioFrameSerializationData(std::unique_ptr<media::AudioBus> data,
                                   int sample_rate,
                                   base::TimeDelta timestamp)
      : AudioFrameSerializationData(sample_rate, timestamp),
        audio_bus_(std::move(data)) {}

  ~BasicAudioFrameSerializationData() override = default;

  // Returns the owned bus; valid for the lifetime of |this|.
  media::AudioBus* data() override { return audio_bus_.get(); }

 private:
  std::unique_ptr<media::AudioBus> audio_bus_;
};
} // namespace
// Protected base constructor: stores the metadata shared by all backings.
AudioFrameSerializationData::AudioFrameSerializationData(
    int sample_rate,
    base::TimeDelta timestamp)
    : sample_rate_(sample_rate), timestamp_(timestamp) {}
// static
// Wraps an owned media::AudioBus in the simplest possible backing
// implementation.
std::unique_ptr<AudioFrameSerializationData> AudioFrameSerializationData::Wrap(
    std::unique_ptr<media::AudioBus> data,
    int sample_rate,
    base::TimeDelta timestamp) {
  return std::make_unique<BasicAudioFrameSerializationData>(
      std::move(data), sample_rate, timestamp);
}
} // namespace blink
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBCODECS_AUDIO_FRAME_SERIALIZATION_DATA_H_
#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBCODECS_AUDIO_FRAME_SERIALIZATION_DATA_H_
#include "base/time/time.h"
#include "media/base/audio_bus.h"
#include "third_party/blink/renderer/modules/modules_export.h"
namespace blink {
// Wrapper that contains all the necessary information to recreate an
// AudioFrame. It abstracts how audio data is actually backed, to simplify
// lifetime guarantees when jumping threads.
// TODO(https://crbug.com/1168418): add actual serialization support, to allow
// the use of AudioFrames in workers.
class MODULES_EXPORT AudioFrameSerializationData {
 public:
  virtual ~AudioFrameSerializationData() = default;

  // Non-copyable; instances are passed across threads via unique_ptr.
  AudioFrameSerializationData(const AudioFrameSerializationData&) = delete;
  AudioFrameSerializationData& operator=(const AudioFrameSerializationData&) =
      delete;

  // Helper function that creates a simple media::AudioBus backed wrapper.
  static std::unique_ptr<AudioFrameSerializationData> Wrap(
      std::unique_ptr<media::AudioBus> data,
      int sample_rate,
      base::TimeDelta timestamp);

  // The raw audio samples; how they are backed depends on the subclass.
  virtual media::AudioBus* data() = 0;

  int sample_rate() const { return sample_rate_; }
  // Timestamp carried alongside the samples (see Wrap() callers for the
  // convention used to populate it).
  base::TimeDelta timestamp() const { return timestamp_; }

 protected:
  AudioFrameSerializationData(int sample_rate, base::TimeDelta timestamp);

 private:
  int sample_rate_;
  base::TimeDelta timestamp_;
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBCODECS_AUDIO_FRAME_SERIALIZATION_DATA_H_
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment