Commit 1a7fde0c authored by Steven Holte, committed by Commit Bot

Revert "Fuchsia: Implement MixerOutputStreamFuchsia using AudioRenderer2."

This reverts commit 08410d56.

Reason for revert: 
This appears to be causing build failures due to a DEPS issue:
https://logs.chromium.org/v/?s=chromium%2Fbuildbucket%2Fcr-buildbucket.appspot.com%2F8943347677136985328%2F%2B%2Fsteps%2Fgenerate_build_files%2F0%2Fstdout

BUG=853968

Original change's description:
> Fuchsia: Implement MixerOutputStreamFuchsia using AudioRenderer2.
> 
> Previously MixerOutputStreamFuchsia was using media_client library,
> which is deprecated now. Update it to use AudioRenderer2 FIDL interface
> directly.
> 
> Bug: 851733
> Change-Id: I72a43369d16ecd626aa7294a6f3500b57bb3731e
> Reviewed-on: https://chromium-review.googlesource.com/1100376
> Reviewed-by: Kenneth MacKay <kmackay@chromium.org>
> Reviewed-by: Wez <wez@chromium.org>
> Commit-Queue: Sergey Ulanov <sergeyu@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#568159}

TBR=wez@chromium.org,sergeyu@chromium.org,kmackay@chromium.org

Change-Id: I9b5b2262d8ddc2edb706fb6fbec3d1fdc6d19563
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 851733
Reviewed-on: https://chromium-review.googlesource.com/1105320
Reviewed-by: Steven Holte <holte@chromium.org>
Commit-Queue: Steven Holte <holte@chromium.org>
Cr-Commit-Position: refs/heads/master@{#568219}
parent 9a6c4e4b
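
For context, the reverted change migrated MixerOutputStreamFuchsia from the deprecated media_client C library to the AudioRenderer2 FIDL interface. The sketch below condenses the FIDL-side setup that this revert removes; the calls are taken from the removed lines in the diff that follows, while the wrapper function name is hypothetical and error handling, payload-buffer management, and packet submission are omitted.

// Condensed sketch of the AudioRenderer2 setup removed by this revert.
// SetUpRenderer is a hypothetical helper name; the calls mirror the removed
// code in mixer_output_stream_fuchsia.cc below.
#include <fuchsia/media/cpp/fidl.h>

#include "base/fuchsia/component_context.h"

void SetUpRenderer(int sample_rate,
                   int channels,
                   fuchsia::media::AudioRenderer2Ptr& audio_renderer) {
  // Connect to the system Audio service and create an AudioRenderer2.
  fuchsia::media::AudioPtr audio_server =
      base::fuchsia::ComponentContext::GetDefault()
          ->ConnectToService<fuchsia::media::Audio>();
  audio_server->CreateRendererV2(audio_renderer.NewRequest());

  // Describe the PCM stream: float samples, |channels| channels at
  // |sample_rate| frames per second.
  fuchsia::media::AudioPcmFormat format;
  format.sample_format = fuchsia::media::AudioSampleFormat::FLOAT;
  format.channels = channels;
  format.frames_per_second = sample_rate;
  audio_renderer->SetPcmFormat(std::move(format));

  // Express presentation timestamps (PTS) in sample units.
  audio_renderer->SetPtsUnits(sample_rate, 1);

  // Request OnMinLeadTimeChanged events, used to size the payload buffer.
  audio_renderer->EnableMinLeadTimeEvents(true);
}

The full removed implementation additionally registers error and OnMinLeadTimeChanged handlers, maps a shared-memory payload buffer, and submits audio with SendPacketNoReply() and PlayNoReply(), as visible in the removed lines below.
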
@@ -40,8 +40,9 @@ cast_source_set("cma_backend_support") {
     "//chromecast/public",
     "//chromecast/public/media",
     "//media",
-    "//third_party/fuchsia-sdk:media",
   ]
+
+  libs = [ "media_client" ]
 }
 
 cast_source_set("unit_tests") {
@@ -55,6 +56,5 @@ cast_source_set("unit_tests") {
   deps = [
     ":cma_backend_support",
     "//testing/gtest",
-    "//third_party/fuchsia-sdk:media",
   ]
 }
@@ -4,11 +4,10 @@
 
 #include "chromecast/media/cma/backend/fuchsia/mixer_output_stream_fuchsia.h"
 
-#include <fuchsia/media/cpp/fidl.h>
+#include <media/audio.h>
 #include <zircon/syscalls.h>
 
 #include "base/command_line.h"
-#include "base/fuchsia/component_context.h"
 #include "base/time/time.h"
 #include "chromecast/base/chromecast_switches.h"
 #include "media/base/audio_sample_types.h"
@@ -18,10 +17,10 @@
 namespace chromecast {
 namespace media {
 
-// Target period between Write() calls. It's used to calculate the value
-// returned from OptimalWriteFramesCount().
-constexpr base::TimeDelta kTargetWritePeriod =
-    base::TimeDelta::FromMilliseconds(10);
+// |buffer_size| passed to media_client library when initializing audio output
+// stream. Current implementation ignores this parameter, so the value doesn't
+// make much difference. StreamMixer by default writes chunks of 768 frames.
+constexpr int kDefaultPeriodSize = 768;
 
 // Same value as in MixerOutputStreamAlsa. Currently this value is used to
 // simulate blocking Write() similar to ALSA's behavior, see comments in
@@ -33,39 +32,44 @@ std::unique_ptr<MixerOutputStream> MixerOutputStream::Create() {
   return std::make_unique<MixerOutputStreamFuchsia>();
 }
 
-MixerOutputStreamFuchsia::MixerOutputStreamFuchsia() = default;
-MixerOutputStreamFuchsia::~MixerOutputStreamFuchsia() = default;
+MixerOutputStreamFuchsia::MixerOutputStreamFuchsia() {}
+
+MixerOutputStreamFuchsia::~MixerOutputStreamFuchsia() {
+  if (manager_)
+    fuchsia_audio_manager_free(manager_);
+}
 
 bool MixerOutputStreamFuchsia::Start(int requested_sample_rate, int channels) {
-  DCHECK(!audio_renderer_);
-  DCHECK(reference_time_.is_null());
+  DCHECK(!stream_);
+
+  if (!manager_)
+    manager_ = fuchsia_audio_manager_create();
+  DCHECK(manager_);
+
+  fuchsia_audio_parameters fuchsia_params;
+  fuchsia_params.sample_rate = requested_sample_rate;
+  fuchsia_params.num_channels = channels;
+  fuchsia_params.buffer_size = kDefaultPeriodSize;
+
+  int result = fuchsia_audio_manager_create_output_stream(
+      manager_, nullptr, &fuchsia_params, &stream_);
+  if (result < 0) {
+    LOG(ERROR) << "Failed to open audio output, error code: " << result;
+    DCHECK(!stream_);
+    return false;
+  }
+
+  if (!UpdatePresentationDelay()) {
+    fuchsia_audio_output_stream_free(stream_);
+    stream_ = nullptr;
+    return false;
+  }
 
   sample_rate_ = requested_sample_rate;
   channels_ = channels;
-  target_packet_size_ = ::media::AudioTimestampHelper::TimeToFrames(
-      kTargetWritePeriod, sample_rate_);
-
-  // Connect |audio_renderer_|.
-  fuchsia::media::AudioPtr audio_server =
-      base::fuchsia::ComponentContext::GetDefault()
-          ->ConnectToService<fuchsia::media::Audio>();
-  audio_server->CreateRendererV2(audio_renderer_.NewRequest());
-  audio_renderer_.set_error_handler(
-      fit::bind_member(this, &MixerOutputStreamFuchsia::OnRendererError));
-
-  // Configure the renderer.
-  fuchsia::media::AudioPcmFormat format;
-  format.sample_format = fuchsia::media::AudioSampleFormat::FLOAT;
-  format.channels = channels_;
-  format.frames_per_second = sample_rate_;
-  audio_renderer_->SetPcmFormat(std::move(format));
-
-  // Use number of samples to specify media position.
-  audio_renderer_->SetPtsUnits(sample_rate_, 1);
-
-  audio_renderer_->EnableMinLeadTimeEvents(true);
-  audio_renderer_.events().OnMinLeadTimeChanged =
-      fit::bind_member(this, &MixerOutputStreamFuchsia::OnMinLeadTimeChanged);
+
+  started_time_ = base::TimeTicks();
 
   return true;
 }
@@ -76,143 +80,104 @@ int MixerOutputStreamFuchsia::GetSampleRate() {
 
 MediaPipelineBackend::AudioDecoder::RenderingDelay
 MixerOutputStreamFuchsia::GetRenderingDelay() {
-  if (reference_time_.is_null())
-    return MediaPipelineBackend::AudioDecoder::RenderingDelay();
-
   base::TimeTicks now = base::TimeTicks::Now();
-  base::TimeDelta delay = GetCurrentStreamTime() - now;
+  base::TimeDelta delay =
+      base::TimeDelta::FromMicroseconds(presentation_delay_ns_ / 1000);
+  if (!started_time_.is_null()) {
+    base::TimeTicks stream_time = GetCurrentStreamTime();
+    if (stream_time > now)
+      delay += stream_time - now;
+  }
 
   return MediaPipelineBackend::AudioDecoder::RenderingDelay(
       /*delay_microseconds=*/delay.InMicroseconds(),
       /*timestamp_microseconds=*/(now - base::TimeTicks()).InMicroseconds());
 }
 
 int MixerOutputStreamFuchsia::OptimalWriteFramesCount() {
-  return target_packet_size_;
+  return kDefaultPeriodSize;
 }
 
 bool MixerOutputStreamFuchsia::Write(const float* data,
                                      int data_size,
                                      bool* out_playback_interrupted) {
-  if (!audio_renderer_)
+  if (!stream_)
     return false;
 
-  DCHECK_EQ(data_size % channels_, 0);
-
-  // Allocate payload buffer if necessary.
-  if (!payload_buffer_.mapped_size() && !InitializePayloadBuffer())
-    return false;
-
-  // If Write() was called for the current playback position then assume that
-  // playback was interrupted.
-  auto now = base::TimeTicks::Now();
-  bool playback_interrupted = !reference_time_.is_null() &&
-                              now >= (GetCurrentStreamTime() - min_lead_time_);
-  if (out_playback_interrupted)
-    *out_playback_interrupted = playback_interrupted;
-
-  // Reset playback position if playback was interrupted.
-  if (playback_interrupted)
-    reference_time_ = base::TimeTicks();
-
-  size_t packet_size = data_size * sizeof(float);
-  if (payload_buffer_pos_ + packet_size > payload_buffer_.mapped_size()) {
-    payload_buffer_pos_ = 0;
-  }
-
-  DCHECK_LE(payload_buffer_pos_ + data_size, payload_buffer_.mapped_size());
-  memcpy(reinterpret_cast<uint8_t*>(payload_buffer_.memory()) +
-             payload_buffer_pos_,
-         data, packet_size);
-
-  // Send a new packet.
-  fuchsia::media::AudioPacket packet;
-  packet.timestamp = stream_position_samples_;
-  packet.payload_offset = payload_buffer_pos_;
-  packet.payload_size = packet_size;
-  packet.flags = 0;
-  audio_renderer_->SendPacketNoReply(std::move(packet));
-
-  // Update stream position.
+  DCHECK(data_size % channels_ == 0);
+
+  do {
+    zx_time_t presentation_time = FUCHSIA_AUDIO_NO_TIMESTAMP;
+    if (started_time_.is_null()) {
+      // Presentation time (PTS) needs to be specified only for the first frame
+      // after stream is started or restarted. Mixer will calculate PTS for all
+      // following frames. 1us is added to account for the time passed between
+      // zx_clock_get() and fuchsia_audio_output_stream_write().
+      zx_time_t zx_now = zx_clock_get(ZX_CLOCK_MONOTONIC);
+      presentation_time = zx_now + presentation_delay_ns_ + ZX_USEC(1);
+      started_time_ = base::TimeTicks::FromZxTime(zx_now);
+      stream_position_samples_ = 0;
+    }
+
+    int result = fuchsia_audio_output_stream_write(
+        stream_, const_cast<float*>(data), data_size, presentation_time);
+    if (result == ZX_ERR_IO_MISSED_DEADLINE) {
+      LOG(ERROR) << "MixerOutputStreamFuchsia::PumpSamples() missed deadline, "
+                    "resetting PTS.";
+      if (!UpdatePresentationDelay()) {
+        return false;
+      }
+      started_time_ = base::TimeTicks();
+      *out_playback_interrupted = true;
+    } else if (result != ZX_OK) {
+      LOG(ERROR) << "fuchsia_audio_output_stream_write() returned " << result;
+      return false;
+    }
+  } while (started_time_.is_null());
+
   int frames = data_size / channels_;
   stream_position_samples_ += frames;
-  payload_buffer_pos_ += packet_size;
-
-  if (reference_time_.is_null()) {
-    reference_time_ = now + min_lead_time_;
-    audio_renderer_->PlayNoReply(reference_time_.ToZxTime(),
-                                 stream_position_samples_ - frames);
-  } else {
-    // Block the thread to limit amount of buffered data. Currently
-    // MixerOutputStreamAlsa uses blocking Write() and StreamMixer relies on
-    // that behavior. Sleep() below replicates the same behavior on Fuchsia.
-    // TODO(sergeyu): Refactor StreamMixer to work with non-blocking Write().
-    base::TimeDelta max_buffer_duration =
-        ::media::AudioTimestampHelper::FramesToTime(kMaxOutputBufferSizeFrames,
-                                                    sample_rate_);
-    base::TimeDelta current_buffer_duration =
-        GetCurrentStreamTime() - min_lead_time_ - now;
-    if (current_buffer_duration > max_buffer_duration) {
-      base::PlatformThread::Sleep(current_buffer_duration -
-                                  max_buffer_duration);
-    }
-  }
+
+  // Block the thread to limit amount of buffered data. Currently
+  // MixerOutputStreamAlsa uses blocking Write() and StreamMixer relies on that
+  // behavior. Sleep() below replicates the same behavior on Fuchsia.
+  // TODO(sergeyu): Refactor StreamMixer to work with non-blocking Write().
+  base::TimeDelta max_buffer_duration =
+      ::media::AudioTimestampHelper::FramesToTime(kMaxOutputBufferSizeFrames,
                                                   sample_rate_);
+  base::TimeDelta current_buffer_duration =
+      GetCurrentStreamTime() - base::TimeTicks::Now();
+  if (current_buffer_duration > max_buffer_duration)
+    base::PlatformThread::Sleep(current_buffer_duration - max_buffer_duration);
 
   return true;
 }
 
 void MixerOutputStreamFuchsia::Stop() {
-  reference_time_ = base::TimeTicks();
-  audio_renderer_.Unbind();
-}
-
-size_t MixerOutputStreamFuchsia::GetMinBufferSize() {
-  // Ensure that |payload_buffer_| fits enough packets to cover |min_lead_time_|
-  // and kMaxOutputBufferSizeFrames plus one extra packet.
-  int min_packets = (::media::AudioTimestampHelper::TimeToFrames(min_lead_time_,
-                                                                 sample_rate_) +
-                     kMaxOutputBufferSizeFrames + target_packet_size_ - 1) /
-                        target_packet_size_ +
-                    1;
-  return min_packets * target_packet_size_ * channels_ * sizeof(float);
-}
-
-bool MixerOutputStreamFuchsia::InitializePayloadBuffer() {
-  size_t buffer_size = GetMinBufferSize();
-  if (!payload_buffer_.CreateAndMapAnonymous(buffer_size)) {
-    LOG(WARNING) << "Failed to allocate VMO of size " << buffer_size;
+  started_time_ = base::TimeTicks();
+
+  if (stream_) {
+    fuchsia_audio_output_stream_free(stream_);
+    stream_ = nullptr;
+  }
+}
+
+bool MixerOutputStreamFuchsia::UpdatePresentationDelay() {
+  int result = fuchsia_audio_output_stream_get_min_delay(
+      stream_, &presentation_delay_ns_);
+  if (result != ZX_OK) {
+    LOG(ERROR) << "fuchsia_audio_output_stream_get_min_delay() failed: "
+               << result;
     return false;
   }
 
-  payload_buffer_pos_ = 0;
-  audio_renderer_->SetPayloadBuffer(
-      zx::vmo(payload_buffer_.handle().Duplicate().GetHandle()));
-
   return true;
 }
 
 base::TimeTicks MixerOutputStreamFuchsia::GetCurrentStreamTime() {
-  DCHECK(!reference_time_.is_null());
-  return reference_time_ + ::media::AudioTimestampHelper::FramesToTime(
+  DCHECK(!started_time_.is_null());
+  return started_time_ + ::media::AudioTimestampHelper::FramesToTime(
                                stream_position_samples_, sample_rate_);
 }
-
-void MixerOutputStreamFuchsia::OnRendererError() {
-  LOG(WARNING) << "AudioRenderer has failed.";
-  Stop();
-}
-
-void MixerOutputStreamFuchsia::OnMinLeadTimeChanged(int64_t min_lead_time) {
-  min_lead_time_ = base::TimeDelta::FromNanoseconds(min_lead_time);
-
-  // When min_lead_time_ increases we may need to reallocate |payload_buffer_|.
-  // Code below just unmaps the current buffer. The new buffer will be allocated
-  // lated in PumpSamples(). This is necessary because VMO allocation may fail
-  // and it's not possible to report that error here - OnMinLeadTimeChanged()
-  // may be invoked before Start().
-  if (payload_buffer_.mapped_size() > 0 &&
-      GetMinBufferSize() > payload_buffer_.mapped_size()) {
-    payload_buffer_.Unmap();
-  }
-}
 
 }  // namespace media
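
For reference, the media_client flow restored by the hunks above reduces to roughly the following sequence. This is a condensed sketch that uses only the C API calls visible in the restored lines; the function name is hypothetical, error handling is minimal, and the buffer written here is just silence.

// Condensed sketch of the restored media_client usage: open a stream,
// write one buffer with an explicit PTS, then tear everything down.
#include <media/audio.h>
#include <zircon/syscalls.h>

#include <vector>

void PlayOneBuffer() {
  fuchsia_audio_manager* manager = fuchsia_audio_manager_create();

  fuchsia_audio_parameters params;
  params.sample_rate = 48000;
  params.num_channels = 2;
  params.buffer_size = 768;  // kDefaultPeriodSize in the restored code.

  fuchsia_audio_output_stream* stream = nullptr;
  if (fuchsia_audio_manager_create_output_stream(manager, nullptr, &params,
                                                 &stream) < 0) {
    fuchsia_audio_manager_free(manager);
    return;
  }

  // The first write carries an explicit presentation time; subsequent writes
  // can pass FUCHSIA_AUDIO_NO_TIMESTAMP and the mixer extrapolates the PTS.
  zx_duration_t delay_ns = 0;
  fuchsia_audio_output_stream_get_min_delay(stream, &delay_ns);
  zx_time_t pts = zx_clock_get(ZX_CLOCK_MONOTONIC) + delay_ns + ZX_USEC(1);

  std::vector<float> buffer(params.buffer_size * params.num_channels, 0.0f);
  fuchsia_audio_output_stream_write(stream, buffer.data(), buffer.size(), pts);

  fuchsia_audio_output_stream_free(stream);
  fuchsia_audio_manager_free(manager);
}

The restored implementation wraps this sequence in Start()/Write()/Stop() and additionally sleeps in Write() to emulate ALSA-style blocking, as described in the restored comments above.
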
@@ -5,9 +5,8 @@
 #ifndef CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_OUTPUT_STREAM_FUCHSIA_H_
 #define CHROMECAST_MEDIA_CMA_BACKEND_AUDIO_OUTPUT_STREAM_FUCHSIA_H_
 
-#include <fuchsia/media/cpp/fidl.h>
+#include <media/audio.h>
 
-#include "base/memory/shared_memory.h"
 #include "base/time/time.h"
 #include "chromecast/public/media/mixer_output_stream.h"
@@ -32,36 +31,21 @@ class MixerOutputStreamFuchsia : public MixerOutputStream {
   void Stop() override;
 
  private:
-  size_t GetMinBufferSize();
-  bool InitializePayloadBuffer();
+  bool UpdatePresentationDelay();
   base::TimeTicks GetCurrentStreamTime();
 
-  // Event handlers for |audio_renderer_|.
-  void OnRendererError();
-  void OnMinLeadTimeChanged(int64_t min_lead_time);
+  fuchsia_audio_manager* manager_ = nullptr;
+  fuchsia_audio_output_stream* stream_ = nullptr;
 
   int sample_rate_ = 0;
   int channels_ = 0;
 
-  // Value returned by OptimalWriteFramesCount().
-  int target_packet_size_ = 0;
-
-  // Audio renderer connection.
-  fuchsia::media::AudioRenderer2Ptr audio_renderer_;
-
-  base::SharedMemory payload_buffer_;
-  size_t payload_buffer_pos_ = 0;
-
-  // Set only while stream is playing.
-  base::TimeTicks reference_time_;
+  base::TimeTicks started_time_;
   int64_t stream_position_samples_ = 0;
 
-  // Current min lead time for the stream. This value is updated by
-  // AudioRenderer::OnMinLeadTimeChanged event. Assume 50ms until we get the
-  // first OnMinLeadTimeChanged event.
-  base::TimeDelta min_lead_time_ = base::TimeDelta::FromMilliseconds(50);
+  // Total presentation delay for the stream. This value is returned by
+  // fuchsia_audio_output_stream_get_min_delay()
+  zx_duration_t presentation_delay_ns_ = 0;
 
   DISALLOW_COPY_AND_ASSIGN(MixerOutputStreamFuchsia);
 };
@@ -4,99 +4,19 @@
 
 #include "chromecast/media/cma/backend/fuchsia/mixer_output_stream_fuchsia.h"
 
-#include "base/location.h"
-#include "base/message_loop/message_loop.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace chromecast {
 namespace media {
 
-constexpr int kSampleRate = 48000;
-constexpr int kNumChannels = 2;
-
 class MixerOutputStreamFuchsiaTest : public ::testing::Test {
  protected:
-  base::MessageLoopForIO message_loop_;
   MixerOutputStreamFuchsia output_;
 };
 
 TEST_F(MixerOutputStreamFuchsiaTest, StartAndStop) {
-  EXPECT_TRUE(output_.Start(kSampleRate, kNumChannels));
-  EXPECT_EQ(output_.GetSampleRate(), kSampleRate);
-  output_.Stop();
-}
-
-TEST_F(MixerOutputStreamFuchsiaTest, Play1s) {
-  EXPECT_TRUE(output_.Start(kSampleRate, kNumChannels));
-
-  constexpr base::TimeDelta kTestStreamDuration =
-      base::TimeDelta::FromMilliseconds(300);
-  constexpr float kSignalFrequencyHz = 1000;
-
-  auto started = base::TimeTicks::Now();
-  int samples_to_play =
-      kSampleRate * kTestStreamDuration / base::TimeDelta::FromSeconds(1);
-  int pos = 0;
-  while (pos < samples_to_play) {
-    std::vector<float> buffer;
-    int num_frames = output_.OptimalWriteFramesCount();
-    buffer.resize(num_frames * kNumChannels);
-    for (int i = 0; i < num_frames; ++i) {
-      float v = sin(2 * M_PI * pos * kSignalFrequencyHz / kSampleRate);
-      for (int c = 0; c < kNumChannels; ++c) {
-        buffer[i * kNumChannels + c] = v;
-      }
-      pos += 1;
-    }
-
-    bool interrupted = true;
-    EXPECT_TRUE(output_.Write(buffer.data(), buffer.size(), &interrupted));
-
-    // Run message loop to process async events.
-    base::RunLoop().RunUntilIdle();
-  }
-  auto ended = base::TimeTicks::Now();
-
-  // Verify that Write() was blocking, allowing 100ms for buffering.
-  EXPECT_GT(ended - started,
-            kTestStreamDuration - base::TimeDelta::FromMilliseconds(100));
-
-  output_.Stop();
-}
-
-TEST_F(MixerOutputStreamFuchsiaTest, PlaybackInterrupted) {
-  EXPECT_TRUE(output_.Start(kSampleRate, kNumChannels));
-
-  std::vector<float> buffer;
-  int num_frames = output_.OptimalWriteFramesCount();
-  buffer.resize(num_frames * kNumChannels);
-
-  bool interrupted = true;
-
-  // First Write() always returns interrupted = false.
-  EXPECT_TRUE(output_.Write(buffer.data(), buffer.size(), &interrupted));
-  EXPECT_FALSE(interrupted);
-
-  interrupted = true;
-
-  // Repeated Write() is expected to return interrupted = false.
-  EXPECT_TRUE(output_.Write(buffer.data(), buffer.size(), &interrupted));
-  EXPECT_FALSE(interrupted);
-
-  // Run message loop for 100ms before calling Write() again.
-  base::RunLoop run_loop;
-  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE, run_loop.QuitClosure(),
-      base::TimeDelta::FromMilliseconds(100));
-  run_loop.Run();
-
-  // Write() is called to late, expect interrupted = true.
-  interrupted = false;
-  EXPECT_TRUE(output_.Write(buffer.data(), buffer.size(), &interrupted));
-  EXPECT_TRUE(interrupted);
-
+  EXPECT_TRUE(output_.Start(48000, 2));
+  EXPECT_EQ(output_.GetSampleRate(), 48000);
   output_.Stop();
 }
@@ -224,10 +224,6 @@ StreamMixer::StreamMixer(
   if (mixer_thread_) {
     base::Thread::Options options;
     options.priority = base::ThreadPriority::REALTIME_AUDIO;
-#if defined(OS_FUCHSIA)
-    // MixerOutputStreamFuchsia uses FIDL, which works only on IO threads.
-    options.message_loop_type = base::MessageLoop::TYPE_IO;
-#endif
     mixer_thread_->StartWithOptions(options);
     mixer_task_runner_ = mixer_thread_->task_runner();
     mixer_task_runner_->PostTask(FROM_HERE, base::BindOnce(&UseHighPriority));