Commit a3665685 authored by Andres Calderon Jaramillo, committed by Commit Bot

IPC plumbing for hardware-accelerated image decodes.

This CL creates a new GPU channel IPC message type,
GpuChannelMsg_ScheduleImageDecode, which a renderer will eventually be
able to use to request hardware-accelerated image decodes. Two classes
are added and plumbed into the existing GPU channel architecture to
help deal with this new message:

1) On the service side (GPU process), an ImageDecodeAcceleratorStub
   processes incoming messages. There is one such stub per GpuChannel,
   although it is owned by the GpuChannelMessageFilter because it
   expects to receive messages on the IO thread.

2) On the client side (renderer), an ImageDecodeAcceleratorProxy is
   associated with a GpuChannelHost. It's a thread-safe helper that
   allows components in the renderer to schedule image decodes and get
   back a SyncToken to synchronize on the completion of the decode.

The processing of the new IPC message is guarded by the
kVaapiJpegImageDecodeAcceleration feature flag: the stub refuses to
handle that message if the feature is disabled.

Bug: 868400
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I5d24e661341b5744b30306aca22903f28a52d695
Reviewed-on: https://chromium-review.googlesource.com/c/1285610
Reviewed-by: Robert Sesek <rsesek@chromium.org>
Reviewed-by: Sunny Sachanandani <sunnyps@chromium.org>
Reviewed-by: Antoine Labour <piman@chromium.org>
Commit-Queue: Andres Calderon Jaramillo <andrescj@chromium.org>
Cr-Commit-Position: refs/heads/master@{#603944}
parent 2d6fff6e
...@@ -22,6 +22,8 @@ source_set("ipc_client_sources") { ...@@ -22,6 +22,8 @@ source_set("ipc_client_sources") {
"command_buffer_proxy_impl.h", "command_buffer_proxy_impl.h",
"gpu_channel_host.cc", "gpu_channel_host.cc",
"gpu_channel_host.h", "gpu_channel_host.h",
"image_decode_accelerator_proxy.cc",
"image_decode_accelerator_proxy.h",
"shared_image_interface_proxy.cc", "shared_image_interface_proxy.cc",
"shared_image_interface_proxy.h", "shared_image_interface_proxy.h",
] ]
...@@ -36,6 +38,8 @@ source_set("ipc_client_sources") { ...@@ -36,6 +38,8 @@ source_set("ipc_client_sources") {
"//gpu/config:config_sources", "//gpu/config:config_sources",
"//gpu/ipc/common:ipc_common_sources", "//gpu/ipc/common:ipc_common_sources",
"//mojo/public/cpp/system", "//mojo/public/cpp/system",
"//ui/gfx:color_space",
"//ui/gfx/geometry",
"//ui/gl", "//ui/gl",
] ]
public_deps = [ public_deps = [
......
...@@ -44,7 +44,11 @@ GpuChannelHost::GpuChannelHost(int channel_id, ...@@ -44,7 +44,11 @@ GpuChannelHost::GpuChannelHost(int channel_id,
shared_image_interface_( shared_image_interface_(
this, this,
static_cast<int32_t>( static_cast<int32_t>(
GpuChannelReservedRoutes::kSharedImageInterface)) { GpuChannelReservedRoutes::kSharedImageInterface)),
image_decode_accelerator_proxy_(
this,
static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
next_image_id_.GetNext(); next_image_id_.GetNext();
for (int32_t i = 0; for (int32_t i = 0;
i <= static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue); ++i) i <= static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue); ++i)
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "gpu/config/gpu_feature_info.h" #include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_info.h" #include "gpu/config/gpu_info.h"
#include "gpu/gpu_export.h" #include "gpu/gpu_export.h"
#include "gpu/ipc/client/image_decode_accelerator_proxy.h"
#include "gpu/ipc/client/shared_image_interface_proxy.h" #include "gpu/ipc/client/shared_image_interface_proxy.h"
#include "ipc/ipc_channel_handle.h" #include "ipc/ipc_channel_handle.h"
#include "ipc/message_filter.h" #include "ipc/message_filter.h"
...@@ -150,6 +151,10 @@ class GPU_EXPORT GpuChannelHost ...@@ -150,6 +151,10 @@ class GPU_EXPORT GpuChannelHost
return &shared_image_interface_; return &shared_image_interface_;
} }
ImageDecodeAcceleratorProxy* image_decode_accelerator_proxy() {
return &image_decode_accelerator_proxy_;
}
protected: protected:
friend class base::RefCountedThreadSafe<GpuChannelHost>; friend class base::RefCountedThreadSafe<GpuChannelHost>;
~GpuChannelHost() override; ~GpuChannelHost() override;
...@@ -253,6 +258,9 @@ class GPU_EXPORT GpuChannelHost ...@@ -253,6 +258,9 @@ class GPU_EXPORT GpuChannelHost
SharedImageInterfaceProxy shared_image_interface_; SharedImageInterfaceProxy shared_image_interface_;
// A client-side helper to send image decode requests to the GPU process.
ImageDecodeAcceleratorProxy image_decode_accelerator_proxy_;
// Image IDs are allocated in sequence. // Image IDs are allocated in sequence.
base::AtomicSequenceNumber next_image_id_; base::AtomicSequenceNumber next_image_id_;
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/ipc/client/image_decode_accelerator_proxy.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
namespace gpu {
// Binds the proxy to its owning channel and to the reserved route that the
// service-side ImageDecodeAcceleratorStub listens on. |host| is not owned
// and must outlive this proxy.
ImageDecodeAcceleratorProxy::ImageDecodeAcceleratorProxy(GpuChannelHost* host,
                                                         int32_t route_id)
    : host_(host), route_id_(route_id) {}

ImageDecodeAcceleratorProxy::~ImageDecodeAcceleratorProxy() = default;
// Packages a decode request, sends it to the service, and returns a sync
// token that the caller can wait on; the service releases the token once the
// decode finishes and the transfer cache entry exists.
SyncToken ImageDecodeAcceleratorProxy::ScheduleImageDecode(
    const std::vector<uint8_t>& encoded_data,
    const gfx::Size& output_size,
    int32_t raster_decoder_route_id,
    uint32_t transfer_cache_entry_id,
    int32_t discardable_handle_shm_id,
    uint32_t discardable_handle_shm_offset,
    const gfx::ColorSpace& target_color_space,
    bool needs_mips) {
  // Bundle all of the request's inputs into the IPC params struct.
  GpuChannelMsg_ScheduleImageDecode_Params decode_params;
  decode_params.encoded_data = encoded_data;
  decode_params.output_size = output_size;
  decode_params.raster_decoder_route_id = raster_decoder_route_id;
  decode_params.transfer_cache_entry_id = transfer_cache_entry_id;
  decode_params.discardable_handle_shm_id = discardable_handle_shm_id;
  decode_params.discardable_handle_shm_offset = discardable_handle_shm_offset;
  decode_params.target_color_space = target_color_space;
  decode_params.needs_mips = needs_mips;

  base::AutoLock lock(lock_);
  const uint64_t release_count = ++next_release_count_;
  // The message is sent while |lock_| is held so that the service observes
  // release counts in strictly increasing order.
  host_->Send(new GpuChannelMsg_ScheduleImageDecode(route_id_, decode_params,
                                                    release_count));

  // The token's namespace/ID must match what the service-side stub registered
  // with the sync point manager (GPU_IO plus the channel/route-derived ID).
  return SyncToken(
      CommandBufferNamespace::GPU_IO,
      CommandBufferIdFromChannelAndRoute(host_->channel_id(), route_id_),
      release_count);
}
} // namespace gpu
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef GPU_IPC_CLIENT_IMAGE_DECODE_ACCELERATOR_PROXY_H_
#define GPU_IPC_CLIENT_IMAGE_DECODE_ACCELERATOR_PROXY_H_
#include <vector>
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "gpu/command_buffer/common/sync_token.h"
namespace gfx {
class ColorSpace;
class Size;
} // namespace gfx
namespace gpu {
class GpuChannelHost;
// A client-side interface to schedule hardware-accelerated image decodes on the
// GPU process. This is only supported in OOP-R mode. To use this functionality,
// the renderer should first find out the supported image types (e.g., JPEG,
// WebP, etc.) and profiles (e.g., a maximum size of 8192x8192). This
// information can be obtained from GpuInfo. No decode requests should be sent
// for unsupported image types/profiles.
//
// The actual decode is done asynchronously on the service side, but the client
// can synchronize using a sync token that will be released upon the completion
// of the decode.
//
// To send a decode request, the renderer should:
//
// (1) Create a locked ClientImageTransferCacheEntry without a backing
// SkPixmap. This entry should not be serialized over the command buffer.
//
// (2) Call ScheduleImageDecode().
//
// (3) Issue a server wait on the sync token returned in step (2).
//
// When the service is done with the decode, a ServiceImageTransferCacheEntry
// will be created/locked with the decoded data and the sync token is
// released.
//
// Objects of this class are thread-safe.
//
// TODO(andrescj): actually put the decoder's capabilities in GpuInfo.
class ImageDecodeAcceleratorProxy {
 public:
  // |host| is not owned and must outlive this proxy; |route_id| is the
  // reserved route the service-side stub listens on.
  ImageDecodeAcceleratorProxy(GpuChannelHost* host, int32_t route_id);
  ~ImageDecodeAcceleratorProxy();
  // Schedules a hardware-accelerated image decode on the GPU process. The image
  // in |encoded_data| is decoded and scaled to |output_size|. Upon completion,
  // a service-side transfer cache entry will be created with the decoded data
  // using |transfer_cache_entry_id|, |discardable_handle_shm_id|, and
  // |discardable_handle_shm_offset|. The |raster_decoder_route_id| is used to
  // look up the appropriate command buffer and create the transfer cache entry
  // correctly. Returns a sync token that will be released after the decode is
  // done and the service-side transfer cache entry is created.
  SyncToken ScheduleImageDecode(const std::vector<uint8_t>& encoded_data,
                                const gfx::Size& output_size,
                                int32_t raster_decoder_route_id,
                                uint32_t transfer_cache_entry_id,
                                int32_t discardable_handle_shm_id,
                                uint32_t discardable_handle_shm_offset,
                                const gfx::ColorSpace& target_color_space,
                                bool needs_mips);
 private:
  // Channel used to send the decode IPC; not owned.
  GpuChannelHost* const host_;
  // Reserved route ID shared with the service-side stub.
  const int32_t route_id_;
  // Protects |next_release_count_| and serializes sends so release counts
  // reach the service in monotonically increasing order.
  base::Lock lock_;
  // Incremented once per decode request; used as the sync token release count.
  uint64_t next_release_count_ GUARDED_BY(lock_) = 0;
  DISALLOW_COPY_AND_ASSIGN(ImageDecodeAcceleratorProxy);
};
} // namespace gpu
#endif // GPU_IPC_CLIENT_IMAGE_DECODE_ACCELERATOR_PROXY_H_
...@@ -128,6 +128,8 @@ source_set("ipc_common_sources") { ...@@ -128,6 +128,8 @@ source_set("ipc_common_sources") {
"//gpu/command_buffer/common:common_sources", "//gpu/command_buffer/common:common_sources",
"//gpu/config:config_sources", "//gpu/config:config_sources",
"//ui/base", "//ui/base",
"//ui/gfx:color_space",
"//ui/gfx/geometry",
"//ui/gfx/ipc", "//ui/gfx/ipc",
"//ui/gfx/ipc/buffer_types", "//ui/gfx/ipc/buffer_types",
"//ui/gfx/ipc/color", "//ui/gfx/ipc/color",
......
...@@ -11,7 +11,8 @@ namespace gpu { ...@@ -11,7 +11,8 @@ namespace gpu {
enum class GpuChannelReservedRoutes : int32_t { enum class GpuChannelReservedRoutes : int32_t {
kSharedImageInterface = 0, kSharedImageInterface = 0,
kMaxValue = kSharedImageInterface, kImageDecodeAccelerator = 1,
kMaxValue = kImageDecodeAccelerator,
}; };
inline CommandBufferId CommandBufferIdFromChannelAndRoute(int channel_id, inline CommandBufferId CommandBufferIdFromChannelAndRoute(int channel_id,
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include "gpu/ipc/common/surface_handle.h" #include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_channel_handle.h" #include "ipc/ipc_channel_handle.h"
#include "ipc/ipc_message_macros.h" #include "ipc/ipc_message_macros.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h" #include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence_handle.h" #include "ui/gfx/gpu_fence_handle.h"
#include "ui/gfx/gpu_memory_buffer.h" #include "ui/gfx/gpu_memory_buffer.h"
...@@ -82,6 +83,17 @@ IPC_STRUCT_BEGIN(GpuChannelMsg_CreateSharedImage_Params) ...@@ -82,6 +83,17 @@ IPC_STRUCT_BEGIN(GpuChannelMsg_CreateSharedImage_Params)
IPC_STRUCT_MEMBER(uint32_t, release_id) IPC_STRUCT_MEMBER(uint32_t, release_id)
IPC_STRUCT_END() IPC_STRUCT_END()
IPC_STRUCT_BEGIN(GpuChannelMsg_ScheduleImageDecode_Params)
IPC_STRUCT_MEMBER(std::vector<uint8_t>, encoded_data)
IPC_STRUCT_MEMBER(gfx::Size, output_size)
IPC_STRUCT_MEMBER(int32_t, raster_decoder_route_id)
IPC_STRUCT_MEMBER(uint32_t, transfer_cache_entry_id)
IPC_STRUCT_MEMBER(int32_t, discardable_handle_shm_id)
IPC_STRUCT_MEMBER(uint32_t, discardable_handle_shm_offset)
IPC_STRUCT_MEMBER(gfx::ColorSpace, target_color_space)
IPC_STRUCT_MEMBER(bool, needs_mips)
IPC_STRUCT_END()
IPC_STRUCT_BEGIN(GpuDeferredMessage) IPC_STRUCT_BEGIN(GpuDeferredMessage)
IPC_STRUCT_MEMBER(IPC::Message, message) IPC_STRUCT_MEMBER(IPC::Message, message)
IPC_STRUCT_MEMBER(std::vector<gpu::SyncToken>, sync_token_fences) IPC_STRUCT_MEMBER(std::vector<gpu::SyncToken>, sync_token_fences)
...@@ -116,6 +128,14 @@ IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateSharedImage, ...@@ -116,6 +128,14 @@ IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateSharedImage,
GpuChannelMsg_CreateSharedImage_Params /* params */) GpuChannelMsg_CreateSharedImage_Params /* params */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_DestroySharedImage, gpu::Mailbox /* id */) IPC_MESSAGE_ROUTED1(GpuChannelMsg_DestroySharedImage, gpu::Mailbox /* id */)
// Schedules a hardware-accelerated image decode in the GPU process. Renderers
// should use gpu::ImageDecodeAcceleratorProxy to schedule decode requests which
// are processed by gpu::ImageDecodeAcceleratorStub on the service side.
IPC_MESSAGE_ROUTED2(
GpuChannelMsg_ScheduleImageDecode,
GpuChannelMsg_ScheduleImageDecode_Params /* decode_params */,
uint64_t /* release_count */)
// Crash the GPU process in similar way to how chrome://gpucrash does. // Crash the GPU process in similar way to how chrome://gpucrash does.
// This is only supported in testing environments, and is otherwise ignored. // This is only supported in testing environments, and is otherwise ignored.
IPC_MESSAGE_CONTROL0(GpuChannelMsg_CrashForTesting) IPC_MESSAGE_CONTROL0(GpuChannelMsg_CrashForTesting)
......
...@@ -29,6 +29,8 @@ jumbo_component("service") { ...@@ -29,6 +29,8 @@ jumbo_component("service") {
"gpu_memory_buffer_factory.h", "gpu_memory_buffer_factory.h",
"gpu_watchdog_thread.cc", "gpu_watchdog_thread.cc",
"gpu_watchdog_thread.h", "gpu_watchdog_thread.h",
"image_decode_accelerator_stub.cc",
"image_decode_accelerator_stub.h",
"image_transport_surface.h", "image_transport_surface.h",
"image_transport_surface_delegate.h", "image_transport_surface_delegate.h",
"pass_through_image_transport_surface.cc", "pass_through_image_transport_surface.cc",
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include "gpu/ipc/service/gpu_channel_manager.h" #include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h" #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h" #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
#include "gpu/ipc/service/raster_command_buffer_stub.h" #include "gpu/ipc/service/raster_command_buffer_stub.h"
#include "gpu/ipc/service/webgpu_command_buffer_stub.h" #include "gpu/ipc/service/webgpu_command_buffer_stub.h"
#include "ipc/ipc_channel.h" #include "ipc/ipc_channel.h"
...@@ -119,6 +120,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter ...@@ -119,6 +120,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
Scheduler* scheduler_; Scheduler* scheduler_;
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_; scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<ImageDecodeAcceleratorStub> image_decode_accelerator_stub_;
base::ThreadChecker io_thread_checker_; base::ThreadChecker io_thread_checker_;
DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter); DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
...@@ -130,7 +132,12 @@ GpuChannelMessageFilter::GpuChannelMessageFilter( ...@@ -130,7 +132,12 @@ GpuChannelMessageFilter::GpuChannelMessageFilter(
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner) scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
: gpu_channel_(gpu_channel), : gpu_channel_(gpu_channel),
scheduler_(scheduler), scheduler_(scheduler),
main_task_runner_(std::move(main_task_runner)) { main_task_runner_(std::move(main_task_runner)),
image_decode_accelerator_stub_(
base::MakeRefCounted<ImageDecodeAcceleratorStub>(
gpu_channel,
static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator))) {
io_thread_checker_.DetachFromThread(); io_thread_checker_.DetachFromThread();
} }
...@@ -140,6 +147,7 @@ GpuChannelMessageFilter::~GpuChannelMessageFilter() { ...@@ -140,6 +147,7 @@ GpuChannelMessageFilter::~GpuChannelMessageFilter() {
void GpuChannelMessageFilter::Destroy() { void GpuChannelMessageFilter::Destroy() {
base::AutoLock auto_lock(gpu_channel_lock_); base::AutoLock auto_lock(gpu_channel_lock_);
image_decode_accelerator_stub_->Shutdown();
gpu_channel_ = nullptr; gpu_channel_ = nullptr;
} }
...@@ -270,7 +278,11 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { ...@@ -270,7 +278,11 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
} }
scheduler_->ScheduleTasks(std::move(tasks)); scheduler_->ScheduleTasks(std::move(tasks));
} else if (message.routing_id() ==
static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
if (!image_decode_accelerator_stub_->OnMessageReceived(message))
return MessageErrorHandler(message, "Invalid image decode request");
} else if (message.routing_id() == MSG_ROUTING_CONTROL || } else if (message.routing_id() == MSG_ROUTING_CONTROL ||
message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
message.type() == message.type() ==
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "base/containers/flat_map.h" #include "base/containers/flat_map.h"
#include "base/macros.h" #include "base/macros.h"
#include "base/memory/ref_counted.h" #include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h" #include "base/memory/weak_ptr.h"
#include "base/process/process.h" #include "base/process/process.h"
#include "base/single_thread_task_runner.h" #include "base/single_thread_task_runner.h"
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h" #include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h" #include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h" #include "gpu/ipc/service/gpu_channel_manager.h"
...@@ -26,7 +27,8 @@ class GpuChannelManagerTest : public GpuChannelTestCommon { ...@@ -26,7 +27,8 @@ class GpuChannelManagerTest : public GpuChannelTestCommon {
GpuChannel* channel = CreateChannel(kClientId, true); GpuChannel* channel = CreateChannel(kClientId, true);
EXPECT_TRUE(channel); EXPECT_TRUE(channel);
int32_t kRouteId = 1; int32_t kRouteId =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
const SurfaceHandle kFakeSurfaceHandle = 1; const SurfaceHandle kFakeSurfaceHandle = 1;
SurfaceHandle surface_handle = kFakeSurfaceHandle; SurfaceHandle surface_handle = kFakeSurfaceHandle;
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <stdint.h> #include <stdint.h>
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h" #include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h" #include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_test_common.h" #include "gpu/ipc/service/gpu_channel_test_common.h"
...@@ -27,7 +28,8 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferAllowed) { ...@@ -27,7 +28,8 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferAllowed) {
SurfaceHandle surface_handle = kFakeSurfaceHandle; SurfaceHandle surface_handle = kFakeSurfaceHandle;
DCHECK_NE(surface_handle, kNullSurfaceHandle); DCHECK_NE(surface_handle, kNullSurfaceHandle);
int32_t kRouteId = 1; int32_t kRouteId =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
init_params.surface_handle = surface_handle; init_params.surface_handle = surface_handle;
init_params.share_group_id = MSG_ROUTING_NONE; init_params.share_group_id = MSG_ROUTING_NONE;
...@@ -55,7 +57,8 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferDisallowed) { ...@@ -55,7 +57,8 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferDisallowed) {
SurfaceHandle surface_handle = kFakeSurfaceHandle; SurfaceHandle surface_handle = kFakeSurfaceHandle;
DCHECK_NE(surface_handle, kNullSurfaceHandle); DCHECK_NE(surface_handle, kNullSurfaceHandle);
int32_t kRouteId = 1; int32_t kRouteId =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
init_params.surface_handle = surface_handle; init_params.surface_handle = surface_handle;
init_params.share_group_id = MSG_ROUTING_NONE; init_params.share_group_id = MSG_ROUTING_NONE;
...@@ -79,7 +82,8 @@ TEST_F(GpuChannelTest, CreateOffscreenCommandBuffer) { ...@@ -79,7 +82,8 @@ TEST_F(GpuChannelTest, CreateOffscreenCommandBuffer) {
GpuChannel* channel = CreateChannel(kClientId, true); GpuChannel* channel = CreateChannel(kClientId, true);
ASSERT_TRUE(channel); ASSERT_TRUE(channel);
int32_t kRouteId = 1; int32_t kRouteId =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
init_params.surface_handle = kNullSurfaceHandle; init_params.surface_handle = kNullSurfaceHandle;
init_params.share_group_id = MSG_ROUTING_NONE; init_params.share_group_id = MSG_ROUTING_NONE;
...@@ -104,7 +108,8 @@ TEST_F(GpuChannelTest, IncompatibleStreamIds) { ...@@ -104,7 +108,8 @@ TEST_F(GpuChannelTest, IncompatibleStreamIds) {
ASSERT_TRUE(channel); ASSERT_TRUE(channel);
// Create first context. // Create first context.
int32_t kRouteId1 = 1; int32_t kRouteId1 =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
int32_t kStreamId1 = 1; int32_t kStreamId1 = 1;
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
init_params.surface_handle = kNullSurfaceHandle; init_params.surface_handle = kNullSurfaceHandle;
...@@ -124,7 +129,7 @@ TEST_F(GpuChannelTest, IncompatibleStreamIds) { ...@@ -124,7 +129,7 @@ TEST_F(GpuChannelTest, IncompatibleStreamIds) {
EXPECT_TRUE(stub); EXPECT_TRUE(stub);
// Create second context in same share group but different stream. // Create second context in same share group but different stream.
int32_t kRouteId2 = 2; int32_t kRouteId2 = kRouteId1 + 1;
int32_t kStreamId2 = 2; int32_t kStreamId2 = 2;
init_params.share_group_id = kRouteId1; init_params.share_group_id = kRouteId1;
...@@ -147,7 +152,8 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) { ...@@ -147,7 +152,8 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) {
ASSERT_TRUE(channel); ASSERT_TRUE(channel);
// Create first context, we will share this one. // Create first context, we will share this one.
int32_t kSharedRouteId = 1; int32_t kSharedRouteId =
static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
{ {
SCOPED_TRACE("kSharedRouteId"); SCOPED_TRACE("kSharedRouteId");
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
...@@ -168,7 +174,7 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) { ...@@ -168,7 +174,7 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) {
EXPECT_TRUE(channel->LookupCommandBuffer(kSharedRouteId)); EXPECT_TRUE(channel->LookupCommandBuffer(kSharedRouteId));
// This context shares with the first one, this should be possible. // This context shares with the first one, this should be possible.
int32_t kFriendlyRouteId = 2; int32_t kFriendlyRouteId = kSharedRouteId + 1;
{ {
SCOPED_TRACE("kFriendlyRouteId"); SCOPED_TRACE("kFriendlyRouteId");
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
...@@ -193,7 +199,7 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) { ...@@ -193,7 +199,7 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) {
// Meanwhile another context is being made pointing to the shared one. This // Meanwhile another context is being made pointing to the shared one. This
// should fail. // should fail.
int32_t kAnotherRouteId = 3; int32_t kAnotherRouteId = kFriendlyRouteId + 1;
{ {
SCOPED_TRACE("kAnotherRouteId"); SCOPED_TRACE("kAnotherRouteId");
GPUCreateCommandBufferConfig init_params; GPUCreateCommandBufferConfig init_params;
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_message_macros.h"
namespace gpu {
// Registers a low-priority scheduler sequence and a sync point client state
// whose (namespace, command buffer ID) pair matches what the client-side
// proxy uses to build sync tokens (GPU_IO + channel/route-derived ID).
// |channel| is not owned; Shutdown() must be called before it goes away.
ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(GpuChannel* channel,
                                                       int32_t route_id)
    : channel_(channel),
      sequence_(channel->scheduler()->CreateSequence(SchedulingPriority::kLow)),
      sync_point_client_state_(
          channel->sync_point_manager()->CreateSyncPointClientState(
              CommandBufferNamespace::GPU_IO,
              CommandBufferIdFromChannelAndRoute(channel->client_id(),
                                                 route_id),
              sequence_)),
      main_task_runner_(channel->task_runner()),
      io_task_runner_(channel->io_task_runner()) {}
// Dispatches incoming IPC messages on the IO thread. Returns false both for
// unrecognized messages and when the hardware decode feature is disabled, so
// the caller (GpuChannelMessageFilter) can treat the message as an error.
bool ImageDecodeAcceleratorStub::OnMessageReceived(const IPC::Message& msg) {
  DCHECK(io_task_runner_->BelongsToCurrentThread());
  // Refuse all decode traffic unless the feature flag is on; the renderer
  // should not be sending these messages in that case.
  if (!base::FeatureList::IsEnabled(
          features::kVaapiJpegImageDecodeAcceleration)) {
    return false;
  }
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(ImageDecodeAcceleratorStub, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_ScheduleImageDecode,
                        OnScheduleImageDecode)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
// Tears down the scheduler/sync-point registrations and drops |channel_|.
// Runs on the main thread; the lock keeps this mutually exclusive with
// OnScheduleImageDecode() running on the IO thread, which checks |channel_|
// before doing any work.
void ImageDecodeAcceleratorStub::Shutdown() {
  DCHECK(main_task_runner_->BelongsToCurrentThread());
  base::AutoLock lock(lock_);
  sync_point_client_state_->Destroy();
  channel_->scheduler()->DestroySequence(sequence_);
  // Signals to the IO thread that no further decodes may be scheduled.
  channel_ = nullptr;
}
// Shutdown() must have run before the last reference goes away; it is the
// only place that clears |channel_|.
ImageDecodeAcceleratorStub::~ImageDecodeAcceleratorStub() {
  DCHECK(!channel_);
}
// Handles a GpuChannelMsg_ScheduleImageDecode from the renderer on the IO
// thread. Currently only validates that the channel is still alive; the
// actual decode scheduling is not implemented yet.
void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
    const GpuChannelMsg_ScheduleImageDecode_Params& decode_params,
    uint64_t release_count) {
  DCHECK(io_task_runner_->BelongsToCurrentThread());
  base::AutoLock lock(lock_);
  // Shutdown() may have already run on the main thread, in which case the
  // request is silently dropped.
  if (channel_ == nullptr)
    return;

  // TODO(andrescj): schedule the release of the decode sync token and start the
  // decode.
}
} // namespace gpu
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
#define GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "gpu/command_buffer/service/sequence_id.h"
struct GpuChannelMsg_ScheduleImageDecode_Params;
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace IPC {
class Message;
} // namespace IPC
namespace gpu {
class GpuChannel;
class SyncPointClientState;
// Processes incoming image decode requests from renderers: it schedules the
// decode with the appropriate hardware decode accelerator and releases sync
// tokens as decodes complete. These sync tokens must be generated on the client
// side (in ImageDecodeAcceleratorProxy) using the following information:
//
// - The command buffer namespace is GPU_IO.
// - The command buffer ID is created using the
// CommandBufferIdFromChannelAndRoute() function using
// GpuChannelReservedRoutes::kImageDecodeAccelerator as the route ID.
// - The release count should be incremented for each decode request.
//
// An object of this class is meant to be used in
// both the IO thread (for receiving decode requests) and the main thread (for
// processing completed decodes).
class ImageDecodeAcceleratorStub
    : public base::RefCountedThreadSafe<ImageDecodeAcceleratorStub> {
 public:
  // |channel| is not owned; Shutdown() must be called before the channel is
  // destroyed.
  ImageDecodeAcceleratorStub(GpuChannel* channel, int32_t route_id);
  // Processes a message from the renderer. Should be called on the IO thread.
  bool OnMessageReceived(const IPC::Message& msg);
  // Called on the main thread to indicate that |channel_| should no longer be
  // used.
  void Shutdown();
 private:
  friend class base::RefCountedThreadSafe<ImageDecodeAcceleratorStub>;
  ~ImageDecodeAcceleratorStub();
  // Handler for GpuChannelMsg_ScheduleImageDecode; runs on the IO thread.
  void OnScheduleImageDecode(
      const GpuChannelMsg_ScheduleImageDecode_Params& params,
      uint64_t release_count);
  // Guards the members below against concurrent access from the IO thread
  // (message handling) and the main thread (Shutdown()).
  base::Lock lock_;
  // Owning channel; cleared by Shutdown() when the channel goes away.
  GpuChannel* channel_ GUARDED_BY(lock_);
  // Scheduler sequence used for this stub's work.
  SequenceId sequence_ GUARDED_BY(lock_);
  // Releases the sync tokens generated by the client-side proxy.
  scoped_refptr<SyncPointClientState> sync_point_client_state_
      GUARDED_BY(lock_);
  scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
  DISALLOW_COPY_AND_ASSIGN(ImageDecodeAcceleratorStub);
};
} // namespace gpu
#endif // GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment