Commit c12f2b27 authored by lionel.g.landwerlin, committed by Commit bot

media: VideoFrame: add offset for shared memory buffers

A frame's data may be located at an offset within a shared memory
buffer. This adds metadata to media::VideoFrame to support this use
case.

BUG=455409
TEST=none

Review URL: https://codereview.chromium.org/877353002

Cr-Commit-Position: refs/heads/master@{#315428}
parent 7a9ac631
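
The core of the change is in the GPU-side handler below: it cannot simply map the shared memory at |buffer_offset|, because mappings must start at a multiple of the VM allocation granularity. It therefore maps from the previous aligned boundary, adds the remainder back to the mapped pointer, and validates the arithmetic with base::CheckedNumeric, since the offset and size arrive over IPC from a less trusted process. A minimal standalone sketch of the same idea, using POSIX mmap() instead of Chromium's base::SharedMemory (the helper name and error handling are illustrative only, not part of the change):

// Sketch only: map |size| bytes that start |offset| bytes into the shared
// memory object referred to by |fd|. mmap() requires a page-aligned offset,
// so map from the previous page boundary and skip the remainder.
#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>
#include <cstdint>

void* MapAtOffset(int fd, size_t offset, size_t size) {
  const size_t granularity = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t misalignment = offset % granularity;
  const size_t aligned_offset = offset - misalignment;
  // The real change guards this addition with base::CheckedNumeric, because
  // |offset| and |size| come from an untrusted process and could overflow.
  const size_t map_size = size + misalignment;
  void* mapping = mmap(nullptr, map_size, PROT_READ, MAP_SHARED, fd,
                       static_cast<off_t>(aligned_offset));
  if (mapping == MAP_FAILED)
    return nullptr;
  return static_cast<uint8_t*>(mapping) + misalignment;
}
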
@@ -134,6 +134,7 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
 static_cast<uint8*>(output_buffer->data()),
 output_buffer->size(),
 base::SharedMemory::NULLHandle(),
+0,
 base::TimeDelta(),
 base::Closure());
 }
......
@@ -502,6 +502,7 @@ void VideoCaptureController::VideoCaptureDeviceClient::OnIncomingCapturedData(
 media::VideoFrame::AllocationSize(media::VideoFrame::I420,
 dimensions),
 base::SharedMemory::NULLHandle(),
+0,
 base::TimeDelta(),
 base::Closure());
 DCHECK(frame.get());
......
@@ -135,6 +135,7 @@ class VideoCaptureControllerTest : public testing::Test {
 reinterpret_cast<uint8*>(buffer->data()),
 media::VideoFrame::AllocationSize(media::VideoFrame::I420, dimensions),
 base::SharedMemory::NULLHandle(),
+0,
 base::TimeDelta(),
 base::Closure());
 }
......
@@ -184,7 +184,8 @@ void GpuVideoEncodeAcceleratorHost::Encode(
 }
 Send(new AcceleratedVideoEncoderMsg_Encode(
-encoder_route_id_, next_frame_id_, handle, frame_size, force_keyframe));
+encoder_route_id_, next_frame_id_, handle, frame->shared_memory_offset(),
+frame_size, force_keyframe));
 frame_map_[next_frame_id_] = frame;
 // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
......
@@ -710,9 +710,10 @@ IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_ErrorNotification,
 // Queue a input buffer to the encoder to encode. |frame_id| will be returned by
 // AcceleratedVideoEncoderHostMsg_NotifyInputDone.
-IPC_MESSAGE_ROUTED4(AcceleratedVideoEncoderMsg_Encode,
+IPC_MESSAGE_ROUTED5(AcceleratedVideoEncoderMsg_Encode,
 int32 /* frame_id */,
 base::SharedMemoryHandle /* buffer_handle */,
+uint32 /* buffer_offset */,
 uint32 /* buffer_size */,
 bool /* force_keyframe */)
......
@@ -9,6 +9,8 @@
 #include "base/logging.h"
 #include "base/memory/shared_memory.h"
 #include "base/message_loop/message_loop_proxy.h"
+#include "base/numerics/safe_math.h"
+#include "base/sys_info.h"
 #include "build/build_config.h"
 #include "content/common/gpu/gpu_channel.h"
 #include "content/common/gpu/gpu_messages.h"
@@ -243,6 +245,7 @@ GpuVideoEncodeAccelerator::CreateAndroidVEA() {
 void GpuVideoEncodeAccelerator::OnEncode(int32 frame_id,
 base::SharedMemoryHandle buffer_handle,
+uint32 buffer_offset,
 uint32 buffer_size,
 bool force_keyframe) {
 DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode(): frame_id=" << frame_id
@@ -257,16 +260,30 @@ void GpuVideoEncodeAccelerator::OnEncode(int32 frame_id,
 return;
 }
+uint32 aligned_offset =
+buffer_offset % base::SysInfo::VMAllocationGranularity();
+base::CheckedNumeric<off_t> map_offset = buffer_offset;
+map_offset -= aligned_offset;
+base::CheckedNumeric<size_t> map_size = buffer_size;
+map_size += aligned_offset;
+if (!map_offset.IsValid() || !map_size.IsValid()) {
+DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode():"
+<< " invalid (buffer_offset,buffer_size)";
+NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+return;
+}
 scoped_ptr<base::SharedMemory> shm(
 new base::SharedMemory(buffer_handle, true));
-if (!shm->Map(buffer_size)) {
+if (!shm->MapAt(map_offset.ValueOrDie(), map_size.ValueOrDie())) {
 DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
 "could not map frame_id=" << frame_id;
 NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
 return;
 }
-uint8* shm_memory = reinterpret_cast<uint8*>(shm->memory());
+uint8* shm_memory = reinterpret_cast<uint8*>(shm->memory()) + aligned_offset;
 scoped_refptr<media::VideoFrame> frame =
 media::VideoFrame::WrapExternalPackedMemory(
 input_format_,
@@ -276,6 +293,7 @@ void GpuVideoEncodeAccelerator::OnEncode(int32 frame_id,
 shm_memory,
 buffer_size,
 buffer_handle,
+buffer_offset,
 base::TimeDelta(),
 // It's turtles all the way down...
 base::Bind(base::IgnoreResult(&base::MessageLoopProxy::PostTask),
......
@@ -79,6 +79,7 @@ class GpuVideoEncodeAccelerator
 // process.
 void OnEncode(int32 frame_id,
 base::SharedMemoryHandle buffer_handle,
+uint32 buffer_offset,
 uint32 buffer_size,
 bool force_keyframe);
 void OnUseOutputBitstreamBuffer(int32 buffer_id,
......
@@ -502,6 +502,7 @@ void RTCVideoEncoder::Impl::EncodeOneFrame() {
 reinterpret_cast<uint8*>(input_buffer->memory()),
 input_buffer->mapped_size(),
 input_buffer->handle(),
+0,
 base::TimeDelta(),
 base::Bind(&RTCVideoEncoder::Impl::EncodeFrameFinished, this, index));
 if (!frame.get()) {
......
@@ -244,6 +244,7 @@ void VideoCaptureImpl::OnBufferReceived(int buffer_id,
 reinterpret_cast<uint8*>(buffer->buffer->memory()),
 buffer->buffer_size,
 buffer->buffer->handle(),
+0,
 timestamp - first_frame_timestamp_,
 media::BindToCurrentLoop(
 base::Bind(&VideoCaptureImpl::OnClientBufferFinished,
......
@@ -286,6 +286,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
 uint8* data,
 size_t data_size,
 base::SharedMemoryHandle handle,
+size_t data_offset,
 base::TimeDelta timestamp,
 const base::Closure& no_longer_needed_cb) {
 const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
@@ -306,6 +307,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
 timestamp,
 false));
 frame->shared_memory_handle_ = handle;
+frame->shared_memory_offset_ = data_offset;
 frame->strides_[kYPlane] = new_coded_size.width();
 frame->strides_[kUPlane] = new_coded_size.width() / 2;
 frame->strides_[kVPlane] = new_coded_size.width() / 2;
@@ -685,6 +687,7 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
 natural_size_(natural_size),
 mailbox_holder_(mailbox_holder.Pass()),
 shared_memory_handle_(base::SharedMemory::NULLHandle()),
+shared_memory_offset_(0),
 timestamp_(timestamp),
 release_sync_point_(0),
 end_of_stream_(end_of_stream),
@@ -791,6 +794,10 @@ base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
 return shared_memory_handle_;
 }
+size_t VideoFrame::shared_memory_offset() const {
+return shared_memory_offset_;
+}
 void VideoFrame::UpdateReleaseSyncPoint(SyncPointClient* client) {
 DCHECK_EQ(format_, NATIVE_TEXTURE);
 base::AutoLock locker(release_sync_point_lock_);
......
@@ -139,6 +139,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
 uint8* data,
 size_t data_size,
 base::SharedMemoryHandle handle,
+size_t shared_memory_offset,
 base::TimeDelta timestamp,
 const base::Closure& no_longer_needed_cb);
@@ -300,6 +301,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
 // Returns the shared-memory handle, if present
 base::SharedMemoryHandle shared_memory_handle() const;
+// Returns the offset into the shared memory where the frame data begins.
+size_t shared_memory_offset() const;
 bool allow_overlay() const { return allow_overlay_; }
 #if defined(OS_POSIX)
@@ -393,6 +397,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
 // Shared memory handle, if this frame was allocated from shared memory.
 base::SharedMemoryHandle shared_memory_handle_;
+// Offset in shared memory buffer.
+size_t shared_memory_offset_;
 #if defined(OS_POSIX)
 // Dmabufs for each plane, if this frame is wrapping memory
 // acquired via dmabuf.
......
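
For reference, the new offset simply shifts where the packed frame begins inside the shared memory buffer; the strides set in WrapExternalPackedMemory above imply a tightly packed I420 layout. A small illustrative helper (not Chromium code) showing how the plane offsets relate to the buffer offset, assuming the usual Y, U, V plane order and even coded dimensions:

#include <cstddef>

struct I420Layout {
  size_t y_offset;    // start of the Y plane within the shared memory buffer
  size_t u_offset;    // start of the U plane
  size_t v_offset;    // start of the V plane
  size_t total_size;  // bytes occupied by the frame (width * height * 3 / 2)
};

// Computes plane offsets for a tightly packed I420 frame that begins
// |data_offset| bytes into a shared memory buffer.
I420Layout PackedI420Layout(size_t data_offset, size_t width, size_t height) {
  const size_t y_size = width * height;
  const size_t uv_size = y_size / 4;  // each chroma plane is width/2 x height/2
  I420Layout layout;
  layout.y_offset = data_offset;
  layout.u_offset = layout.y_offset + y_size;
  layout.v_offset = layout.u_offset + uv_size;
  layout.total_size = y_size + 2 * uv_size;
  return layout;
}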