Commit 778ab8a2 authored by Jeffrey Kardatzke, committed by Chromium LUCI CQ

media: H264 CENCv1 support for VAAPI

This adds handling of CENCv1 (full sample encryption) streams for H264
with the VAAPI accelerator. The decoder detects when this case occurs
and then offloads slice header parsing to the accelerator.
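
Detection keys off the subsample layout: with CENCv1 full sample encryption
only the NALU type byte of the slice NALU is clear and everything after it is
encrypted. A minimal sketch of that check (the helper name is illustrative;
the actual condition lives in H264Decoder::Decode() below):

  // Sketch only; assumes media::SubsampleEntry and its clear_bytes field.
  bool IsCencV1FullSampleSlice(const std::vector<SubsampleEntry>& subsamples) {
    // One clear byte (the NALU type) with the remainder encrypted means the
    // decoder cannot parse the slice header itself and offloads it to the
    // accelerator via ParseEncryptedSliceHeader().
    return !subsamples.empty() && subsamples[0].clear_bytes == 1;
  }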

BUG=b:153111783,b:155508443
TEST=Unit tests pass, CENCv1 playback works

Change-Id: Ieb913935269163d09d24b88a8ea987a6e8ffce33
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2552634
Commit-Queue: Jeffrey Kardatzke <jkardatzke@google.com>
Reviewed-by: Miguel Casas <mcasas@chromium.org>
Reviewed-by: Xiaohan Wang <xhwang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#833458}
parent ca384817
......@@ -89,6 +89,17 @@ H264Decoder::H264Accelerator::Status H264Decoder::H264Accelerator::SetStream(
return H264Decoder::H264Accelerator::Status::kNotSupported;
}
H264Decoder::H264Accelerator::Status
H264Decoder::H264Accelerator::ParseEncryptedSliceHeader(
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) {
return H264Decoder::H264Accelerator::Status::kNotSupported;
}
H264Decoder::H264Decoder(std::unique_ptr<H264Accelerator> accelerator,
VideoCodecProfile profile,
const VideoColorSpace& container_color_space)
......@@ -140,9 +151,9 @@ void H264Decoder::Reset() {
state_ = kAfterReset;
}
void H264Decoder::PrepareRefPicLists(const H264SliceHeader* slice_hdr) {
ConstructReferencePicListsP(slice_hdr);
ConstructReferencePicListsB(slice_hdr);
void H264Decoder::PrepareRefPicLists() {
ConstructReferencePicListsP();
ConstructReferencePicListsB();
}
bool H264Decoder::ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
......@@ -421,8 +432,7 @@ struct LongTermPicNumAscCompare {
}
};
void H264Decoder::ConstructReferencePicListsP(
const H264SliceHeader* slice_hdr) {
void H264Decoder::ConstructReferencePicListsP() {
// RefPicList0 (8.2.4.2.1) [[1] [2]], where:
// [1] shortterm ref pics sorted by descending pic_num,
// [2] longterm ref pics by ascending long_term_pic_num.
......@@ -456,8 +466,7 @@ struct POCDescCompare {
}
};
void H264Decoder::ConstructReferencePicListsB(
const H264SliceHeader* slice_hdr) {
void H264Decoder::ConstructReferencePicListsB() {
// RefPicList0 (8.2.4.2.3) [[1] [2] [3]], where:
// [1] shortterm ref pics with POC < curr_pic's POC sorted by descending POC,
// [2] shortterm ref pics with POC > curr_pic's POC by ascending POC,
......@@ -787,7 +796,7 @@ H264Decoder::H264Accelerator::Status H264Decoder::StartNewFrame(
return H264Accelerator::Status::kFail;
UpdatePicNums(frame_num);
PrepareRefPicLists(slice_hdr);
PrepareRefPicLists();
return accelerator_->SubmitFrameMetadata(sps, pps, dpb_, ref_pic_list_p0_,
ref_pic_list_b0_, ref_pic_list_b1_,
......@@ -1239,6 +1248,15 @@ bool H264Decoder::HandleFrameNumGap(int frame_num) {
return true;
}
H264Decoder::H264Accelerator::Status H264Decoder::ProcessEncryptedSliceHeader(
const std::vector<SubsampleEntry>& subsamples) {
DCHECK(curr_nalu_);
DCHECK(curr_slice_hdr_);
return accelerator_->ParseEncryptedSliceHeader(
curr_nalu_->data, curr_nalu_->size, subsamples, last_sps_nalu_,
last_pps_nalu_, curr_slice_hdr_.get());
}
H264Decoder::H264Accelerator::Status H264Decoder::PreprocessCurrentSlice() {
const H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
DCHECK(slice_hdr);
......@@ -1286,8 +1304,13 @@ H264Decoder::H264Accelerator::Status H264Decoder::ProcessCurrentSlice() {
max_pic_num_ = 2 * max_frame_num_;
H264Picture::Vector ref_pic_list0, ref_pic_list1;
if (!ModifyReferencePicLists(slice_hdr, &ref_pic_list0, &ref_pic_list1))
// If we are using full sample encryption then we do not have the information
// we need to modify the ref pic lists here, but that's OK because the
// accelerator does not need them to be submitted in this case.
if (!slice_hdr->full_sample_encryption &&
!ModifyReferencePicLists(slice_hdr, &ref_pic_list0, &ref_pic_list1)) {
return H264Accelerator::Status::kFail;
}
const H264PPS* pps = parser_.GetPPS(curr_pps_id_);
if (!pps)
......@@ -1415,11 +1438,30 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
// additional key has been provided, for example), then the remaining
// steps will be executed.
if (!curr_slice_hdr_) {
curr_slice_hdr_.reset(new H264SliceHeader());
par_res =
parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
if (par_res != H264Parser::kOk)
SET_ERROR_AND_RETURN();
curr_slice_hdr_ = std::make_unique<H264SliceHeader>();
state_ = kParseSliceHeader;
}
if (state_ == kParseSliceHeader) {
// Check if the slice header is encrypted.
bool parsed_header = false;
if (current_decrypt_config_) {
const std::vector<SubsampleEntry>& subsamples =
parser_.GetCurrentSubsamples();
// With full sample encryption there is only a single clear byte covering the
// NALU type information, and the rest is encrypted.
if (!subsamples.empty() && subsamples[0].clear_bytes == 1) {
CHECK_ACCELERATOR_RESULT(ProcessEncryptedSliceHeader(subsamples));
parsed_header = true;
curr_slice_hdr_->pic_parameter_set_id = last_parsed_pps_id_;
}
}
if (!parsed_header) {
par_res =
parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
if (par_res != H264Parser::kOk)
SET_ERROR_AND_RETURN();
}
state_ = kTryPreprocessCurrentSlice;
}
......@@ -1469,6 +1511,8 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
if (!ProcessSPS(sps_id, &need_new_buffers))
SET_ERROR_AND_RETURN();
last_sps_nalu_.assign(curr_nalu_->data,
curr_nalu_->data + curr_nalu_->size);
if (state_ == kNeedStreamMetadata)
state_ = kAfterReset;
......@@ -1485,13 +1529,13 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
}
case H264NALU::kPPS: {
int pps_id;
CHECK_ACCELERATOR_RESULT(FinishPrevFrameIfPresent());
par_res = parser_.ParsePPS(&pps_id);
par_res = parser_.ParsePPS(&last_parsed_pps_id_);
if (par_res != H264Parser::kOk)
SET_ERROR_AND_RETURN();
last_pps_nalu_.assign(curr_nalu_->data,
curr_nalu_->data + curr_nalu_->size);
break;
}
......
......@@ -90,6 +90,23 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
const H264Picture::Vector& ref_pic_listb1,
scoped_refptr<H264Picture> pic) = 0;
// Used for handling CENCv1 streams where the entire slice header, except
// for the NALU type byte, is encrypted. |data| and |size| represent the
encrypted slice data. |subsamples| specifies the encryption layout and
should contain a single clear byte with the rest encrypted. |sps_nalu_data|
// and |pps_nalu_data| are the SPS and PPS NALUs respectively.
// |slice_header_out| should have its fields filled in upon successful
// return. Returns kOk if successful, kFail if there are errors, or
// kTryAgain if the accelerator needs additional data before being able to
// proceed.
virtual Status ParseEncryptedSliceHeader(
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out);
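// Editor's sketch (not part of this change): both implementations of this
// interface in the CL (the test helper below and the VAAPI delegate) rebuild
// a parseable Annex-B buffer from |sps_nalu_data|, |pps_nalu_data| and the
// still-encrypted slice NALU before handing it to the parser/hardware. A
// hedged, standalone illustration of that assembly (helper name is
// illustrative; assumes <vector>, <cstdint> and <iterator>):
std::vector<uint8_t> BuildAnnexBForEncryptedSlice(
    const std::vector<uint8_t>& sps_nalu_data,
    const std::vector<uint8_t>& pps_nalu_data,
    const uint8_t* slice_data,
    size_t slice_size) {
  static constexpr uint8_t kStartCode[] = {0u, 0u, 1u};
  std::vector<uint8_t> full_data;
  full_data.reserve(sps_nalu_data.size() + pps_nalu_data.size() + slice_size +
                    3 * sizeof(kStartCode));
  // Start code + SPS NALU.
  full_data.insert(full_data.end(), std::begin(kStartCode), std::end(kStartCode));
  full_data.insert(full_data.end(), sps_nalu_data.begin(), sps_nalu_data.end());
  // Start code + PPS NALU.
  full_data.insert(full_data.end(), std::begin(kStartCode), std::end(kStartCode));
  full_data.insert(full_data.end(), pps_nalu_data.begin(), pps_nalu_data.end());
  // Start code + encrypted slice NALU.
  full_data.insert(full_data.end(), std::begin(kStartCode), std::end(kStartCode));
  full_data.insert(full_data.end(), slice_data, slice_data + slice_size);
  return full_data;
}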
// Submit one slice for the current frame, passing the current |pps| and
// |pic| (same as in SubmitFrameMetadata()), the parsed header for the
// current slice in |slice_hdr|, and the reordered |ref_pic_listX|,
......@@ -189,6 +206,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// retryable error) is returned. The next time Decode() is called the call
// that previously failed will be retried and execution continues from
// there (if possible).
kParseSliceHeader,
kTryPreprocessCurrentSlice,
kEnsurePicture,
kTryNewFrame,
......@@ -199,6 +217,12 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// Process H264 stream structures.
bool ProcessSPS(int sps_id, bool* need_new_buffers);
// Processes a CENCv1 encrypted slice header and fills in |curr_slice_hdr_|
// with the relevant parsed fields.
H264Accelerator::Status ProcessEncryptedSliceHeader(
const std::vector<SubsampleEntry>& subsamples);
// Process current slice header to discover if we need to start a new picture,
// finishing up the current one.
H264Accelerator::Status PreprocessCurrentSlice();
......@@ -223,7 +247,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
bool UpdateMaxNumReorderFrames(const H264SPS* sps);
// Prepare reference picture lists for the current frame.
void PrepareRefPicLists(const H264SliceHeader* slice_hdr);
void PrepareRefPicLists();
// Prepare reference picture lists for the given slice.
bool ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
H264Picture::Vector* ref_pic_list0,
......@@ -231,8 +255,8 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// Construct initial reference picture lists for use in decoding of
// P and B pictures (see 8.2.4 in spec).
void ConstructReferencePicListsP(const H264SliceHeader* slice_hdr);
void ConstructReferencePicListsB(const H264SliceHeader* slice_hdr);
void ConstructReferencePicListsP();
void ConstructReferencePicListsB();
// Helper functions for reference list construction, per spec.
int PicNumF(const H264Picture& pic);
......@@ -347,6 +371,16 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
int curr_sps_id_;
int curr_pps_id_;
// ID of the last PPS that was parsed. Used for full sample encryption, which
// assumes this is streaming content that does not switch between different
// PPSes in the stream (they are present once in the container for the
// stream).
int last_parsed_pps_id_;
// Copies of the last SPS and PPS NALUs, used for full sample encryption.
std::vector<uint8_t> last_sps_nalu_;
std::vector<uint8_t> last_pps_nalu_;
// Current NALU and slice header being processed.
std::unique_ptr<H264NALU> curr_nalu_;
std::unique_ptr<H264SliceHeader> curr_slice_hdr_;
......
......@@ -30,7 +30,6 @@ using ::testing::MatcherInterface;
using ::testing::MatchResultListener;
using ::testing::Mock;
using ::testing::Return;
using ::testing::WithArg;
namespace media {
namespace {
......@@ -65,52 +64,70 @@ MATCHER(SubsampleSizeMatches, "Verify subsample sizes match buffer size") {
return subsample_total_size == buffer_size;
}
// Given a H264NALU (arg0), compute the slice header and store a copy in
// both |arg1| and |slice_header|. This assumes that the NALU comes from
// kBaselineFrame0.
ACTION_P(ComputeSliceHeader, slice_header) {
const H264NALU& slice_nalu = arg0;
// |arg1| and |slice_header| are H264SliceHeader*.
// Ideally we could just parse |slice_nalu|, but the parser needs additional
// data (like SPS and PPS entries) which we don't have. So this simulates
// parsing of |slice_nalu| by simply setting the appropriate fields
// Zero out |slice_header| so there is no need to set a lot of default values.
std::memset(slice_header, 0, sizeof(H264SliceHeader));
// Extract the values directly from the H264NALU provided.
slice_header->idr_pic_flag = (slice_nalu.nal_unit_type == 5);
slice_header->nal_ref_idc = slice_nalu.nal_ref_idc;
slice_header->nalu_data = slice_nalu.data;
slice_header->nalu_size = slice_nalu.size;
// Don't want to duplicate all the work of H264Parser.ParseSliceHeader(),
// so the following were determined by looking at the slice header after
// H264_Parser.ParseSliceHeader() was called on kBaselineFrame0.
slice_header->header_bit_size = 0x24;
slice_header->slice_type = 7;
slice_header->slice_qp_delta = 8;
slice_header->dec_ref_pic_marking_bit_size = 2u;
// Now that we have created our local copy of the slice header, copy it into
// |arg1| and return success.
std::memcpy(arg1, slice_header, sizeof(H264SliceHeader));
// Emulates encrypted slice header parsing. We don't actually encrypt the data
// so we can easily do this by just parsing it.
H264Decoder::H264Accelerator::Status ParseSliceHeader(
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_hdr_out) {
EXPECT_TRUE(!sps_nalu_data.empty());
EXPECT_TRUE(!pps_nalu_data.empty());
// Construct the bitstream for parsing.
std::vector<uint8_t> full_data;
const std::vector<uint8_t> start_code = {0u, 0u, 1u};
constexpr size_t kExtraBytes = 3 * 3; // 3 byte start code for 3 NALUs
full_data.reserve(size + sps_nalu_data.size() + pps_nalu_data.size() +
kExtraBytes);
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), sps_nalu_data.begin(), sps_nalu_data.end());
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), pps_nalu_data.begin(), pps_nalu_data.end());
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), data, data + size);
H264Parser parser;
parser.SetStream(full_data.data(), full_data.size());
while (true) {
H264NALU nalu;
H264Parser::Result res = parser.AdvanceToNextNALU(&nalu);
if (res == H264Parser::kEOStream)
break;
EXPECT_EQ(H264Parser::kOk, res);
switch (nalu.nal_unit_type) {
case H264NALU::kSPS:
int sps_id;
EXPECT_EQ(H264Parser::kOk, parser.ParseSPS(&sps_id));
break;
case H264NALU::kPPS:
int pps_id;
EXPECT_EQ(H264Parser::kOk, parser.ParsePPS(&pps_id));
break;
case H264NALU::kIDRSlice: // fallthrough
case H264NALU::kNonIDRSlice:
EXPECT_EQ(H264Parser::kOk,
parser.ParseSliceHeader(nalu, slice_hdr_out));
slice_hdr_out->full_sample_encryption = true;
break;
}
}
return H264Decoder::H264Accelerator::Status::kOk;
}
// Compare 2 H264SliceHeader objects for equality.
MATCHER_P(SliceHeaderMatches, slice_header, "Verify H264SliceHeader objects") {
// Rather than match pointers, the contents must be the same.
return std::memcmp(arg, slice_header, sizeof(H264SliceHeader)) == 0;
}
class MockH264Accelerator : public H264Decoder::H264Accelerator {
public:
MockH264Accelerator() = default;
MOCK_METHOD0(CreateH264Picture, scoped_refptr<H264Picture>());
MOCK_METHOD1(SubmitDecode, Status(scoped_refptr<H264Picture> pic));
MOCK_METHOD6(ParseEncryptedSliceHeader,
Status(const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_hdr_out));
MOCK_METHOD7(SubmitFrameMetadata,
Status(const H264SPS* sps,
const H264PPS* pps,
......@@ -150,8 +167,11 @@ class H264DecoderTest : public ::testing::Test {
// Keeps decoding the input bitstream set at |SetInputFrameFiles| until the
// decoder has consumed all bitstreams or returned from
// |H264Decoder::Decode|. Returns the same result as |H264Decoder::Decode|.
AcceleratedVideoDecoder::DecodeResult Decode();
// |H264Decoder::Decode|. If |full_sample_encryption| is true, then it sets
// a DecryptConfig for the DecoderBuffer that indicates all but the first
// byte are encrypted. Returns the same result as |H264Decoder::Decode|.
AcceleratedVideoDecoder::DecodeResult Decode(
bool full_sample_encryption = false);
protected:
std::unique_ptr<H264Decoder> decoder_;
......@@ -192,7 +212,8 @@ void H264DecoderTest::SetInputFrameFiles(
input_frame_files_.push(f);
}
AcceleratedVideoDecoder::DecodeResult H264DecoderTest::Decode() {
AcceleratedVideoDecoder::DecodeResult H264DecoderTest::Decode(
bool full_sample_encryption) {
while (true) {
auto result = decoder_->Decode();
int32_t bitstream_id = 0;
......@@ -204,6 +225,15 @@ AcceleratedVideoDecoder::DecodeResult H264DecoderTest::Decode() {
CHECK(base::ReadFileToString(input_file, &bitstream_));
decoder_buffer_ = DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(bitstream_.data()), bitstream_.size());
if (full_sample_encryption) {
// We only use this in 2 tests; both use the same data, where the offset to
// the byte after the NALU type for the slice header is 669.
constexpr int kOffsetToSliceHeader = 669;
decoder_buffer_->set_decrypt_config(DecryptConfig::CreateCencConfig(
"kFakeKeyId", std::string(DecryptConfig::kDecryptionKeySize, 'x'),
{SubsampleEntry(kOffsetToSliceHeader,
bitstream_.size() - kOffsetToSliceHeader)}));
}
EXPECT_NE(decoder_buffer_.get(), nullptr);
decoder_->SetStream(bitstream_id++, *decoder_buffer_);
}
......@@ -260,6 +290,28 @@ TEST_F(H264DecoderTest, DecodeSingleFrame) {
ASSERT_TRUE(decoder_->Flush());
}
// This is for CENCv1 full sample encryption.
TEST_F(H264DecoderTest, DecodeSingleEncryptedFrame) {
SetInputFrameFiles({kBaselineFrame0});
ASSERT_EQ(AcceleratedVideoDecoder::kConfigChange, Decode(true));
EXPECT_EQ(gfx::Size(320, 192), decoder_->GetPicSize());
EXPECT_EQ(H264PROFILE_BASELINE, decoder_->GetProfile());
EXPECT_LE(9u, decoder_->GetRequiredNumOfPictures());
{
InSequence sequence;
EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _, _))
.WillOnce(Invoke(&ParseSliceHeader));
EXPECT_CALL(*accelerator_, CreateH264Picture());
EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitSlice(_, _, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitDecode(_));
EXPECT_CALL(*accelerator_, OutputPicture(_));
}
ASSERT_EQ(AcceleratedVideoDecoder::kRanOutOfStreamData, Decode());
ASSERT_TRUE(decoder_->Flush());
}
TEST_F(H264DecoderTest, SkipNonIDRFrames) {
SetInputFrameFiles({kBaselineFrame1, kBaselineFrame2, kBaselineFrame0});
ASSERT_EQ(AcceleratedVideoDecoder::kConfigChange, Decode());
......@@ -539,6 +591,39 @@ TEST_F(H264DecoderTest, SetEncryptedStream) {
EXPECT_TRUE(decoder_->Flush());
}
TEST_F(H264DecoderTest, ParseEncryptedSliceHeaderRetry) {
SetInputFrameFiles({kBaselineFrame0});
ASSERT_EQ(AcceleratedVideoDecoder::kConfigChange, Decode(true));
EXPECT_EQ(gfx::Size(320, 192), decoder_->GetPicSize());
EXPECT_EQ(H264PROFILE_BASELINE, decoder_->GetProfile());
EXPECT_LE(9u, decoder_->GetRequiredNumOfPictures());
EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _, _))
.WillOnce(Return(H264Decoder::H264Accelerator::Status::kTryAgain));
ASSERT_EQ(AcceleratedVideoDecoder::kTryAgain, Decode(true));
// Try again, assuming key still not set. Only ParseEncryptedSliceHeader()
// should be called again.
EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _, _))
.WillOnce(Return(H264Decoder::H264Accelerator::Status::kTryAgain));
ASSERT_EQ(AcceleratedVideoDecoder::kTryAgain, Decode(true));
// Assume key has been provided now, next call to Decode() should proceed.
{
InSequence sequence;
EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _, _))
.WillOnce(Invoke(&ParseSliceHeader));
EXPECT_CALL(*accelerator_, CreateH264Picture());
EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitSlice(_, _, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitDecode(WithPoc(0)));
EXPECT_CALL(*accelerator_, OutputPicture(WithPoc(0)));
}
ASSERT_EQ(AcceleratedVideoDecoder::kRanOutOfStreamData, Decode(true));
ASSERT_TRUE(decoder_->Flush());
}
TEST_F(H264DecoderTest, SubmitFrameMetadataRetry) {
SetInputFrameFiles({kBaselineFrame0});
ASSERT_EQ(AcceleratedVideoDecoder::kConfigChange, Decode());
......
......@@ -6,6 +6,7 @@
#include <va/va.h>
#include "base/memory/aligned_memory.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "media/base/cdm_context.h"
......@@ -35,6 +36,13 @@ static constexpr uint8_t kZigzagScan8x8[64] = {
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63};
int GetSliceHeaderCounter() {
// Needs to be static in case there are multiple decoder instances active at
// once, in which case they all need unique values.
static base::AtomicSequenceNumber parsed_slice_hdr_counter;
return parsed_slice_hdr_counter.GetNext();
}
} // namespace
H264VaapiVideoDecoderDelegate::H264VaapiVideoDecoderDelegate(
......@@ -180,6 +188,158 @@ DecodeStatus H264VaapiVideoDecoderDelegate::SubmitFrameMetadata(
return success ? DecodeStatus::kOk : DecodeStatus::kFail;
}
DecodeStatus H264VaapiVideoDecoderDelegate::ParseEncryptedSliceHeader(
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) {
DCHECK(slice_header_out);
DCHECK(!subsamples.empty());
DCHECK_EQ(subsamples[0].clear_bytes, 1u);
// Encrypted slice header parsing is done by submitting the encryption
// parameters and the encrypted slice header; the vaEndPicture call then
// blocks while the hardware decrypts and parses the header parameters. We use
// VACencStatusBuf, which allows us to extract the slice header parameters of
// interest and return them to the caller.
VAEncryptionParameters crypto_params;
// Don't use the VAEncryptionSegmentInfo vector in the class since we do not
// need to hold this data across calls.
std::vector<VAEncryptionSegmentInfo> segment_info;
ProtectedSessionState state = SetupDecryptDecode(
true /* full sample */, size, &crypto_params, &segment_info, subsamples);
if (state == ProtectedSessionState::kFailed) {
LOG(ERROR) << "ParseEncryptedSliceHeader fails because we couldn't setup "
"the protected session";
return DecodeStatus::kFail;
} else if (state != ProtectedSessionState::kCreated) {
return DecodeStatus::kTryAgain;
}
// For encrypted header parsing, we need to also send the SPS and PPS. Both of
// those and the slice NALU need to be prefixed with the 0x000001 start code.
constexpr size_t kStartCodeSize = 3;
constexpr size_t kExtraDataBytes = 3 * kStartCodeSize;
// Adjust the first segment length and init length to compensate for inserting
// the SPS, PPS and 3 start codes.
segment_info.back().segment_length +=
sps_nalu_data.size() + pps_nalu_data.size() + kExtraDataBytes;
segment_info.back().init_byte_length +=
sps_nalu_data.size() + pps_nalu_data.size() + kExtraDataBytes;
crypto_params.status_report_index = GetSliceHeaderCounter();
// This is based on a sample from Intel for how to use this API.
constexpr size_t kDecryptQuerySizeAndAlignment = 4096;
std::unique_ptr<void, base::AlignedFreeDeleter> surface_memory(
base::AlignedAlloc(kDecryptQuerySizeAndAlignment,
kDecryptQuerySizeAndAlignment));
constexpr size_t kVaQueryCencBufferSize = 2048;
auto back_buffer_mem = std::make_unique<uint8_t[]>(kVaQueryCencBufferSize);
VACencStatusBuf* status_buf =
reinterpret_cast<VACencStatusBuf*>(surface_memory.get());
status_buf->status = VA_ENCRYPTION_STATUS_INCOMPLETE;
status_buf->buf = back_buffer_mem.get();
status_buf->buf_size = kVaQueryCencBufferSize;
auto slice_param_buf = std::make_unique<VACencSliceParameterBufferH264>();
status_buf->slice_buf_type = VaCencSliceBufParamter;
status_buf->slice_buf_size = sizeof(VACencSliceParameterBufferH264);
status_buf->slice_buf = slice_param_buf.get();
constexpr int kCencStatusSurfaceDimension = 64;
auto buffer_ptr_alloc = std::make_unique<uintptr_t>();
uintptr_t* buffer_ptr = reinterpret_cast<uintptr_t*>(buffer_ptr_alloc.get());
buffer_ptr[0] = reinterpret_cast<uintptr_t>(surface_memory.get());
auto surface = vaapi_wrapper_->CreateVASurfaceForUserPtr(
gfx::Size(kCencStatusSurfaceDimension, kCencStatusSurfaceDimension),
buffer_ptr,
2 * kCencStatusSurfaceDimension * kCencStatusSurfaceDimension);
if (!surface) {
DVLOG(1) << "Failed allocating surface for decrypt status";
return DecodeStatus::kFail;
}
// Assemble the 'slice data', i.e. the SPS, PPS and slice NALUs, each
// prefixed by the 0x000001 start code.
std::vector<uint8_t> full_data;
const std::vector<uint8_t> start_code = {0u, 0u, 1u};
full_data.reserve(size + sps_nalu_data.size() + pps_nalu_data.size() +
kExtraDataBytes);
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), sps_nalu_data.begin(), sps_nalu_data.end());
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), pps_nalu_data.begin(), pps_nalu_data.end());
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), data, data + size);
if (!vaapi_wrapper_->SubmitBuffers({{VAEncryptionParameterBufferType,
sizeof(crypto_params), &crypto_params},
{VAProtectedSliceDataBufferType,
full_data.size(), full_data.data()}})) {
DVLOG(1) << "Failure submitting encrypted slice header buffers";
return DecodeStatus::kFail;
}
if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(surface->id())) {
LOG(ERROR) << "Failed executing for slice header decrypt";
return DecodeStatus::kFail;
}
if (status_buf->status != VA_ENCRYPTION_STATUS_SUCCESSFUL) {
LOG(ERROR) << "Failure status in encrypted header parsing: "
<< static_cast<int>(status_buf->status);
return DecodeStatus::kFail;
}
// Read the parsed slice header data back and populate the structure with it.
slice_header_out->idr_pic_flag = !!slice_param_buf->idr_pic_flag;
slice_header_out->nal_ref_idc = slice_param_buf->nal_ref_idc;
slice_header_out->nalu_data = data;
slice_header_out->nalu_size = size;
slice_header_out->slice_type = slice_param_buf->slice_type;
slice_header_out->frame_num = slice_param_buf->frame_number;
slice_header_out->idr_pic_id = slice_param_buf->idr_pic_id;
slice_header_out->pic_order_cnt_lsb = slice_param_buf->pic_order_cnt_lsb;
slice_header_out->delta_pic_order_cnt_bottom =
slice_param_buf->delta_pic_order_cnt_bottom;
slice_header_out->delta_pic_order_cnt0 =
slice_param_buf->delta_pic_order_cnt[0];
slice_header_out->delta_pic_order_cnt1 =
slice_param_buf->delta_pic_order_cnt[1];
slice_header_out->no_output_of_prior_pics_flag =
slice_param_buf->ref_pic_fields.bits.no_output_of_prior_pics_flag;
slice_header_out->long_term_reference_flag =
slice_param_buf->ref_pic_fields.bits.long_term_reference_flag;
slice_header_out->adaptive_ref_pic_marking_mode_flag =
slice_param_buf->ref_pic_fields.bits.adaptive_ref_pic_marking_mode_flag;
const size_t num_dec_ref_pics =
slice_param_buf->ref_pic_fields.bits.dec_ref_pic_marking_count;
if (num_dec_ref_pics > H264SliceHeader::kRefListSize) {
DVLOG(1) << "Invalid number of dec_ref_pics: " << num_dec_ref_pics;
return DecodeStatus::kFail;
}
for (size_t i = 0; i < num_dec_ref_pics; ++i) {
slice_header_out->ref_pic_marking[i].memory_mgmnt_control_operation =
slice_param_buf->memory_management_control_operation[i];
slice_header_out->ref_pic_marking[i].difference_of_pic_nums_minus1 =
slice_param_buf->difference_of_pic_nums_minus1[i];
slice_header_out->ref_pic_marking[i].long_term_pic_num =
slice_param_buf->long_term_pic_num[i];
slice_header_out->ref_pic_marking[i].long_term_frame_idx =
slice_param_buf->long_term_frame_idx[i];
slice_header_out->ref_pic_marking[i].max_long_term_frame_idx_plus1 =
slice_param_buf->max_long_term_frame_idx_plus1[i];
}
slice_header_out->full_sample_encryption = true;
slice_header_out->full_sample_index =
status_buf->status_report_index_feedback;
return DecodeStatus::kOk;
}
DecodeStatus H264VaapiVideoDecoderDelegate::SubmitSlice(
const H264PPS* pps,
const H264SliceHeader* slice_hdr,
......@@ -191,6 +351,19 @@ DecodeStatus H264VaapiVideoDecoderDelegate::SubmitSlice(
const std::vector<SubsampleEntry>& subsamples) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
TRACE_EVENT0("media,gpu", "H264VaapiVideoDecoderDelegate::SubmitSlice");
if (slice_hdr->full_sample_encryption) {
// We do not need to submit all the slice data; instead we just submit the
// index for what was already sent for parsing. The HW decoder already has
// the full slice data from when we decrypted the header.
VACencStatusParameters cenc_status = {};
cenc_status.status_report_index_feedback = slice_hdr->full_sample_index;
return vaapi_wrapper_->SubmitBuffer(VACencStatusParameterBufferType,
sizeof(VACencStatusParameters),
&cenc_status)
? DecodeStatus::kOk
: DecodeStatus::kFail;
}
bool uses_crypto = false;
VAEncryptionParameters crypto_params = {};
if ((!subsamples.empty() && subsamples[0].cypher_bytes) ||
......
......@@ -5,6 +5,7 @@
#ifndef MEDIA_GPU_VAAPI_H264_VAAPI_VIDEO_DECODER_DELEGATE_H_
#define MEDIA_GPU_VAAPI_H264_VAAPI_VIDEO_DECODER_DELEGATE_H_
#include "base/atomic_sequence_num.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
#include "media/gpu/h264_decoder.h"
......@@ -39,6 +40,13 @@ class H264VaapiVideoDecoderDelegate : public H264Decoder::H264Accelerator,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
scoped_refptr<H264Picture> pic) override;
Status ParseEncryptedSliceHeader(
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples,
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) override;
Status SubmitSlice(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
......
......@@ -783,11 +783,13 @@ void VaapiVideoDecoder::SetState(State state) {
case State::kDecoding:
DCHECK(state_ == State::kWaitingForInput ||
state_ == State::kWaitingForOutput ||
state_ == State::kChangingResolution);
state_ == State::kChangingResolution ||
state_ == State::kWaitingForProtected);
break;
case State::kResetting:
DCHECK(state_ == State::kWaitingForInput ||
state_ == State::kWaitingForOutput || state_ == State::kDecoding);
state_ == State::kWaitingForOutput || state_ == State::kDecoding ||
state_ == State::kWaitingForProtected);
ClearDecodeTaskQueue(DecodeStatus::ABORTED);
break;
case State::kChangingResolution:
......
......@@ -111,6 +111,11 @@ VaapiVideoDecoderDelegate::SetupDecryptDecode(
crypto_params->encryption_type =
full_sample_ ? VA_ENCRYPTION_TYPE_CENC_CTR : VA_ENCRYPTION_TYPE_CTR_128;
} else {
if (full_sample_) {
LOG(ERROR) << "CBC encryption is not supported for CENCv1";
protected_session_state_ = ProtectedSessionState::kFailed;
return protected_session_state_;
}
crypto_params->encryption_type = VA_ENCRYPTION_TYPE_CBC;
}
......
......@@ -484,6 +484,11 @@ bool IsBlockedDriver(VaapiWrapper::CodecMode mode, VAProfile va_profile) {
return false;
}
bool IsValidVABufferType(VABufferType type) {
return type < VABufferTypeMax || type == VAEncryptionParameterBufferType ||
type == VACencStatusParameterBufferType;
}
// This class is a wrapper around its |va_display_| (and its associated
// |va_lock_|) to guarantee mutual exclusion and singleton behaviour.
class VADisplayState {
......@@ -826,7 +831,7 @@ bool GetRequiredAttribs(const base::Lock* va_lock,
#if BUILDFLAG(IS_CHROMEOS_ASH)
if (mode == VaapiWrapper::kDecodeProtected && profile != VAProfileProtected) {
required_attribs->push_back(
{VAConfigAttribEncryption, VA_ENCRYPTION_TYPE_CTR_128});
{VAConfigAttribEncryption, VA_ENCRYPTION_TYPE_CENC_CTR});
}
#endif
......@@ -2014,6 +2019,47 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
base::BindOnce(&VaapiWrapper::DestroySurface, this));
}
scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForUserPtr(
const gfx::Size& size,
uintptr_t* buffers,
size_t buffer_size) {
VASurfaceAttribExternalBuffers va_attrib_extbuf{};
va_attrib_extbuf.buffers = buffers;
va_attrib_extbuf.data_size = buffer_size;
va_attrib_extbuf.num_buffers = 1u;
va_attrib_extbuf.width = size.width();
va_attrib_extbuf.height = size.height();
std::fill(va_attrib_extbuf.pitches, va_attrib_extbuf.pitches + 3,
size.width());
va_attrib_extbuf.pixel_format = VA_FOURCC_NV12;
std::vector<VASurfaceAttrib> va_attribs(2);
va_attribs[0].flags = VA_SURFACE_ATTRIB_SETTABLE;
va_attribs[0].type = VASurfaceAttribMemoryType;
va_attribs[0].value.type = VAGenericValueTypeInteger;
va_attribs[0].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_USER_PTR;
va_attribs[1].flags = VA_SURFACE_ATTRIB_SETTABLE;
va_attribs[1].type = VASurfaceAttribExternalBufferDescriptor;
va_attribs[1].value.type = VAGenericValueTypePointer;
va_attribs[1].value.value.p = &va_attrib_extbuf;
VASurfaceID va_surface_id = VA_INVALID_ID;
const unsigned int va_format = VA_RT_FORMAT_YUV420;
{
base::AutoLock auto_lock(*va_lock_);
VAStatus va_res = vaCreateSurfaces(
va_display_, va_format, base::checked_cast<unsigned int>(size.width()),
base::checked_cast<unsigned int>(size.height()), &va_surface_id, 1,
&va_attribs[0], va_attribs.size());
VA_SUCCESS_OR_RETURN(va_res, VaapiFunctions::kVACreateSurfaces_Importing,
nullptr);
}
DVLOG(2) << __func__ << " " << va_surface_id;
return new VASurface(va_surface_id, size, va_format,
base::BindOnce(&VaapiWrapper::DestroySurface, this));
}
std::unique_ptr<NativePixmapAndSizeInfo>
VaapiWrapper::ExportVASurfaceAsNativePixmapDmaBuf(
const ScopedVASurface& scoped_va_surface) {
......@@ -2833,7 +2879,7 @@ bool VaapiWrapper::SubmitBuffer_Locked(const VABufferDescriptor& va_buffer) {
TRACE_EVENT0("media,gpu", "VaapiWrapper::SubmitBuffer_Locked");
va_lock_->AssertAcquired();
DCHECK_LT(va_buffer.type, VABufferTypeMax);
DCHECK(IsValidVABufferType(va_buffer.type));
DCHECK(va_buffer.data);
unsigned int va_buffer_size;
......@@ -2864,7 +2910,7 @@ bool VaapiWrapper::MapAndCopy_Locked(VABufferID va_buffer_id,
va_lock_->AssertAcquired();
DCHECK_NE(va_buffer_id, VA_INVALID_ID);
DCHECK_LT(va_buffer.type, VABufferTypeMax);
DCHECK(IsValidVABufferType(va_buffer.type));
DCHECK(va_buffer.data);
ScopedVABufferMapping mapping(
......
......@@ -310,6 +310,16 @@ class MEDIA_GPU_EXPORT VaapiWrapper
scoped_refptr<VASurface> CreateVASurfaceForPixmap(
scoped_refptr<gfx::NativePixmap> pixmap);
// Creates a self-releasing VASurface from |buffers|. The ownership of the
// surface is transferred to the caller. |buffers| should be a pointer array
// of size 1, with |buffer_size| being the size of the buffer it points to.
// |size| should be the desired surface dimensions (which do not need to map
// to |buffer_size| in any particular way). |buffers| must be kept alive while
// the VASurface is in use and for accessing the data after the operation is
// complete.
scoped_refptr<VASurface> CreateVASurfaceForUserPtr(const gfx::Size& size,
uintptr_t* buffers,
size_t buffer_size);
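// Editor's note (not part of this change): a hedged usage sketch mirroring
// how H264VaapiVideoDecoderDelegate::ParseEncryptedSliceHeader() above calls
// this; sizes and variable names are illustrative.
//
//   constexpr size_t kAlignment = 4096;
//   std::unique_ptr<void, base::AlignedFreeDeleter> surface_memory(
//       base::AlignedAlloc(kAlignment, kAlignment));
//   auto buffer_ptr_alloc = std::make_unique<uintptr_t>();
//   *buffer_ptr_alloc = reinterpret_cast<uintptr_t>(surface_memory.get());
//   constexpr int kDim = 64;
//   scoped_refptr<VASurface> surface = vaapi_wrapper->CreateVASurfaceForUserPtr(
//       gfx::Size(kDim, kDim), buffer_ptr_alloc.get(), 2 * kDim * kDim);
//   // |surface_memory| and |buffer_ptr_alloc| must outlive all use of
//   // |surface| and of the data written through it.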
// Syncs and exports |va_surface| as a gfx::NativePixmapDmaBuf. Currently, the
// only VAAPI surface pixel formats supported are VA_FOURCC_IMC3 and
// VA_FOURCC_NV12.
......
......@@ -359,6 +359,14 @@ struct MEDIA_EXPORT H264SliceHeader {
// Size in bits of dec_ref_pic_marking() syntax element.
size_t dec_ref_pic_marking_bit_size;
size_t pic_order_cnt_bit_size;
// This is set when we are using full sample encryption; in that case only the
// portions needed for DPB management are filled in, the rest will already be
// known by the accelerator and we will not need to specify it.
bool full_sample_encryption;
// This is used by some accelerators to handle decoding after slice header
// parsing.
uint32_t full_sample_index;
};
struct H264SEIRecoveryPoint {
......