Commit da0273e3 authored by imcheng@chromium.org's avatar imcheng@chromium.org

This tool demonstrates the use of the Media Foundation H.264 decoder as a...

This tool demonstrates the use of the Media Foundation H.264 decoder as a standalone Media Foundation Transform (MFT). See README for more information.


Review URL: http://codereview.chromium.org/3044019

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@54078 0039d316-1c4b-4281-b951-d872f2087c98
parent e5ae223b
......@@ -33,6 +33,7 @@ class VideoFrame : public StreamSample {
RGBA, // 32bpp RGBA packed 8:8:8:8
YV12, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
YV16, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
NV12, // 12bpp YVU planar 1x1 Y, 2x2 UV interleaving samples
EMPTY, // An empty frame.
ASCII, // A frame with ASCII content. For testing only.
};
......@@ -41,6 +42,8 @@ class VideoFrame : public StreamSample {
TYPE_SYSTEM_MEMORY,
TYPE_OMXBUFFERHEAD,
TYPE_EGL_IMAGE,
TYPE_MFBUFFER,
TYPE_DIRECT3DSURFACE
};
public:
......
......@@ -416,6 +416,30 @@
},
},
},
{
'target_name': 'mft_h264_decoder',
'type': 'executable',
'dependencies': [
'media',
'../base/base.gyp:base',
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
],
'include_dirs': [
'..',
],
'sources': [
'mf/main.cc',
'mf/h264mft.cc',
'mf/h264mft.h',
'mf/file_reader_util.cc',
'mf/file_reader_util.h',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
},
},
},
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
......
This tool demonstrates the use of the Media Foundation H.264 decoder as a
standalone Media Foundation Transform (MFT). The H.264 decoder takes sample
objects (IMFSample) containing Annex B streams as input, and outputs decoded
NV12 video frames as output, contained in a buffer object (if DXVA is not
enabled) or a Direct3D surface (if DXVA is enabled.)
This tool uses ffmpeg's parser and bitstream converter to read a file
containing H.264 video and outputs packets containing Annex B streams which are
then fed into the H.264 decoder. This tool also demonstrates the use of the
H.264 decoder as a state machine, and the steps taken in each state.
Requirements: Windows 7
Note1: This tool currently does decoding only. There is no visible output
besides the log entry containing state of the decoder at each input/output
step.
Note2: There is a mysterious off-by-one decoded frame count when DXVA is
enabled.
Note3: This tool requires the ffmpeg library to have the H.264 codec and Annex
B bitstream filter. You might need to build your own, or grab one from
http://ffmpeg.arrozcru.org/autobuilds/
Note4: A single H264Mft instance handles only one H.264 video stream.
Feeding streams from more than one video into a single instance
may result in undefined behavior.
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Borrowed from media/tools/omx_test/file_reader_util.cc.
// Added some functionalities related to timestamps on packets.
#include "media/mf/file_reader_util.h"
#include <algorithm>
#include "base/scoped_comptr_win.h"
#include "base/logging.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/bitstream_converter.h"
#include "media/mf/h264mft.h"
namespace media {
//////////////////////////////////////////////////////////////////////////////
// FFmpegFileReader
// Constructs a reader for |filename|. No FFmpeg work happens here; the file
// is not opened until Initialize() is called.
FFmpegFileReader::FFmpegFileReader(const std::string& filename)
    : filename_(filename),
      format_context_(NULL),
      codec_context_(NULL),
      target_stream_(-1),   // -1 means "no video stream found yet".
      converter_(NULL),
      end_of_stream_(false) {
}
// Closes the input file if Initialize() opened it. |codec_context_| points
// into |format_context_| and is not freed separately.
FFmpegFileReader::~FFmpegFileReader() {
  if (format_context_)
    av_close_input_file(format_context_);
}
bool FFmpegFileReader::Initialize() {
int result = av_open_input_file(&format_context_, filename_.c_str(),
NULL, 0, NULL);
if (result < 0) {
switch (result) {
case AVERROR_NOFMT:
LOG(ERROR) << "Error: File format not supported "
<< filename_;
break;
default:
LOG(ERROR) << "Error: Could not open input for "
<< filename_ << ": " << result;
break;
}
return false;
}
if (av_find_stream_info(format_context_) < 0) {
LOG(ERROR) << "can't use FFmpeg to parse stream info";
return false;
}
for (size_t i = 0; i < format_context_->nb_streams; ++i) {
codec_context_ = format_context_->streams[i]->codec;
// Find the video stream.
if (codec_context_->codec_type == CODEC_TYPE_VIDEO) {
target_stream_ = i;
break;
}
}
if (target_stream_ == -1) {
LOG(ERROR) << "no video in the stream";
return false;
}
// Initialize the bitstream filter if needed.
// TODO(hclam): find a better way to identify mp4 container.
if (codec_context_->codec_id == CODEC_ID_H264) {
converter_.reset(new media::FFmpegBitstreamConverter(
"h264_mp4toannexb", codec_context_));
} else if (codec_context_->codec_id == CODEC_ID_MPEG4) {
converter_.reset(new media::FFmpegBitstreamConverter(
"mpeg4video_es", codec_context_));
} else if (codec_context_->codec_id == CODEC_ID_WMV3) {
converter_.reset(new media::FFmpegBitstreamConverter(
"vc1_asftorcv", codec_context_));
} else if (codec_context_->codec_id == CODEC_ID_VC1) {
converter_.reset(new media::FFmpegBitstreamConverter(
"vc1_asftoannexg", codec_context_));
}
if (converter_.get() && !converter_->Initialize()) {
converter_.reset();
LOG(ERROR) << "failed to initialize h264_mp4toannexb filter";
return false;
}
return true;
}
// FileReader implementation: reads the next video packet without timestamp
// information. See the four-argument overload for ownership semantics.
void FFmpegFileReader::Read(uint8** output, int* size) {
  Read(output, size, NULL, NULL);
}
// Reads the next packet from the target video stream, runs it through the
// bitstream converter (if any), and returns a newly allocated copy of the
// packet data in |*output| (ownership passes to the caller) with its size in
// |*size|. |duration| and |sample_time| are optional out-params filled with
// the packet's duration and presentation time in AVCodecContext::time_base
// units. On end of stream or error, |*output| is NULL and |*size| is 0.
void FFmpegFileReader::Read(uint8** output, int* size, int* duration,
                            int64* sample_time) {
  if (!format_context_ || !codec_context_ || target_stream_ == -1) {
    *size = 0;
    *output = NULL;
    return;
  }
  AVPacket packet;
  bool found = false;
  while (!found) {
    int result = av_read_frame(format_context_, &packet);
    if (result < 0) {
      *output = NULL;
      *size = 0;
      end_of_stream_ = true;
      return;
    }
    if (packet.stream_index == target_stream_) {
      if (converter_.get() && !converter_->ConvertPacket(&packet)) {
        // NOTE(review): on conversion failure the unconverted packet is still
        // handed to the caller — confirm this fallback is intended.
        LOG(ERROR) << "failed to convert AVPacket";
      }
      *output = new uint8[packet.size];
      if (*output == NULL) {
        LOG(ERROR) << "Failed to allocate buffer for annex b stream";
        *size = 0;
        // Fix: release the packet before bailing out; the original leaked it
        // on this early-return path.
        av_free_packet(&packet);
        return;
      }
      *size = packet.size;
      memcpy(*output, packet.data, packet.size);
      if (duration) {
        if (packet.duration == 0) {
          LOG(WARNING) << "Packet duration not known";
        }
        // This is in AVCodecContext::time_base units
        *duration = packet.duration;
      }
      if (sample_time) {
        if (packet.pts == AV_NOPTS_VALUE) {
          LOG(ERROR) << "Packet presentation time not known";
          *sample_time = 0L;
        } else {
          // This is in AVCodecContext::time_base units
          *sample_time = packet.pts;
        }
      }
      found = true;
    }
    // Safe to free here even for the found packet: its data was copied above.
    av_free_packet(&packet);
  }
}
// Returns the stream's time base as a rational |*num| / |*denom|.
// Returns false if the reader is uninitialized or the denominator is zero
// (in which case |*num| is reset to 0 as well).
bool FFmpegFileReader::GetFrameRate(int* num, int *denom) const {
  if (!codec_context_)
    return false;
  *num = codec_context_->time_base.num;
  *denom = codec_context_->time_base.den;
  // Fix: the original tested the |denom| pointer (always non-NULL here)
  // against 0 instead of the value it points to, so the zero-denominator
  // guard never fired.
  if (*denom == 0) {
    *num = 0;
    return false;
  }
  return true;
}
bool FFmpegFileReader::GetWidth(int* width) const {
if (!codec_context_)
return false;
*width = codec_context_->width;
return true;
}
bool FFmpegFileReader::GetHeight(int* height) const {
if (!codec_context_)
return false;
*height = codec_context_->height;
return true;
}
bool FFmpegFileReader::GetAspectRatio(int* num, int* denom) const {
if (!codec_context_)
return false;
AVRational aspect_ratio = codec_context_->sample_aspect_ratio;
if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
return false;
*num = aspect_ratio.num;
*denom = aspect_ratio.den;
return true;
}
// Converts |time_base_unit| (in AVCodecContext::time_base units) into the
// 100-nanosecond units used by Media Foundation.
int64 FFmpegFileReader::ConvertFFmpegTimeBaseTo100Ns(
    int64 time_base_unit) const {
  // FFmpeg units after time base conversion seems to be actually given in
  // milliseconds (instead of seconds...) so we need to multiply it by a factor
  // of 10,000 to convert it into units compatible with MF.
  CHECK(codec_context_) << "Codec context needs to be initialized";
  // NOTE(review): this multiplies before dividing (preserves precision) but
  // can overflow int64 for very large timestamps, and assumes
  // time_base.den != 0 — confirm upstream guarantees both.
  return time_base_unit * 10000 * codec_context_->time_base.num /
         codec_context_->time_base.den;
}
} // namespace media
// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
//
// Borrowed from media/tools/omx_test/file_reader_util.h.
// Added some functionalities related to timestamps on packets and Media
// Foundation.
#ifndef MEDIA_MF_FILE_READER_UTIL_H_
#define MEDIA_MF_FILE_READER_UTIL_H_
#include <string>
#include "base/basictypes.h"
#include "base/scoped_handle.h"
#include "base/scoped_ptr.h"
struct AVCodecContext;
struct AVFormatContext;
namespace media {
class BitstreamConverter;
// A class to help reading and parsing input file for use in omx_test.
class FileReader {
 public:
  virtual ~FileReader() {}

  // Initialize FileReader object, returns true if successful.
  virtual bool Initialize() = 0;

  // Read the file into |output|, and output the number of bytes read to
  // |size|. Ownership of the allocated |*output| buffer passes to the caller.
  virtual void Read(uint8** output, int* size) = 0;
};
// FileReader that demuxes a video file with FFmpeg and hands out packets
// (converted by a bitstream filter when required, e.g. to Annex B for H.264)
// along with per-packet timing information.
class FFmpegFileReader : public FileReader {
 public:
  explicit FFmpegFileReader(const std::string& filename);
  virtual ~FFmpegFileReader();

  // FileReader implementation.
  virtual bool Initialize();
  virtual void Read(uint8** output, int* size);

  // Reads a video packet, converts it into Annex B stream, and allocates a
  // buffer to |*output| and copies the contents into it.
  // |duration| and |sample_time| (optional) are given in
  // AVCodecContext::time_base units.
  void Read(uint8** output, int* size, int* duration, int64* sample_time);

  // Stream-property accessors; each returns false when the reader is not
  // initialized (or the property is unavailable).
  bool GetFrameRate(int* num, int* denom) const;
  bool GetWidth(int* width) const;
  bool GetHeight(int* height) const;
  bool GetAspectRatio(int* num, int* denom) const;

  // Converts a value in AVCodecContext::time_base units into 100ns units.
  int64 ConvertFFmpegTimeBaseTo100Ns(int64 time_base_unit) const;

  // True once Read() has exhausted the input stream.
  bool end_of_stream() const { return end_of_stream_; }

 private:
  std::string filename_;
  AVFormatContext* format_context_;  // Owned; closed in the destructor.
  AVCodecContext* codec_context_;    // Owned by |format_context_|.
  int target_stream_;                // Video stream index; -1 if none found.
  scoped_ptr<media::BitstreamConverter> converter_;
  bool end_of_stream_;

  DISALLOW_COPY_AND_ASSIGN(FFmpegFileReader);
};
} // namespace media
#endif // MEDIA_MF_FILE_READER_UTIL_H_
This diff is collapsed.
// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
//
// Decodes H.264 Annex B streams using the Media Foundation H.264 decoder as
// a standalone Media Foundation Transform (MFT).
// Note: A single H264Mft instance is only for 1 H.264 video stream only.
// Inputting streams consisting of more than 1 video to a single instance
// may result in undefined behavior.
#ifndef MEDIA_MF_H264MFT_H_
#define MEDIA_MF_H264MFT_H_
#include <string>
#include <mfidl.h>
#include "base/basictypes.h"
#include "base/scoped_ptr.h"
#include "base/scoped_comptr_win.h"
#include "media/base/video_frame.h"
struct IDirect3DDeviceManager9;
struct IMFSample;
struct IMFTransform;
namespace media {
// A decoder that takes samples of Annex B streams then outputs decoded frames.
// A decoder that takes samples of Annex B streams then outputs decoded frames.
class H264Mft {
 public:
  // Result of a GetOutput() call.
  enum DecoderOutputState {
    kOutputOk = 0,             // A decoded frame was produced.
    kResetOutputStreamFailed,  // Resetting the output stream failed.
    kResetOutputStreamOk,      // Output stream reset; call GetOutput() again.
    kNeedMoreInput,            // More input is needed via SendInput().
    kNoMoreOutput,             // Nothing left to output (after draining).
    kUnspecifiedError,         // Unknown decoder failure.
    kNoMemory,                 // Not enough memory for the sample.
    kOutputSampleError         // Inconsistent output sample; may retry.
  };
  explicit H264Mft(bool use_dxva);
  ~H264Mft();

  // Initializes the decoder. |dev_manager| is not required if the decoder does
  // not use DXVA.
  // If the other arguments are not known, leave them as 0. They can be
  // provided to the decoder to try to avoid an initial output format change,
  // but it is not necessary to have them.
  bool Init(IDirect3DDeviceManager9* dev_manager,
            int frame_rate_num, int frame_rate_denom,
            int width, int height,
            int aspect_num, int aspect_denom);

  // Sends an Annex B stream to the decoder. The times here should be given
  // in 100ns units. This creates a IMFSample, copies the stream over to the
  // sample, and sends the sample to the decoder.
  // Returns: true if the sample was sent successfully.
  bool SendInput(uint8* data, int size, int64 timestamp, int64 duration);

  // Tries to get an output sample from the decoder.
  // Returns: status of the decoder, and if successful, a decoded sample.
  DecoderOutputState GetOutput(scoped_refptr<VideoFrame>* decoded_frame);

  // Sends a drain message to the decoder to indicate no more input will be
  // sent. SendInput() should not be called after calling this method.
  // Returns: true if the drain message was sent successfully.
  bool SendDrainMessage();

  // Simple state accessors.
  bool initialized() const { return initialized_; }
  bool use_dxva() const { return use_dxva_; }
  bool drain_message_sent() const { return drain_message_sent_; }
  int in_buffer_size() const { return in_buffer_size_; }
  int out_buffer_size() const { return out_buffer_size_; }
  int frames_read() const { return frames_read_; }
  int frames_decoded() const { return frames_decoded_; }
  int width() const { return width_; }
  int height() const { return height_; }

 private:
  // Initialization helpers, called in sequence from Init().
  bool InitDecoder(IDirect3DDeviceManager9* dev_manager,
                   int frame_rate_num, int frame_rate_denom,
                   int width, int height,
                   int aspect_num, int aspect_denom);
  bool CheckDecoderProperties();
  bool CheckDecoderDxvaSupport();
  bool SetDecoderD3d9Manager(IDirect3DDeviceManager9* dev_manager);
  bool SetDecoderMediaTypes(int frame_rate_num, int frame_rate_denom,
                            int width, int height,
                            int aspect_num, int aspect_denom);
  bool SetDecoderInputMediaType(int frame_rate_num, int frame_rate_denom,
                                int width, int height,
                                int aspect_num, int aspect_denom);
  bool SetDecoderOutputMediaType(const GUID subtype);
  bool SendStartMessage();
  bool GetStreamsInfoAndBufferReqs();

  ScopedComPtr<IMFTransform> decoder_;
  bool initialized_;
  bool use_dxva_;
  bool drain_message_sent_;
  // Minimum input and output buffer sizes as required by the decoder.
  int in_buffer_size_;
  int out_buffer_size_;
  // Statistics: samples sent in / frames produced.
  int frames_read_;
  int frames_decoded_;
  int width_;
  int height_;
  int stride_;

  DISALLOW_COPY_AND_ASSIGN(H264Mft);
};
} // namespace media
#endif // MEDIA_MF_H264MFT_H_
// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
//
// Demonstrates the use of H264Mft.
#include <d3d9.h>
#include <dxva2api.h>
#include <mfapi.h>
#include "base/command_line.h"
#include "base/file_path.h"
#include "base/logging.h"
#include "base/scoped_comptr_win.h"
#include "base/scoped_ptr.h"
#include "base/time.h"
#include "media/base/media.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/file_protocol.h"
#include "media/mf/file_reader_util.h"
#include "media/mf/h264mft.h"
using media::FFmpegFileReader;
using media::H264Mft;
using media::VideoFrame;
namespace {
// Prints command-line usage information to stderr.
void usage() {
  // Fix: a string literal is const data; binding it to a non-const char* is a
  // deprecated conversion (ill-formed in C++11 and later).
  static const char usage_msg[] =
      "Usage: h264mft [--enable-dxva] --input-file=FILE\n"
      "enable-dxva: Enables hardware accelerated decoding\n"
      "To display this message: h264mft --help";
  fprintf(stderr, "%s\n", usage_msg);
}
// Loads the media library and registers FFmpeg's codecs, demuxers, and the
// custom file protocol used by FFmpegFileReader.
// Returns true on success.
static bool InitFFmpeg() {
  if (!media::InitializeMediaLibrary(FilePath()))
    return false;
  avcodec_init();
  av_register_all();
  av_register_protocol(&kFFmpegFileProtocol);
  return true;
}
// Initializes COM (apartment-threaded) and starts up Media Foundation.
// On MFStartup failure, COM is uninitialized again before returning.
// Returns true only if both succeeded.
bool InitComLibraries() {
  HRESULT hr = CoInitializeEx(
      NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
  if (FAILED(hr)) {
    LOG(ERROR) << "CoInit fail";
    return false;
  }
  if (FAILED(MFStartup(MF_VERSION, MFSTARTUP_FULL))) {
    LOG(ERROR) << "MFStartup fail";
    CoUninitialize();
    return false;
  }
  return true;
}
// Shuts down Media Foundation (logging a warning on failure), then
// uninitializes COM. Mirrors InitComLibraries() in reverse order.
void ShutdownComLibraries() {
  if (FAILED(MFShutdown()))
    LOG(WARNING) << "Warning: MF failed to shutdown";
  CoUninitialize();
}
// Creates a Direct3D9 device bound to |video_window| and wraps it in a DXVA2
// device manager for use by the decoder.
// On success, returns the device manager (caller owns the reference) and
// stores owning references to the D3D object and device in |direct3d| and
// |device|. Returns NULL on failure.
IDirect3DDeviceManager9* CreateD3DDevManager(HWND video_window,
                                             int width,
                                             int height,
                                             IDirect3D9** direct3d,
                                             IDirect3DDevice9** device) {
  CHECK(video_window != NULL);
  CHECK(direct3d != NULL);
  CHECK(device != NULL);
  ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
  ScopedComPtr<IDirect3D9> d3d;
  // Attach() takes over the reference returned by Direct3DCreate9 without
  // adding another one.
  d3d.Attach(Direct3DCreate9(D3D_SDK_VERSION));
  if (d3d == NULL) {
    LOG(ERROR) << "Failed to create D3D9";
    return NULL;
  }
  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = width;
  present_params.BackBufferHeight = height;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = video_window;
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;
  ScopedComPtr<IDirect3DDevice9> temp_device;
  // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
  // (Is it even needed for just video decoding?)
  HRESULT hr = d3d->CreateDevice(D3DADAPTER_DEFAULT,
                                 D3DDEVTYPE_HAL,
                                 video_window,
                                 D3DCREATE_HARDWARE_VERTEXPROCESSING,
                                 &present_params,
                                 temp_device.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to create D3D Device";
    return NULL;
  }
  UINT dev_manager_reset_token = 0;
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
                                         dev_manager.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Couldn't create D3D Device manager";
    return NULL;
  }
  // Bind the device to the manager; the reset token authenticates future
  // resets against this manager instance.
  hr = dev_manager->ResetDevice(temp_device.get(), dev_manager_reset_token);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set device to device manager";
    return NULL;
  }
  // Detach so the ScopedComPtrs do not release the references being handed
  // back to the caller. On the failure paths above, the scoped pointers
  // release everything automatically.
  *direct3d = d3d.Detach();
  *device = temp_device.Detach();
  return dev_manager.Detach();
}
// Example usage of how to get a decoded frame from the decoder.
// Feeds input from |reader| into |decoder| until one output frame is
// produced. Returns true if |*decoded_frame| received a frame; false when
// the stream is exhausted or the decoder reports a fatal error.
bool GetDecodedSample(FFmpegFileReader* reader, H264Mft* decoder,
                      scoped_refptr<VideoFrame>* decoded_frame) {
  // Keep feeding the MFT with inputs until it spits out an output.
  for (;;) {
    // First check if there is output.
    H264Mft::DecoderOutputState state = decoder->GetOutput(decoded_frame);
    switch (state) {
      case H264Mft::kOutputOk:
        LOG(INFO) << "Got an output from decoder";
        return true;
      case H264Mft::kResetOutputStreamFailed:
        LOG(ERROR) << "Reset output stream failed, quitting";
        return false;
      case H264Mft::kResetOutputStreamOk:
        LOG(INFO) << "Reset output stream, try to get output again";
        continue;
      case H264Mft::kNeedMoreInput: {
        LOG(INFO) << "Need more input";
        uint8* input_stream_dummy;
        int size;
        int duration;
        int64 timestamp;
        reader->Read(&input_stream_dummy, &size, &duration, &timestamp);
        // Take ownership of the buffer allocated by Read().
        scoped_array<uint8> input_stream(input_stream_dummy);
        if (input_stream.get() == NULL) {
          LOG(INFO) << "No more input, sending drain message to decoder";
          if (!decoder->SendDrainMessage()) {
            LOG(ERROR) << "Failed to send drain message, quitting";
            return false;
          }
          continue;  // Try reading the rest of the drained outputs.
        }
        // We read an input stream, we can feed it into the decoder.
        if (!decoder->SendInput(input_stream.get(), size,
                reader->ConvertFFmpegTimeBaseTo100Ns(timestamp),
                reader->ConvertFFmpegTimeBaseTo100Ns(duration))) {
          LOG(ERROR) << "Failed to send input, dropping frame...";
        }
        continue;  // Try reading the output after attempting to send an input.
      }
      case H264Mft::kNoMoreOutput:
        LOG(INFO) << "Decoder has processed everything, quitting";
        return false;
      case H264Mft::kUnspecifiedError:
        LOG(ERROR) << "Unknown error, quitting";
        return false;
      case H264Mft::kNoMemory:
        LOG(ERROR) << "Not enough memory for sample, quitting";
        return false;
      case H264Mft::kOutputSampleError:
        LOG(ERROR) << "Inconsistent sample, dropping...";
        continue;
      default:
        NOTREACHED();
        // Fix: the original could fall off the end of this bool-returning
        // function (undefined behavior if the unreachable path were hit).
        return false;
    }
  }
}
// Releases the COM buffer reference held by |frame|'s private buffer when the
// frame wraps an MF buffer or a Direct3D surface; otherwise does nothing.
static void ReleaseOutputBuffer(VideoFrame* frame) {
  if (frame->type() != VideoFrame::TYPE_MFBUFFER &&
      frame->type() != VideoFrame::TYPE_DIRECT3DSURFACE) {
    return;
  }
  static_cast<IMFMediaBuffer*>(frame->private_buffer())->Release();
}
// Decodes |input_file| with an H264Mft instance (DXVA-accelerated when
// |use_dxva| is set) until the stream is exhausted, then prints frame and
// timing statistics. Returns 0 on success, -1 on any setup failure.
int Run(bool use_dxva, const std::string& input_file) {
  scoped_ptr<FFmpegFileReader> reader(new FFmpegFileReader(input_file));
  if (reader.get() == NULL) {
    LOG(ERROR) << "Failed to create reader";
    return -1;
  }
  if (!reader->Initialize()) {
    LOG(ERROR) << "Failed to initialize reader";
    return -1;
  }
  // Stream properties are hints for the decoder; failing to obtain them is
  // not fatal (they stay 0, which H264Mft::Init() accepts).
  int frame_rate_num = 0, frame_rate_denom = 0;
  if (!reader->GetFrameRate(&frame_rate_num, &frame_rate_denom)) {
    LOG(WARNING) << "Failed to get frame rate from reader";
  }
  int width = 0, height = 0;
  if (!reader->GetWidth(&width) || !reader->GetHeight(&height)) {
    LOG(WARNING) << "Failed to get width/height from reader";
  }
  int aspect_ratio_num = 0, aspect_ratio_denom = 0;
  if (!reader->GetAspectRatio(&aspect_ratio_num, &aspect_ratio_denom)) {
    LOG(WARNING) << "Failed to get aspect ratio from reader";
  }
  // These must outlive the decoder when DXVA is used; declared before |mft|.
  ScopedComPtr<IDirect3D9> d3d9;
  ScopedComPtr<IDirect3DDevice9> device;
  ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
  if (use_dxva) {
    dev_manager.Attach(CreateD3DDevManager(GetDesktopWindow(),
                                           width,
                                           height,
                                           d3d9.Receive(),
                                           device.Receive()));
    if (dev_manager.get() == NULL) {
      LOG(ERROR) << "Cannot create D3D9 manager";
      return -1;
    }
  }
  scoped_ptr<H264Mft> mft(new H264Mft(use_dxva));
  if (mft.get() == NULL) {
    LOG(ERROR) << "Failed to create MFT";
    return -1;
  }
  // |dev_manager| is NULL when DXVA is disabled, which Init() allows.
  if (!mft->Init(dev_manager, frame_rate_num, frame_rate_denom, width, height,
                 aspect_ratio_num, aspect_ratio_denom)) {
    LOG(ERROR) << "Failed to initialize mft";
    return -1;
  }
  base::TimeDelta decode_time;
  while (true) {
    // Do nothing with the sample except to let it go out of scope
    scoped_refptr<VideoFrame> decoded_frame;
    base::Time decode_start(base::Time::Now());
    if (!GetDecodedSample(reader.get(), mft.get(), &decoded_frame))
      break;
    decode_time += base::Time::Now() - decode_start;
    ReleaseOutputBuffer(decoded_frame.get());
  }
  printf("All done, frames read: %d, frames decoded: %d\n",
         mft->frames_read(), mft->frames_decoded());
  printf("Took %lldms\n", decode_time.InMilliseconds());
  return 0;
}
} // namespace
// Entry point: parses flags, initializes FFmpeg and the COM/Media Foundation
// libraries, runs the decode loop, and tears everything down.
int main(int argc, char** argv) {
  CommandLine::Init(argc, argv);
  if (argc == 1) {
    fprintf(stderr, "Not enough arguments\n");
    usage();
    return -1;
  }
  const CommandLine& cmd_line = *CommandLine::ForCurrentProcess();
  if (cmd_line.HasSwitch("help")) {
    usage();
    return -1;
  }
  const bool use_dxva = cmd_line.HasSwitch("enable-dxva");
  const std::string input_file = cmd_line.GetSwitchValueASCII("input-file");
  if (input_file.empty()) {
    fprintf(stderr, "No input file provided\n");
    usage();
    return -1;
  }
  printf("enable-dxva: %d\n", use_dxva);
  printf("input-file: %s\n", input_file.c_str());
  if (!InitFFmpeg()) {
    LOG(ERROR) << "InitFFMpeg() failed";
    return -1;
  }
  if (!InitComLibraries()) {
    LOG(ERROR) << "InitComLibraries() failed";
    return -1;
  }
  const int ret = Run(use_dxva, input_file);
  ShutdownComLibraries();
  printf("Done\n");
  return ret;
}
......@@ -47,8 +47,9 @@ enum PixelFormat {
PixelFormatRgba = 5;
PixelFormatYv12 = 6;
PixelFormatYv16 = 7;
PixelFormatEmpty = 8;
PixelFormatAscii = 9;
PixelFormatNv12 = 8;
PixelFormatEmpty = 9;
PixelFormatAscii = 10;
}
// A message that denotes the beginning of an updating rectangle in an update
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment