Commit 7c035fee authored by vmpstr's avatar vmpstr Committed by Commit bot

media: Change auto to not deduce raw pointers.

This patch updates the code to prevent `auto` from deducing to a raw pointer; it uses `auto*` instead, making pointer-ness explicit at the declaration site.

R=dalecurtis@chromium.org
BUG=554600

Review-Url: https://codereview.chromium.org/2181163002
Cr-Commit-Position: refs/heads/master@{#407699}
parent b6b53857
......@@ -35,7 +35,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]),
numbers.size(), &kCFTypeArrayCallBacks));
for (auto& number : numbers) {
for (auto* number : numbers) {
CFRelease(number);
}
return array;
......@@ -49,7 +49,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegerAndFloat(int int_val,
base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
kCFAllocatorDefault, reinterpret_cast<const void**>(numbers.data()),
numbers.size(), &kCFTypeArrayCallBacks));
for (auto& number : numbers)
for (auto* number : numbers)
CFRelease(number);
return array;
}
......@@ -142,9 +142,9 @@ bool CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
OSStatus status;
// Get the sample buffer's block buffer and format description.
auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
auto* bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
DCHECK(bb);
auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
auto* fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
DCHECK(fdesc);
size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
......
......@@ -542,7 +542,7 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
source_offset * sizeof(float) % AudioBus::kChannelAlignment == 0) {
DCHECK_EQ(buffer_fill_offset, 0);
for (int ch = 0; ch < audio_bus->channels(); ++ch) {
auto samples = const_cast<float*>(audio_bus->channel(ch));
auto* samples = const_cast<float*>(audio_bus->channel(ch));
input_bus_->SetChannelData(ch, samples + source_offset);
}
return;
......@@ -605,9 +605,9 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
AudioStreamPacketDescription** out_packet_desc,
void* in_encoder) {
DCHECK(in_encoder);
auto encoder = reinterpret_cast<AppleAacImpl*>(in_encoder);
auto input_buffer = encoder->input_buffer_.get();
auto input_bus = encoder->input_bus_.get();
auto* encoder = reinterpret_cast<AppleAacImpl*>(in_encoder);
auto* input_buffer = encoder->input_buffer_.get();
auto* input_bus = encoder->input_bus_.get();
DCHECK_EQ(static_cast<int>(*io_num_packets), kAccessUnitSamples);
DCHECK_EQ(io_data->mNumberBuffers,
......@@ -644,8 +644,8 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
UInt32* out_size) {
DCHECK(in_encoder);
DCHECK(in_buffer);
auto encoder = reinterpret_cast<const AppleAacImpl*>(in_encoder);
auto buffer = reinterpret_cast<const std::string::value_type*>(in_buffer);
auto* encoder = reinterpret_cast<const AppleAacImpl*>(in_encoder);
auto* buffer = reinterpret_cast<const std::string::value_type*>(in_buffer);
std::string* const output_buffer = encoder->output_buffer_;
DCHECK(output_buffer);
......
......@@ -183,7 +183,7 @@ H264VideoToolboxEncoder::H264VideoToolboxEncoder(
weak_factory_.GetWeakPtr(), cast_environment_));
// Register for power state changes.
auto power_monitor = base::PowerMonitor::Get();
auto* power_monitor = base::PowerMonitor::Get();
if (power_monitor) {
power_monitor->AddObserver(this);
VLOG(1) << "Registered for power state changes.";
......@@ -200,7 +200,7 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
// If video_frame_factory_ is not null, the encoder registered for power state
// changes in the ctor and it must now unregister.
if (video_frame_factory_) {
auto power_monitor = base::PowerMonitor::Get();
auto* power_monitor = base::PowerMonitor::Get();
if (power_monitor)
power_monitor->RemoveObserver(this);
}
......@@ -253,7 +253,7 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
video_toolbox::DictionaryWithKeysAndValues(
buffer_attributes_keys, buffer_attributes_values,
arraysize(buffer_attributes_keys));
for (auto& v : buffer_attributes_values)
for (auto* v : buffer_attributes_values)
CFRelease(v);
// Create the compression session.
......@@ -515,7 +515,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
OSStatus status,
VTEncodeInfoFlags info,
CMSampleBufferRef sbuf) {
auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
auto* encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
const std::unique_ptr<InProgressFrameEncode> request(
reinterpret_cast<InProgressFrameEncode*>(request_opaque));
bool keyframe = false;
......@@ -529,7 +529,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
} else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
DVLOG(2) << " frame dropped";
} else {
auto sample_attachments =
auto* sample_attachments =
static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
0));
......
......@@ -177,10 +177,10 @@ void CreateFrameAndMemsetPlane(VideoFrameFactory* const video_frame_factory) {
video_frame_factory->MaybeCreateFrame(
gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta());
ASSERT_TRUE(video_frame.get());
auto cv_pixel_buffer = video_frame->cv_pixel_buffer();
auto* cv_pixel_buffer = video_frame->cv_pixel_buffer();
ASSERT_TRUE(cv_pixel_buffer);
CVPixelBufferLockBaseAddress(cv_pixel_buffer, 0);
auto ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0);
auto* ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0);
ASSERT_TRUE(ptr);
memset(ptr, 0xfe, CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) *
CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0));
......
......@@ -1437,7 +1437,7 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
}
auto gl_context = get_gl_context_cb_.Run();
auto* gl_context = get_gl_context_cb_.Run();
RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);
// The decoder should use DX11 iff
......
......@@ -382,7 +382,7 @@ void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
// one that calls VTCompressionSessionEncodeFrame.
DVLOG(3) << __FUNCTION__;
auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
auto* encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
DCHECK(encoder);
// InProgressFrameEncode holds timestamp information of the encoded frame.
......@@ -441,10 +441,11 @@ void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
return;
}
auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
encode_output->sample_buffer.get(), true),
0));
auto* sample_attachments =
static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
encode_output->sample_buffer.get(), true),
0));
const bool keyframe = !CFDictionaryContainsKey(
sample_attachments, CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
......@@ -481,7 +482,7 @@ bool VTVideoEncodeAccelerator::ResetCompressionSession() {
const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
video_toolbox::DictionaryWithKeysAndValues(
attributes_keys, attributes_values, arraysize(attributes_keys));
for (auto& v : attributes_values)
for (auto* v : attributes_values)
CFRelease(v);
bool session_rv =
......
......@@ -106,14 +106,14 @@ void MidiManagerAndroid::OnAttached(JNIEnv* env,
void MidiManagerAndroid::OnDetached(JNIEnv* env,
const JavaParamRef<jobject>& caller,
const JavaParamRef<jobject>& raw_device) {
for (const auto& device : devices_) {
for (auto* device : devices_) {
if (device->HasRawDevice(env, raw_device)) {
for (const auto& port : device->input_ports()) {
for (auto* port : device->input_ports()) {
DCHECK(input_port_to_index_.end() != input_port_to_index_.find(port));
size_t index = input_port_to_index_[port];
SetInputPortState(index, MIDI_PORT_DISCONNECTED);
}
for (const auto& port : device->output_ports()) {
for (auto* port : device->output_ports()) {
DCHECK(output_port_to_index_.end() != output_port_to_index_.find(port));
size_t index = output_port_to_index_[port];
SetOutputPortState(index, MIDI_PORT_DISCONNECTED);
......@@ -123,7 +123,7 @@ void MidiManagerAndroid::OnDetached(JNIEnv* env,
}
void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) {
for (auto& port : device->input_ports()) {
for (auto* port : device->input_ports()) {
// We implicitly open input ports here, because there are no signal
// from the renderer when to open.
// TODO(yhirano): Implement open operation in Blink.
......@@ -142,7 +142,7 @@ void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) {
device->GetProductName(),
device->GetDeviceVersion(), state));
}
for (const auto& port : device->output_ports()) {
for (auto* port : device->output_ports()) {
const size_t index = all_output_ports_.size();
all_output_ports_.push_back(port);
......
......@@ -445,7 +445,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
input_devices.push_back(it.first);
}
{
for (const auto handle : input_devices) {
for (auto* handle : input_devices) {
MMRESULT result = midiInClose(handle);
if (result == MIDIERR_STILLPLAYING) {
result = midiInReset(handle);
......@@ -466,7 +466,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
output_devices.push_back(it.first);
}
{
for (const auto handle : output_devices) {
for (auto* handle : output_devices) {
MMRESULT result = midiOutClose(handle);
if (result == MIDIERR_STILLPLAYING) {
result = midiOutReset(handle);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment