Commit 7c035fee authored by vmpstr's avatar vmpstr Committed by Commit bot

media: Change auto to not deduce raw pointers.

This patch updates the code to use `auto*` instead of bare `auto` wherever the deduced type is a raw pointer, making the pointer-ness explicit at the declaration site.

R=dalecurtis@chromium.org
BUG=554600

Review-Url: https://codereview.chromium.org/2181163002
Cr-Commit-Position: refs/heads/master@{#407699}
parent b6b53857
...@@ -35,7 +35,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { ...@@ -35,7 +35,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]),
numbers.size(), &kCFTypeArrayCallBacks)); numbers.size(), &kCFTypeArrayCallBacks));
for (auto& number : numbers) { for (auto* number : numbers) {
CFRelease(number); CFRelease(number);
} }
return array; return array;
...@@ -49,7 +49,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegerAndFloat(int int_val, ...@@ -49,7 +49,7 @@ base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegerAndFloat(int int_val,
base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
kCFAllocatorDefault, reinterpret_cast<const void**>(numbers.data()), kCFAllocatorDefault, reinterpret_cast<const void**>(numbers.data()),
numbers.size(), &kCFTypeArrayCallBacks)); numbers.size(), &kCFTypeArrayCallBacks));
for (auto& number : numbers) for (auto* number : numbers)
CFRelease(number); CFRelease(number);
return array; return array;
} }
...@@ -142,9 +142,9 @@ bool CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf, ...@@ -142,9 +142,9 @@ bool CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
OSStatus status; OSStatus status;
// Get the sample buffer's block buffer and format description. // Get the sample buffer's block buffer and format description.
auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf); auto* bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
DCHECK(bb); DCHECK(bb);
auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf); auto* fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
DCHECK(fdesc); DCHECK(fdesc);
size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb); size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
......
...@@ -542,7 +542,7 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase { ...@@ -542,7 +542,7 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
source_offset * sizeof(float) % AudioBus::kChannelAlignment == 0) { source_offset * sizeof(float) % AudioBus::kChannelAlignment == 0) {
DCHECK_EQ(buffer_fill_offset, 0); DCHECK_EQ(buffer_fill_offset, 0);
for (int ch = 0; ch < audio_bus->channels(); ++ch) { for (int ch = 0; ch < audio_bus->channels(); ++ch) {
auto samples = const_cast<float*>(audio_bus->channel(ch)); auto* samples = const_cast<float*>(audio_bus->channel(ch));
input_bus_->SetChannelData(ch, samples + source_offset); input_bus_->SetChannelData(ch, samples + source_offset);
} }
return; return;
...@@ -605,9 +605,9 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase { ...@@ -605,9 +605,9 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
AudioStreamPacketDescription** out_packet_desc, AudioStreamPacketDescription** out_packet_desc,
void* in_encoder) { void* in_encoder) {
DCHECK(in_encoder); DCHECK(in_encoder);
auto encoder = reinterpret_cast<AppleAacImpl*>(in_encoder); auto* encoder = reinterpret_cast<AppleAacImpl*>(in_encoder);
auto input_buffer = encoder->input_buffer_.get(); auto* input_buffer = encoder->input_buffer_.get();
auto input_bus = encoder->input_bus_.get(); auto* input_bus = encoder->input_bus_.get();
DCHECK_EQ(static_cast<int>(*io_num_packets), kAccessUnitSamples); DCHECK_EQ(static_cast<int>(*io_num_packets), kAccessUnitSamples);
DCHECK_EQ(io_data->mNumberBuffers, DCHECK_EQ(io_data->mNumberBuffers,
...@@ -644,8 +644,8 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase { ...@@ -644,8 +644,8 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
UInt32* out_size) { UInt32* out_size) {
DCHECK(in_encoder); DCHECK(in_encoder);
DCHECK(in_buffer); DCHECK(in_buffer);
auto encoder = reinterpret_cast<const AppleAacImpl*>(in_encoder); auto* encoder = reinterpret_cast<const AppleAacImpl*>(in_encoder);
auto buffer = reinterpret_cast<const std::string::value_type*>(in_buffer); auto* buffer = reinterpret_cast<const std::string::value_type*>(in_buffer);
std::string* const output_buffer = encoder->output_buffer_; std::string* const output_buffer = encoder->output_buffer_;
DCHECK(output_buffer); DCHECK(output_buffer);
......
...@@ -183,7 +183,7 @@ H264VideoToolboxEncoder::H264VideoToolboxEncoder( ...@@ -183,7 +183,7 @@ H264VideoToolboxEncoder::H264VideoToolboxEncoder(
weak_factory_.GetWeakPtr(), cast_environment_)); weak_factory_.GetWeakPtr(), cast_environment_));
// Register for power state changes. // Register for power state changes.
auto power_monitor = base::PowerMonitor::Get(); auto* power_monitor = base::PowerMonitor::Get();
if (power_monitor) { if (power_monitor) {
power_monitor->AddObserver(this); power_monitor->AddObserver(this);
VLOG(1) << "Registered for power state changes."; VLOG(1) << "Registered for power state changes.";
...@@ -200,7 +200,7 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { ...@@ -200,7 +200,7 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
// If video_frame_factory_ is not null, the encoder registered for power state // If video_frame_factory_ is not null, the encoder registered for power state
// changes in the ctor and it must now unregister. // changes in the ctor and it must now unregister.
if (video_frame_factory_) { if (video_frame_factory_) {
auto power_monitor = base::PowerMonitor::Get(); auto* power_monitor = base::PowerMonitor::Get();
if (power_monitor) if (power_monitor)
power_monitor->RemoveObserver(this); power_monitor->RemoveObserver(this);
} }
...@@ -253,7 +253,7 @@ void H264VideoToolboxEncoder::ResetCompressionSession() { ...@@ -253,7 +253,7 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
video_toolbox::DictionaryWithKeysAndValues( video_toolbox::DictionaryWithKeysAndValues(
buffer_attributes_keys, buffer_attributes_values, buffer_attributes_keys, buffer_attributes_values,
arraysize(buffer_attributes_keys)); arraysize(buffer_attributes_keys));
for (auto& v : buffer_attributes_values) for (auto* v : buffer_attributes_values)
CFRelease(v); CFRelease(v);
// Create the compression session. // Create the compression session.
...@@ -515,7 +515,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, ...@@ -515,7 +515,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
OSStatus status, OSStatus status,
VTEncodeInfoFlags info, VTEncodeInfoFlags info,
CMSampleBufferRef sbuf) { CMSampleBufferRef sbuf) {
auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); auto* encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
const std::unique_ptr<InProgressFrameEncode> request( const std::unique_ptr<InProgressFrameEncode> request(
reinterpret_cast<InProgressFrameEncode*>(request_opaque)); reinterpret_cast<InProgressFrameEncode*>(request_opaque));
bool keyframe = false; bool keyframe = false;
...@@ -529,7 +529,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, ...@@ -529,7 +529,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
} else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) { } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
DVLOG(2) << " frame dropped"; DVLOG(2) << " frame dropped";
} else { } else {
auto sample_attachments = auto* sample_attachments =
static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
0)); 0));
......
...@@ -177,10 +177,10 @@ void CreateFrameAndMemsetPlane(VideoFrameFactory* const video_frame_factory) { ...@@ -177,10 +177,10 @@ void CreateFrameAndMemsetPlane(VideoFrameFactory* const video_frame_factory) {
video_frame_factory->MaybeCreateFrame( video_frame_factory->MaybeCreateFrame(
gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()); gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta());
ASSERT_TRUE(video_frame.get()); ASSERT_TRUE(video_frame.get());
auto cv_pixel_buffer = video_frame->cv_pixel_buffer(); auto* cv_pixel_buffer = video_frame->cv_pixel_buffer();
ASSERT_TRUE(cv_pixel_buffer); ASSERT_TRUE(cv_pixel_buffer);
CVPixelBufferLockBaseAddress(cv_pixel_buffer, 0); CVPixelBufferLockBaseAddress(cv_pixel_buffer, 0);
auto ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0); auto* ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0);
ASSERT_TRUE(ptr); ASSERT_TRUE(ptr);
memset(ptr, 0xfe, CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) * memset(ptr, 0xfe, CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) *
CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0)); CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0));
......
...@@ -1437,7 +1437,7 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { ...@@ -1437,7 +1437,7 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr; DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
} }
auto gl_context = get_gl_context_cb_.Run(); auto* gl_context = get_gl_context_cb_.Run();
RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false); RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);
// The decoder should use DX11 iff // The decoder should use DX11 iff
......
...@@ -382,7 +382,7 @@ void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque, ...@@ -382,7 +382,7 @@ void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
// one that calls VTCompressionSessionEncodeFrame. // one that calls VTCompressionSessionEncodeFrame.
DVLOG(3) << __FUNCTION__; DVLOG(3) << __FUNCTION__;
auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque); auto* encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
DCHECK(encoder); DCHECK(encoder);
// InProgressFrameEncode holds timestamp information of the encoded frame. // InProgressFrameEncode holds timestamp information of the encoded frame.
...@@ -441,10 +441,11 @@ void VTVideoEncodeAccelerator::ReturnBitstreamBuffer( ...@@ -441,10 +441,11 @@ void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
return; return;
} }
auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( auto* sample_attachments =
CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray( static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
encode_output->sample_buffer.get(), true), CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
0)); encode_output->sample_buffer.get(), true),
0));
const bool keyframe = !CFDictionaryContainsKey( const bool keyframe = !CFDictionaryContainsKey(
sample_attachments, CoreMediaGlue::kCMSampleAttachmentKey_NotSync()); sample_attachments, CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
...@@ -481,7 +482,7 @@ bool VTVideoEncodeAccelerator::ResetCompressionSession() { ...@@ -481,7 +482,7 @@ bool VTVideoEncodeAccelerator::ResetCompressionSession() {
const base::ScopedCFTypeRef<CFDictionaryRef> attributes = const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
video_toolbox::DictionaryWithKeysAndValues( video_toolbox::DictionaryWithKeysAndValues(
attributes_keys, attributes_values, arraysize(attributes_keys)); attributes_keys, attributes_values, arraysize(attributes_keys));
for (auto& v : attributes_values) for (auto* v : attributes_values)
CFRelease(v); CFRelease(v);
bool session_rv = bool session_rv =
......
...@@ -106,14 +106,14 @@ void MidiManagerAndroid::OnAttached(JNIEnv* env, ...@@ -106,14 +106,14 @@ void MidiManagerAndroid::OnAttached(JNIEnv* env,
void MidiManagerAndroid::OnDetached(JNIEnv* env, void MidiManagerAndroid::OnDetached(JNIEnv* env,
const JavaParamRef<jobject>& caller, const JavaParamRef<jobject>& caller,
const JavaParamRef<jobject>& raw_device) { const JavaParamRef<jobject>& raw_device) {
for (const auto& device : devices_) { for (auto* device : devices_) {
if (device->HasRawDevice(env, raw_device)) { if (device->HasRawDevice(env, raw_device)) {
for (const auto& port : device->input_ports()) { for (auto* port : device->input_ports()) {
DCHECK(input_port_to_index_.end() != input_port_to_index_.find(port)); DCHECK(input_port_to_index_.end() != input_port_to_index_.find(port));
size_t index = input_port_to_index_[port]; size_t index = input_port_to_index_[port];
SetInputPortState(index, MIDI_PORT_DISCONNECTED); SetInputPortState(index, MIDI_PORT_DISCONNECTED);
} }
for (const auto& port : device->output_ports()) { for (auto* port : device->output_ports()) {
DCHECK(output_port_to_index_.end() != output_port_to_index_.find(port)); DCHECK(output_port_to_index_.end() != output_port_to_index_.find(port));
size_t index = output_port_to_index_[port]; size_t index = output_port_to_index_[port];
SetOutputPortState(index, MIDI_PORT_DISCONNECTED); SetOutputPortState(index, MIDI_PORT_DISCONNECTED);
...@@ -123,7 +123,7 @@ void MidiManagerAndroid::OnDetached(JNIEnv* env, ...@@ -123,7 +123,7 @@ void MidiManagerAndroid::OnDetached(JNIEnv* env,
} }
void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) { void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) {
for (auto& port : device->input_ports()) { for (auto* port : device->input_ports()) {
// We implicitly open input ports here, because there are no signal // We implicitly open input ports here, because there are no signal
// from the renderer when to open. // from the renderer when to open.
// TODO(yhirano): Implement open operation in Blink. // TODO(yhirano): Implement open operation in Blink.
...@@ -142,7 +142,7 @@ void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) { ...@@ -142,7 +142,7 @@ void MidiManagerAndroid::AddDevice(std::unique_ptr<MidiDeviceAndroid> device) {
device->GetProductName(), device->GetProductName(),
device->GetDeviceVersion(), state)); device->GetDeviceVersion(), state));
} }
for (const auto& port : device->output_ports()) { for (auto* port : device->output_ports()) {
const size_t index = all_output_ports_.size(); const size_t index = all_output_ports_.size();
all_output_ports_.push_back(port); all_output_ports_.push_back(port);
......
...@@ -445,7 +445,7 @@ class MidiServiceWinImpl : public MidiServiceWin, ...@@ -445,7 +445,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
input_devices.push_back(it.first); input_devices.push_back(it.first);
} }
{ {
for (const auto handle : input_devices) { for (auto* handle : input_devices) {
MMRESULT result = midiInClose(handle); MMRESULT result = midiInClose(handle);
if (result == MIDIERR_STILLPLAYING) { if (result == MIDIERR_STILLPLAYING) {
result = midiInReset(handle); result = midiInReset(handle);
...@@ -466,7 +466,7 @@ class MidiServiceWinImpl : public MidiServiceWin, ...@@ -466,7 +466,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
output_devices.push_back(it.first); output_devices.push_back(it.first);
} }
{ {
for (const auto handle : output_devices) { for (auto* handle : output_devices) {
MMRESULT result = midiOutClose(handle); MMRESULT result = midiOutClose(handle);
if (result == MIDIERR_STILLPLAYING) { if (result == MIDIERR_STILLPLAYING) {
result = midiOutReset(handle); result = midiOutReset(handle);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment