Commit aa60d106 authored by Daniel Bratell's avatar Daniel Bratell Committed by Commit Bot

Gave two kBufferSize constants unique names.

In some (extreme) jumbo configurations on Mac two kBufferSize
constants ended up in the same translation unit and caused
a compilation error. This patch gives them unique names to
not cause any future problems.

Change-Id: I96d5e51024ff99fcb838346726aa1635127841eb
Reviewed-on: https://chromium-review.googlesource.com/978209
Commit-Queue: Kent Tamura <tkent@chromium.org>
Reviewed-by: Kent Tamura <tkent@chromium.org>
Cr-Commit-Position: refs/heads/master@{#545720}
parent 95767e0e
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
namespace blink { namespace blink {
#if defined(OS_MACOSX) #if defined(OS_MACOSX)
const int kBufferSize = 1024; const int kBiquadBufferSize = 1024;
#endif #endif
// Compute 10^x = exp(x*log(10)) // Compute 10^x = exp(x*log(10))
...@@ -54,8 +54,8 @@ static double pow10(double x) { ...@@ -54,8 +54,8 @@ static double pow10(double x) {
Biquad::Biquad() : has_sample_accurate_values_(false) { Biquad::Biquad() : has_sample_accurate_values_(false) {
#if defined(OS_MACOSX) #if defined(OS_MACOSX)
// Allocate two samples more for filter history // Allocate two samples more for filter history
input_buffer_.Allocate(kBufferSize + 2); input_buffer_.Allocate(kBiquadBufferSize + 2);
output_buffer_.Allocate(kBufferSize + 2); output_buffer_.Allocate(kBiquadBufferSize + 2);
#endif #endif
// Allocate enough space for the a-rate filter coefficients to handle a // Allocate enough space for the a-rate filter coefficients to handle a
...@@ -209,12 +209,12 @@ void Biquad::ProcessFast(const float* source_p, ...@@ -209,12 +209,12 @@ void Biquad::ProcessFast(const float* source_p,
double* input2p = input_p + 2; double* input2p = input_p + 2;
double* output2p = output_p + 2; double* output2p = output_p + 2;
// Break up processing into smaller slices (kBufferSize) if necessary. // Break up processing into smaller slices (kBiquadBufferSize) if necessary.
int n = frames_to_process; int n = frames_to_process;
while (n > 0) { while (n > 0) {
int frames_this_time = n < kBufferSize ? n : kBufferSize; int frames_this_time = n < kBiquadBufferSize ? n : kBiquadBufferSize;
// Copy input to input buffer // Copy input to input buffer
for (int i = 0; i < frames_this_time; ++i) for (int i = 0; i < frames_this_time; ++i)
......
...@@ -116,19 +116,19 @@ PNGImageReader::~PNGImageReader() { ...@@ -116,19 +116,19 @@ PNGImageReader::~PNGImageReader() {
// Pre-conditions before using this: // Pre-conditions before using this:
// - |reader|.size() >= |read_offset| + |length| // - |reader|.size() >= |read_offset| + |length|
// - |buffer|.size() >= |length| // - |buffer|.size() >= |length|
// - |length| <= |kBufferSize| // - |length| <= |kPngReadBufferSize|
// //
// The reason for the last two precondition is that currently the png signature // The reason for the last two precondition is that currently the png signature
// plus IHDR chunk (8B + 25B = 33B) is the largest chunk that is read using this // plus IHDR chunk (8B + 25B = 33B) is the largest chunk that is read using this
// method. If the data is not consecutive, it is stored in |buffer|, which must // method. If the data is not consecutive, it is stored in |buffer|, which must
// have the size of (at least) |length|, but there's no need for it to be larger // have the size of (at least) |length|, but there's no need for it to be larger
// than |kBufferSize|. // than |kPngReadBufferSize|.
static constexpr size_t kBufferSize = 33; static constexpr size_t kPngReadBufferSize = 33;
const png_byte* ReadAsConstPngBytep(const FastSharedBufferReader& reader, const png_byte* ReadAsConstPngBytep(const FastSharedBufferReader& reader,
size_t read_offset, size_t read_offset,
size_t length, size_t length,
char* buffer) { char* buffer) {
DCHECK(length <= kBufferSize); DCHECK(length <= kPngReadBufferSize);
return reinterpret_cast<const png_byte*>( return reinterpret_cast<const png_byte*>(
reader.GetConsecutiveData(read_offset, length, buffer)); reader.GetConsecutiveData(read_offset, length, buffer));
} }
...@@ -397,7 +397,7 @@ bool PNGImageReader::Parse(SegmentReader& data, ParseQuery query) { ...@@ -397,7 +397,7 @@ bool PNGImageReader::Parse(SegmentReader& data, ParseQuery query) {
// libpng for processing. A frame is registered on the next fcTL chunk or // libpng for processing. A frame is registered on the next fcTL chunk or
// when the IEND chunk is found. This ensures that only complete frames are // when the IEND chunk is found. This ensures that only complete frames are
// reported, unless there is an error in the stream. // reported, unless there is an error in the stream.
char read_buffer[kBufferSize]; char read_buffer[kPngReadBufferSize];
while (reader.size() >= read_offset_ + 8) { while (reader.size() >= read_offset_ + 8) {
const png_byte* chunk = const png_byte* chunk =
ReadAsConstPngBytep(reader, read_offset_, 8, read_buffer); ReadAsConstPngBytep(reader, read_offset_, 8, read_buffer);
...@@ -513,7 +513,7 @@ bool PNGImageReader::ParseSize(const FastSharedBufferReader& reader) { ...@@ -513,7 +513,7 @@ bool PNGImageReader::ParseSize(const FastSharedBufferReader& reader) {
if (decoder_->IsDecodedSizeAvailable()) if (decoder_->IsDecodedSizeAvailable())
return true; return true;
char read_buffer[kBufferSize]; char read_buffer[kPngReadBufferSize];
if (setjmp(JMPBUF(png_))) if (setjmp(JMPBUF(png_)))
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment