Commit 2a6ea0f9 authored by Anand K. Mistry, committed by Commit Bot

Move constants out of headers and into implementation files.

As a result, also use std::unique_ptr<char[]> instead of char[] class
members. This makes the zip archiver more closely follow Chromium style.

BUG=889703

Change-Id: I551557958beff280e344134f15be20a406fb5ea8
Reviewed-on: https://chromium-review.googlesource.com/1242643
Reviewed-by: Noel Gordon <noel@chromium.org>
Commit-Queue: Anand Mistry <amistry@chromium.org>
Cr-Commit-Position: refs/heads/master@{#594621}
parent ecb6e0bb
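The pattern this change applies, shown as a minimal standalone C++ sketch (the Widget class and kBufferSize constant are hypothetical, not the zip archiver code): constants move out of a named namespace in the header and into an unnamed namespace in the .cc, and a manually managed char array member becomes a std::unique_ptr<char[]>.

// Sketch only. Everything below is illustrative, not part of this CL.
// widget.h -- no constants namespace, no raw owning pointer.
#include <cstdint>
#include <memory>

class Widget {
 public:
  Widget();
  ~Widget();  // No manual delete[]; the unique_ptr releases the array.

 private:
  // Heap-allocated buffer owned through std::unique_ptr<char[]> instead of a
  // raw char* (or a large in-object char[N] member).
  std::unique_ptr<char[]> buffer_;
};

// widget.cc -- the constant now lives in an unnamed namespace, visible only
// to this translation unit.
namespace {
constexpr int64_t kBufferSize = 512 * 1024;  // Hypothetical size.
}  // namespace

Widget::Widget() : buffer_(std::make_unique<char[]>(kBufferSize)) {}
Widget::~Widget() = default;

The unique_ptr keeps the large buffer off the stack and out of the object footprint while still providing automatic cleanup.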
@@ -7,6 +7,7 @@
#include <algorithm>
#include <cerrno>
#include <cstring>
#include <utility>
#include "base/time/time.h"
#include "chrome/browser/resources/chromeos/zip_archiver/cpp/compressor_io_javascript_stream.h"
@@ -15,6 +16,13 @@
namespace {
const char kCreateArchiveError[] = "Failed to create archive.";
const char kAddToArchiveError[] = "Failed to add entry to archive.";
const char kCloseArchiveError[] = "Failed to close archive.";
// We need at least 256KB for MiniZip.
const int64_t kMaximumDataChunkSize = 512 * 1024;
uint32_t UnixToDosdate(const int64_t datetime) {
tm tm_datetime;
localtime_r(&datetime, &tm_datetime);
@@ -122,15 +130,11 @@ CompressorArchiveMinizip::CompressorArchiveMinizip(
: CompressorArchive(compressor_stream),
compressor_stream_(compressor_stream),
zip_file_(nullptr),
destination_buffer_(std::make_unique<char[]>(kMaximumDataChunkSize)),
offset_(0),
length_(0) {
destination_buffer_ =
new char[compressor_stream_constants::kMaximumDataChunkSize];
}
length_(0) {}
CompressorArchiveMinizip::~CompressorArchiveMinizip() {
delete destination_buffer_;
}
CompressorArchiveMinizip::~CompressorArchiveMinizip() = default;
bool CompressorArchiveMinizip::CreateArchive() {
// Set up archive object.
@@ -147,7 +151,7 @@ bool CompressorArchiveMinizip::CreateArchive() {
zip_file_ = zipOpen2(nullptr /* pathname */, APPEND_STATUS_CREATE,
nullptr /* globalcomment */, &zip_funcs);
if (!zip_file_) {
set_error_message(compressor_archive_constants::kCreateArchiveError);
set_error_message(kCreateArchiveError);
return false /* Error */;
}
return true /* Success */;
@@ -204,7 +208,7 @@ bool CompressorArchiveMinizip::AddToArchive(const std::string& filename,
LANGUAGE_ENCODING_FLAG); // flagBase
if (open_result != ZIP_OK) {
CloseArchive(true /* has_error */);
set_error_message(compressor_archive_constants::kAddToArchiveError);
set_error_message(kAddToArchiveError);
return false /* Error */;
}
@@ -212,12 +216,11 @@ bool CompressorArchiveMinizip::AddToArchive(const std::string& filename,
if (!is_directory) {
int64_t remaining_size = file_size;
while (remaining_size > 0) {
int64_t chunk_size = std::min(
remaining_size, compressor_stream_constants::kMaximumDataChunkSize);
int64_t chunk_size = std::min(remaining_size, kMaximumDataChunkSize);
PP_DCHECK(chunk_size > 0);
int64_t read_bytes =
compressor_stream_->Read(chunk_size, destination_buffer_);
compressor_stream_->Read(chunk_size, destination_buffer_.get());
// Negative read_bytes indicates an error occurred when reading chunks.
// 0 just means there is no more data available, but here we need a positive
// number of bytes, so this is also an error here.
@@ -230,8 +233,8 @@ bool CompressorArchiveMinizip::AddToArchive(const std::string& filename,
break;
}
if (zipWriteInFileInZip(zip_file_, destination_buffer_, read_bytes) !=
ZIP_OK) {
if (zipWriteInFileInZip(zip_file_, destination_buffer_.get(),
read_bytes) != ZIP_OK) {
has_error = true;
break;
}
@@ -244,7 +247,7 @@ bool CompressorArchiveMinizip::AddToArchive(const std::string& filename,
if (has_error) {
CloseArchive(true /* has_error */);
set_error_message(compressor_archive_constants::kAddToArchiveError);
set_error_message(kAddToArchiveError);
return false /* Error */;
}
@@ -258,12 +261,12 @@ bool CompressorArchiveMinizip::AddToArchive(const std::string& filename,
bool CompressorArchiveMinizip::CloseArchive(bool has_error) {
if (zipClose(zip_file_, nullptr /* global_comment */) != ZIP_OK) {
set_error_message(compressor_archive_constants::kCloseArchiveError);
set_error_message(kCloseArchiveError);
return false /* Error */;
}
if (!has_error) {
if (compressor_stream()->Flush() < 0) {
set_error_message(compressor_archive_constants::kCloseArchiveError);
set_error_message(kCloseArchiveError);
return false /* Error */;
}
}
......
@@ -5,6 +5,7 @@
#ifndef CHROME_BROWSER_RESOURCES_CHROMEOS_ZIP_ARCHIVER_CPP_COMPRESSOR_ARCHIVE_MINIZIP_H_
#define CHROME_BROWSER_RESOURCES_CHROMEOS_ZIP_ARCHIVER_CPP_COMPRESSOR_ARCHIVE_MINIZIP_H_
#include <memory>
#include <string>
#include "chrome/browser/resources/chromeos/zip_archiver/cpp/compressor_archive.h"
@@ -13,15 +14,6 @@
class CompressorStream;
// A namespace with constants used by CompressorArchiveMinizip.
namespace compressor_archive_constants {
const char kCreateArchiveError[] = "Failed to create archive.";
const char kAddToArchiveError[] = "Failed to add entry to archive.";
const char kCloseArchiveError[] = "Failed to close archive.";
} // namespace compressor_archive_constants
// A namespace with custom functions passed to minizip.
namespace compressor_archive_functions {
@@ -98,7 +90,7 @@ class CompressorArchiveMinizip : public CompressorArchive {
zipFile zip_file_;
// The buffer used to store the data read from JavaScript.
char* destination_buffer_;
std::unique_ptr<char[]> destination_buffer_;
// The current offset of the zip archive file.
int64_t offset_;
......
@@ -11,6 +11,13 @@
#include "chrome/browser/resources/chromeos/zip_archiver/cpp/javascript_compressor_requestor_interface.h"
#include "ppapi/cpp/logging.h"
namespace {
// We need at least 256KB for MiniZip.
const int64_t kMaximumDataChunkSize = 512 * 1024;
} // namespace
CompressorIOJavaScriptStream::CompressorIOJavaScriptStream(
JavaScriptCompressorRequestorInterface* requestor)
: requestor_(requestor), buffer_offset_(-1), buffer_data_length_(0) {
@@ -20,7 +27,7 @@ CompressorIOJavaScriptStream::CompressorIOJavaScriptStream(
pthread_mutex_lock(&shared_state_lock_);
available_data_ = false;
buffer_ = new char[compressor_stream_constants::kMaximumDataChunkSize];
buffer_ = new char[kMaximumDataChunkSize];
pthread_mutex_unlock(&shared_state_lock_);
}
@@ -91,8 +98,7 @@ int64_t CompressorIOJavaScriptStream::Write(int64_t zip_offset,
// the buffer, and then cache the data in the buffer.
if (buffer_offset_ >= 0 && /* 1 */
(current_offset != buffer_offset_ + buffer_data_length_ || /* 2 */
buffer_data_length_ + left_length >
compressor_stream_constants::kMaximumDataChunkSize) && /* 3 */
buffer_data_length_ + left_length > kMaximumDataChunkSize) && /* 3 */
(current_offset < buffer_offset_ ||
buffer_offset_ + buffer_data_length_ <
current_offset + left_length) /* 4 */) {
@@ -104,8 +110,7 @@ int64_t CompressorIOJavaScriptStream::Write(int64_t zip_offset,
}
// How many bytes we should copy to buffer_ in this iteration.
int64_t copy_length = std::min(
left_length, compressor_stream_constants::kMaximumDataChunkSize);
int64_t copy_length = std::min(left_length, kMaximumDataChunkSize);
// Set up the buffer_offset_ if the buffer_ has no data.
if (buffer_offset_ == -1 /* initial state */)
buffer_offset_ = current_offset;
......
@@ -18,12 +18,6 @@
class JavaScriptCompressorRequestorInterface;
// A namespace with constants used by CompressorArchiveMinizip.
namespace compressor_stream_constants {
// We need at least 256KB for MiniZip.
const int64_t kMaximumDataChunkSize = 512 * 1024;
} // namespace compressor_stream_constants
class CompressorIOJavaScriptStream : public CompressorStream {
public:
CompressorIOJavaScriptStream(
......
@@ -16,6 +16,38 @@
namespace {
const char kArchiveOpenError[] = "Failed to open archive.";
const char kArchiveNextHeaderError[] =
"Failed to open current file in archive.";
const char kArchiveReadDataError[] = "Failed to read archive data.";
const char kArchiveReadFreeError[] = "Failed to close archive.";
// The size of the buffer used to skip unnecessary data. Should be positive and
// UINT16_MAX or less. unzReadCurrentFile in third_party/minizip/src/unzip.c
// supports reading at most UINT16_MAX bytes at a time.
const int64_t kDummyBufferSize = UINT16_MAX; // ~64 KB.
// The size of the buffer used by ReadInProgress to decompress data. Should be
// positive and UINT16_MAX or less. unzReadCurrentFile in
// third_party/minizip/src/unzip.c supports reading at most UINT16_MAX bytes
// at a time.
const int64_t kDecompressBufferSize = UINT16_MAX; // ~64 KB.
// The maximum data chunk size for VolumeReader::Read requests.
// Should be positive.
const int64_t kMaximumDataChunkSize = 512 * 1024; // 512 KB.
// The minimum data chunk size for VolumeReader::Read requests.
// Should be positive.
const int64_t kMinimumDataChunkSize = 32 * 1024; // 32 KB.
// Maximum length of a filename in a zip archive.
const int kZipMaxPath = 256;
// The size of the static cache. We need at least 64 KB to cache the whole
// 'end of central directory' data.
const int64_t kStaticCacheSize = 128 * 1024;
base::Time::Exploded ExplodeDosdate(uint32_t dos_timedate) {
base::Time::Exploded exploded_time = {};
exploded_time.year = 1980 + ((dos_timedate >> 25) & 0x7f);
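For reference, ExplodeDosdate above unpacks the combined MS-DOS date/time value that MiniZip reports (date in the high 16 bits, time in the low 16 bits). A short sketch of the full decode, assuming the standard FAT date/time bit layout (illustrative only, not this CL's code):

#include <cstdint>

struct DosDateTime {
  int year, month, day, hour, minute, second;
};

// Assumed FAT/DOS packing: year-1980 in bits 25-31, month in 21-24,
// day in 16-20, hour in 11-15, minute in 5-10, second/2 in 0-4.
DosDateTime UnpackDosDateTime(uint32_t dos_timedate) {
  DosDateTime out;
  out.year = 1980 + ((dos_timedate >> 25) & 0x7f);
  out.month = (dos_timedate >> 21) & 0x0f;  // 1-12
  out.day = (dos_timedate >> 16) & 0x1f;    // 1-31
  out.hour = (dos_timedate >> 11) & 0x1f;
  out.minute = (dos_timedate >> 5) & 0x3f;
  out.second = (dos_timedate & 0x1f) * 2;   // Stored in 2-second units.
  return out;
}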
@@ -45,12 +77,11 @@ int64_t DynamicCache(VolumeArchiveMinizip* archive, int64_t unzip_size) {
return -1 /* Error */;
}
int64_t bytes_to_read =
std::min(volume_archive_constants::kMaximumDataChunkSize,
int64_t bytes_to_read = std::min(kMaximumDataChunkSize,
archive->reader()->archive_size() - offset);
PP_DCHECK(bytes_to_read > 0);
int64_t left_length = bytes_to_read;
char* buffer_pointer = archive->dynamic_cache_;
char* buffer_pointer = archive->dynamic_cache_.get();
const void* destination_buffer;
do {
@@ -88,7 +119,8 @@ uint32_t CustomArchiveRead(void* archive,
if (offset >= archive_minizip->static_cache_offset_) {
// Relative offset in the central directory.
int64_t offset_in_cache = offset - archive_minizip->static_cache_offset_;
memcpy(buffer, archive_minizip->static_cache_ + offset_in_cache, size);
memcpy(buffer, archive_minizip->static_cache_.get() + offset_in_cache,
size);
if (archive_minizip->reader()->Seek(static_cast<int64_t>(size),
ZLIB_FILEFUNC_SEEK_CUR) < 0) {
return -1 /* Error */;
@@ -117,7 +149,8 @@ uint32_t CustomArchiveRead(void* archive,
int64_t copy_length = std::min(
left_length, archive_minizip->dynamic_cache_size_ - offset_in_cache);
memcpy(unzip_buffer_pointer,
archive_minizip->dynamic_cache_ + offset_in_cache, copy_length);
archive_minizip->dynamic_cache_.get() + offset_in_cache,
copy_length);
unzip_buffer_pointer += copy_length;
left_length -= copy_length;
if (archive_minizip->reader()->Seek(static_cast<int64_t>(copy_length),
@@ -173,15 +206,20 @@ std::unique_ptr<std::string> GetPassphrase(
VolumeArchiveMinizip::VolumeArchiveMinizip(VolumeReader* reader)
: VolumeArchive(reader),
reader_data_size_(volume_archive_constants::kMinimumDataChunkSize),
reader_data_size_(kMinimumDataChunkSize),
zip_file_(nullptr),
dynamic_cache_(std::make_unique<char[]>(kMaximumDataChunkSize)),
dynamic_cache_offset_(0),
dynamic_cache_size_(0),
static_cache_(std::make_unique<char[]>(kStaticCacheSize)),
static_cache_offset_(0),
static_cache_size_(0),
last_read_data_offset_(0),
last_read_data_length_(0),
dummy_buffer_(std::make_unique<char[]>(kDummyBufferSize)),
decompressed_data_(nullptr),
decompressed_data_buffer_(
std::make_unique<char[]>(kDecompressBufferSize)),
decompressed_data_size_(0),
decompressed_error_(false) {}
@@ -203,15 +241,14 @@ bool VolumeArchiveMinizip::Init(const std::string& encoding) {
// Load maximum static_cache_size_ bytes from the end of the archive to
// static_cache_.
static_cache_size_ = std::min(volume_archive_constants::kStaticCacheSize,
reader()->archive_size());
static_cache_size_ = std::min(kStaticCacheSize, reader()->archive_size());
int64_t previous_offset = reader()->offset();
char* buffer_pointer = static_cache_;
char* buffer_pointer = static_cache_.get();
int64_t left_length = static_cache_size_;
static_cache_offset_ =
std::max(reader()->archive_size() - static_cache_size_, 0LL);
if (reader()->Seek(static_cache_offset_, ZLIB_FILEFUNC_SEEK_SET) < 0) {
set_error_message(volume_archive_constants::kArchiveOpenError);
set_error_message(kArchiveOpenError);
return false /* Error */;
}
do {
@@ -224,13 +261,13 @@ bool VolumeArchiveMinizip::Init(const std::string& encoding) {
// Set the offset to the original position.
if (reader()->Seek(previous_offset, ZLIB_FILEFUNC_SEEK_SET) < 0) {
set_error_message(volume_archive_constants::kArchiveOpenError);
set_error_message(kArchiveOpenError);
return false /* Error */;
}
zip_file_ = unzOpen2(nullptr /* filename */, &zip_funcs);
if (!zip_file_) {
set_error_message(volume_archive_constants::kArchiveOpenError);
set_error_message(kArchiveOpenError);
return false;
}
@@ -246,7 +283,7 @@ VolumeArchive::Result VolumeArchiveMinizip::GetCurrentFileInfo(
// Headers are being read from the central directory (in the ZIP format), so
// use a large block size to save on IPC calls. The headers in EOCD are
// grouped one by one.
reader_data_size_ = volume_archive_constants::kMaximumDataChunkSize;
reader_data_size_ = kMaximumDataChunkSize;
// Reset to 0 for new VolumeArchive::ReadData operation.
last_read_data_offset_ = 0;
@@ -254,13 +291,13 @@ VolumeArchive::Result VolumeArchiveMinizip::GetCurrentFileInfo(
unz_file_pos position = {};
if (unzGetFilePos(zip_file_, &position) != UNZ_OK) {
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return VolumeArchive::RESULT_FAIL;
}
// Get the information of the opened file.
unz_file_info raw_file_info = {};
char raw_file_name_in_zip[volume_archive_constants::kZipMaxPath] = {};
char raw_file_name_in_zip[kZipMaxPath] = {};
const int result =
unzGetCurrentFileInfo(zip_file_, &raw_file_info, raw_file_name_in_zip,
sizeof(raw_file_name_in_zip) - 1,
@@ -270,7 +307,7 @@ VolumeArchive::Result VolumeArchiveMinizip::GetCurrentFileInfo(
0); // commentBufferSize.
if (result != UNZ_OK || raw_file_name_in_zip[0] == '\0') {
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return VolumeArchive::RESULT_FAIL;
}
*pathname = std::string(raw_file_name_in_zip);
@@ -302,7 +339,7 @@ VolumeArchive::Result VolumeArchiveMinizip::GoToNextFile() {
if (return_value == UNZ_OK)
return VolumeArchive::RESULT_SUCCESS;
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return VolumeArchive::RESULT_FAIL;
}
@@ -315,19 +352,19 @@ bool VolumeArchiveMinizip::SeekHeader(const std::string& path_name) {
// sensitive.
if (unzLocateFile(zip_file_, path_name.c_str(),
nullptr /* filename_compare_func */) != UNZ_OK) {
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return false;
}
unz_file_info raw_file_info = {};
char raw_file_name_in_zip[volume_archive_constants::kZipMaxPath] = {};
char raw_file_name_in_zip[kZipMaxPath] = {};
if (unzGetCurrentFileInfo(zip_file_, &raw_file_info, raw_file_name_in_zip,
sizeof(raw_file_name_in_zip) - 1,
nullptr, // extraField.
0, // extraFieldBufferSize.
nullptr, // szComment.
0) != UNZ_OK) {
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return false;
}
@@ -362,7 +399,7 @@ bool VolumeArchiveMinizip::SeekHeader(const std::string& path_name) {
}
if (open_result != UNZ_OK) {
set_error_message(volume_archive_constants::kArchiveNextHeaderError);
set_error_message(kArchiveNextHeaderError);
return false;
}
@@ -377,8 +414,7 @@ void VolumeArchiveMinizip::DecompressData(int64_t offset, int64_t length) {
// Requests with offset smaller than last read offset are not supported.
if (offset < last_read_data_offset_) {
set_error_message(
std::string(volume_archive_constants::kArchiveReadDataError));
set_error_message(std::string(kArchiveReadDataError));
decompressed_error_ = true;
return;
}
@@ -393,23 +429,21 @@ void VolumeArchiveMinizip::DecompressData(int64_t offset, int64_t length) {
// offset - last_read_data_offset_, kMaximumDataChunkSize in case the former
// is too big, or kMinimumDataChunkSize in case it's too small and we might
// end up with too many IPCs.
reader_data_size_ =
std::max(std::min(offset - last_read_data_offset_,
volume_archive_constants::kMaximumDataChunkSize),
volume_archive_constants::kMinimumDataChunkSize);
reader_data_size_ = std::max(
std::min(offset - last_read_data_offset_, kMaximumDataChunkSize),
kMinimumDataChunkSize);
// No need for an offset in dummy_buffer as it will be ignored anyway.
// archive_read_data receives size_t as length parameter, but we limit it to
// volume_archive_constants::kDummyBufferSize which is positive and less
// kDummyBufferSize which is positive and less
// than size_t maximum. So conversion from int64_t to size_t is safe here.
size = unzReadCurrentFile(
zip_file_, dummy_buffer_,
std::min(offset - last_read_data_offset_,
volume_archive_constants::kDummyBufferSize));
zip_file_, dummy_buffer_.get(),
std::min(offset - last_read_data_offset_, kDummyBufferSize));
PP_DCHECK(size != 0); // The actual read is done below. We shouldn't get to
// end of file here.
if (size < 0) { // Error.
set_error_message(volume_archive_constants::kArchiveReadDataError);
set_error_message(kArchiveReadDataError);
decompressed_error_ = true;
return;
}
@@ -418,29 +452,27 @@ void VolumeArchiveMinizip::DecompressData(int64_t offset, int64_t length) {
// Do not decompress more bytes than we can store internally. The
// kDecompressBufferSize limit is used to avoid huge memory usage.
int64_t left_length =
std::min(length, volume_archive_constants::kDecompressBufferSize);
int64_t left_length = std::min(length, kDecompressBufferSize);
// ReadData will call CustomArchiveRead when calling archive_read_data. The
// read should be done with a value similar to length, which is the requested
// number of bytes, or kMaximumDataChunkSize / kMinimumDataChunkSize
// in case length is too big or too small.
reader_data_size_ =
std::max(std::min(static_cast<int64_t>(left_length),
volume_archive_constants::kMaximumDataChunkSize),
volume_archive_constants::kMinimumDataChunkSize);
reader_data_size_ = std::max(
std::min(static_cast<int64_t>(left_length), kMaximumDataChunkSize),
kMinimumDataChunkSize);
// Perform the actual copy.
int64_t bytes_read = 0;
do {
// archive_read_data receives size_t as length parameter, but we limit it to
// volume_archive_constants::kMinimumDataChunkSize (see left_length
// kMinimumDataChunkSize (see left_length
// initialization), which is positive and less than size_t maximum.
// So conversion from int64_t to size_t is safe here.
size = unzReadCurrentFile(zip_file_, decompressed_data_buffer_ + bytes_read,
left_length);
size = unzReadCurrentFile(
zip_file_, decompressed_data_buffer_.get() + bytes_read, left_length);
if (size < 0) { // Error.
set_error_message(volume_archive_constants::kArchiveReadDataError);
set_error_message(kArchiveReadDataError);
decompressed_error_ = true;
return;
}
@@ -452,7 +484,7 @@ void VolumeArchiveMinizip::DecompressData(int64_t offset, int64_t length) {
// beginning of the buffer. VolumeArchiveMinizip::ConsumeData is used
// to preserve the bytes that are decompressed but not required by
// VolumeArchiveMinizip::ReadData.
decompressed_data_ = decompressed_data_buffer_;
decompressed_data_ = decompressed_data_buffer_.get();
decompressed_data_size_ = bytes_read;
}
@@ -460,7 +492,7 @@ bool VolumeArchiveMinizip::Cleanup() {
bool returnValue = true;
if (zip_file_) {
if (unzClose(zip_file_) != UNZ_OK) {
set_error_message(volume_archive_constants::kArchiveReadFreeError);
set_error_message(kArchiveReadFreeError);
returnValue = false;
}
}
@@ -486,7 +518,7 @@ int64_t VolumeArchiveMinizip::ReadData(int64_t offset,
// Decompression failed.
if (decompressed_error_) {
set_error_message(volume_archive_constants::kArchiveReadDataError);
set_error_message(kArchiveReadDataError);
return -1 /* Error */;
}
@@ -502,8 +534,7 @@ int64_t VolumeArchiveMinizip::ReadData(int64_t offset,
last_read_data_offset_ += read_bytes;
PP_DCHECK(decompressed_data_ + decompressed_data_size_ <=
decompressed_data_buffer_ +
volume_archive_constants::kDecompressBufferSize);
decompressed_data_buffer_.get() + kDecompressBufferSize);
return read_bytes;
}
......
@@ -14,46 +14,6 @@
#include "chrome/browser/resources/chromeos/zip_archiver/cpp/volume_archive.h"
// A namespace with constants used by VolumeArchiveMinizip.
namespace volume_archive_constants {
const char kArchiveReadNewError[] = "Could not allocate archive.";
const char kFileNotFound[] = "File not found for read data request.";
const char kVolumeReaderError[] = "VolumeReader failed to retrieve data.";
const char kArchiveOpenError[] = "Failed to open archive.";
const char kArchiveNextHeaderError[] =
"Failed to open current file in archive.";
const char kArchiveReadDataError[] = "Failed to read archive data.";
const char kArchiveReadFreeError[] = "Failed to close archive.";
// The size of the buffer used to skip unnecessary data. Should be positive and
// UINT16_MAX or less. unzReadCurrentFile in third_party/minizip/src/unzip.c
// supports to read a data up to UINT16_MAX at a time.
const int64_t kDummyBufferSize = UINT16_MAX; // ~64 KB
// The size of the buffer used by ReadInProgress to decompress data. Should be
// positive and UINT16_MAX or less. unzReadCurrentFile in
// third_party/minizip/src/unzip.c supports to read a data up to UINT16_MAX at a
// time.
const int64_t kDecompressBufferSize = UINT16_MAX; // ~64 KB.
// The maximum data chunk size for VolumeReader::Read requests.
// Should be positive.
const int64_t kMaximumDataChunkSize = 512 * 1024; // 512 KB.
// The minimum data chunk size for VolumeReader::Read requests.
// Should be positive.
const int64_t kMinimumDataChunkSize = 32 * 1024; // 16 KB.
// Maximum length of filename in zip archive.
const int kZipMaxPath = 256;
// The size of the static cache. We need at least 64KB to cache whole
// 'end of central directory' data.
const int64_t kStaticCacheSize = 128 * 1024;
} // namespace volume_archive_constants
class VolumeArchiveMinizip;
// A namespace with custom functions passed to minizip.
@@ -148,7 +108,7 @@ class VolumeArchiveMinizip : public VolumeArchive {
// chunk is small, we load a larger block of bytes from the archive and cache
// them in dynamic_cache_. If the range of the next requested chunk is within
// the cache, we don't read the archive and just return the data in the cache.
char dynamic_cache_[volume_archive_constants::kMaximumDataChunkSize];
std::unique_ptr<char[]> dynamic_cache_;
// The offset from which dynamic_cache_ has the data of the archive.
int64_t dynamic_cache_offset_;
@@ -163,7 +123,7 @@ class VolumeArchiveMinizip : public VolumeArchive {
// cache a certain length of data from the end into static_cache_. The data
// in this buffer is also used when the data in the central directory is
// requested by MiniZip later.
char static_cache_[volume_archive_constants::kStaticCacheSize];
std::unique_ptr<char[]> static_cache_;
// The offset from which static_cache_ has the data of the archive.
int64_t static_cache_offset_;
@@ -197,7 +157,7 @@ class VolumeArchiveMinizip : public VolumeArchive {
// offsets different from last_read_data_offset_. In this case some bytes
// must be skipped. Because seeking is not possible inside compressed files,
// the bytes will be discarded using this buffer.
char dummy_buffer_[volume_archive_constants::kDummyBufferSize];
std::unique_ptr<char[]> dummy_buffer_;
// The address where the decompressed data starting from
// decompressed_offset_ is stored. It should point to a valid location
@@ -207,8 +167,7 @@ class VolumeArchiveMinizip : public VolumeArchive {
char* decompressed_data_;
// The actual buffer that contains the decompressed data.
char decompressed_data_buffer_
[volume_archive_constants::kDecompressBufferSize];
std::unique_ptr<char[]> decompressed_data_buffer_;
// The size of valid data starting from decompressed_data_ that is stored
// inside decompressed_data_buffer_.
......