Commit ade347f5 authored by agrieve, committed by Commit Bot

Add deduplication logic to .pak files

Now, when multiple entries contain the same content, a table-of-contents
entry is still written for each id, but they all point at a single shared
data region.

As of now, en-US.pak has ~3200 entries, and 350 of them are duplicates.

For MonochromePublic.apk, this shrinks the uncompressed .pak size by ~130kb
and the compressed .pak size by 32kb.

BUG=738566

Review-Url: https://codereview.chromium.org/2969123002
Cr-Commit-Position: refs/heads/master@{#488215}
parent a8360926
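For a concrete picture of the new behavior, here is a minimal round-trip sketch using the grit functions this change adds (ReadDataPackFromString, WriteDataPackToString). The ids and strings are made up, and it assumes grit is importable; it is an illustration, not part of the CL:

    from grit.format import data_pack

    # Two of the three entries share identical content.
    resources = {1: 'hello', 2: 'hello', 3: 'world'}
    blob = data_pack.WriteDataPackToString(resources, data_pack.UTF8)
    # 'hello' is stored once; id 2 becomes a 4-byte alias of id 1, so the
    # blob contains only one copy of each distinct payload.
    assert blob.count('hello') == 1
    loaded = data_pack.ReadDataPackFromString(blob)
    # Aliases are resolved transparently on read.
    assert loaded.resources == resources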
@@ -562,12 +562,17 @@ def PrintPakAnalysis(apk_filename, min_pak_resource_size):
   # Calculate aggregate stats about resources across pak files.
   resource_count_map = collections.defaultdict(int)
   resource_size_map = collections.defaultdict(int)
+  seen_data_ids = set()
+  alias_overhead_bytes = 4
   resource_overhead_bytes = 6
   for pak in paks:
-    for r in pak.resources:
-      resource_count_map[r] += 1
-      resource_size_map[r] += len(pak.resources[r]) + resource_overhead_bytes
+    for k, v in pak.resources.iteritems():
+      resource_count_map[k] += 1
+      if id(v) not in seen_data_ids:
+        seen_data_ids.add(id(v))
+        resource_size_map[k] += resource_overhead_bytes + len(v)
+      else:
+        resource_size_map[k] += alias_overhead_bytes

   # Output the overall resource summary.
   total_resource_size = sum(resource_size_map.values())
   total_resource_count = len(resource_count_map)
...
@@ -21,9 +21,7 @@ from grit.node import message
 from grit.node import structure

-PACK_FILE_VERSION = 4
-HEADER_LENGTH = 2 * 4 + 1  # Two uint32s. (file version, number of entries) and
-                           # one uint8 (encoding of text resources)
+PACK_FILE_VERSION = 5
 BINARY, UTF8, UTF16 = range(3)
@@ -31,6 +29,10 @@ class WrongFileVersion(Exception):
   pass


+class CorruptDataPack(Exception):
+  pass
+
+
 DataPackContents = collections.namedtuple(
     'DataPackContents', 'resources encoding')
@@ -49,56 +51,100 @@ def Format(root, lang='en', output_dir='.'):
 def ReadDataPack(input_file):
+  return ReadDataPackFromString(util.ReadFile(input_file, util.BINARY))
+
+
+def ReadDataPackFromString(data):
   """Reads a data pack file and returns a dictionary."""
-  data = util.ReadFile(input_file, util.BINARY)
   original_data = data

   # Read the header.
-  version, num_entries, encoding = struct.unpack('<IIB', data[:HEADER_LENGTH])
-  if version != PACK_FILE_VERSION:
-    print 'Wrong file version in ', input_file
-    raise WrongFileVersion
+  version = struct.unpack('<I', data[:4])[0]
+  if version == 4:
+    resource_count, encoding = struct.unpack('<IB', data[4:9])
+    alias_count = 0
+    data = data[9:]
+  elif version == 5:
+    encoding, resource_count, alias_count = struct.unpack('<BxxxHH', data[4:12])
+    data = data[12:]
+  else:
+    raise WrongFileVersion('Found version: ' + str(version))

   resources = {}
-  if num_entries == 0:
-    return DataPackContents(resources, encoding)
-
-  # Read the index and data.
-  data = data[HEADER_LENGTH:]
   kIndexEntrySize = 2 + 4  # Each entry is a uint16 and a uint32.
-  for _ in range(num_entries):
-    id, offset = struct.unpack('<HI', data[:kIndexEntrySize])
-    data = data[kIndexEntrySize:]
-    next_id, next_offset = struct.unpack('<HI', data[:kIndexEntrySize])
-    resources[id] = original_data[offset:next_offset]
+
+  def entry_at_index(idx):
+    offset = idx * kIndexEntrySize
+    return struct.unpack('<HI', data[offset:offset + kIndexEntrySize])
+
+  prev_resource_id, prev_offset = entry_at_index(0)
+  for i in xrange(1, resource_count + 1):
+    resource_id, offset = entry_at_index(i)
+    resources[prev_resource_id] = original_data[prev_offset:offset]
+    prev_resource_id, prev_offset = resource_id, offset
+
+  # Read the alias table.
+  alias_data = data[(resource_count + 1) * kIndexEntrySize:]
+  kAliasEntrySize = 2 + 2  # uint16, uint16
+
+  def alias_at_index(idx):
+    offset = idx * kAliasEntrySize
+    return struct.unpack('<HH', alias_data[offset:offset + kAliasEntrySize])
+
+  for i in xrange(alias_count):
+    resource_id, index = alias_at_index(i)
+    aliased_id = entry_at_index(index)[0]
+    resources[resource_id] = resources[aliased_id]

   return DataPackContents(resources, encoding)


 def WriteDataPackToString(resources, encoding):
   """Returns a string with a map of id=>data in the data pack format."""
-  ids = sorted(resources.keys())
   ret = []

-  # Write file header.
-  ret.append(struct.pack('<IIB', PACK_FILE_VERSION, len(ids), encoding))
-  HEADER_LENGTH = 2 * 4 + 1  # Two uint32s and one uint8.
-
-  # Each entry is a uint16 + a uint32s. We have one extra entry for the last
-  # item.
-  index_length = (len(ids) + 1) * (2 + 4)
-
-  # Write index.
-  data_offset = HEADER_LENGTH + index_length
-  for id in ids:
-    ret.append(struct.pack('<HI', id, data_offset))
-    data_offset += len(resources[id])
+  # Compute alias map.
+  resource_ids = sorted(resources)
+  # Use reversed() so that for duplicates lower IDs clobber higher ones.
+  id_by_data = {resources[k]: k for k in reversed(resource_ids)}
+  # Map of resource_id -> resource_id, where value < key.
+  alias_map = {k: id_by_data[v] for k, v in resources.iteritems()
+               if id_by_data[v] != k}
+
+  # Write file header.
+  resource_count = len(resources) - len(alias_map)
+  # Padding bytes added for alignment.
+  ret.append(struct.pack('<IBxxxHH', PACK_FILE_VERSION, encoding,
+                         resource_count, len(alias_map)))
+  HEADER_LENGTH = 4 + 4 + 2 + 2
+
+  # Each main table entry is: uint16 + uint32 (and an extra entry at the end).
+  # Each alias table entry is: uint16 + uint16.
+  data_offset = HEADER_LENGTH + (resource_count + 1) * 6 + len(alias_map) * 4
+
+  # Write main table.
+  index_by_id = {}
+  deduped_data = []
+  index = 0
+  for resource_id in resource_ids:
+    if resource_id in alias_map:
+      continue
+    data = resources[resource_id]
+    index_by_id[resource_id] = index
+    ret.append(struct.pack('<HI', resource_id, data_offset))
+    data_offset += len(data)
+    deduped_data.append(data)
+    index += 1
+  assert index == resource_count

+  # Add an extra entry at the end.
   ret.append(struct.pack('<HI', 0, data_offset))

+  # Write alias table.
+  for resource_id in sorted(alias_map):
+    index = index_by_id[alias_map[resource_id]]
+    ret.append(struct.pack('<HH', resource_id, index))
+
   # Write data.
-  for id in ids:
-    ret.append(resources[id])
+  ret.extend(deduped_data)

   return ''.join(ret)
...
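As a back-of-envelope check on the commit-message numbers (the average blob size below is an assumption chosen to match the reported total, not a measured value): each deduplicated entry drops its data copy and its 6-byte index entry, and gains a 4-byte alias entry.

    dup_count = 350           # duplicate entries reported for en-US.pak
    index_entry_bytes = 6     # uint16 id + uint32 offset
    alias_entry_bytes = 4     # uint16 id + uint16 index
    avg_dup_blob_bytes = 370  # assumed average payload of a duplicate
    saved = dup_count * (avg_dup_blob_bytes + index_entry_bytes
                         - alias_entry_bytes)
    print 'approx savings: %d bytes' % saved  # 130200, i.e. ~130kb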
@@ -17,8 +17,8 @@ from grit.format import data_pack
 class FormatDataPackUnittest(unittest.TestCase):
-  def testWriteDataPack(self):
-    expected = (
+  def testReadDataPackV4(self):
+    expected_data = (
         '\x04\x00\x00\x00'          # header(version
         '\x04\x00\x00\x00'          #        no. entries,
         '\x01'                      #        encoding)
@@ -28,9 +28,42 @@ class FormatDataPackUnittest(unittest.TestCase):
         '\x0a\x00\x3f\x00\x00\x00'  # index entry 10
         '\x00\x00\x3f\x00\x00\x00'  # extra entry for the size of last
         'this is id 4this is id 6')  # data
-    input = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
-    output = data_pack.WriteDataPackToString(input, data_pack.UTF8)
-    self.failUnless(output == expected)
+    expected_resources = {
+      1: '',
+      4: 'this is id 4',
+      6: 'this is id 6',
+      10: '',
+    }
+    expected_data_pack = data_pack.DataPackContents(
+        expected_resources, data_pack.UTF8)
+    loaded = data_pack.ReadDataPackFromString(expected_data)
+    self.assertEquals(loaded, expected_data_pack)
+
+  def testReadWriteDataPackV5(self):
+    expected_data = (
+        '\x05\x00\x00\x00'          # version
+        '\x01\x00\x00\x00'          # encoding & padding
+        '\x03\x00'                  # resource_count
+        '\x01\x00'                  # alias_count
+        '\x01\x00\x28\x00\x00\x00'  # index entry 1
+        '\x04\x00\x28\x00\x00\x00'  # index entry 4
+        '\x06\x00\x34\x00\x00\x00'  # index entry 6
+        '\x00\x00\x40\x00\x00\x00'  # extra entry for the size of last
+        '\x0a\x00\x01\x00'          # alias table
+        'this is id 4this is id 6')  # data
+    expected_resources = {
+      1: '',
+      4: 'this is id 4',
+      6: 'this is id 6',
+      10: 'this is id 4',
+    }
+    data = data_pack.WriteDataPackToString(expected_resources, data_pack.UTF8)
+    self.assertEquals(data, expected_data)
+
+    expected_data_pack = data_pack.DataPackContents(
+        expected_resources, data_pack.UTF8)
+    loaded = data_pack.ReadDataPackFromString(expected_data)
+    self.assertEquals(loaded, expected_data_pack)

   def testRePackUnittest(self):
     expected_with_whitelist = {
@@ -50,12 +83,14 @@ class FormatDataPackUnittest(unittest.TestCase):
               in inputs]

     # RePack using whitelist
-    output, _ = data_pack.RePackFromDataPackStrings(inputs, whitelist)
+    output, _ = data_pack.RePackFromDataPackStrings(
+        inputs, whitelist, suppress_removed_key_output=True)
     self.assertDictEqual(expected_with_whitelist, output,
                          'Incorrect resource output')

     # RePack a None whitelist
-    output, _ = data_pack.RePackFromDataPackStrings(inputs, None)
+    output, _ = data_pack.RePackFromDataPackStrings(
+        inputs, None, suppress_removed_key_output=True)
     self.assertDictEqual(expected_without_whitelist, output,
                          'Incorrect resource output')
...
@@ -33,6 +33,7 @@ def _ExtractMain(args):

 def _PrintMain(args):
   pak = data_pack.ReadDataPack(args.pak_file)
+  id_map = {id(v): k for k, v in sorted(pak.resources.items(), reverse=True)}
   encoding = 'binary'
   if pak.encoding == 1:
     encoding = 'utf-8'
@@ -57,8 +58,13 @@ def _PrintMain(args):
     except UnicodeDecodeError:
       pass
     sha1 = hashlib.sha1(data).hexdigest()[:10]
-    line = u'Entry(id={}, len={}, sha1={}): {}'.format(
-        resource_id, len(data), sha1, desc)
+    canonical_id = id_map[id(data)]
+    if resource_id == canonical_id:
+      line = u'Entry(id={}, len={}, sha1={}): {}'.format(
+          resource_id, len(data), sha1, desc)
+    else:
+      line = u'Entry(id={}, alias_of={}): {}'.format(
+          resource_id, canonical_id, desc)
     print line.encode('utf-8')
...
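For illustration, the updated _PrintMain would render a canonical entry and its alias along these lines (hypothetical values; the sha1 is a placeholder, and only the two format strings above come from the CL):

    Entry(id=4, len=12, sha1=0123456789): this is id 4
    Entry(id=10, alias_of=4): this is id 4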
@@ -788,6 +788,7 @@ test("ui_base_unittests") {
     "material_design/material_design_controller_unittest.cc",
     "models/tree_node_iterator_unittest.cc",
     "resource/data_pack_literal.cc",
+    "resource/data_pack_literal.h",
     "resource/data_pack_unittest.cc",
     "resource/resource_bundle_unittest.cc",
     "resource/scale_factor_unittest.cc",
...
@@ -24,31 +24,14 @@
 namespace {

-static const uint32_t kFileFormatVersion = 4;
-// Length of file header: version, entry count and text encoding type.
-static const size_t kHeaderLength = 2 * sizeof(uint32_t) + sizeof(uint8_t);
-
-#pragma pack(push, 2)
-struct DataPackEntry {
-  uint16_t resource_id;
-  uint32_t file_offset;
-
-  static int CompareById(const void* void_key, const void* void_entry) {
-    uint16_t key = *reinterpret_cast<const uint16_t*>(void_key);
-    const DataPackEntry* entry =
-        reinterpret_cast<const DataPackEntry*>(void_entry);
-    if (key < entry->resource_id) {
-      return -1;
-    } else if (key > entry->resource_id) {
-      return 1;
-    } else {
-      return 0;
-    }
-  }
-};
-#pragma pack(pop)
-
-static_assert(sizeof(DataPackEntry) == 6, "size of entry must be six");
+static const uint32_t kFileFormatV4 = 4;
+static const uint32_t kFileFormatV5 = 5;
+// int32(version), int32(resource_count), int8(encoding)
+static const size_t kHeaderLengthV4 = 2 * sizeof(uint32_t) + sizeof(uint8_t);
+// int32(version), int8(encoding), 3 bytes padding,
+// int16(resource_count), int16(alias_count)
+static const size_t kHeaderLengthV5 =
+    sizeof(uint32_t) + sizeof(uint8_t) * 4 + sizeof(uint16_t) * 2;

 // We're crashing when trying to load a pak file on Windows. Add some error
 // codes for logging.
@@ -102,6 +85,30 @@ void MaybePrintResourceId(uint16_t resource_id) {

 namespace ui {

+#pragma pack(push, 2)
+struct DataPack::Entry {
+  uint16_t resource_id;
+  uint32_t file_offset;
+
+  static int CompareById(const void* void_key, const void* void_entry) {
+    uint16_t key = *reinterpret_cast<const uint16_t*>(void_key);
+    const Entry* entry = reinterpret_cast<const Entry*>(void_entry);
+    return key - entry->resource_id;
+  }
+};
+
+struct DataPack::Alias {
+  uint16_t resource_id;
+  uint16_t entry_index;
+
+  static int CompareById(const void* void_key, const void* void_entry) {
+    uint16_t key = *reinterpret_cast<const uint16_t*>(void_key);
+    const Alias* entry = reinterpret_cast<const Alias*>(void_entry);
+    return key - entry->resource_id;
+  }
+};
+#pragma pack(pop)
+
 // Abstraction of a data source (memory mapped file or in-memory buffer).
 class DataPack::DataSource {
  public:
@@ -149,9 +156,15 @@ class DataPack::BufferDataSource : public DataPack::DataSource {
 };

 DataPack::DataPack(ui::ScaleFactor scale_factor)
-    : resource_count_(0),
+    : resource_table_(nullptr),
+      resource_count_(0),
+      alias_table_(nullptr),
+      alias_count_(0),
       text_encoding_type_(BINARY),
       scale_factor_(scale_factor) {
+  // Static asserts must be within a DataPack member to appease visibility rules.
+  static_assert(sizeof(Entry) == 6, "size of Entry must be 6");
+  static_assert(sizeof(Alias) == 4, "size of Alias must be 4");
 }

 DataPack::~DataPack() {
@@ -193,29 +206,37 @@ bool DataPack::LoadFromBuffer(base::StringPiece buffer) {
 }

 bool DataPack::LoadImpl(std::unique_ptr<DataPack::DataSource> data_source) {
-  // Sanity check the header of the file.
-  if (kHeaderLength > data_source->GetLength()) {
+  const uint8_t* data = data_source->GetData();
+  size_t data_length = data_source->GetLength();
+  // Parse the version and check for truncated header.
+  uint32_t version = 0;
+  if (data_length > sizeof(version))
+    version = reinterpret_cast<const uint32_t*>(data)[0];
+  size_t header_length =
+      version == kFileFormatV4 ? kHeaderLengthV4 : kHeaderLengthV5;
+  if (version == 0 || data_length < header_length) {
     DLOG(ERROR) << "Data pack file corruption: incomplete file header.";
     LogDataPackError(HEADER_TRUNCATED);
     return false;
   }

   // Parse the header of the file.
-  // First uint32_t: version; second: resource count;
-  const uint32_t* ptr =
-      reinterpret_cast<const uint32_t*>(data_source->GetData());
-  uint32_t version = ptr[0];
-  if (version != kFileFormatVersion) {
+  if (version == kFileFormatV4) {
+    resource_count_ = reinterpret_cast<const uint32_t*>(data)[1];
+    alias_count_ = 0;
+    text_encoding_type_ = static_cast<TextEncodingType>(data[8]);
+  } else if (version == kFileFormatV5) {
+    // Version 5 added the alias table and changed the header format.
+    text_encoding_type_ = static_cast<TextEncodingType>(data[4]);
+    resource_count_ = reinterpret_cast<const uint16_t*>(data)[4];
+    alias_count_ = reinterpret_cast<const uint16_t*>(data)[5];
+  } else {
     LOG(ERROR) << "Bad data pack version: got " << version << ", expected "
-               << kFileFormatVersion;
+               << kFileFormatV4 << " or " << kFileFormatV5;
     LogDataPackError(BAD_VERSION);
     return false;
   }
-  resource_count_ = ptr[1];

-  // third: text encoding.
-  const uint8_t* ptr_encoding = reinterpret_cast<const uint8_t*>(ptr + 2);
-  text_encoding_type_ = static_cast<TextEncodingType>(*ptr_encoding);
   if (text_encoding_type_ != UTF8 && text_encoding_type_ != UTF16 &&
       text_encoding_type_ != BINARY) {
     LOG(ERROR) << "Bad data pack text encoding: got " << text_encoding_type_
@@ -227,35 +248,63 @@ bool DataPack::LoadImpl(std::unique_ptr<DataPack::DataSource> data_source) {
   // Sanity check the file.
   // 1) Check we have enough entries. There's an extra entry after the last item
   // which gives the length of the last item.
-  if (kHeaderLength + (resource_count_ + 1) * sizeof(DataPackEntry) >
-      data_source->GetLength()) {
-    LOG(ERROR) << "Data pack file corruption: too short for number of "
-                  "entries specified.";
+  size_t resource_table_size = (resource_count_ + 1) * sizeof(Entry);
+  size_t alias_table_size = alias_count_ * sizeof(Alias);
+  if (header_length + resource_table_size + alias_table_size > data_length) {
+    LOG(ERROR) << "Data pack file corruption: "
+               << "too short for number of entries.";
     LogDataPackError(INDEX_TRUNCATED);
     return false;
   }
+  resource_table_ = reinterpret_cast<const Entry*>(&data[header_length]);
+  alias_table_ = reinterpret_cast<const Alias*>(
+      &data[header_length + resource_table_size]);
+
   // 2) Verify the entries are within the appropriate bounds. There's an extra
   // entry after the last item which gives us the length of the last item.
   for (size_t i = 0; i < resource_count_ + 1; ++i) {
-    const DataPackEntry* entry = reinterpret_cast<const DataPackEntry*>(
-        data_source->GetData() + kHeaderLength + (i * sizeof(DataPackEntry)));
-    if (entry->file_offset > data_source->GetLength()) {
-      LOG(ERROR) << "Entry #" << i << " in data pack points off end of file. "
-                 << "Was the file corrupted?";
+    if (resource_table_[i].file_offset > data_length) {
+      LOG(ERROR) << "Data pack file corruption: "
+                 << "Entry #" << i << " past end.";
       LogDataPackError(ENTRY_NOT_FOUND);
       return false;
     }
   }

-  data_source_ = std::move(data_source);
+  // 3) Verify the aliases are within the appropriate bounds.
+  for (size_t i = 0; i < alias_count_; ++i) {
+    if (alias_table_[i].entry_index >= resource_count_) {
+      LOG(ERROR) << "Data pack file corruption: "
+                 << "Alias #" << i << " past end.";
+      LogDataPackError(ENTRY_NOT_FOUND);
+      return false;
+    }
+  }

+  data_source_ = std::move(data_source);
   return true;
 }

+const DataPack::Entry* DataPack::LookupEntryById(uint16_t resource_id) const {
+  // Search the resource table first as most resources will be in there.
+  const Entry* ret = reinterpret_cast<const Entry*>(
+      bsearch(&resource_id, resource_table_, resource_count_, sizeof(Entry),
+              Entry::CompareById));
+  if (ret == nullptr) {
+    // Search the alias table for the ~10% of entries which are aliases.
+    const Alias* alias = reinterpret_cast<const Alias*>(
+        bsearch(&resource_id, alias_table_, alias_count_, sizeof(Alias),
+                Alias::CompareById));
+    if (alias != nullptr) {
+      ret = &resource_table_[alias->entry_index];
+    }
+  }
+  return ret;
+}
+
 bool DataPack::HasResource(uint16_t resource_id) const {
-  return !!bsearch(&resource_id, data_source_->GetData() + kHeaderLength,
-                   resource_count_, sizeof(DataPackEntry),
-                   DataPackEntry::CompareById);
+  return !!LookupEntryById(resource_id);
 }

 bool DataPack::GetStringPiece(uint16_t resource_id,
@@ -271,20 +320,19 @@ bool DataPack::GetStringPiece(uint16_t resource_id,
 #error DataPack assumes little endian
 #endif

-  const DataPackEntry* target = reinterpret_cast<const DataPackEntry*>(bsearch(
-      &resource_id, data_source_->GetData() + kHeaderLength, resource_count_,
-      sizeof(DataPackEntry), DataPackEntry::CompareById));
-  if (!target) {
+  const Entry* target = LookupEntryById(resource_id);
+  if (!target)
     return false;
-  }

-  const DataPackEntry* next_entry = target + 1;
+  const Entry* next_entry = target + 1;
   // If the next entry points beyond the end of the file this data pack's entry
   // table is corrupt. Log an error and return false. See
   // http://crbug.com/371301.
-  if (next_entry->file_offset > data_source_->GetLength()) {
-    size_t entry_index = target - reinterpret_cast<const DataPackEntry*>(
-                                      data_source_->GetData() + kHeaderLength);
+  size_t entry_offset =
+      reinterpret_cast<const uint8_t*>(next_entry) - data_source_->GetData();
+  size_t pak_size = data_source_->GetLength();
+  if (entry_offset > pak_size || next_entry->file_offset > pak_size) {
+    size_t entry_index = target - resource_table_;
     LOG(ERROR) << "Entry #" << entry_index << " in data pack points off end "
                << "of file. This should have been caught when loading. Was the "
                << "file modified?";
@@ -320,9 +368,7 @@ ui::ScaleFactor DataPack::GetScaleFactor() const {
 void DataPack::CheckForDuplicateResources(
     const std::vector<std::unique_ptr<ResourceHandle>>& packs) {
   for (size_t i = 0; i < resource_count_ + 1; ++i) {
-    const DataPackEntry* entry = reinterpret_cast<const DataPackEntry*>(
-        data_source_->GetData() + kHeaderLength + (i * sizeof(DataPackEntry)));
-    const uint16_t resource_id = entry->resource_id;
+    const uint16_t resource_id = resource_table_[i].resource_id;
     const float resource_scale = GetScaleForScaleFactor(scale_factor_);
     for (const auto& handle : packs) {
       if (GetScaleForScaleFactor(handle->GetScaleFactor()) != resource_scale)
@@ -339,56 +385,44 @@ void DataPack::CheckForDuplicateResources(
 bool DataPack::WritePack(const base::FilePath& path,
                          const std::map<uint16_t, base::StringPiece>& resources,
                          TextEncodingType textEncodingType) {
-  FILE* file = base::OpenFile(path, "wb");
-  if (!file)
-    return false;
-
-  if (fwrite(&kFileFormatVersion, sizeof(kFileFormatVersion), 1, file) != 1) {
-    LOG(ERROR) << "Failed to write file version";
-    base::CloseFile(file);
-    return false;
-  }
-
-  // Note: the python version of this function explicitly sorted keys, but
-  // std::map is a sorted associative container, we shouldn't have to do that.
-  uint32_t entry_count = resources.size();
-  if (fwrite(&entry_count, sizeof(entry_count), 1, file) != 1) {
-    LOG(ERROR) << "Failed to write entry count";
-    base::CloseFile(file);
-    return false;
-  }
-
   if (textEncodingType != UTF8 && textEncodingType != UTF16 &&
       textEncodingType != BINARY) {
     LOG(ERROR) << "Invalid text encoding type, got " << textEncodingType
                << ", expected between " << BINARY << " and " << UTF16;
-    base::CloseFile(file);
     return false;
   }

-  uint8_t write_buffer = static_cast<uint8_t>(textEncodingType);
-  if (fwrite(&write_buffer, sizeof(uint8_t), 1, file) != 1) {
-    LOG(ERROR) << "Failed to write file text resources encoding";
+  FILE* file = base::OpenFile(path, "wb");
+  if (!file)
+    return false;
+
+  uint32_t encoding = static_cast<uint32_t>(textEncodingType);
+  // Note: the python version of this function explicitly sorted keys, but
+  // std::map is a sorted associative container, we shouldn't have to do that.
+  uint16_t entry_count = resources.size();
+  // Don't bother computing aliases (revisit if it becomes worth it).
+  uint16_t alias_count = 0;
+  if (fwrite(&kFileFormatV5, sizeof(kFileFormatV5), 1, file) != 1 ||
+      fwrite(&encoding, sizeof(uint32_t), 1, file) != 1 ||
+      fwrite(&entry_count, sizeof(entry_count), 1, file) != 1 ||
+      fwrite(&alias_count, sizeof(alias_count), 1, file) != 1) {
+    LOG(ERROR) << "Failed to write header";
     base::CloseFile(file);
     return false;
   }

   // Each entry is a uint16_t + a uint32_t. We have an extra entry after the
   // last item so we can compute the size of the list item.
-  uint32_t index_length = (entry_count + 1) * sizeof(DataPackEntry);
-  uint32_t data_offset = kHeaderLength + index_length;
+  uint32_t index_length = (entry_count + 1) * sizeof(Entry);
+  uint32_t data_offset = kHeaderLengthV5 + index_length;
   for (std::map<uint16_t, base::StringPiece>::const_iterator it =
            resources.begin();
        it != resources.end(); ++it) {
     uint16_t resource_id = it->first;
-    if (fwrite(&resource_id, sizeof(resource_id), 1, file) != 1) {
-      LOG(ERROR) << "Failed to write id for " << resource_id;
-      base::CloseFile(file);
-      return false;
-    }
-    if (fwrite(&data_offset, sizeof(data_offset), 1, file) != 1) {
-      LOG(ERROR) << "Failed to write offset for " << resource_id;
+    if (fwrite(&resource_id, sizeof(resource_id), 1, file) != 1 ||
+        fwrite(&data_offset, sizeof(data_offset), 1, file) != 1) {
+      LOG(ERROR) << "Failed to write entry for " << resource_id;
       base::CloseFile(file);
       return false;
     }
...
@@ -75,6 +75,8 @@ class UI_DATA_PACK_EXPORT DataPack : public ResourceHandle {
 #endif

  private:
+  struct Entry;
+  struct Alias;
   class DataSource;
   class BufferDataSource;
   class MemoryMappedDataSource;
@@ -82,11 +84,14 @@ class UI_DATA_PACK_EXPORT DataPack : public ResourceHandle {
   // Does the actual loading of a pack file.
   // Called by Load and LoadFromFile and LoadFromBuffer.
   bool LoadImpl(std::unique_ptr<DataSource> data_source);
+  const Entry* LookupEntryById(uint16_t resource_id) const;

   std::unique_ptr<DataSource> data_source_;

-  // Number of resources in the data.
+  const Entry* resource_table_;
   size_t resource_count_;
+  const Alias* alias_table_;
+  size_t alias_count_;

   // Type of encoding for text resources.
   TextEncodingType text_encoding_type_;
...
@@ -4,9 +4,11 @@

 #include <stddef.h>

+#include "ui/base/resource/data_pack_literal.h"
+
 namespace ui {

-extern const char kSamplePakContents[] = {
+const char kSamplePakContentsV4[] = {
     0x04, 0x00, 0x00, 0x00,              // header(version
     0x04, 0x00, 0x00, 0x00,              //        no. entries
     0x01,                                //        encoding)
@@ -16,12 +18,25 @@ extern const char kSamplePakContents[] = {
     0x0a, 0x00, 0x3f, 0x00, 0x00, 0x00,  // index entry 10
     0x00, 0x00, 0x3f, 0x00, 0x00, 0x00,  // extra entry for the size of last
     't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4',
-    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6'
-};
+    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6'};

-extern const size_t kSamplePakSize = sizeof(kSamplePakContents);
+const size_t kSamplePakSizeV4 = sizeof(kSamplePakContentsV4);
+
+const char kSamplePakContentsV5[] = {
+    0x05, 0x00, 0x00, 0x00,              // version
+    0x01, 0x00, 0x00, 0x00,              // encoding + padding
+    0x03, 0x00, 0x01, 0x00,              // num_resources, num_aliases
+    0x01, 0x00, 0x28, 0x00, 0x00, 0x00,  // index entry 1
+    0x04, 0x00, 0x28, 0x00, 0x00, 0x00,  // index entry 4
+    0x06, 0x00, 0x34, 0x00, 0x00, 0x00,  // index entry 6
+    0x00, 0x00, 0x40, 0x00, 0x00, 0x00,  // extra entry for the size of last
+    0x0a, 0x00, 0x01, 0x00,              // alias table
+    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4',
+    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6'};
+
+const size_t kSamplePakSizeV5 = sizeof(kSamplePakContentsV5);

-extern const char kSampleCorruptPakContents[] = {
+const char kSampleCorruptPakContents[] = {
     0x04, 0x00, 0x00, 0x00,              // header(version
     0x04, 0x00, 0x00, 0x00,              //        no. entries
     0x01,                                //        encoding)
@@ -31,30 +46,29 @@ extern const char kSampleCorruptPakContents[] = {
     0x0a, 0x00, 0x3f, 0x00, 0x00, 0x00,  // index entry 10
     0x00, 0x00, 0x40, 0x00, 0x00, 0x00,  // extra entry for the size of last,
                                          // extends past END OF FILE.
-    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4',
-    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6'
-};
+    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4', 't', 'h', 'i',
+    's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6'};

-extern const size_t kSampleCorruptPakSize = sizeof(kSampleCorruptPakContents);
+const size_t kSampleCorruptPakSize = sizeof(kSampleCorruptPakContents);

-extern const char kSamplePakContents2x[] = {
+const char kSamplePakContents2x[] = {
     0x04, 0x00, 0x00, 0x00,              // header(version
     0x01, 0x00, 0x00, 0x00,              // no. entries
     0x01,                                // encoding)
     0x04, 0x00, 0x15, 0x00, 0x00, 0x00,  // index entry 4
     0x00, 0x00, 0x24, 0x00, 0x00, 0x00,  // extra entry for the size of last
-    't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4', ' ', '2', 'x'
-};
+    't', 'h', 'i', 's', ' ', 'i', 's', ' ',
+    'i', 'd', ' ', '4', ' ', '2', 'x'};

-extern const size_t kSamplePakSize2x = sizeof(kSamplePakContents2x);
+const size_t kSamplePakSize2x = sizeof(kSamplePakContents2x);

-extern const char kEmptyPakContents[] = {
+const char kEmptyPakContents[] = {
     0x04, 0x00, 0x00, 0x00,              // header(version
     0x00, 0x00, 0x00, 0x00,              // no. entries
     0x01,                                // encoding)
     0x00, 0x00, 0x0f, 0x00, 0x00, 0x00   // extra entry for the size of last
 };

-extern const size_t kEmptyPakSize = sizeof(kEmptyPakContents);
+const size_t kEmptyPakSize = sizeof(kEmptyPakContents);

 }  // namespace ui
...
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef UI_BASE_RESOURCE_DATA_PACK_LITERAL_H_
#define UI_BASE_RESOURCE_DATA_PACK_LITERAL_H_

#include <stddef.h>  // for size_t

namespace ui {

extern const char kSamplePakContentsV4[];
extern const size_t kSamplePakSizeV4;
extern const char kSamplePakContentsV5[];
extern const size_t kSamplePakSizeV5;
extern const char kSamplePakContents2x[];
extern const size_t kSamplePakSize2x;
extern const char kEmptyPakContents[];
extern const size_t kEmptyPakSize;
extern const char kSampleCorruptPakContents[];
extern const size_t kSampleCorruptPakSize;

}  // namespace ui

#endif  // UI_BASE_RESOURCE_DATA_PACK_LITERAL_H_
@@ -17,6 +17,7 @@
 #include "base/strings/string_piece.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include "ui/base/resource/data_pack_literal.h"
 #include "ui/base/ui_base_paths.h"

 namespace ui {
@@ -27,11 +28,6 @@ class DataPackTest
   DataPackTest() {}
 };

-extern const char kSamplePakContents[];
-extern const char kSampleCorruptPakContents[];
-extern const size_t kSamplePakSize;
-extern const size_t kSampleCorruptPakSize;
-
 TEST(DataPackTest, LoadFromPath) {
   base::ScopedTempDir dir;
   ASSERT_TRUE(dir.CreateUniqueTempDir());
@@ -39,8 +35,8 @@ TEST(DataPackTest, LoadFromPath) {
       dir.GetPath().Append(FILE_PATH_LITERAL("sample.pak"));

   // Dump contents into the pak file.
-  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContents, kSamplePakSize),
-            static_cast<int>(kSamplePakSize));
+  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContentsV4, kSamplePakSizeV4),
+            static_cast<int>(kSamplePakSizeV4));

   // Load the file through the data pack API.
   DataPack pack(SCALE_FACTOR_100P);
@@ -72,8 +68,8 @@ TEST(DataPackTest, LoadFromFile) {
       dir.GetPath().Append(FILE_PATH_LITERAL("sample.pak"));

   // Dump contents into the pak file.
-  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContents, kSamplePakSize),
-            static_cast<int>(kSamplePakSize));
+  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContentsV4, kSamplePakSizeV4),
+            static_cast<int>(kSamplePakSizeV4));

   base::File file(data_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
   ASSERT_TRUE(file.IsValid());
@@ -112,15 +108,15 @@ TEST(DataPackTest, LoadFromFileRegion) {
   const char kPadding[5678] = {0};
   ASSERT_EQ(static_cast<int>(sizeof(kPadding)),
             base::WriteFile(data_path, kPadding, sizeof(kPadding)));
-  ASSERT_TRUE(base::AppendToFile(
-      data_path, kSamplePakContents, kSamplePakSize));
+  ASSERT_TRUE(
+      base::AppendToFile(data_path, kSamplePakContentsV4, kSamplePakSizeV4));

   base::File file(data_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
   ASSERT_TRUE(file.IsValid());

   // Load the file through the data pack API.
   DataPack pack(SCALE_FACTOR_100P);
-  base::MemoryMappedFile::Region region = {sizeof(kPadding), kSamplePakSize};
+  base::MemoryMappedFile::Region region = {sizeof(kPadding), kSamplePakSizeV4};
   ASSERT_TRUE(pack.LoadFromFileRegion(std::move(file), region));

   base::StringPiece data;
@@ -142,11 +138,11 @@ TEST(DataPackTest, LoadFromFileRegion) {
   ASSERT_FALSE(pack.GetStringPiece(140, &data));
 }

-TEST(DataPackTest, LoadFromBuffer) {
+TEST(DataPackTest, LoadFromBufferV4) {
   DataPack pack(SCALE_FACTOR_100P);
   ASSERT_TRUE(pack.LoadFromBuffer(
-      base::StringPiece(kSamplePakContents, kSamplePakSize)));
+      base::StringPiece(kSamplePakContentsV4, kSamplePakSizeV4)));

   base::StringPiece data;
   ASSERT_TRUE(pack.HasResource(4));
@@ -167,6 +163,31 @@ TEST(DataPackTest, LoadFromBuffer) {
   ASSERT_FALSE(pack.GetStringPiece(140, &data));
 }

+TEST(DataPackTest, LoadFromBufferV5) {
+  DataPack pack(SCALE_FACTOR_100P);
+  ASSERT_TRUE(pack.LoadFromBuffer(
+      base::StringPiece(kSamplePakContentsV5, kSamplePakSizeV5)));
+
+  base::StringPiece data;
+  ASSERT_TRUE(pack.HasResource(4));
+  ASSERT_TRUE(pack.GetStringPiece(4, &data));
+  EXPECT_EQ("this is id 4", data);
+  ASSERT_TRUE(pack.HasResource(6));
+  ASSERT_TRUE(pack.GetStringPiece(6, &data));
+  EXPECT_EQ("this is id 6", data);
+
+  // Try reading zero-length data blobs, just in case.
+  ASSERT_TRUE(pack.GetStringPiece(1, &data));
+  EXPECT_EQ(0U, data.length());
+  ASSERT_TRUE(pack.GetStringPiece(10, &data));
+  EXPECT_EQ("this is id 4", data);
+
+  // Try looking up an invalid key.
+  ASSERT_FALSE(pack.HasResource(140));
+  ASSERT_FALSE(pack.GetStringPiece(140, &data));
+}
+
 INSTANTIATE_TEST_CASE_P(WriteBINARY, DataPackTest, ::testing::Values(
     DataPack::BINARY));
 INSTANTIATE_TEST_CASE_P(WriteUTF8, DataPackTest, ::testing::Values(
@@ -228,8 +249,8 @@ TEST(DataPackTest, ModifiedWhileUsed) {
       dir.GetPath().Append(FILE_PATH_LITERAL("sample.pak"));

   // Dump contents into the pak file.
-  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContents, kSamplePakSize),
-            static_cast<int>(kSamplePakSize));
+  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContentsV4, kSamplePakSizeV4),
+            static_cast<int>(kSamplePakSizeV4));

   base::File file(data_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
   ASSERT_TRUE(file.IsValid());
...
@@ -23,6 +23,7 @@
 #include "third_party/skia/include/core/SkBitmap.h"
 #include "ui/base/layout.h"
 #include "ui/base/resource/data_pack.h"
+#include "ui/base/resource/data_pack_literal.h"
 #include "ui/gfx/codec/png_codec.h"
 #include "ui/gfx/font_list.h"
 #include "ui/gfx/image/image_skia.h"
@@ -38,14 +39,6 @@ using ::testing::Return;
 using ::testing::ReturnArg;

 namespace ui {

-extern const char kSamplePakContents[];
-extern const size_t kSamplePakSize;
-extern const char kSamplePakContents2x[];
-extern const size_t kSamplePakSize2x;
-extern const char kEmptyPakContents[];
-extern const size_t kEmptyPakSize;
-
 namespace {

 const unsigned char kPngMagic[8] = { 0x89, 'P', 'N', 'G', 13, 10, 26, 10 };
@@ -416,8 +409,8 @@ TEST_F(ResourceBundleImageTest, GetRawDataResource) {
       dir_path().Append(FILE_PATH_LITERAL("sample_2x.pak"));

   // Dump contents into the pak files.
-  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContents,
-      kSamplePakSize), static_cast<int>(kSamplePakSize));
+  ASSERT_EQ(base::WriteFile(data_path, kSamplePakContentsV4, kSamplePakSizeV4),
+            static_cast<int>(kSamplePakSizeV4));
   ASSERT_EQ(base::WriteFile(data_2x_path, kSamplePakContents2x,
       kSamplePakSize2x), static_cast<int>(kSamplePakSize2x));
...