Commit daa6b810 authored by Samuel Huang, committed by Commit Bot

[SuperSize] Simple style fixes and reformat for SuperSize-archive.

This CL gets some simple fixes out of the way, found while prototyping
support for multiple input files.

Bug: 900259, 1040645
Change-Id: Ifc7ba4395108b356d754e13d468af7e6ca8f4cdd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2129785
Commit-Queue: Samuel Huang <huangs@chromium.org>
Reviewed-by: Andrew Grieve <agrieve@chromium.org>
Cr-Commit-Position: refs/heads/master@{#754961}
parent fd8ec60a
......@@ -486,7 +486,10 @@ def _CreateMergeStringsReplacements(merge_string_syms,
for offset, size in positions:
address = merge_sym_address + offset
symbol = models.Symbol(
models.SECTION_RODATA, size, address, STRING_LITERAL_NAME,
models.SECTION_RODATA,
size,
address=address,
full_name=STRING_LITERAL_NAME,
object_path=object_path)
new_symbols.append(symbol)
......
......@@ -5,8 +5,8 @@
"""Deals with loading & saving .size and .sizediff files.
The .size file is written in the following format. There are no section
delimeters, instead the end of a section is usually determined by a row count
on the first line of a section, followed by that amount of rows. In other
delimiters, instead the end of a section is usually determined by a row count
on the first line of a section, followed by that number of rows. In other
cases, the sections have a known size.
Header
......@@ -14,8 +14,8 @@ Header
4 lines long.
Line 0 of the file is a header comment.
Line 1 is the serialization version of the file.
Line 2 is the number of characters in the metadata string.
Line 3 is the metadata string, a stringified JSON object.
Line 2 is the number of characters in the header fields string.
Line 3 is the header fields string, a stringified JSON object.
Path list
---------
......@@ -28,7 +28,7 @@ Component list
A list of components. The first line is the size of the list,
and the next N lines that follow are items in the list. Each item is a unique
COMPONENT which is referenced later.
This section is only present if 'has_components' is True in the metadata.
This section is only present if 'has_components' is True in header fields.
Symbol counts
-------------
......@@ -54,7 +54,7 @@ The number of bytes this symbol takes up.
Padding
~~~~~~~
The number of padding bytes this symbol has.
This section is only present if 'has_padding' is True in the metadata.
This section is only present if 'has_padding' is True in header fields.
Path indices
~~~~~~~~~~~~~
......@@ -64,7 +64,7 @@ Component indices
~~~~~~~~~~~~~~~~~~
Indices that reference components in the prior Component list section.
Delta-encoded.
This section is only present if 'has_components' is True in the metadata.
This section is only present if 'has_components' is True in header fields.
Symbols
-------
......@@ -85,14 +85,14 @@ Header
------
3 lines long.
Line 0 of the file is a header comment.
Line 1 is the number of characters in the metadata string.
Line 2 is the metadata string, a stringified JSON object. This currently
Line 1 is the number of characters in the header fields string.
Line 2 is the header fields string, a stringified JSON object. This currently
contains two fields, 'before_length' (the length in bytes of the 'before'
section) and 'version', which is always 1.
Before
------
The next |metadata.before_length| bytes are a valid gzipped sparse .size file
The next |header.before_length| bytes are a valid gzipped sparse .size file
containing the "before" snapshot.
After
......@@ -251,16 +251,16 @@ def _SaveSizeInfoToFile(size_info,
# Created by supersize header
w.WriteLine('# Created by //tools/binary_size')
w.WriteLine(_SERIALIZATION_VERSION)
# JSON metadata
headers = {
# JSON header fields
fields = {
'metadata': size_info.metadata,
'section_sizes': size_info.section_sizes,
'has_components': True,
'has_padding': include_padding,
}
metadata_str = json.dumps(headers, indent=2, sort_keys=True)
w.WriteLine(str(len(metadata_str)))
w.WriteLine(metadata_str)
fields_str = json.dumps(fields, indent=2, sort_keys=True)
w.WriteLine(str(len(fields_str)))
w.WriteLine(fields_str)
w.LogSize('header') # For libchrome: 570 bytes.
# Store a single copy of all paths and have them referenced by index.
......@@ -375,15 +375,15 @@ def _LoadSizeInfoFromFile(file_obj, size_path):
actual_version = _ReadLine(lines)
assert actual_version == _SERIALIZATION_VERSION, (
'Version mismatch. Need to write some upgrade code.')
# JSON metadata
# JSON header fields
json_len = int(_ReadLine(lines))
json_str = lines.read(json_len)
headers = json.loads(json_str)
section_sizes = headers['section_sizes']
metadata = headers.get('metadata')
has_components = headers.get('has_components', False)
has_padding = headers.get('has_padding', False)
fields = json.loads(json_str)
section_sizes = fields['section_sizes']
metadata = fields.get('metadata')
has_components = fields.get('has_components', False)
has_padding = fields.get('has_padding', False)
# Eat empty line.
_ReadLine(lines)
......@@ -591,14 +591,14 @@ def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
# WriteString() instead of WriteLine().
w.WriteString(_SIZEDIFF_HEADER)
# JSON metadata
headers = {
# JSON header fields
fields = {
'version': 1,
'before_length': before_size_file.tell(),
}
metadata_str = json.dumps(headers, indent=2, sort_keys=True)
w.WriteLine(str(len(metadata_str)))
w.WriteLine(metadata_str)
fields_str = json.dumps(fields, indent=2, sort_keys=True)
w.WriteLine(str(len(fields_str)))
w.WriteLine(fields_str)
before_size_file.seek(0)
shutil.copyfileobj(before_size_file, output_file)
......
......@@ -143,8 +143,11 @@ class MapFileParserGold(object):
if not parts:
break
name, size_str, path = parts
sym = models.Symbol(models.SECTION_BSS, int(size_str[2:], 16),
full_name=name, object_path=path)
sym = models.Symbol(
models.SECTION_BSS,
int(size_str[2:], 16),
full_name=name,
object_path=path)
ret.append(sym)
return ret
......
......@@ -398,9 +398,17 @@ class Symbol(BaseSymbol):
'component',
)
def __init__(self, section_name, size_without_padding, address=None,
full_name=None, template_name=None, name=None, source_path=None,
object_path=None, flags=0, aliases=None):
def __init__(self,
section_name,
size_without_padding,
address=None,
full_name=None,
template_name=None,
name=None,
source_path=None,
object_path=None,
flags=0,
aliases=None):
self.section_name = section_name
self.address = address or 0
self.full_name = full_name or ''
......@@ -610,8 +618,13 @@ class SymbolGroup(BaseSymbol):
)
# template_name and full_name are useful when clustering symbol clones.
def __init__(self, symbols, filtered_symbols=None, full_name=None,
template_name=None, name='', section_name=None,
def __init__(self,
symbols,
filtered_symbols=None,
full_name=None,
template_name=None,
name='',
section_name=None,
is_default_sorted=False):
self._padding = None
self._size = None
......@@ -750,17 +763,26 @@ class SymbolGroup(BaseSymbol):
def CountUniqueSymbols(self):
return sum(1 for s in self.IterUniqueSymbols())
def _CreateTransformed(self, symbols, filtered_symbols=None, full_name=None,
template_name=None, name=None, section_name=None,
def _CreateTransformed(self,
symbols,
filtered_symbols=None,
full_name=None,
template_name=None,
name=None,
section_name=None,
is_default_sorted=None):
if is_default_sorted is None:
is_default_sorted = self.is_default_sorted
if section_name is None:
section_name = self.section_name
return self.__class__(symbols, filtered_symbols=filtered_symbols,
full_name=full_name, template_name=template_name,
name=name, section_name=section_name,
is_default_sorted=is_default_sorted)
return self.__class__(
symbols,
filtered_symbols=filtered_symbols,
full_name=full_name,
template_name=template_name,
name=name,
section_name=section_name,
is_default_sorted=is_default_sorted)
def Sorted(self, cmp_func=None, key=None, reverse=False):
"""Sorts by abs(PSS)."""
......@@ -916,7 +938,8 @@ class SymbolGroup(BaseSymbol):
symbols.WherePathMatches(r'third_party').WhereMatches('foo').Inverted()
"""
return self._CreateTransformed(
self._filtered_symbols, filtered_symbols=self._symbols,
self._filtered_symbols,
filtered_symbols=self._symbols,
section_name=SECTION_MULTIPLE)
def GroupedBy(self, func, min_count=0, group_factory=None):
......@@ -1020,11 +1043,17 @@ class SymbolGroup(BaseSymbol):
sym = symbols[0]
if token[1].startswith('*'):
return self._CreateTransformed(
symbols, full_name=full_name, template_name=full_name,
name=full_name, section_name=sym.section_name)
symbols,
full_name=full_name,
template_name=full_name,
name=full_name,
section_name=sym.section_name)
return self._CreateTransformed(
symbols, full_name=full_name, template_name=sym.template_name,
name=sym.name, section_name=sym.section_name)
symbols,
full_name=full_name,
template_name=sym.template_name,
name=sym.name,
section_name=sym.section_name)
# A full second faster to cluster per-section. Plus, don't need create
# (section_name, name) tuples in cluster_func.
......@@ -1052,8 +1081,11 @@ class SymbolGroup(BaseSymbol):
def group_factory(_, symbols):
sym = symbols[0]
return self._CreateTransformed(
symbols, full_name=sym.full_name, template_name=sym.template_name,
name=sym.name, section_name=sym.section_name)
symbols,
full_name=sym.full_name,
template_name=sym.template_name,
name=sym.name,
section_name=sym.section_name)
return self.GroupedBy(
lambda s: (same_name_only and s.full_name, id(s.aliases or s)),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment