Commit f48b1f3e authored by Albert J. Wong's avatar Albert J. Wong Committed by Commit Bot

PA: PartitionRootGeneric funcs -> methods

This is a mechanical refactor moving all free functions that take
PartitionRootGeneric as the first parameter into the PartitionRootGeneric struct.

See related bug for more details on methodology.

Bug: 787153
Change-Id: Ia95fc30ed030397303dc1999f4fdac8a72c01c9b
Reviewed-on: https://chromium-review.googlesource.com/780759
Commit-Queue: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#518438}
parent c4fd39b5
......@@ -52,15 +52,15 @@ bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
a SuperPage meaning it would not be possible to pack even 2 sequential
allocations in a SuperPage.
`PartitionAllocGeneric` acquires a lock for thread safety. (The current
`PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
implementation uses a spin lock on the assumption that thread contention will be
rare in its callers. The original caller was Blink, where this is generally
true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a
`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
`PartitionAllocGeneric`). Callers can also arrange for low contention, such as
by using a dedicated partition for single-threaded, latency-critical
`PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
such as by using a dedicated partition for single-threaded, latency-critical
allocations.
Because PartitionAlloc guarantees that address space regions used for one
......
......@@ -190,10 +190,10 @@ void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
}
}
void PartitionAllocGenericInit(PartitionRootGeneric* root) {
subtle::SpinLock::Guard guard(root->lock);
void PartitionRootGeneric::Init() {
subtle::SpinLock::Guard guard(this->lock);
PartitionAllocBaseInit(root);
PartitionAllocBaseInit(this);
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
......@@ -209,7 +209,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
order_index_shift = 0;
else
order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
root->order_index_shifts[order] = order_index_shift;
this->order_index_shifts[order] = order_index_shift;
size_t sub_order_index_mask;
if (order == kBitsPerSizeT) {
// This avoids invoking undefined behavior for an excessive shift.
......@@ -219,7 +219,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
(kGenericNumBucketsPerOrderBits + 1);
}
root->order_sub_index_masks[order] = sub_order_index_mask;
this->order_sub_index_masks[order] = sub_order_index_mask;
}
// Set up the actual usable buckets first.
......@@ -232,11 +232,11 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
size_t current_size = kGenericSmallestBucket;
size_t currentIncrement =
kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
PartitionBucket* bucket = &root->buckets[0];
PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
bucket->slot_size = current_size;
PartitionBucketInitBase(bucket, root);
PartitionBucketInitBase(bucket, this);
// Disable pseudo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
bucket->active_pages_head = nullptr;
......@@ -246,16 +246,16 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
currentIncrement <<= 1;
}
DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table.
bucket = &root->buckets[0];
PartitionBucket** bucketPtr = &root->bucket_lookups[0];
bucket = &this->buckets[0];
PartitionBucket** bucketPtr = &this->bucket_lookups[0];
for (order = 0; order <= kBitsPerSizeT; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) {
// Use the bucket of the finest granularity for malloc(0) etc.
*bucketPtr++ = &root->buckets[0];
*bucketPtr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
*bucketPtr++ = &g_sentinel_bucket;
} else {
......@@ -268,9 +268,8 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
}
}
}
DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
DCHECK(bucketPtr ==
&root->bucket_lookups[0] +
DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
DCHECK(bucketPtr == &this->bucket_lookups[0] +
((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
......@@ -810,9 +809,9 @@ void* PartitionAllocSlowPath(PartitionRootBase* root,
PartitionPage* new_page = nullptr;
// For the PartitionAllocGeneric API, we have a bunch of buckets marked
// as special cases. We bounce them through to the slow path so that we
// can still have a blazing fast hot path due to lack of corner-case
// For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
// marked as special cases. We bounce them through to the slow path so that
// we can still have a blazing fast hot path due to lack of corner-case
// branches.
//
// Note: The ordering of the conditionals matter! In particular,
......@@ -1081,17 +1080,16 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
return true;
}
void* PartitionReallocGeneric(PartitionRootGeneric* root,
void* ptr,
void* PartitionRootGeneric::Realloc(void* ptr,
size_t new_size,
const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return realloc(ptr, new_size);
#else
if (UNLIKELY(!ptr))
return PartitionAllocGeneric(root, new_size, type_name);
return this->Alloc(new_size, type_name);
if (UNLIKELY(!new_size)) {
PartitionFreeGeneric(root, ptr);
this->Free(ptr);
return nullptr;
}
......@@ -1107,13 +1105,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
if (PartitionReallocDirectMappedInPlace(this, page, new_size)) {
PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
return ptr;
}
}
size_t actual_new_size = PartitionAllocActualSize(root, new_size);
size_t actual_new_size = this->ActualSize(new_size);
size_t actual_old_size = PartitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
......@@ -1134,13 +1132,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
}
// This realloc cannot be resized in-place. Sadness.
void* ret = PartitionAllocGeneric(root, new_size, type_name);
void* ret = this->Alloc(new_size, type_name);
size_t copy_size = actual_old_size;
if (new_size < copy_size)
copy_size = new_size;
memcpy(ret, ptr, copy_size);
PartitionFreeGeneric(root, ptr);
this->Free(ptr);
return ret;
#endif
}
......@@ -1301,13 +1299,13 @@ void PartitionRoot::PurgeMemory(int flags) {
// at the moment.
}
void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
subtle::SpinLock::Guard guard(root->lock);
void PartitionRootGeneric::PurgeMemory(int flags) {
subtle::SpinLock::Guard guard(this->lock);
if (flags & PartitionPurgeDecommitEmptyPages)
PartitionDecommitEmptyPages(root);
PartitionDecommitEmptyPages(this);
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
PartitionBucket* bucket = &root->buckets[i];
PartitionBucket* bucket = &this->buckets[i];
if (bucket->slot_size >= kSystemPageSize)
PartitionPurgeBucket(bucket);
}
......@@ -1393,14 +1391,13 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
}
}
void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
const char* partition_name,
void PartitionRootGeneric::DumpStats(const char* partition_name,
bool is_light_dump,
PartitionStatsDumper* dumper) {
PartitionMemoryStats stats = {0};
stats.total_mmapped_bytes = partition->total_size_of_super_pages +
partition->total_size_of_direct_mapped_pages;
stats.total_committed_bytes = partition->total_size_of_committed_pages;
stats.total_mmapped_bytes =
this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
stats.total_committed_bytes = this->total_size_of_committed_pages;
size_t direct_mapped_allocations_total_size = 0;
......@@ -1417,13 +1414,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
size_t num_direct_mapped_allocations = 0;
{
subtle::SpinLock::Guard guard(partition->lock);
subtle::SpinLock::Guard guard(this->lock);
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
const PartitionBucket* bucket = &partition->buckets[i];
const PartitionBucket* bucket = &this->buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
// PartitionAllocGenericInit for details).
// PartitionRootGeneric::Init() for details).
if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false;
else
......@@ -1436,7 +1433,7 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
}
}
for (PartitionDirectMapExtent *extent = partition->direct_map_list;
for (PartitionDirectMapExtent *extent = this->direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
DCHECK(!extent->next_extent ||
......@@ -1451,8 +1448,8 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can
// try to allocate using |PartitionAllocGeneric| and it can't obtain the
// lock.
// try to allocate using |PartitionRootGeneric::Alloc()| and it can't
// obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
......@@ -1513,7 +1510,8 @@ void PartitionRoot::DumpStats(const char* partition_name,
}
if (!is_light_dump) {
// PartitionsDumpBucketStats is called after collecting stats because it
// can use PartitionAlloc to allocate and this can affect the statistics.
// can use PartitionRoot::Alloc() to allocate and this can affect the
// statistics.
for (size_t i = 0; i < partitionNumBuckets; ++i) {
if (memory_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
......
......@@ -6,8 +6,9 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION
// PartitionRoot::Alloc() / PartitionAllocGeneric() and PartitionFree() /
// PartitionFreeGeneric() are approximately analogous to malloc() and free().
// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
// PartitionRootGeneric::Free() are approximately analogous to malloc() and
// free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
......@@ -23,14 +24,14 @@
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
// The partitionAlloc() variant of the API has the following caveats:
// The PartitionRoot::Alloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to PartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
// And for PartitionAllocGeneric():
// And for PartitionRootGeneric::Alloc():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons).
......@@ -94,8 +95,8 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up
// against the end of a system page.
// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
// up against the end of a system page.
static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
......@@ -382,6 +383,22 @@ struct BASE_EXPORT PartitionRootGeneric : public PartitionRootBase {
bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
{};
PartitionBucket buckets[kGenericNumBuckets] = {};
// Public API.
void Init();
ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
ALWAYS_INLINE void Free(void* ptr);
NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
ALWAYS_INLINE size_t ActualSize(size_t size);
void PurgeMemory(int flags);
void DumpStats(const char* partition_name,
bool is_light_dump,
PartitionStatsDumper* partition_stats_dumper);
};
// Flags for PartitionAllocGenericFlags.
......@@ -436,24 +453,12 @@ class BASE_EXPORT PartitionStatsDumper {
};
BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
int,
size_t,
PartitionBucket*);
BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*,
void*,
size_t,
const char* type_name);
BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
const char* partition_name,
bool is_light_dump,
PartitionStatsDumper*);
class BASE_EXPORT PartitionAllocHooks {
public:
......@@ -836,17 +841,16 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
#endif
}
ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root,
size_t size,
ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
const char* type_name) {
return PartitionAllocGenericFlags(root, 0, size, type_name);
return PartitionAllocGenericFlags(this, 0, size, type_name);
}
ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
DCHECK(root->initialized);
DCHECK(this->initialized);
if (UNLIKELY(!ptr))
return;
......@@ -857,7 +861,7 @@ ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(PartitionPagePointerIsValid(page));
{
subtle::SpinLock::Guard guard(root->lock);
subtle::SpinLock::Guard guard(this->lock);
PartitionFreeWithPage(ptr, page);
}
#endif
......@@ -871,14 +875,13 @@ ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root,
size_t size) {
ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
DCHECK(root->initialized);
DCHECK(this->initialized);
size = PartitionCookieSizeAdjustAdd(size);
PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
......@@ -933,7 +936,7 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionAllocatorGeneric();
~PartitionAllocatorGeneric();
void init() { PartitionAllocGenericInit(&partition_root_); }
void init() { partition_root_.Init(); }
ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
private:
......
......@@ -198,8 +198,7 @@ class MemlogBrowserTest : public InProcessBrowserTest,
}
for (int i = 0; i < kPartitionAllocCount; ++i) {
leaks_.push_back(static_cast<char*>(
PartitionAllocGeneric(partition_allocator_.root(),
leaks_.push_back(static_cast<char*>(partition_allocator_.root()->Alloc(
kPartitionAllocSize, kPartitionAllocTypeName)));
}
......
......@@ -322,9 +322,8 @@ void ProfilingTestDriver::MakeTestAllocations() {
}
for (int i = 0; i < kPartitionAllocCount; ++i) {
leaks_.push_back(static_cast<char*>(
PartitionAllocGeneric(partition_allocator_.root(), kPartitionAllocSize,
kPartitionAllocTypeName)));
leaks_.push_back(static_cast<char*>(partition_allocator_.root()->Alloc(
kPartitionAllocSize, kPartitionAllocTypeName)));
}
for (int i = 0; i < kVariadicAllocCount; ++i) {
......
......@@ -35,8 +35,7 @@ class WTF_EXPORT PartitionAllocator {
template <typename T>
static size_t QuantizedSize(size_t count) {
CHECK_LE(count, MaxElementCountInBackingStore<T>());
return PartitionAllocActualSize(WTF::Partitions::BufferPartition(),
count * sizeof(T));
return WTF::Partitions::BufferPartition()->ActualSize(count * sizeof(T));
}
template <typename T>
static T* AllocateVectorBacking(size_t size) {
......
......@@ -90,14 +90,13 @@ void Partitions::DecommitFreeableMemory() {
if (!initialized_)
return;
base::PartitionPurgeMemoryGeneric(
ArrayBufferPartition(), base::PartitionPurgeDecommitEmptyPages |
ArrayBufferPartition()->PurgeMemory(
base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
base::PartitionPurgeMemoryGeneric(
BufferPartition(), base::PartitionPurgeDecommitEmptyPages |
BufferPartition()->PurgeMemory(base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
base::PartitionPurgeMemoryGeneric(
FastMallocPartition(), base::PartitionPurgeDecommitEmptyPages |
FastMallocPartition()->PurgeMemory(
base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
LayoutPartition()->PurgeMemory(base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
......@@ -127,12 +126,11 @@ void Partitions::DumpMemoryStats(
DCHECK(IsMainThread());
DecommitFreeableMemory();
PartitionDumpStatsGeneric(FastMallocPartition(), "fast_malloc", is_light_dump,
FastMallocPartition()->DumpStats("fast_malloc", is_light_dump,
partition_stats_dumper);
PartitionDumpStatsGeneric(ArrayBufferPartition(), "array_buffer",
is_light_dump, partition_stats_dumper);
PartitionDumpStatsGeneric(BufferPartition(), "buffer", is_light_dump,
ArrayBufferPartition()->DumpStats("array_buffer", is_light_dump,
partition_stats_dumper);
BufferPartition()->DumpStats("buffer", is_light_dump, partition_stats_dumper);
LayoutPartition()->DumpStats("layout", is_light_dump, partition_stats_dumper);
}
......
......@@ -106,22 +106,19 @@ class WTF_EXPORT Partitions {
static void DumpMemoryStats(bool is_light_dump, base::PartitionStatsDumper*);
ALWAYS_INLINE static void* BufferMalloc(size_t n, const char* type_name) {
return base::PartitionAllocGeneric(BufferPartition(), n, type_name);
return BufferPartition()->Alloc(n, type_name);
}
ALWAYS_INLINE static void* BufferRealloc(void* p,
size_t n,
const char* type_name) {
return base::PartitionReallocGeneric(BufferPartition(), p, n, type_name);
}
ALWAYS_INLINE static void BufferFree(void* p) {
base::PartitionFreeGeneric(BufferPartition(), p);
return BufferPartition()->Realloc(p, n, type_name);
}
ALWAYS_INLINE static void BufferFree(void* p) { BufferPartition()->Free(p); }
ALWAYS_INLINE static size_t BufferActualSize(size_t n) {
return base::PartitionAllocActualSize(BufferPartition(), n);
return BufferPartition()->ActualSize(n);
}
static void* FastMalloc(size_t n, const char* type_name) {
return base::PartitionAllocGeneric(Partitions::FastMallocPartition(), n,
type_name);
return Partitions::FastMallocPartition()->Alloc(n, type_name);
}
static void* FastZeroedMalloc(size_t n, const char* type_name) {
void* result = FastMalloc(n, type_name);
......@@ -129,12 +126,9 @@ class WTF_EXPORT Partitions {
return result;
}
static void* FastRealloc(void* p, size_t n, const char* type_name) {
return base::PartitionReallocGeneric(Partitions::FastMallocPartition(), p,
n, type_name);
}
static void FastFree(void* p) {
base::PartitionFreeGeneric(Partitions::FastMallocPartition(), p);
return Partitions::FastMallocPartition()->Realloc(p, n, type_name);
}
static void FastFree(void* p) { Partitions::FastMallocPartition()->Free(p); }
static void HandleOutOfMemory();
......
......@@ -155,7 +155,7 @@ void* ArrayBufferContents::ReserveMemory(size_t size) {
}
void ArrayBufferContents::FreeMemory(void* data) {
base::PartitionFreeGeneric(Partitions::ArrayBufferPartition(), data);
Partitions::ArrayBufferPartition()->Free(data);
}
void ArrayBufferContents::ReleaseReservedMemory(void* data, size_t size) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment