Commit e5bf6002 authored by Ulan Degenbaev, committed by Commit Bot

Move unmapping of large pages out of the spin-lock in PartitionAlloc

Instead of unmapping directly, Free() returns DeferredUnmap that needs
to be run after releasing the lock.

Bug: 1067006
Change-Id: I2638d370a5b36867d73adcbaf8988c4259ff05a3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2132155
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#755954}
parent 9a14dcad
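
For illustration, here is a minimal, self-contained sketch of the pattern this change introduces: defer the expensive unmap until after the lock is released. The names below (FreeUnderLock, g_lock, the simplified DeferredUnmap) are stand-ins for exposition only, not the real PartitionAlloc API.

#include <cstddef>
#include <cstdio>
#include <mutex>

struct DeferredUnmap {
  void* ptr = nullptr;
  size_t size = 0;
  // Cheap in the common case (ptr == nullptr); only direct-mapped (large)
  // allocations carry a real region to release.
  void Run() {
    if (ptr)
      std::printf("unmapping %zu bytes outside the lock\n", size);
  }
};

std::mutex g_lock;

// Stand-in for PartitionPage::Free(): do the bookkeeping under the lock, but
// return the region to unmap instead of unmapping it while the lock is held.
DeferredUnmap FreeUnderLock(void* ptr, size_t size, bool is_direct_mapped) {
  DeferredUnmap deferred;
  if (is_direct_mapped) {
    deferred.ptr = ptr;
    deferred.size = size;
  }
  return deferred;
}

int main() {
  static char region[1 << 20];
  DeferredUnmap deferred;
  {
    std::lock_guard<std::mutex> guard(g_lock);
    deferred = FreeUnderLock(region, sizeof(region), /*is_direct_mapped=*/true);
  }  // Lock released here.
  deferred.Run();  // The expensive unmap runs without holding the lock.
  return 0;
}

In the actual change (diff below), DeferredUnmap::Run() is ALWAYS_INLINE and only the rare FreePages() call lives in a NOINLINE Unmap(), so the common nothing-to-unmap case costs just a null check.
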
@@ -366,7 +366,8 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(internal::PartitionRootBase::IsValidPage(page));
-  page->Free(ptr);
+  internal::DeferredUnmap deferred_unmap = page->Free(ptr);
+  deferred_unmap.Run();
 #endif
 }
@@ -460,10 +461,12 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(IsValidPage(page));
+  internal::DeferredUnmap deferred_unmap;
   {
     subtle::SpinLock::Guard guard(lock);
-    page->Free(ptr);
+    deferred_unmap = page->Free(ptr);
   }
+  deferred_unmap.Run();
 #endif
 }
@@ -13,7 +13,7 @@ namespace internal {
 namespace {
 
-ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) {
   PartitionRootBase* root = PartitionRootBase::FromPage(page);
   const PartitionDirectMapExtent* extent =
       PartitionDirectMapExtent::FromPage(page);
@@ -46,8 +46,7 @@ ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
   // Account for the mapping starting a partition page before the actual
   // allocation address.
   ptr -= kPartitionPageSize;
-  FreePages(ptr, unmap_size);
+  return {ptr, unmap_size};
 }
 
 ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
@@ -90,13 +89,12 @@ PartitionPage* PartitionPage::get_sentinel_page() {
   return &sentinel_page_;
 }
 
-void PartitionPage::FreeSlowPath() {
+DeferredUnmap PartitionPage::FreeSlowPath() {
   DCHECK(this != get_sentinel_page());
   if (LIKELY(num_allocated_slots == 0)) {
     // Page became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
-      PartitionDirectUnmap(this);
-      return;
+      return PartitionDirectUnmap(this);
     }
     // If it's the current active page, change it. We bounce the page to
     // the empty list as a force towards defragmentation.
@@ -130,8 +128,9 @@ void PartitionPage::FreeSlowPath() {
     // Special case: for a partition page with just a single slot, it may
     // now be empty and we want to run it through the empty logic.
     if (UNLIKELY(num_allocated_slots == 0))
-      FreeSlowPath();
+      return FreeSlowPath();
   }
+  return {};
 }
 
 void PartitionPage::Decommit(PartitionRootBase* root) {
@@ -160,5 +159,9 @@ void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
     Decommit(root);
 }
 
+void DeferredUnmap::Unmap() {
+  FreePages(ptr, size);
+}
+
 }  // namespace internal
 }  // namespace base
@@ -19,6 +19,20 @@ namespace internal {
 struct PartitionRootBase;
 
+// PartitionPage::Free() defers unmapping a large page until the lock is
+// released. Callers of PartitionPage::Free() must invoke Run().
+// TODO(1061437): Reconsider once the new locking mechanism is implemented.
+struct DeferredUnmap {
+  void* ptr = nullptr;
+  size_t size = 0;
+  // In most cases there is no page to unmap and ptr == nullptr. This function
+  // is inlined to avoid the overhead of a function call in the common case.
+  ALWAYS_INLINE void Run();
+
+ private:
+  BASE_EXPORT NOINLINE void Unmap();
+};
+
 // Some notes on page states. A page can be in one of four major states:
 // 1) Active.
 // 2) Full.
@@ -62,8 +76,9 @@ struct PartitionPage {
   // Public API
 
   // Note the matching Alloc() functions are in PartitionPage.
-  BASE_EXPORT NOINLINE void FreeSlowPath();
-  ALWAYS_INLINE void Free(void* ptr);
+  // Callers must invoke DeferredUnmap::Run() after releasing the lock.
+  BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
+  ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
   void Decommit(PartitionRootBase* root);
   void DecommitIfPossible(PartitionRootBase* root);
@@ -201,7 +216,7 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
   return 0;
 }
 
-ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) {
 #if DCHECK_IS_ON()
   size_t slot_size = bucket->slot_size;
   const size_t raw_size = get_raw_size();
@@ -229,12 +244,13 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
   freelist_head = entry;
   --num_allocated_slots;
   if (UNLIKELY(num_allocated_slots <= 0)) {
-    FreeSlowPath();
+    return FreeSlowPath();
   } else {
     // All single-slot allocations must go through the slow path to
     // correctly update the size metadata.
     DCHECK(get_raw_size() == 0);
   }
+  return {};
 }
 
 ALWAYS_INLINE bool PartitionPage::is_active() const {
@@ -287,6 +303,12 @@ ALWAYS_INLINE void PartitionPage::Reset() {
   next_page = nullptr;
 }
 
+ALWAYS_INLINE void DeferredUnmap::Run() {
+  if (UNLIKELY(ptr)) {
+    Unmap();
+  }
+}
+
 }  // namespace internal
 }  // namespace base