Commit e5bf6002 authored by Ulan Degenbaev, committed by Commit Bot

Move unmapping of large pages out of the spin-lock in PartitionAlloc

Instead of unmapping directly, Free() returns a DeferredUnmap that needs
to be run after releasing the lock.

Bug: 1067006
Change-Id: I2638d370a5b36867d73adcbaf8988c4259ff05a3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2132155
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#755954}
parent 9a14dcad
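
For context, the change follows a common pattern: the free path does all of its bookkeeping under the spin lock, but instead of calling FreePages() (a system call) while the lock is held, it returns a small value object describing the region to unmap, and the caller runs that object after the lock is released. The snippet below is a minimal, self-contained sketch of this pattern; it uses std::mutex and the placeholder names Arena and PendingUnmap rather than PartitionAlloc's real types, and a comment stands in for the actual FreePages() call.

#include <cstddef>
#include <mutex>

// Work that must not run while the allocator lock is held.
struct PendingUnmap {
  void* ptr = nullptr;
  std::size_t size = 0;
  void Run() {
    if (ptr != nullptr) {
      // Return the pages to the OS here (the real code calls
      // FreePages(ptr, size), which ends up in munmap/VirtualFree).
    }
  }
};

// Toy allocator standing in for PartitionRootGeneric.
struct Arena {
  std::mutex lock;

  // Does the freelist bookkeeping under the lock, but only *describes*
  // the expensive unmap instead of performing it.
  PendingUnmap Free(void* ptr, std::size_t size) {
    // ... update freelists, counters, page state ...
    return {ptr, size};  // {nullptr, 0} when nothing needs unmapping.
  }
};

void FreeOutsideLock(Arena& arena, void* ptr, std::size_t size) {
  PendingUnmap pending;
  {
    std::lock_guard<std::mutex> guard(arena.lock);
    pending = arena.Free(ptr, size);
  }               // Lock released here.
  pending.Run();  // The slow unmap no longer blocks other threads.
}

int main() {
  Arena arena;
  int block = 0;
  FreeOutsideLock(arena, &block, sizeof(block));
  return 0;
}
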
@@ -366,7 +366,8 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(internal::PartitionRootBase::IsValidPage(page));
-  page->Free(ptr);
+  internal::DeferredUnmap deferred_unmap = page->Free(ptr);
+  deferred_unmap.Run();
 #endif
 }

@@ -460,10 +461,12 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(IsValidPage(page));
+  internal::DeferredUnmap deferred_unmap;
   {
     subtle::SpinLock::Guard guard(lock);
-    page->Free(ptr);
+    deferred_unmap = page->Free(ptr);
   }
+  deferred_unmap.Run();
 #endif
 }

@@ -13,7 +13,7 @@ namespace internal {
 namespace {

-ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) {
   PartitionRootBase* root = PartitionRootBase::FromPage(page);
   const PartitionDirectMapExtent* extent =
       PartitionDirectMapExtent::FromPage(page);

@@ -46,8 +46,7 @@ ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
   // Account for the mapping starting a partition page before the actual
   // allocation address.
   ptr -= kPartitionPageSize;
-  FreePages(ptr, unmap_size);
+  return {ptr, unmap_size};
 }

 ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {

@@ -90,13 +89,12 @@ PartitionPage* PartitionPage::get_sentinel_page() {
   return &sentinel_page_;
 }

-void PartitionPage::FreeSlowPath() {
+DeferredUnmap PartitionPage::FreeSlowPath() {
   DCHECK(this != get_sentinel_page());
   if (LIKELY(num_allocated_slots == 0)) {
     // Page became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
-      PartitionDirectUnmap(this);
-      return;
+      return PartitionDirectUnmap(this);
     }
     // If it's the current active page, change it. We bounce the page to
     // the empty list as a force towards defragmentation.

@@ -130,8 +128,9 @@ void PartitionPage::FreeSlowPath() {
     // Special case: for a partition page with just a single slot, it may
     // now be empty and we want to run it through the empty logic.
     if (UNLIKELY(num_allocated_slots == 0))
-      FreeSlowPath();
+      return FreeSlowPath();
   }
+  return {};
 }

 void PartitionPage::Decommit(PartitionRootBase* root) {

@@ -160,5 +159,9 @@ void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
     Decommit(root);
 }

+void DeferredUnmap::Unmap() {
+  FreePages(ptr, size);
+}
+
 }  // namespace internal
 }  // namespace base

@@ -19,6 +19,20 @@ namespace internal {
 struct PartitionRootBase;

+// PartitionPage::Free() defers unmapping a large page until the lock is
+// released. Callers of PartitionPage::Free() must invoke Run().
+// TODO(1061437): Reconsider once the new locking mechanism is implemented.
+struct DeferredUnmap {
+  void* ptr = nullptr;
+  size_t size = 0;
+  // In most cases there is no page to unmap and ptr == nullptr. This function
+  // is inlined to avoid the overhead of a function call in the common case.
+  ALWAYS_INLINE void Run();
+
+ private:
+  BASE_EXPORT NOINLINE void Unmap();
+};
+
 // Some notes on page states. A page can be in one of four major states:
 // 1) Active.
 // 2) Full.

@@ -62,8 +76,9 @@ struct PartitionPage {
   // Public API
   // Note the matching Alloc() functions are in PartitionPage.
-  BASE_EXPORT NOINLINE void FreeSlowPath();
-  ALWAYS_INLINE void Free(void* ptr);
+  // Callers must invoke DeferredUnmap::Run() after releasing the lock.
+  BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
+  ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;

   void Decommit(PartitionRootBase* root);
   void DecommitIfPossible(PartitionRootBase* root);

@@ -201,7 +216,7 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
   return 0;
 }

-ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) {
 #if DCHECK_IS_ON()
   size_t slot_size = bucket->slot_size;
   const size_t raw_size = get_raw_size();

@@ -229,12 +244,13 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
   freelist_head = entry;
   --num_allocated_slots;
   if (UNLIKELY(num_allocated_slots <= 0)) {
-    FreeSlowPath();
+    return FreeSlowPath();
   } else {
     // All single-slot allocations must go through the slow path to
     // correctly update the size metadata.
     DCHECK(get_raw_size() == 0);
   }
+  return {};
 }

 ALWAYS_INLINE bool PartitionPage::is_active() const {

@@ -287,6 +303,12 @@ ALWAYS_INLINE void PartitionPage::Reset() {
   next_page = nullptr;
 }

+ALWAYS_INLINE void DeferredUnmap::Run() {
+  if (UNLIKELY(ptr)) {
+    Unmap();
+  }
+}
+
 }  // namespace internal
 }  // namespace base
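
A note on the shape of DeferredUnmap itself: Run() is ALWAYS_INLINE and, in the common case where ptr is null, reduces to a single predictable branch at every Free() call site, while the rare direct-mapped case is pushed into the NOINLINE, BASE_EXPORT Unmap() so the cold FreePages() call is not inlined everywhere. The snippet below is a self-contained illustration of that inline fast path / out-of-line slow path split; it uses GCC/Clang attribute spellings, __builtin_expect, and std::free as stand-ins for Chromium's ALWAYS_INLINE/NOINLINE/UNLIKELY macros and FreePages(), so it is an approximation of the technique, not the actual implementation.

#include <cstddef>
#include <cstdlib>

struct DeferredRelease {
  void* ptr = nullptr;
  std::size_t size = 0;

  // Hot path: inlined into every caller; usually just tests a pointer
  // and falls through.
  __attribute__((always_inline)) inline void Run() {
    if (__builtin_expect(ptr != nullptr, 0))  // i.e. UNLIKELY(ptr)
      Release();
  }

 private:
  // Cold path: deliberately kept out of line so the release logic is not
  // duplicated at every call site.
  __attribute__((noinline)) void Release() {
    std::free(ptr);  // Stand-in for FreePages(ptr, size).
    ptr = nullptr;
    size = 0;
  }
};

int main() {
  DeferredRelease empty;  // Common case: nothing to release.
  empty.Run();            // Compiles down to a single branch.

  DeferredRelease pending{std::malloc(64), 64};
  pending.Run();          // Rare case: takes the out-of-line path.
  return 0;
}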