[PartitionAlloc] Transition to SlotSpanMetadata
Currently all metadata is stored in PartitionPage, which is confusing because the most commonly used metadata is related to slot spans, yet it is stored only in the PartitionPage object that corresponds to the first partition page of the slot span. This CL introduces SlotSpanMetadata to clear up that confusion.

Change-Id: Id8873dba1c9e3018a8643f4f9c93e694f2edb9c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2466007
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#817117}
Committed by: Commit Bot
Parent: 16d2ce3236
Commit: 22b2cdc0fa
Changed files:

base/allocator/partition_allocator/
  memory_reclaimer.cc
  partition_alloc.cc
  partition_alloc.h
  partition_alloc_forward.h
  partition_alloc_unittest.cc
  partition_bucket.cc
  partition_bucket.h
  partition_direct_map_extent.h
  partition_page.cc
  partition_page.h
  partition_ref_count.cc
  partition_root.h
  pcscan.cc
  pcscan.h
  pcscan_unittest.cc
third_party/blink/renderer/platform/instrumentation/
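To make the rename concrete before the per-file hunks, here is a minimal sketch of the relationship the CL establishes between the two types. It is pieced together from the hunks on this page rather than copied from the Chromium tree, so the field types, defaults, and exact member set are illustrative assumptions.

#include <cstdint>

// Illustrative sketch only; simplified from the hunks below. The real
// definitions live elsewhere in the tree and contain more members.
struct PartitionFreelistEntry;                         // opaque here
template <bool thread_safe> struct PartitionBucket;    // opaque here

// Per-slot-span bookkeeping gets its own named type instead of living
// anonymously in the metadata of the span's first partition page.
template <bool thread_safe>
struct SlotSpanMetadata {
  PartitionFreelistEntry* freelist_head = nullptr;
  SlotSpanMetadata* next_slot_span = nullptr;
  PartitionBucket<thread_safe>* bucket = nullptr;
  int num_allocated_slots = 0;           // negated to mark a full span
  unsigned num_unprovisioned_slots = 0;
  int empty_cache_index = -1;
};

// Every partition page still has a metadata entry, but only the first page of
// a slot span carries meaningful SlotSpanMetadata; the remaining entries just
// record how far back the first one sits.
template <bool thread_safe>
struct PartitionPage {
  SlotSpanMetadata<thread_safe> slot_span_metadata;
  std::uint8_t slot_span_metadata_offset;  // 0 for the span's first page
};

Call sites follow the same split: code that used to read page->bucket->slot_size or page->freelist_head now goes through a SlotSpanMetadata* instead (slot_span->bucket->slot_size, slot_span->freelist_head), which is what most of the mechanical renames below do.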
@@ -105,8 +105,8 @@ void PartitionAllocMemoryReclaimer::Reclaim() {
   AutoLock lock(lock_);  // Has to protect from concurrent (Un)Register calls.
   TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
 
-  constexpr int kFlags =
-      PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
+  constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
+                         PartitionPurgeDiscardUnusedSystemPages;
 
   for (auto* partition : thread_safe_partitions_)
     partition->PurgeMemory(kFlags);
@@ -46,11 +46,11 @@ NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(size_t size) {
 }
 
 template <bool thread_safe>
-void PartitionRoot<thread_safe>::DecommitEmptyPages() {
-  for (Page*& page : global_empty_page_ring) {
-    if (page)
-      page->DecommitIfPossible(this);
-    page = nullptr;
+void PartitionRoot<thread_safe>::DecommitEmptySlotSpans() {
+  for (SlotSpan*& slot_span : global_empty_slot_span_ring) {
+    if (slot_span)
+      slot_span->DecommitIfPossible(this);
+    slot_span = nullptr;
   }
 }
 
@@ -283,10 +283,10 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
   if (thread_safe && opts.pcscan == PartitionOptions::PCScan::kEnabled)
     pcscan.emplace(this);
 
-  // We mark the sentinel bucket/page as free to make sure it is skipped by our
-  // logic to find a new active page.
+  // We mark the sentinel slot span as free to make sure it is skipped by our
+  // logic to find a new active slot span.
   memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
-  sentinel_bucket.active_pages_head = Page::get_sentinel_page();
+  sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
 
   // This is a "magic" value so we can test if a root pointer is valid.
   inverted_self = ~reinterpret_cast<uintptr_t>(this);
@@ -306,7 +306,7 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
     bucket->Init(current_size);
     // Disable pseudo buckets so that touching them faults.
     if (current_size % kSmallestBucket)
-      bucket->active_pages_head = nullptr;
+      bucket->active_slot_spans_head = nullptr;
     current_size += current_increment;
     ++bucket;
   }
@@ -339,9 +339,9 @@ PartitionRoot<thread_safe>::~PartitionRoot() = default;
 
 template <bool thread_safe>
 bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
-    internal::PartitionPage<thread_safe>* page,
+    internal::SlotSpanMetadata<thread_safe>* slot_span,
     size_t requested_size) {
-  PA_DCHECK(page->bucket->is_direct_mapped());
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
 
   size_t raw_size =
       internal::PartitionSizeAdjustAdd(allow_extras, requested_size);
@@ -352,12 +352,12 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
     return false;
 
   // bucket->slot_size is the current size of the allocation.
-  size_t current_slot_size = page->bucket->slot_size;
-  char* char_ptr = static_cast<char*>(Page::ToPointer(page));
+  size_t current_slot_size = slot_span->bucket->slot_size;
+  char* char_ptr = static_cast<char*>(SlotSpan::ToPointer(slot_span));
   if (new_slot_size == current_slot_size) {
     // No need to move any memory around, but update size and cookie below.
   } else if (new_slot_size < current_slot_size) {
-    size_t map_size = DirectMapExtent::FromPage(page)->map_size;
+    size_t map_size = DirectMapExtent::FromSlotSpan(slot_span)->map_size;
 
     // Don't reallocate in-place if new size is less than 80 % of the full
     // map size, to avoid holding on to too much unused address space.
@@ -370,7 +370,8 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
     DecommitSystemPages(char_ptr + new_slot_size, decommit_size);
     SetSystemPagesAccess(char_ptr + new_slot_size, decommit_size,
                          PageInaccessible);
-  } else if (new_slot_size <= DirectMapExtent::FromPage(page)->map_size) {
+  } else if (new_slot_size <=
+             DirectMapExtent::FromSlotSpan(slot_span)->map_size) {
     // Grow within the actually allocated memory. Just need to make the
     // pages accessible again.
     size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
@@ -397,8 +398,8 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
   }
 #endif
 
-  page->SetRawSize(raw_size);
-  page->bucket->slot_size = new_slot_size;
+  slot_span->SetRawSize(raw_size);
+  slot_span->bucket->slot_size = new_slot_size;
   return true;
 }
 
@@ -438,19 +439,19 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
                                                     &actual_old_size, ptr);
   }
   if (LIKELY(!overridden)) {
-    auto* page = Page::FromPointer(
+    auto* slot_span = SlotSpan::FromPointer(
         internal::PartitionPointerAdjustSubtract(allow_extras, ptr));
     bool success = false;
     {
      internal::ScopedGuard<thread_safe> guard{lock_};
      // TODO(palmer): See if we can afford to make this a CHECK.
-      PA_DCHECK(IsValidPage(page));
+      PA_DCHECK(IsValidSlotSpan(slot_span));
 
-      if (UNLIKELY(page->bucket->is_direct_mapped())) {
+      if (UNLIKELY(slot_span->bucket->is_direct_mapped())) {
        // We may be able to perform the realloc in place by changing the
        // accessibility of memory pages and, if reducing the size, decommitting
        // them.
-        success = ReallocDirectMappedInPlace(page, new_size);
+        success = ReallocDirectMappedInPlace(slot_span, new_size);
       }
     }
     if (success) {
@@ -471,10 +472,10 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
   // Trying to allocate a block of size |new_size| would give us a block of
   // the same size as the one we've already got, so re-use the allocation
   // after updating statistics (and cookies, if present).
-  if (page->CanStoreRawSize()) {
+  if (slot_span->CanStoreRawSize()) {
     size_t new_raw_size =
         internal::PartitionSizeAdjustAdd(allow_extras, new_size);
-    page->SetRawSize(new_raw_size);
+    slot_span->SetRawSize(new_raw_size);
 #if DCHECK_IS_ON()
     // Write a new trailing cookie only when it is possible to keep track
     // raw size (otherwise we wouldn't know where to look for it later).
@ -508,23 +509,24 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
bool discard) {
|
||||
const internal::PartitionBucket<thread_safe>* bucket = page->bucket;
|
||||
static size_t PartitionPurgeSlotSpan(
|
||||
internal::SlotSpanMetadata<thread_safe>* slot_span,
|
||||
bool discard) {
|
||||
const internal::PartitionBucket<thread_safe>* bucket = slot_span->bucket;
|
||||
size_t slot_size = bucket->slot_size;
|
||||
if (slot_size < SystemPageSize() || !page->num_allocated_slots)
|
||||
if (slot_size < SystemPageSize() || !slot_span->num_allocated_slots)
|
||||
return 0;
|
||||
|
||||
size_t bucket_num_slots = bucket->get_slots_per_span();
|
||||
size_t discardable_bytes = 0;
|
||||
|
||||
if (page->CanStoreRawSize()) {
|
||||
if (slot_span->CanStoreRawSize()) {
|
||||
uint32_t used_bytes =
|
||||
static_cast<uint32_t>(RoundUpToSystemPage(page->GetRawSize()));
|
||||
static_cast<uint32_t>(RoundUpToSystemPage(slot_span->GetRawSize()));
|
||||
discardable_bytes = bucket->slot_size - used_bytes;
|
||||
if (discardable_bytes && discard) {
|
||||
char* ptr = reinterpret_cast<char*>(
|
||||
internal::PartitionPage<thread_safe>::ToPointer(page));
|
||||
internal::SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
|
||||
ptr += used_bytes;
|
||||
DiscardSystemPages(ptr, discardable_bytes);
|
||||
}
|
||||
@ -545,8 +547,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
SystemPageSize());
|
||||
#endif
|
||||
PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
|
||||
PA_DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
|
||||
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
|
||||
PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
|
||||
size_t num_slots = bucket_num_slots - slot_span->num_unprovisioned_slots;
|
||||
char slot_usage[kMaxSlotCount];
|
||||
#if !defined(OS_WIN)
|
||||
// The last freelist entry should not be discarded when using OS_WIN.
|
||||
@ -555,10 +557,11 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
#endif
|
||||
memset(slot_usage, 1, num_slots);
|
||||
char* ptr = reinterpret_cast<char*>(
|
||||
internal::PartitionPage<thread_safe>::ToPointer(page));
|
||||
// First, walk the freelist for this page and make a bitmap of which slots
|
||||
// are not in use.
|
||||
for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
|
||||
internal::SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
|
||||
// First, walk the freelist for this slot span and make a bitmap of which
|
||||
// slots are not in use.
|
||||
for (internal::PartitionFreelistEntry* entry = slot_span->freelist_head;
|
||||
entry;
|
||||
/**/) {
|
||||
size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
|
||||
PA_DCHECK(slot_index < num_slots);
|
||||
@ -603,7 +606,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
if (unprovisioned_bytes && discard) {
|
||||
PA_DCHECK(truncated_slots > 0);
|
||||
size_t num_new_entries = 0;
|
||||
page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
|
||||
slot_span->num_unprovisioned_slots +=
|
||||
static_cast<uint16_t>(truncated_slots);
|
||||
|
||||
// Rewrite the freelist.
|
||||
internal::PartitionFreelistEntry* head = nullptr;
|
||||
@ -627,11 +631,11 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
#endif
|
||||
}
|
||||
|
||||
page->freelist_head = head;
|
||||
slot_span->freelist_head = head;
|
||||
if (back)
|
||||
back->next = internal::PartitionFreelistEntry::Encode(nullptr);
|
||||
|
||||
PA_DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
|
||||
PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
|
||||
// Discard the memory.
|
||||
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
|
||||
}
|
||||
@ -671,13 +675,15 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
|
||||
template <bool thread_safe>
|
||||
static void PartitionPurgeBucket(
|
||||
internal::PartitionBucket<thread_safe>* bucket) {
|
||||
if (bucket->active_pages_head !=
|
||||
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
|
||||
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
|
||||
page; page = page->next_page) {
|
||||
PA_DCHECK(page !=
|
||||
internal::PartitionPage<thread_safe>::get_sentinel_page());
|
||||
PartitionPurgePage(page, true);
|
||||
if (bucket->active_slot_spans_head !=
|
||||
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
|
||||
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
|
||||
bucket->active_slot_spans_head;
|
||||
slot_span; slot_span = slot_span->next_slot_span) {
|
||||
PA_DCHECK(
|
||||
slot_span !=
|
||||
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
|
||||
PartitionPurgeSlotSpan(slot_span, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -686,8 +692,8 @@ template <bool thread_safe>
|
||||
void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
|
||||
{
|
||||
ScopedGuard guard{lock_};
|
||||
if (flags & PartitionPurgeDecommitEmptyPages)
|
||||
DecommitEmptyPages();
|
||||
if (flags & PartitionPurgeDecommitEmptySlotSpans)
|
||||
DecommitEmptySlotSpans();
|
||||
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
|
||||
for (size_t i = 0; i < kNumBuckets; ++i) {
|
||||
Bucket* bucket = &buckets[i];
|
||||
@ -702,36 +708,37 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
|
||||
internal::PartitionPage<thread_safe>* page) {
|
||||
uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
|
||||
static void PartitionDumpSlotSpanStats(
|
||||
PartitionBucketMemoryStats* stats_out,
|
||||
internal::SlotSpanMetadata<thread_safe>* slot_span) {
|
||||
uint16_t bucket_num_slots = slot_span->bucket->get_slots_per_span();
|
||||
|
||||
if (page->is_decommitted()) {
|
||||
++stats_out->num_decommitted_pages;
|
||||
if (slot_span->is_decommitted()) {
|
||||
++stats_out->num_decommitted_slot_spans;
|
||||
return;
|
||||
}
|
||||
|
||||
stats_out->discardable_bytes += PartitionPurgePage(page, false);
|
||||
stats_out->discardable_bytes += PartitionPurgeSlotSpan(slot_span, false);
|
||||
|
||||
if (page->CanStoreRawSize()) {
|
||||
stats_out->active_bytes += static_cast<uint32_t>(page->GetRawSize());
|
||||
if (slot_span->CanStoreRawSize()) {
|
||||
stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
|
||||
} else {
|
||||
stats_out->active_bytes +=
|
||||
(page->num_allocated_slots * stats_out->bucket_slot_size);
|
||||
(slot_span->num_allocated_slots * stats_out->bucket_slot_size);
|
||||
}
|
||||
|
||||
size_t page_bytes_resident =
|
||||
RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
|
||||
stats_out->bucket_slot_size);
|
||||
stats_out->resident_bytes += page_bytes_resident;
|
||||
if (page->is_empty()) {
|
||||
stats_out->decommittable_bytes += page_bytes_resident;
|
||||
++stats_out->num_empty_pages;
|
||||
} else if (page->is_full()) {
|
||||
++stats_out->num_full_pages;
|
||||
size_t slot_span_bytes_resident = RoundUpToSystemPage(
|
||||
(bucket_num_slots - slot_span->num_unprovisioned_slots) *
|
||||
stats_out->bucket_slot_size);
|
||||
stats_out->resident_bytes += slot_span_bytes_resident;
|
||||
if (slot_span->is_empty()) {
|
||||
stats_out->decommittable_bytes += slot_span_bytes_resident;
|
||||
++stats_out->num_empty_slot_spans;
|
||||
} else if (slot_span->is_full()) {
|
||||
++stats_out->num_full_slot_spans;
|
||||
} else {
|
||||
PA_DCHECK(page->is_active());
|
||||
++stats_out->num_active_pages;
|
||||
PA_DCHECK(slot_span->is_active());
|
||||
++stats_out->num_active_slot_spans;
|
||||
}
|
||||
}
|
||||
|
||||
@ -741,46 +748,51 @@ static void PartitionDumpBucketStats(
|
||||
const internal::PartitionBucket<thread_safe>* bucket) {
|
||||
PA_DCHECK(!bucket->is_direct_mapped());
|
||||
stats_out->is_valid = false;
|
||||
// If the active page list is empty (==
|
||||
// internal::PartitionPage::get_sentinel_page()), the bucket might still need
|
||||
// to be reported if it has a list of empty, decommitted or full pages.
|
||||
if (bucket->active_pages_head ==
|
||||
internal::PartitionPage<thread_safe>::get_sentinel_page() &&
|
||||
!bucket->empty_pages_head && !bucket->decommitted_pages_head &&
|
||||
!bucket->num_full_pages)
|
||||
// If the active slot span list is empty (==
|
||||
// internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
|
||||
// still need to be reported if it has a list of empty, decommitted or full
|
||||
// slot spans.
|
||||
if (bucket->active_slot_spans_head ==
|
||||
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() &&
|
||||
!bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
|
||||
!bucket->num_full_slot_spans)
|
||||
return;
|
||||
|
||||
memset(stats_out, '\0', sizeof(*stats_out));
|
||||
stats_out->is_valid = true;
|
||||
stats_out->is_direct_map = false;
|
||||
stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
|
||||
stats_out->num_full_slot_spans =
|
||||
static_cast<size_t>(bucket->num_full_slot_spans);
|
||||
stats_out->bucket_slot_size = bucket->slot_size;
|
||||
uint16_t bucket_num_slots = bucket->get_slots_per_span();
|
||||
size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
|
||||
stats_out->allocated_page_size = bucket->get_bytes_per_span();
|
||||
stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
|
||||
stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
|
||||
stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
|
||||
stats_out->resident_bytes =
|
||||
bucket->num_full_pages * stats_out->allocated_page_size;
|
||||
bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
|
||||
|
||||
for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head;
|
||||
page; page = page->next_page) {
|
||||
PA_DCHECK(page->is_empty() || page->is_decommitted());
|
||||
PartitionDumpPageStats(stats_out, page);
|
||||
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
|
||||
bucket->empty_slot_spans_head;
|
||||
slot_span; slot_span = slot_span->next_slot_span) {
|
||||
PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
|
||||
PartitionDumpSlotSpanStats(stats_out, slot_span);
|
||||
}
|
||||
for (internal::PartitionPage<thread_safe>* page =
|
||||
bucket->decommitted_pages_head;
|
||||
page; page = page->next_page) {
|
||||
PA_DCHECK(page->is_decommitted());
|
||||
PartitionDumpPageStats(stats_out, page);
|
||||
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
|
||||
bucket->decommitted_slot_spans_head;
|
||||
slot_span; slot_span = slot_span->next_slot_span) {
|
||||
PA_DCHECK(slot_span->is_decommitted());
|
||||
PartitionDumpSlotSpanStats(stats_out, slot_span);
|
||||
}
|
||||
|
||||
if (bucket->active_pages_head !=
|
||||
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
|
||||
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
|
||||
page; page = page->next_page) {
|
||||
PA_DCHECK(page !=
|
||||
internal::PartitionPage<thread_safe>::get_sentinel_page());
|
||||
PartitionDumpPageStats(stats_out, page);
|
||||
if (bucket->active_slot_spans_head !=
|
||||
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
|
||||
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
|
||||
bucket->active_slot_spans_head;
|
||||
slot_span; slot_span = slot_span->next_slot_span) {
|
||||
PA_DCHECK(
|
||||
slot_span !=
|
||||
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
|
||||
PartitionDumpSlotSpanStats(stats_out, slot_span);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -818,7 +830,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
|
||||
// Don't report the pseudo buckets that the generic allocator sets up in
|
||||
// order to preserve a fast size->bucket map (see
|
||||
// PartitionRoot::Init() for details).
|
||||
if (!bucket->active_pages_head)
|
||||
if (!bucket->active_slot_spans_head)
|
||||
bucket_stats[i].is_valid = false;
|
||||
else
|
||||
PartitionDumpBucketStats(&bucket_stats[i], bucket);
|
||||
@ -867,8 +879,8 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
|
||||
PartitionBucketMemoryStats mapped_stats = {};
|
||||
mapped_stats.is_valid = true;
|
||||
mapped_stats.is_direct_map = true;
|
||||
mapped_stats.num_full_pages = 1;
|
||||
mapped_stats.allocated_page_size = size;
|
||||
mapped_stats.num_full_slot_spans = 1;
|
||||
mapped_stats.allocated_slot_span_size = size;
|
||||
mapped_stats.bucket_slot_size = size;
|
||||
mapped_stats.active_bytes = size;
|
||||
mapped_stats.resident_bytes = size;
|
||||
@ -937,17 +949,19 @@ BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr) {
|
||||
// The only allocations that don't use tag are allocated outside of GigaCage,
|
||||
// hence we'd never get here in the use_tag=false case.
|
||||
ptr = internal::PartitionPointerAdjustSubtract(true /* use_tag */, ptr);
|
||||
auto* page =
|
||||
internal::PartitionAllocGetPageForSize<internal::ThreadSafe>(ptr);
|
||||
PA_DCHECK(PartitionRoot<internal::ThreadSafe>::FromPage(page)->allow_extras);
|
||||
auto* slot_span =
|
||||
internal::PartitionAllocGetSlotSpanForSizeQuery<internal::ThreadSafe>(
|
||||
ptr);
|
||||
PA_DCHECK(PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span)
|
||||
->allow_extras);
|
||||
|
||||
// Get the offset from the beginning of the slot span.
|
||||
uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
|
||||
uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
|
||||
internal::PartitionPage<internal::ThreadSafe>::ToPointer(page));
|
||||
internal::SlotSpanMetadata<internal::ThreadSafe>::ToPointer(slot_span));
|
||||
size_t offset_in_slot_span = ptr_addr - slot_span_start;
|
||||
|
||||
return page->bucket->GetSlotOffset(offset_in_slot_span);
|
||||
return slot_span->bucket->GetSlotOffset(offset_in_slot_span);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
@@ -226,12 +226,12 @@ static OomFunction g_oom_handling_function = nullptr;
 class PartitionStatsDumper;
 
 enum PartitionPurgeFlags {
-  // Decommitting the ring list of empty pages is reasonably fast.
-  PartitionPurgeDecommitEmptyPages = 1 << 0,
+  // Decommitting the ring list of empty slot spans is reasonably fast.
+  PartitionPurgeDecommitEmptySlotSpans = 1 << 0,
   // Discarding unused system pages is slower, because it involves walking all
-  // freelists in all active partition pages of all buckets >= system page
+  // freelists in all active slot spans of all buckets >= system page
   // size. It often frees a similar amount of memory to decommitting the empty
-  // pages, though.
+  // slot spans, though.
   PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
 };
 
@@ -256,20 +256,21 @@ struct PartitionBucketMemoryStats {
   bool is_valid;       // Used to check if the stats is valid.
   bool is_direct_map;  // True if this is a direct mapping; size will not be
                        // unique.
-  uint32_t bucket_slot_size;     // The size of the slot in bytes.
-  uint32_t allocated_page_size;  // Total size the partition page allocated from
-                                 // the system.
-  uint32_t active_bytes;         // Total active bytes used in the bucket.
-  uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
-  uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
-  uint32_t discardable_bytes;    // Total bytes that could be discarded.
-  uint32_t num_full_pages;       // Number of pages with all slots allocated.
-  uint32_t num_active_pages;     // Number of pages that have at least one
-                                 // provisioned slot.
-  uint32_t num_empty_pages;      // Number of pages that are empty
-                                 // but not decommitted.
-  uint32_t num_decommitted_pages;  // Number of pages that are empty
-                                   // and decommitted.
+  uint32_t bucket_slot_size;          // The size of the slot in bytes.
+  uint32_t allocated_slot_span_size;  // Total size the slot span allocated
+                                      // from the system (committed pages).
+  uint32_t active_bytes;              // Total active bytes used in the bucket.
+  uint32_t resident_bytes;            // Total bytes provisioned in the bucket.
+  uint32_t decommittable_bytes;       // Total bytes that could be decommitted.
+  uint32_t discardable_bytes;         // Total bytes that could be discarded.
+  uint32_t num_full_slot_spans;       // Number of slot spans with all slots
+                                      // allocated.
+  uint32_t num_active_slot_spans;     // Number of slot spans that have at least
+                                      // one provisioned slot.
+  uint32_t num_empty_slot_spans;      // Number of slot spans that are empty
+                                      // but not decommitted.
+  uint32_t num_decommitted_slot_spans;  // Number of slot spans that are empty
+                                        // and decommitted.
 };
 
 // Interface that is passed to PartitionDumpStats and
@ -380,42 +381,43 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
|
||||
bool* is_already_zeroed) {
|
||||
*is_already_zeroed = false;
|
||||
|
||||
Page* page = bucket->active_pages_head;
|
||||
// Check that this page is neither full nor freed.
|
||||
PA_DCHECK(page);
|
||||
PA_DCHECK(page->num_allocated_slots >= 0);
|
||||
SlotSpan* slot_span = bucket->active_slot_spans_head;
|
||||
// Check that this slot span is neither full nor freed.
|
||||
PA_DCHECK(slot_span);
|
||||
PA_DCHECK(slot_span->num_allocated_slots >= 0);
|
||||
*utilized_slot_size = bucket->slot_size;
|
||||
|
||||
void* ret = page->freelist_head;
|
||||
void* ret = slot_span->freelist_head;
|
||||
if (LIKELY(ret)) {
|
||||
// If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
|
||||
// if we can afford to make these CHECKs.
|
||||
PA_DCHECK(IsValidPage(page));
|
||||
PA_DCHECK(IsValidSlotSpan(slot_span));
|
||||
|
||||
// All large allocations must go through the slow path to correctly update
|
||||
// the size metadata.
|
||||
PA_DCHECK(!page->CanStoreRawSize());
|
||||
PA_DCHECK(!slot_span->CanStoreRawSize());
|
||||
internal::PartitionFreelistEntry* new_head =
|
||||
internal::EncodedPartitionFreelistEntry::Decode(
|
||||
page->freelist_head->next);
|
||||
page->freelist_head = new_head;
|
||||
page->num_allocated_slots++;
|
||||
slot_span->freelist_head->next);
|
||||
slot_span->freelist_head = new_head;
|
||||
slot_span->num_allocated_slots++;
|
||||
|
||||
PA_DCHECK(page->bucket == bucket);
|
||||
PA_DCHECK(slot_span->bucket == bucket);
|
||||
} else {
|
||||
ret = bucket->SlowPathAlloc(this, flags, raw_size, is_already_zeroed);
|
||||
// TODO(palmer): See if we can afford to make this a CHECK.
|
||||
PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
|
||||
PA_DCHECK(!ret || IsValidSlotSpan(SlotSpan::FromPointer(ret)));
|
||||
|
||||
if (UNLIKELY(!ret))
|
||||
return nullptr;
|
||||
|
||||
page = Page::FromPointer(ret);
|
||||
slot_span = SlotSpan::FromPointer(ret);
|
||||
// For direct mapped allocations, |bucket| is the sentinel.
|
||||
PA_DCHECK((page->bucket == bucket) || (page->bucket->is_direct_mapped() &&
|
||||
(bucket == &sentinel_bucket)));
|
||||
PA_DCHECK((slot_span->bucket == bucket) ||
|
||||
(slot_span->bucket->is_direct_mapped() &&
|
||||
(bucket == &sentinel_bucket)));
|
||||
|
||||
*utilized_slot_size = page->GetUtilizedSlotSize();
|
||||
*utilized_slot_size = slot_span->GetUtilizedSlotSize();
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -447,25 +449,26 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
|
||||
return;
|
||||
|
||||
// No check as the pointer hasn't been adjusted yet.
|
||||
Page* page = Page::FromPointerNoAlignmentCheck(ptr);
|
||||
SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
|
||||
// TODO(palmer): See if we can afford to make this a CHECK.
|
||||
PA_DCHECK(IsValidPage(page));
|
||||
auto* root = PartitionRoot<thread_safe>::FromPage(page);
|
||||
PA_DCHECK(IsValidSlotSpan(slot_span));
|
||||
auto* root = FromSlotSpan(slot_span);
|
||||
|
||||
// TODO(bikineev): Change the first condition to LIKELY once PCScan is enabled
|
||||
// by default.
|
||||
if (UNLIKELY(root->pcscan) && LIKELY(!page->bucket->is_direct_mapped())) {
|
||||
root->pcscan->MoveToQuarantine(ptr, page);
|
||||
if (UNLIKELY(root->pcscan) &&
|
||||
LIKELY(!slot_span->bucket->is_direct_mapped())) {
|
||||
root->pcscan->MoveToQuarantine(ptr, slot_span);
|
||||
return;
|
||||
}
|
||||
|
||||
root->FreeNoHooksImmediate(ptr, page);
|
||||
root->FreeNoHooksImmediate(ptr, slot_span);
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
|
||||
void* ptr,
|
||||
Page* page) {
|
||||
SlotSpan* slot_span) {
|
||||
// The thread cache is added "in the middle" of the main allocator, that is:
|
||||
// - After all the cookie/tag/ref-count management
|
||||
// - Before the "raw" allocator.
|
||||
@ -476,11 +479,11 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
|
||||
// a. Return to the thread cache if possible. If it succeeds, return.
|
||||
// b. Otherwise, call the "raw" allocator <-- Locking
|
||||
PA_DCHECK(ptr);
|
||||
PA_DCHECK(page);
|
||||
PA_DCHECK(IsValidPage(page));
|
||||
PA_DCHECK(slot_span);
|
||||
PA_DCHECK(IsValidSlotSpan(slot_span));
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
size_t utilized_slot_size = page->GetUtilizedSlotSize();
|
||||
size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
|
||||
#endif
|
||||
|
||||
if (allow_extras) {
|
||||
@ -511,14 +514,14 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
|
||||
internal::PartitionCookieCheckValue(end_cookie_ptr);
|
||||
#endif
|
||||
|
||||
if (!page->bucket->is_direct_mapped()) {
|
||||
if (!slot_span->bucket->is_direct_mapped()) {
|
||||
// PartitionTagIncrementValue and PartitionTagClearValue require that the
|
||||
// size is tag_bitmap::kBytesPerPartitionTag-aligned (currently 16
|
||||
// bytes-aligned) when MTECheckedPtr is enabled. However,
|
||||
// utilized_slot_size may not be aligned for single-slot slot spans. So we
|
||||
// need the bucket's slot_size.
|
||||
size_t slot_size_with_no_extras =
|
||||
internal::PartitionSizeAdjustSubtract(true, page->bucket->slot_size);
|
||||
size_t slot_size_with_no_extras = internal::PartitionSizeAdjustSubtract(
|
||||
true, slot_span->bucket->slot_size);
|
||||
#if ENABLE_TAG_FOR_MTE_CHECKED_PTR && MTE_CHECKED_PTR_SET_TAG_AT_FREE
|
||||
internal::PartitionTagIncrementValue(ptr, slot_size_with_no_extras);
|
||||
#else
|
||||
@ -556,24 +559,26 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
|
||||
//
|
||||
// Also the thread-unsafe variant doesn't have a use for a thread cache, so
|
||||
// make it statically known to the compiler.
|
||||
if (thread_safe && with_thread_cache && !page->bucket->is_direct_mapped()) {
|
||||
PA_DCHECK(page->bucket >= this->buckets &&
|
||||
page->bucket <= &this->sentinel_bucket);
|
||||
size_t bucket_index = page->bucket - this->buckets;
|
||||
if (thread_safe && with_thread_cache &&
|
||||
!slot_span->bucket->is_direct_mapped()) {
|
||||
PA_DCHECK(slot_span->bucket >= this->buckets &&
|
||||
slot_span->bucket <= &this->sentinel_bucket);
|
||||
size_t bucket_index = slot_span->bucket - this->buckets;
|
||||
auto* thread_cache = internal::ThreadCache::Get();
|
||||
if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
|
||||
return;
|
||||
}
|
||||
|
||||
RawFree(ptr, page);
|
||||
RawFree(ptr, slot_span);
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr, Page* page) {
|
||||
ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr,
|
||||
SlotSpan* slot_span) {
|
||||
internal::DeferredUnmap deferred_unmap;
|
||||
{
|
||||
ScopedGuard guard{lock_};
|
||||
deferred_unmap = page->Free(ptr);
|
||||
deferred_unmap = slot_span->Free(ptr);
|
||||
}
|
||||
deferred_unmap.Run();
|
||||
}
|
||||
@ -581,23 +586,24 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr, Page* page) {
|
||||
// static
|
||||
template <bool thread_safe>
|
||||
void PartitionRoot<thread_safe>::RawFreeStatic(void* ptr) {
|
||||
Page* page = Page::FromPointerNoAlignmentCheck(ptr);
|
||||
auto* root = PartitionRoot<thread_safe>::FromPage(page);
|
||||
root->RawFree(ptr, page);
|
||||
SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
|
||||
auto* root = FromSlotSpan(slot_span);
|
||||
root->RawFree(ptr, slot_span);
|
||||
}
|
||||
|
||||
// static
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidPage(Page* page) {
|
||||
PartitionRoot* root = FromPage(page);
|
||||
ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
|
||||
SlotSpan* slot_span) {
|
||||
PartitionRoot* root = FromSlotSpan(slot_span);
|
||||
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE PartitionRoot<thread_safe>* PartitionRoot<thread_safe>::FromPage(
|
||||
Page* page) {
|
||||
ALWAYS_INLINE PartitionRoot<thread_safe>*
|
||||
PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
|
||||
auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
|
||||
reinterpret_cast<uintptr_t>(page) & SystemPageBaseMask());
|
||||
reinterpret_cast<uintptr_t>(slot_span) & SystemPageBaseMask());
|
||||
return extent_entry->root;
|
||||
}
|
||||
|
||||
@ -637,20 +643,20 @@ BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
|
||||
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
|
||||
|
||||
namespace internal {
|
||||
// Gets the PartitionPage object for the first partition page of the slot span
|
||||
// that contains |ptr|. It's used with intention to do obtain the slot size.
|
||||
// CAUTION! It works well for normal buckets, but for direct-mapped allocations
|
||||
// it'll only work if |ptr| is in the first partition page of the allocation.
|
||||
// Gets the SlotSpanMetadata object of the slot span that contains |ptr|. It's
|
||||
// used with intention to do obtain the slot size. CAUTION! It works well for
|
||||
// normal buckets, but for direct-mapped allocations it'll only work if |ptr| is
|
||||
// in the first partition page of the allocation.
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE internal::PartitionPage<thread_safe>*
|
||||
PartitionAllocGetPageForSize(void* ptr) {
|
||||
ALWAYS_INLINE internal::SlotSpanMetadata<thread_safe>*
|
||||
PartitionAllocGetSlotSpanForSizeQuery(void* ptr) {
|
||||
// No need to lock here. Only |ptr| being freed by another thread could
|
||||
// cause trouble, and the caller is responsible for that not happening.
|
||||
auto* page =
|
||||
internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
|
||||
auto* slot_span =
|
||||
internal::SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(ptr);
|
||||
// TODO(palmer): See if we can afford to make this a CHECK.
|
||||
PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
|
||||
return page;
|
||||
PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
|
||||
return slot_span;
|
||||
}
|
||||
} // namespace internal
|
||||
|
||||
@ -662,10 +668,10 @@ PartitionAllocGetPageForSize(void* ptr) {
|
||||
// Used as malloc_usable_size.
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
|
||||
Page* page = Page::FromPointerNoAlignmentCheck(ptr);
|
||||
auto* root = PartitionRoot<thread_safe>::FromPage(page);
|
||||
SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
|
||||
auto* root = FromSlotSpan(slot_span);
|
||||
|
||||
size_t size = page->GetUtilizedSlotSize();
|
||||
size_t size = slot_span->GetUtilizedSlotSize();
|
||||
// Adjust back by subtracing extras (if any).
|
||||
size = internal::PartitionSizeAdjustSubtract(root->allow_extras, size);
|
||||
return size;
|
||||
@ -677,9 +683,10 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetSize(void* ptr) const {
|
||||
ptr = internal::PartitionPointerAdjustSubtract(allow_extras, ptr);
|
||||
auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
|
||||
size_t size = internal::PartitionSizeAdjustSubtract(allow_extras,
|
||||
page->bucket->slot_size);
|
||||
auto* slot_span =
|
||||
internal::PartitionAllocGetSlotSpanForSizeQuery<thread_safe>(ptr);
|
||||
size_t size = internal::PartitionSizeAdjustSubtract(
|
||||
allow_extras, slot_span->bucket->slot_size);
|
||||
return size;
|
||||
}
|
||||
|
||||
@ -815,12 +822,12 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
|
||||
// Make sure that the allocated pointer comes from the same place it would
|
||||
// for a non-thread cache allocation.
|
||||
if (ret) {
|
||||
Page* page = Page::FromPointerNoAlignmentCheck(ret);
|
||||
SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ret);
|
||||
// All large allocations must go through the RawAlloc path to correctly
|
||||
// set |utilized_slot_size|.
|
||||
PA_DCHECK(!page->CanStoreRawSize());
|
||||
PA_DCHECK(IsValidPage(page));
|
||||
PA_DCHECK(page->bucket == &buckets[bucket_index]);
|
||||
PA_DCHECK(!slot_span->CanStoreRawSize());
|
||||
PA_DCHECK(IsValidSlotSpan(slot_span));
|
||||
PA_DCHECK(slot_span->bucket == &buckets[bucket_index]);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@@ -11,7 +11,7 @@ namespace base {
 namespace internal {
 
 template <bool thread_safe>
-struct PartitionPage;
+struct SlotSpanMetadata;
 
 BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr);
 
(File diff suppressed because it is too large.)
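The partition_bucket.cc hunks below also show why PartitionPage does not disappear: InitializeSlotSpan stamps slot_span_metadata_offset = i into every metadata entry after the first, so the span's metadata stays reachable from any page inside it. A conceptual, non-Chromium sketch of that lookup, reusing the simplified layout from the sketch near the top of this page (the helper name and signature are illustrative assumptions):

#include <cstdint>

// Simplified stand-ins; see the sketch near the top of the page.
struct SlotSpanMetadata { /* freelist head, bucket, slot counts, ... */ };

struct PartitionPage {
  SlotSpanMetadata slot_span_metadata;
  std::uint8_t slot_span_metadata_offset;  // 0 for the span's first page
};

// Given the metadata entry of any partition page in a span, step back to the
// entry of the span's first page; that is the only one whose SlotSpanMetadata
// is actually populated.
SlotSpanMetadata* ToSlotSpanMetadata(PartitionPage* page_metadata) {
  return &(page_metadata - page_metadata->slot_span_metadata_offset)
              ->slot_span_metadata;
}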
@ -26,7 +26,7 @@ namespace internal {
|
||||
namespace {
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE PartitionPage<thread_safe>*
|
||||
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
|
||||
PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(root->lock_) {
|
||||
size_t size = PartitionBucket<thread_safe>::get_direct_map_size(raw_size);
|
||||
@ -86,22 +86,23 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
|
||||
&metadata->page);
|
||||
|
||||
auto* page = &metadata->page;
|
||||
PA_DCHECK(!page->next_page);
|
||||
PA_DCHECK(!page->num_allocated_slots);
|
||||
PA_DCHECK(!page->num_unprovisioned_slots);
|
||||
PA_DCHECK(!page->page_offset);
|
||||
PA_DCHECK(!page->empty_cache_index);
|
||||
page->bucket = &metadata->bucket;
|
||||
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
|
||||
PA_DCHECK(!page->slot_span_metadata_offset);
|
||||
PA_DCHECK(!page->slot_span_metadata.next_slot_span);
|
||||
PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
|
||||
PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
|
||||
PA_DCHECK(!page->slot_span_metadata.empty_cache_index);
|
||||
page->slot_span_metadata.bucket = &metadata->bucket;
|
||||
page->slot_span_metadata.freelist_head =
|
||||
reinterpret_cast<PartitionFreelistEntry*>(slot);
|
||||
|
||||
auto* next_entry = reinterpret_cast<PartitionFreelistEntry*>(slot);
|
||||
next_entry->next = PartitionFreelistEntry::Encode(nullptr);
|
||||
|
||||
PA_DCHECK(!metadata->bucket.active_pages_head);
|
||||
PA_DCHECK(!metadata->bucket.empty_pages_head);
|
||||
PA_DCHECK(!metadata->bucket.decommitted_pages_head);
|
||||
PA_DCHECK(!metadata->bucket.active_slot_spans_head);
|
||||
PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
|
||||
PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
|
||||
PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
|
||||
PA_DCHECK(!metadata->bucket.num_full_pages);
|
||||
PA_DCHECK(!metadata->bucket.num_full_slot_spans);
|
||||
metadata->bucket.slot_size = size;
|
||||
|
||||
auto* map_extent = &metadata->direct_map_extent;
|
||||
@ -115,7 +116,7 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
|
||||
map_extent->prev_extent = nullptr;
|
||||
root->direct_map_list = map_extent;
|
||||
|
||||
return page;
|
||||
return reinterpret_cast<SlotSpanMetadata<thread_safe>*>(page);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
@ -188,10 +189,11 @@ template <bool thread_safe>
|
||||
void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
|
||||
slot_size = new_slot_size;
|
||||
slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
|
||||
active_pages_head = PartitionPage<thread_safe>::get_sentinel_page();
|
||||
empty_pages_head = nullptr;
|
||||
decommitted_pages_head = nullptr;
|
||||
num_full_pages = 0;
|
||||
active_slot_spans_head =
|
||||
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
|
||||
empty_slot_spans_head = nullptr;
|
||||
decommitted_slot_spans_head = nullptr;
|
||||
num_full_slot_spans = 0;
|
||||
num_system_pages_per_slot_span = get_system_pages_per_slot_span();
|
||||
}
|
||||
|
||||
@ -405,41 +407,40 @@ ALWAYS_INLINE uint16_t PartitionBucket<thread_safe>::get_pages_per_slot_span() {
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
|
||||
PartitionPage<thread_safe>* page) {
|
||||
SlotSpanMetadata<thread_safe>* slot_span) {
|
||||
// The bucket never changes. We set it up once.
|
||||
page->bucket = this;
|
||||
page->empty_cache_index = -1;
|
||||
slot_span->bucket = this;
|
||||
slot_span->empty_cache_index = -1;
|
||||
|
||||
page->Reset();
|
||||
slot_span->Reset();
|
||||
|
||||
uint16_t num_partition_pages = get_pages_per_slot_span();
|
||||
char* page_char_ptr = reinterpret_cast<char*>(page);
|
||||
auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
|
||||
for (uint16_t i = 1; i < num_partition_pages; ++i) {
|
||||
page_char_ptr += kPageMetadataSize;
|
||||
auto* secondary_page =
|
||||
reinterpret_cast<PartitionPage<thread_safe>*>(page_char_ptr);
|
||||
secondary_page->page_offset = i;
|
||||
auto* secondary_page = page + i;
|
||||
secondary_page->slot_span_metadata_offset = i;
|
||||
}
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
|
||||
PartitionPage<thread_safe>* page) {
|
||||
PA_DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
|
||||
uint16_t num_slots = page->num_unprovisioned_slots;
|
||||
SlotSpanMetadata<thread_safe>* slot_span) {
|
||||
PA_DCHECK(slot_span !=
|
||||
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
|
||||
uint16_t num_slots = slot_span->num_unprovisioned_slots;
|
||||
PA_DCHECK(num_slots);
|
||||
// We should only get here when _every_ slot is either used or unprovisioned.
|
||||
// (The third state is "on the freelist". If we have a non-empty freelist, we
|
||||
// should not get here.)
|
||||
PA_DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
|
||||
PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
|
||||
// Similarly, make explicitly sure that the freelist is empty.
|
||||
PA_DCHECK(!page->freelist_head);
|
||||
PA_DCHECK(page->num_allocated_slots >= 0);
|
||||
PA_DCHECK(!slot_span->freelist_head);
|
||||
PA_DCHECK(slot_span->num_allocated_slots >= 0);
|
||||
|
||||
size_t size = slot_size;
|
||||
char* base =
|
||||
reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
|
||||
char* return_object = base + (size * page->num_allocated_slots);
|
||||
char* base = reinterpret_cast<char*>(
|
||||
SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
|
||||
char* return_object = base + (size * slot_span->num_allocated_slots);
|
||||
char* first_freelist_pointer = return_object + size;
|
||||
char* first_freelist_pointer_extent =
|
||||
first_freelist_pointer + sizeof(PartitionFreelistEntry*);
|
||||
@ -470,13 +471,13 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
|
||||
// sub page boundaries frequently for large bucket sizes.
|
||||
PA_DCHECK(num_new_freelist_entries + 1 <= num_slots);
|
||||
num_slots -= (num_new_freelist_entries + 1);
|
||||
page->num_unprovisioned_slots = num_slots;
|
||||
page->num_allocated_slots++;
|
||||
slot_span->num_unprovisioned_slots = num_slots;
|
||||
slot_span->num_allocated_slots++;
|
||||
|
||||
if (LIKELY(num_new_freelist_entries)) {
|
||||
char* freelist_pointer = first_freelist_pointer;
|
||||
auto* entry = reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
|
||||
page->freelist_head = entry;
|
||||
slot_span->freelist_head = entry;
|
||||
while (--num_new_freelist_entries) {
|
||||
freelist_pointer += size;
|
||||
auto* next_entry =
|
||||
@ -486,56 +487,57 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
|
||||
}
|
||||
entry->next = PartitionFreelistEntry::Encode(nullptr);
|
||||
} else {
|
||||
page->freelist_head = nullptr;
|
||||
slot_span->freelist_head = nullptr;
|
||||
}
|
||||
return return_object;
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
bool PartitionBucket<thread_safe>::SetNewActivePage() {
|
||||
PartitionPage<thread_safe>* page = active_pages_head;
|
||||
if (page == PartitionPage<thread_safe>::get_sentinel_page())
|
||||
bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
|
||||
SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
|
||||
if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
|
||||
return false;
|
||||
|
||||
PartitionPage<thread_safe>* next_page;
|
||||
SlotSpanMetadata<thread_safe>* next_slot_span;
|
||||
|
||||
for (; page; page = next_page) {
|
||||
next_page = page->next_page;
|
||||
PA_DCHECK(page->bucket == this);
|
||||
PA_DCHECK(page != empty_pages_head);
|
||||
PA_DCHECK(page != decommitted_pages_head);
|
||||
for (; slot_span; slot_span = next_slot_span) {
|
||||
next_slot_span = slot_span->next_slot_span;
|
||||
PA_DCHECK(slot_span->bucket == this);
|
||||
PA_DCHECK(slot_span != empty_slot_spans_head);
|
||||
PA_DCHECK(slot_span != decommitted_slot_spans_head);
|
||||
|
||||
if (LIKELY(page->is_active())) {
|
||||
// This page is usable because it has freelist entries, or has
|
||||
if (LIKELY(slot_span->is_active())) {
|
||||
// This slot span is usable because it has freelist entries, or has
|
||||
// unprovisioned slots we can create freelist entries from.
|
||||
active_pages_head = page;
|
||||
active_slot_spans_head = slot_span;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Deal with empty and decommitted pages.
|
||||
if (LIKELY(page->is_empty())) {
|
||||
page->next_page = empty_pages_head;
|
||||
empty_pages_head = page;
|
||||
} else if (LIKELY(page->is_decommitted())) {
|
||||
page->next_page = decommitted_pages_head;
|
||||
decommitted_pages_head = page;
|
||||
// Deal with empty and decommitted slot spans.
|
||||
if (LIKELY(slot_span->is_empty())) {
|
||||
slot_span->next_slot_span = empty_slot_spans_head;
|
||||
empty_slot_spans_head = slot_span;
|
||||
} else if (LIKELY(slot_span->is_decommitted())) {
|
||||
slot_span->next_slot_span = decommitted_slot_spans_head;
|
||||
decommitted_slot_spans_head = slot_span;
|
||||
} else {
|
||||
PA_DCHECK(page->is_full());
|
||||
// If we get here, we found a full page. Skip over it too, and also
|
||||
PA_DCHECK(slot_span->is_full());
|
||||
// If we get here, we found a full slot span. Skip over it too, and also
|
||||
// tag it as full (via a negative value). We need it tagged so that
|
||||
// free'ing can tell, and move it back into the active page list.
|
||||
page->num_allocated_slots = -page->num_allocated_slots;
|
||||
++num_full_pages;
|
||||
// num_full_pages is a uint16_t for efficient packing so guard against
|
||||
// overflow to be safe.
|
||||
if (UNLIKELY(!num_full_pages))
|
||||
// free'ing can tell, and move it back into the active list.
|
||||
slot_span->num_allocated_slots = -slot_span->num_allocated_slots;
|
||||
++num_full_slot_spans;
|
||||
// num_full_slot_spans is a uint16_t for efficient packing so guard
|
||||
// against overflow to be safe.
|
||||
if (UNLIKELY(!num_full_slot_spans))
|
||||
OnFull();
|
||||
// Not necessary but might help stop accidents.
|
||||
page->next_page = nullptr;
|
||||
slot_span->next_slot_span = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
active_pages_head = PartitionPage<thread_safe>::get_sentinel_page();
|
||||
active_slot_spans_head =
|
||||
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -546,14 +548,14 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
|
||||
size_t raw_size,
|
||||
bool* is_already_zeroed) {
|
||||
// The slow path is called when the freelist is empty.
|
||||
PA_DCHECK(!active_pages_head->freelist_head);
|
||||
PA_DCHECK(!active_slot_spans_head->freelist_head);
|
||||
|
||||
PartitionPage<thread_safe>* new_page = nullptr;
|
||||
// |new_page->bucket| will always be |this|, except when |this| is the
|
||||
SlotSpanMetadata<thread_safe>* new_slot_span = nullptr;
|
||||
// |new_slot_span->bucket| will always be |this|, except when |this| is the
|
||||
// sentinel bucket, which is used to signal a direct mapped allocation. In
|
||||
// this case |new_page_bucket| will be set properly later. This avoids a read
|
||||
// for most allocations.
|
||||
PartitionBucket* new_page_bucket = this;
|
||||
// this case |new_bucket| will be set properly later. This avoids a read for
|
||||
// most allocations.
|
||||
PartitionBucket* new_bucket = this;
|
||||
*is_already_zeroed = false;
|
||||
|
||||
// For the PartitionRoot::Alloc() API, we have a bunch of buckets
|
||||
@ -562,15 +564,15 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
|
||||
// branches.
|
||||
//
|
||||
// Note: The ordering of the conditionals matter! In particular,
|
||||
// SetNewActivePage() has a side-effect even when returning
|
||||
// false where it sweeps the active page list and may move things into
|
||||
// the empty or decommitted lists which affects the subsequent conditional.
|
||||
// SetNewActiveSlotSpan() has a side-effect even when returning
|
||||
// false where it sweeps the active list and may move things into the empty or
|
||||
// decommitted lists which affects the subsequent conditional.
|
||||
bool return_null = flags & PartitionAllocReturnNull;
|
||||
if (UNLIKELY(is_direct_mapped())) {
|
||||
PA_DCHECK(raw_size > kMaxBucketed);
|
||||
PA_DCHECK(this == &root->sentinel_bucket);
|
||||
PA_DCHECK(active_pages_head ==
|
||||
PartitionPage<thread_safe>::get_sentinel_page());
|
||||
PA_DCHECK(active_slot_spans_head ==
|
||||
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
|
||||
if (raw_size > MaxDirectMapped()) {
|
||||
if (return_null)
|
||||
return nullptr;
|
||||
@ -599,62 +601,65 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
|
||||
PartitionExcessiveAllocationSize(raw_size);
|
||||
IMMEDIATE_CRASH(); // Not required, kept as documentation.
|
||||
}
|
||||
new_page = PartitionDirectMap(root, flags, raw_size);
|
||||
if (new_page)
|
||||
new_page_bucket = new_page->bucket;
|
||||
// New pages from PageAllocator are always zeroed.
|
||||
new_slot_span = PartitionDirectMap(root, flags, raw_size);
|
||||
if (new_slot_span)
|
||||
new_bucket = new_slot_span->bucket;
|
||||
// Memory from PageAllocator is always zeroed.
|
||||
*is_already_zeroed = true;
|
||||
} else if (LIKELY(SetNewActivePage())) {
|
||||
// First, did we find an active page in the active pages list?
|
||||
new_page = active_pages_head;
|
||||
PA_DCHECK(new_page->is_active());
|
||||
} else if (LIKELY(empty_pages_head != nullptr) ||
|
||||
LIKELY(decommitted_pages_head != nullptr)) {
|
||||
// Second, look in our lists of empty and decommitted pages.
|
||||
// Check empty pages first, which are preferred, but beware that an
|
||||
// empty page might have been decommitted.
|
||||
while (LIKELY((new_page = empty_pages_head) != nullptr)) {
|
||||
PA_DCHECK(new_page->bucket == this);
|
||||
PA_DCHECK(new_page->is_empty() || new_page->is_decommitted());
|
||||
empty_pages_head = new_page->next_page;
|
||||
// Accept the empty page unless it got decommitted.
|
||||
if (new_page->freelist_head) {
|
||||
new_page->next_page = nullptr;
|
||||
} else if (LIKELY(SetNewActiveSlotSpan())) {
|
||||
// First, did we find an active slot span in the active list?
|
||||
new_slot_span = active_slot_spans_head;
|
||||
PA_DCHECK(new_slot_span->is_active());
|
||||
} else if (LIKELY(empty_slot_spans_head != nullptr) ||
|
||||
LIKELY(decommitted_slot_spans_head != nullptr)) {
|
||||
// Second, look in our lists of empty and decommitted slot spans.
|
||||
// Check empty slot spans first, which are preferred, but beware that an
|
||||
// empty slot span might have been decommitted.
|
||||
while (LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
|
||||
PA_DCHECK(new_slot_span->bucket == this);
|
||||
PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
|
||||
empty_slot_spans_head = new_slot_span->next_slot_span;
|
||||
// Accept the empty slot span unless it got decommitted.
if (new_slot_span->freelist_head) {
new_slot_span->next_slot_span = nullptr;
break;
}
PA_DCHECK(new_page->is_decommitted());
new_page->next_page = decommitted_pages_head;
decommitted_pages_head = new_page;
PA_DCHECK(new_slot_span->is_decommitted());
new_slot_span->next_slot_span = decommitted_slot_spans_head;
decommitted_slot_spans_head = new_slot_span;
}
if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
new_page = decommitted_pages_head;
PA_DCHECK(new_page->bucket == this);
PA_DCHECK(new_page->is_decommitted());
decommitted_pages_head = new_page->next_page;
void* addr = PartitionPage<thread_safe>::ToPointer(new_page);
root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
new_page->Reset();
if (UNLIKELY(!new_slot_span) &&
LIKELY(decommitted_slot_spans_head != nullptr)) {
new_slot_span = decommitted_slot_spans_head;
PA_DCHECK(new_slot_span->bucket == this);
PA_DCHECK(new_slot_span->is_decommitted());
decommitted_slot_spans_head = new_slot_span->next_slot_span;
void* addr = SlotSpanMetadata<thread_safe>::ToPointer(new_slot_span);
root->RecommitSystemPages(addr,
new_slot_span->bucket->get_bytes_per_span());
new_slot_span->Reset();
*is_already_zeroed = kDecommittedPagesAreAlwaysZeroed;
}
PA_DCHECK(new_page);
PA_DCHECK(new_slot_span);
} else {
// Third. If we get here, we need a brand new page.
// Third. If we get here, we need a brand new slot span.
uint16_t num_partition_pages = get_pages_per_slot_span();
void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages,
get_bytes_per_span());
if (LIKELY(raw_pages != nullptr)) {
new_page =
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(raw_pages);
InitializeSlotSpan(new_page);
// New pages from PageAllocator are always zeroed.
void* raw_memory = AllocNewSlotSpan(root, flags, num_partition_pages,
get_bytes_per_span());
if (LIKELY(raw_memory != nullptr)) {
new_slot_span =
SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(
raw_memory);
InitializeSlotSpan(new_slot_span);
// New memory from PageAllocator is always zeroed.
*is_already_zeroed = true;
}
}

// Bail if we had a memory allocation failure.
if (UNLIKELY(!new_page)) {
PA_DCHECK(active_pages_head ==
PartitionPage<thread_safe>::get_sentinel_page());
if (UNLIKELY(!new_slot_span)) {
PA_DCHECK(active_slot_spans_head ==
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
if (return_null)
return nullptr;
// See comment above.
@ -663,24 +668,24 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
IMMEDIATE_CRASH(); // Not required, kept as documentation.
}

PA_DCHECK(new_page_bucket != &root->sentinel_bucket);
new_page_bucket->active_pages_head = new_page;
if (new_page->CanStoreRawSize())
new_page->SetRawSize(raw_size);
PA_DCHECK(new_bucket != &root->sentinel_bucket);
new_bucket->active_slot_spans_head = new_slot_span;
if (new_slot_span->CanStoreRawSize())
new_slot_span->SetRawSize(raw_size);

// If we found an active page with free slots, or an empty page, we have a
// usable freelist head.
if (LIKELY(new_page->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_page->freelist_head;
// If we found an active slot span with free slots, or an empty slot span, we
// have a usable freelist head.
if (LIKELY(new_slot_span->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_slot_span->freelist_head;
PartitionFreelistEntry* new_head =
EncodedPartitionFreelistEntry::Decode(entry->next);
new_page->freelist_head = new_head;
new_page->num_allocated_slots++;
new_slot_span->freelist_head = new_head;
new_slot_span->num_allocated_slots++;
return entry;
}
// Otherwise, we need to build the freelist.
PA_DCHECK(new_page->num_unprovisioned_slots);
return AllocAndFillFreelist(new_page);
PA_DCHECK(new_slot_span->num_unprovisioned_slots);
return AllocAndFillFreelist(new_slot_span);
}
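
// Editor's illustrative sketch (not part of this CL): the tail of
// SlowPathAlloc() above hands out one slot by popping the span's freelist.
// A minimal, self-contained model of that pop is shown below; the types are
// hypothetical stand-ins and, unlike the real code, the |next| links are not
// encoded.
struct ToyFreelistEntry {
  ToyFreelistEntry* next;
};
struct ToyFreelistSpan {
  ToyFreelistEntry* freelist_head = nullptr;
  int num_allocated_slots = 0;
};
// Returns the first free slot, or nullptr if the span has none left.
inline void* ToyAllocFromSpan(ToyFreelistSpan* span) {
  ToyFreelistEntry* entry = span->freelist_head;
  if (!entry)
    return nullptr;
  span->freelist_head = entry->next;  // Decode() happens here in the real code.
  span->num_allocated_slots++;
  return entry;
}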

template struct PartitionBucket<ThreadSafe>;

@ -22,13 +22,13 @@ namespace internal {
template <bool thread_safe>
struct PartitionBucket {
// Accessed most in hot path => goes first.
PartitionPage<thread_safe>* active_pages_head;
SlotSpanMetadata<thread_safe>* active_slot_spans_head;

PartitionPage<thread_safe>* empty_pages_head;
PartitionPage<thread_safe>* decommitted_pages_head;
SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
uint32_t slot_size;
uint32_t num_system_pages_per_slot_span : 8;
uint32_t num_full_pages : 24;
uint32_t num_full_slot_spans : 24;

// `slot_size_reciprocal` is used to improve the performance of
// `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
@ -57,7 +57,7 @@ struct PartitionBucket {
// there is no need to call memset on fresh pages; the OS has already zeroed
// them. (See |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in PartitionPage.
// Note the matching Free() functions are in SlotSpanMetadata.
BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t raw_size,
@ -86,17 +86,17 @@ struct PartitionBucket {
return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
}

// This helper function scans a bucket's active page list for a suitable new
// active page. When it finds a suitable new active page (one that has
// free slots and is not empty), it is set as the new active page. If there
// is no suitable new active page, the current active page is set to
// PartitionPage::get_sentinel_page(). As potential pages are scanned, they
// are tidied up according to their state. Empty pages are swept on to the
// empty page list, decommitted pages on to the decommitted page list and full
// pages are unlinked from any list.
// This helper function scans a bucket's active slot span list for a suitable
// new active slot span. When it finds a suitable new active slot span (one
// that has free slots and is not empty), it is set as the new active slot
// span. If there is no suitable new active slot span, the current active slot
// span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
// slot spans are scanned, they are tidied up according to their state. Empty
// slot spans are swept on to the empty list, decommitted slot spans on to the
// decommitted list and full slot spans are unlinked from any list.
//
// This is where the guts of the bucket maintenance is done!
bool SetNewActivePage();
bool SetNewActiveSlotSpan();
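
// Editor's illustrative sketch (not part of this CL): the scan described in
// the comment above, reduced to a standalone model. All names here are
// hypothetical; the real SetNewActiveSlotSpan() also deals with the sentinel
// span, provisioning counts and the decommit-on-scan trick.
enum class ToySpanState { kActive, kFull, kEmpty, kDecommitted };
struct ToySpan {
  ToySpan* next = nullptr;
  ToySpanState state = ToySpanState::kActive;
};
struct ToyBucket {
  ToySpan* active_head = nullptr;
  ToySpan* empty_head = nullptr;
  ToySpan* decommitted_head = nullptr;

  // Walks the active list until a usable span is found, sweeping every other
  // span onto the list matching its state (full spans are just unlinked).
  bool SetNewActiveSpan() {
    ToySpan* span = active_head;
    while (span) {
      ToySpan* next = span->next;
      if (span->state == ToySpanState::kActive) {
        active_head = span;
        return true;
      }
      if (span->state == ToySpanState::kEmpty) {
        span->next = empty_head;
        empty_head = span;
      } else if (span->state == ToySpanState::kDecommitted) {
        span->next = decommitted_head;
        decommitted_head = span;
      }
      span = next;
    }
    active_head = nullptr;  // The real code points at a sentinel instead.
    return false;
  }
};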

// Returns an offset within an allocation slot.
ALWAYS_INLINE size_t GetSlotOffset(size_t offset_in_slot_span) {
@ -122,17 +122,17 @@ struct PartitionBucket {
private:
static NOINLINE void OnFull();

// Returns a natural number of PartitionPages (calculated by
// Returns a natural number of partition pages (calculated by
// get_system_pages_per_slot_span()) to allocate from the current
// SuperPage when the bucket runs out of slots.
// super page when the bucket runs out of slots.
ALWAYS_INLINE uint16_t get_pages_per_slot_span();

// Returns the number of system pages in a slot span.
//
// The calculation attemps to find the best number of System Pages to
// The calculation attempts to find the best number of system pages to
// allocate for the given slot_size to minimize wasted space. It uses a
// heuristic that looks at number of bytes wasted after the last slot and
// attempts to account for the PTE usage of each System Page.
// attempts to account for the PTE usage of each system page.
uint8_t get_system_pages_per_slot_span();
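
// Editor's illustrative sketch (not part of this CL): one way to picture the
// waste heuristic described above. The scoring is a deliberate
// simplification; the real get_system_pages_per_slot_span() also weighs the
// PTE cost of committing extra system pages and special-cases tiny buckets.
inline size_t ToySystemPagesPerSlotSpan(size_t slot_size,
                                        size_t system_page_size,
                                        size_t max_pages) {
  size_t best_pages = 1;
  size_t best_waste = slot_size;  // Any real waste value is smaller.
  for (size_t pages = 1; pages <= max_pages; ++pages) {
    size_t waste = (pages * system_page_size) % slot_size;
    if (waste < best_waste) {  // Fewer bytes left past the last whole slot.
      best_waste = waste;
      best_pages = pages;
    }
  }
  return best_pages;
}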

// Allocates a new slot span with size |num_partition_pages| from the
@ -146,16 +146,19 @@ struct PartitionBucket {

// Each bucket allocates a slot span when it runs out of slots.
// A slot span's size is equal to get_pages_per_slot_span() number of
// PartitionPages. This function initializes all PartitionPage within the
// partition pages. This function initializes all PartitionPage within the
// span to point to the first PartitionPage which holds all the metadata
// for the span and registers this bucket as the owner of the span. It does
// NOT put the slots into the bucket's freelist.
ALWAYS_INLINE void InitializeSlotSpan(PartitionPage<thread_safe>* page);
// for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
// as the owner of the span. It does NOT put the slots into the bucket's
// freelist.
ALWAYS_INLINE void InitializeSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span);

// Allocates one slot from the given |page| and then adds the remainder to
// the current bucket. If the |page| was freshly allocated, it must have been
// passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page);
// Allocates one slot from the given |slot_span| and then adds the remainder
// to the current bucket. If the |slot_span| was freshly allocated, it must
// have been passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(
SlotSpanMetadata<thread_safe>* slot_span);
};

} // namespace internal

@ -20,8 +20,8 @@ struct PartitionDirectMapExtent {
PartitionBucket<thread_safe>* bucket;
size_t map_size; // Mapped size, not including guard pages and meta-data.

ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromPage(
PartitionPage<thread_safe>* page);
ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span);
};

// Metadata page for direct-mapped allocations.
@ -39,9 +39,10 @@ struct PartitionDirectMapMetadata {

template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromPage(
PartitionPage<thread_safe>* page) {
PA_DCHECK(page->bucket->is_direct_mapped());
PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span) {
PA_DCHECK(slot_span->bucket->is_direct_mapped());
auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
// The page passed here is always |page| in |PartitionDirectMapMetadata|
// above. To get the metadata structure, need to get the invalid page address.
auto* first_invalid_page = page - 1;

@ -9,6 +9,7 @@
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/check.h"
#include "base/feature_list.h"
@ -22,11 +23,10 @@ namespace {

template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
PartitionDirectUnmap(SlotSpanMetadata<thread_safe>* slot_span) {
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
root->lock_.AssertAcquired();
const PartitionDirectMapExtent<thread_safe>* extent =
PartitionDirectMapExtent<thread_safe>::FromPage(page);
auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);
size_t unmap_size = extent->map_size;

// Maintain the doubly-linked list of all direct mappings.
@ -45,15 +45,16 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
// page.
unmap_size += PartitionPageSize() + SystemPageSize();

size_t uncommitted_page_size = page->bucket->slot_size + SystemPageSize();
size_t uncommitted_page_size =
slot_span->bucket->slot_size + SystemPageSize();
root->DecreaseCommittedPages(uncommitted_page_size);
PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
root->total_size_of_direct_mapped_pages -= uncommitted_page_size;

PA_DCHECK(!(unmap_size & PageAllocationGranularityOffsetMask()));

char* ptr =
reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
char* ptr = reinterpret_cast<char*>(
SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= PartitionPageSize();
@ -61,78 +62,82 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionRegisterEmptyPage(
PartitionPage<thread_safe>* page) {
PA_DCHECK(page->is_empty());
PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
ALWAYS_INLINE void PartitionRegisterEmptySlotSpan(
SlotSpanMetadata<thread_safe>* slot_span) {
PA_DCHECK(slot_span->is_empty());
PartitionRoot<thread_safe>* root =
PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
root->lock_.AssertAcquired();

// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
PA_DCHECK(page->empty_cache_index >= 0);
PA_DCHECK(static_cast<unsigned>(page->empty_cache_index) <
// If the slot span is already registered as empty, give it another life.
if (slot_span->empty_cache_index != -1) {
PA_DCHECK(slot_span->empty_cache_index >= 0);
PA_DCHECK(static_cast<unsigned>(slot_span->empty_cache_index) <
kMaxFreeableSpans);
PA_DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
root->global_empty_page_ring[page->empty_cache_index] = nullptr;
PA_DCHECK(root->global_empty_slot_span_ring[slot_span->empty_cache_index] ==
slot_span);
root->global_empty_slot_span_ring[slot_span->empty_cache_index] = nullptr;
}

int16_t current_index = root->global_empty_page_ring_index;
PartitionPage<thread_safe>* page_to_decommit =
root->global_empty_page_ring[current_index];
// The page might well have been re-activated, filled up, etc. before we get
// around to looking at it here.
if (page_to_decommit)
page_to_decommit->DecommitIfPossible(root);
int16_t current_index = root->global_empty_slot_span_ring_index;
SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
root->global_empty_slot_span_ring[current_index];
// The slot span might well have been re-activated, filled up, etc. before we
// get around to looking at it here.
if (slot_span_to_decommit)
slot_span_to_decommit->DecommitIfPossible(root);

// We put the empty slot span on our global list of "pages that were once
// We put the empty slot span on our global list of "slot spans that were once
// empty". thus providing it a bit of breathing room to get re-used before
// we really free it. This improves performance, particularly on Mac OS X
// which has subpar memory management performance.
root->global_empty_page_ring[current_index] = page;
page->empty_cache_index = current_index;
root->global_empty_slot_span_ring[current_index] = slot_span;
slot_span->empty_cache_index = current_index;
++current_index;
if (current_index == kMaxFreeableSpans)
current_index = 0;
root->global_empty_page_ring_index = current_index;
root->global_empty_slot_span_ring_index = current_index;
}
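
// Editor's illustrative sketch (not part of this CL): the "breathing room"
// ring used by PartitionRegisterEmptySlotSpan() above, reduced to its core.
// kToyRingSize and the decommit callback stand in for kMaxFreeableSpans and
// SlotSpanMetadata::DecommitIfPossible(); the real code also rechecks whether
// the evicted span is still empty before decommitting it.
template <typename Span, size_t kToyRingSize = 16>
struct ToyEmptySpanRing {
  Span* ring[kToyRingSize] = {};
  size_t index = 0;

  // Registers |span| as recently emptied; whatever has been sitting in this
  // slot of the ring the longest gets decommitted to make room.
  template <typename DecommitFn>
  void Register(Span* span, DecommitFn decommit) {
    if (Span* oldest = ring[index])
      decommit(oldest);
    ring[index] = span;
    index = (index + 1) % kToyRingSize;
  }
};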

} // namespace

// static
template <bool thread_safe>
PartitionPage<thread_safe> PartitionPage<thread_safe>::sentinel_page_;
SlotSpanMetadata<thread_safe>
SlotSpanMetadata<thread_safe>::sentinel_slot_span_;

// static
template <bool thread_safe>
PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() {
return &sentinel_page_;
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
return &sentinel_slot_span_;
}

template <bool thread_safe>
DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
DeferredUnmap SlotSpanMetadata<thread_safe>::FreeSlowPath() {
#if DCHECK_IS_ON()
auto* root = PartitionRoot<thread_safe>::FromPage(this);
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
root->lock_.AssertAcquired();
#endif
PA_DCHECK(this != get_sentinel_page());
PA_DCHECK(this != get_sentinel_slot_span());
if (LIKELY(num_allocated_slots == 0)) {
// Page became fully unused.
// Slot span became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
return PartitionDirectUnmap(this);
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
bucket->SetNewActivePage();
PA_DCHECK(bucket->active_pages_head != this);
// If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_slot_spans_head))
bucket->SetNewActiveSlotSpan();
PA_DCHECK(bucket->active_slot_spans_head != this);

if (CanStoreRawSize())
SetRawSize(0);

PartitionRegisterEmptyPage(this);
PartitionRegisterEmptySlotSpan(this);
} else {
PA_DCHECK(!bucket->is_direct_mapped());
// Ensure that the page is full. That's the only valid case if we
// Ensure that the slot span is full. That's the only valid case if we
// arrive here.
PA_DCHECK(num_allocated_slots < 0);
// A transition of num_allocated_slots from 0 to -1 is not legal, and
@ -140,16 +145,16 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
PA_CHECK(num_allocated_slots != -1);
num_allocated_slots = -num_allocated_slots - 2;
PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
PA_DCHECK(!next_page);
if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
next_page = bucket->active_pages_head;
bucket->active_pages_head = this;
--bucket->num_full_pages;
// Special case: for a partition page with just a single slot, it may
// Fully used slot span became partially used. It must be put back on the
// non-full list. Also make it the current slot span to increase the
// chances of it being filled up again. The old current slot span will be
// the next slot span.
PA_DCHECK(!next_slot_span);
if (LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
next_slot_span = bucket->active_slot_spans_head;
bucket->active_slot_spans_head = this;
--bucket->num_full_slot_spans;
// Special case: for a partition slot span with just a single slot, it may
// now be empty and we want to run it through the empty logic.
if (UNLIKELY(num_allocated_slots == 0))
return FreeSlowPath();
@ -158,31 +163,30 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
}

template <bool thread_safe>
void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
PA_DCHECK(is_empty());
PA_DCHECK(!bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(this);
void* addr = SlotSpanMetadata::ToPointer(this);
root->DecommitSystemPages(addr, bucket->get_bytes_per_span());

// We actually leave the decommitted page in the active list. We'll sweep
// it on to the decommitted page list when we next walk the active page
// list.
// Pulling this trick enables us to use a singly-linked page list for all
// cases, which is critical in keeping the page metadata structure down to
// 32 bytes in size.
// We actually leave the decommitted slot span in the active list. We'll sweep
// it on to the decommitted list when we next walk the active list.
// Pulling this trick enables us to use a singly-linked list for all
// cases, which is critical in keeping the slot span metadata structure down
// to 32 bytes in size.
freelist_head = nullptr;
num_unprovisioned_slots = 0;
PA_DCHECK(is_decommitted());
}

template <bool thread_safe>
void PartitionPage<thread_safe>::DecommitIfPossible(
void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
PA_DCHECK(empty_cache_index >= 0);
PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
PA_DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index]);
empty_cache_index = -1;
if (is_empty())
Decommit(root);
@ -201,8 +205,8 @@ void DeferredUnmap::Unmap() {
}
}

template struct PartitionPage<ThreadSafe>;
template struct PartitionPage<NotThreadSafe>;
template struct SlotSpanMetadata<ThreadSafe>;
template struct SlotSpanMetadata<NotThreadSafe>;

} // namespace internal
} // namespace base

@ -37,8 +37,8 @@ static_assert(
sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
"PartitionSuperPageExtentEntry must be able to fit in a metadata slot");

// PartitionPage::Free() defers unmapping a large page until the lock is
// released. Callers of PartitionPage::Free() must invoke Run().
// SlotSpanMetadata::Free() defers unmapping a large page until the lock is
// released. Callers of SlotSpanMetadata::Free() must invoke Run().
// TODO(1061437): Reconsider once the new locking mechanism is implemented.
struct DeferredUnmap {
void* ptr = nullptr;
@ -55,58 +55,46 @@ struct DeferredUnmap {
using QuarantineBitmap =
ObjectBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;

// Some notes on page states. A page can be in one of four major states:
// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no free slots, and a decommitted page is an empty page that
// had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
// An active slot span has available free slots. A full slot span has no free
// slots. An empty slot span has no free slots, and a decommitted slot span is
// an empty one that had its backing memory released back to the system.
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// empty list or it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full, empty and decommitted pages encountered will be
// booted out of the active list. If there are no suitable active pages found,
// an empty or decommitted page (if one exists) will be pulled from the empty
// list on to the active list.
// There are two linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list is an accurate list of slot
// spans which are either empty or decommitted.
//
// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
// similar. If so, all uses of the term "page" in comments, member variables,
// local variables, and documentation that refer to this concept should be
// updated.
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
// return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
// the empty list or it _may_ leave it on the active list until a future
// list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full, empty and decommitted slot spans encountered will be
// booted out of the active list. If there are no suitable active slot spans
// found, an empty or decommitted slot spans (if one exists) will be pulled
// from the empty list on to the active list.
template <bool thread_safe>
struct PartitionPage {
union {
struct {
PartitionFreelistEntry* freelist_head;
PartitionPage<thread_safe>* next_page;
PartitionBucket<thread_safe>* bucket;
// Deliberately signed, 0 for empty or decommitted page, -n for full
// pages:
int16_t num_allocated_slots;
uint16_t num_unprovisioned_slots;
uint16_t page_offset;
int16_t empty_cache_index; // -1 if not in the empty cache.
};
struct __attribute__((packed)) SlotSpanMetadata {
PartitionFreelistEntry* freelist_head;
SlotSpanMetadata<thread_safe>* next_slot_span;
PartitionBucket<thread_safe>* bucket;

// Deliberately signed, 0 for empty or decommitted slot spans, -n for full
// slot spans:
int16_t num_allocated_slots;
uint16_t num_unprovisioned_slots;
int16_t empty_cache_index; // -1 if not in the empty cache.

// sizeof(PartitionPage) must always be:
// - a power of 2 (for fast modulo operations)
// - below kPageMetadataSize
//
// This makes sure that this is respected no matter the architecture.
char optional_padding[kPageMetadataSize];
};
// Public API
// Note the matching Alloc() functions are in PartitionPage.
// Callers must invoke DeferredUnmap::Run() after releasing the lock.
@ -116,12 +104,13 @@ struct PartitionPage {
void Decommit(PartitionRoot<thread_safe>* root);
void DecommitIfPossible(PartitionRoot<thread_safe>* root);

// Pointer manipulation functions. These must be static as the input |page|
// pointer may be the result of an offset calculation and therefore cannot
// be trusted. The objective of these functions is to sanitize this input.
ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
// Pointer manipulation functions. These must be static as the input
// |slot_span| pointer may be the result of an offset calculation and
// therefore cannot be trusted. The objective of these functions is to
// sanitize this input.
ALWAYS_INLINE static void* ToPointer(const SlotSpanMetadata* slot_span);
ALWAYS_INLINE static SlotSpanMetadata* FromPointer(void* ptr);
ALWAYS_INLINE static SlotSpanMetadata* FromPointerNoAlignmentCheck(void* ptr);

// Checks if it is feasible to store raw_size.
ALWAYS_INLINE bool CanStoreRawSize() const;
@ -146,7 +135,7 @@ struct PartitionPage {
ALWAYS_INLINE void Reset();

// TODO(ajwong): Can this be made private? https://crbug.com/787153
BASE_EXPORT static PartitionPage* get_sentinel_page();
BASE_EXPORT static SlotSpanMetadata* get_sentinel_slot_span();

// Page State accessors.
// Note that it's only valid to call these functions on pages found on one of
@ -163,17 +152,74 @@ struct PartitionPage {
ALWAYS_INLINE bool is_decommitted() const;

private:
// g_sentinel_page is used as a sentinel to indicate that there is no page
// in the active page list. We can use nullptr, but in that case we need
// to add a null-check branch to the hot allocation path. We want to avoid
// that.
// sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
// span in the active list. We could use nullptr, but in that case we need to
// add a null-check branch to the hot allocation path. We want to avoid that.
//
// Note, this declaration is kept in the header as opposed to an anonymous
// namespace so the getter can be fully inlined.
static PartitionPage sentinel_page_;
static SlotSpanMetadata sentinel_slot_span_;
};
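
// Editor's illustrative sketch (not part of this CL): the four states from
// the comment above SlotSpanMetadata, expressed as standalone predicates over
// the same three fields. ToySpanFields is a hypothetical stand-in; the real
// accessors (is_active() and friends, defined below) additionally DCHECK
// against the sentinel slot span, and full spans are also flagged with a
// negative count on the free path.
struct ToySpanFields {
  void* freelist_head = nullptr;
  int16_t num_allocated_slots = 0;
  uint16_t num_unprovisioned_slots = 0;
};
inline bool ToyIsActive(const ToySpanFields& s) {
  return s.num_allocated_slots > 0 &&
         (s.freelist_head || s.num_unprovisioned_slots);
}
inline bool ToyIsFull(const ToySpanFields& s, int slots_per_span) {
  return s.num_allocated_slots == slots_per_span;
}
inline bool ToyIsEmpty(const ToySpanFields& s) {
  return s.num_allocated_slots == 0 && s.freelist_head;
}
inline bool ToyIsDecommitted(const ToySpanFields& s) {
  return s.num_allocated_slots == 0 && !s.freelist_head;
}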

// Metadata of a non-first partition page in a slot span.
struct SubsequentPageMetadata {
// Raw size is the size needed to satisfy the allocation (requested size +
// extras). If available, it can be used to report better statistics or to
// bring protective cookie closer to the allocated memory.
//
// It can be used only if:
// - there is no more than one slot in the slot span (otherwise we wouldn't
// know which slot the raw size applies to)
// - there is more than one partition page in the slot span (the metadata of
// the first one is used to store slot information, but the second one is
// available for extra information)
size_t raw_size;
};

// Each partition page has metadata associated with it. The metadata of the
// first page of a slot span, describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
template <bool thread_safe>
struct PartitionPage {
// "Pack" the union so that slot_span_metadata_offset still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
union __attribute__((packed)) {
SlotSpanMetadata<thread_safe> slot_span_metadata;

SubsequentPageMetadata subsequent_page_metadata;

// sizeof(PartitionPageMetadata) must always be:
// - a power of 2 (for fast modulo operations)
// - below kPageMetadataSize
//
// This makes sure that this is respected no matter the architecture.
char optional_padding[kPageMetadataSize - sizeof(uint16_t)];
};

// The first PartitionPage of the slot span holds its metadata. This offset
// tells how many pages in from that first page we are.
uint16_t slot_span_metadata_offset;

ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
};

static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");
static_assert(sizeof(PartitionPage<NotThreadSafe>) == kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");

// Certain functions rely on PartitionPage being either SlotSpanMetadata or
// SubsequentPageMetadata, and therefore freely casting between each other.
static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
0,
"");
static_assert(offsetof(PartitionPage<NotThreadSafe>, slot_span_metadata) == 0,
"");
static_assert(offsetof(PartitionPage<NotThreadSafe>,
subsequent_page_metadata) == 0,
"");

ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
@ -195,6 +241,14 @@ ALWAYS_INLINE bool IsWithinSuperPagePayload(bool with_pcscan, void* ptr) {
return ptr_as_uint >= payload_start && ptr_as_uint < payload_end;
}

// See the comment for |FromPointer|.
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
return reinterpret_cast<SlotSpanMetadata*>(
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr));
}

// See the comment for |FromPointer|.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
@ -209,24 +263,22 @@ PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
// pages.
PA_DCHECK(partition_page_index);
PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
auto* page = reinterpret_cast<PartitionPage*>(
auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
// Partition pages in the same slot span share the same page object. Adjust
// Partition pages in the same slot span share the same slot span metadata
// object (located in the first PartitionPage object of that span). Adjust
// for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
page -= page->slot_span_metadata_offset;
return page;
}

// Converts from a pointer to the PartitionPage object (within super pages's
// metadata) into a pointer to the beginning of the partition page.
// This doesn't have to be the first page in the slot span.
// Converts from a pointer to the SlotSpanMetadata object (within super pages's
// metadata) into a pointer to the beginning of the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
const PartitionPage<thread_safe>* page) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToPointer(
const SlotSpanMetadata* slot_span) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);

uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

@ -250,27 +302,23 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
return ret;
}

// Converts from a pointer inside a partition page into a pointer to the
// PartitionPage object (within super pages's metadata).
// The first PartitionPage of the slot span will be returned, regardless where
// inside of the slot span |ptr| points to.
// Converts from a pointer inside a slot span into a pointer to the
// SlotSpanMetadata object (within super pages's metadata).
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
PA_DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
page->bucket->slot_size));
return page;
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromPointer(void* ptr) {
SlotSpanMetadata* slot_span =
SlotSpanMetadata::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of slot size.
PA_DCHECK(
!((reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(SlotSpanMetadata::ToPointer(slot_span))) %
slot_span->bucket->slot_size));
return slot_span;
}
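
// Editor's illustrative sketch (not part of this CL): the alignment check in
// FromPointer() above, in isolation. Names are hypothetical; only the
// arithmetic mirrors the PA_DCHECK.
inline bool ToyPointsAtSlotBoundary(uintptr_t ptr,
                                    uintptr_t slot_span_start,
                                    size_t slot_size) {
  return (ptr - slot_span_start) % slot_size == 0;
}
// Example: with 32-byte slots and a span starting at 0x1000, 0x1040 (the
// third slot) passes the check, while an interior pointer such as 0x1044
// does not.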

template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::CanStoreRawSize() const {
// Raw size is the size needed to satisfy the allocation (requested size +
// extras). If available, it can be used to report better statistics or to
// bring protective cookie closer to the allocated memory.
//
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::CanStoreRawSize() const {
// For direct-map as well as single-slot slot spans (recognized by checking
// against |kMaxPartitionPagesPerSlotSpan|), we have some spare metadata space
// in subsequent PartitionPage to store the raw size. It isn't only metadata
@ -287,24 +335,24 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::CanStoreRawSize() const {
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionPage<thread_safe>::SetRawSize(size_t raw_size) {
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(size_t raw_size) {
PA_DCHECK(CanStoreRawSize());
PartitionPage* the_next_page = this + 1;
the_next_page->freelist_head =
reinterpret_cast<PartitionFreelistEntry*>(raw_size);
auto* the_next_page = reinterpret_cast<PartitionPage<thread_safe>*>(this) + 1;
the_next_page->subsequent_page_metadata.raw_size = raw_size;
}

template <bool thread_safe>
ALWAYS_INLINE size_t PartitionPage<thread_safe>::GetRawSize() const {
ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
PA_DCHECK(CanStoreRawSize());
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<size_t>(the_next_page->freelist_head);
auto* the_next_page =
reinterpret_cast<const PartitionPage<thread_safe>*>(this) + 1;
return the_next_page->subsequent_page_metadata.raw_size;
}

template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
ALWAYS_INLINE DeferredUnmap SlotSpanMetadata<thread_safe>::Free(void* ptr) {
#if DCHECK_IS_ON()
auto* root = PartitionRoot<thread_safe>::FromPage(this);
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
root->lock_.AssertAcquired();
#endif

@ -329,17 +377,15 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
}

template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const {
PA_DCHECK(this != get_sentinel_page());
PA_DCHECK(!page_offset);
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
PA_DCHECK(this != get_sentinel_slot_span());
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}

template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
PA_DCHECK(this != get_sentinel_page());
PA_DCHECK(!page_offset);
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
PA_DCHECK(this != get_sentinel_slot_span());
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
PA_DCHECK(!freelist_head);
@ -349,16 +395,14 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
}

template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const {
PA_DCHECK(this != get_sentinel_page());
PA_DCHECK(!page_offset);
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
PA_DCHECK(this != get_sentinel_slot_span());
return (!num_allocated_slots && freelist_head);
}

template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
PA_DCHECK(this != get_sentinel_page());
PA_DCHECK(!page_offset);
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
PA_DCHECK(this != get_sentinel_slot_span());
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
PA_DCHECK(!num_unprovisioned_slots);
@ -368,13 +412,13 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionPage<thread_safe>::Reset() {
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
PA_DCHECK(is_decommitted());

num_unprovisioned_slots = bucket->get_slots_per_span();
PA_DCHECK(num_unprovisioned_slots);

next_page = nullptr;
next_slot_span = nullptr;
}

ALWAYS_INLINE void DeferredUnmap::Run() {

@ -17,11 +17,12 @@ namespace internal {
// TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
// is no longer used.
void PartitionRefCount::Free() {
auto* page = PartitionPage<ThreadSafe>::FromPointerNoAlignmentCheck(this);
auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
auto* slot_span =
SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(this);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);

#ifdef ADDRESS_SANITIZER
size_t utilized_slot_size = page->GetUtilizedSlotSize();
size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
// PartitionRefCount is required to be allocated inside a `PartitionRoot` that
// supports extras.
PA_DCHECK(root->allow_extras);
@ -31,15 +32,15 @@ void PartitionRefCount::Free() {
#endif

if (root->is_thread_safe) {
root->RawFree(this, page);
root->RawFree(this, slot_span);
return;
}

auto* non_thread_safe_page =
reinterpret_cast<PartitionPage<NotThreadSafe>*>(page);
auto* non_thread_safe_slot_span =
reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
auto* non_thread_safe_root =
reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
non_thread_safe_root->RawFree(this, non_thread_safe_page);
non_thread_safe_root->RawFree(this, non_thread_safe_slot_span);
}

#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR

@ -52,7 +52,7 @@ struct PartitionOptions {
// PartitionAllocator.
template <bool thread_safe>
struct BASE_EXPORT PartitionRoot {
using Page = internal::PartitionPage<thread_safe>;
using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
using Bucket = internal::PartitionBucket<thread_safe>;
using SuperPageExtentEntry =
internal::PartitionSuperPageExtentEntry<thread_safe>;
@ -92,8 +92,8 @@ struct BASE_EXPORT PartitionRoot {
SuperPageExtentEntry* current_extent = nullptr;
SuperPageExtentEntry* first_extent = nullptr;
DirectMapExtent* direct_map_list = nullptr;
Page* global_empty_page_ring[kMaxFreeableSpans] = {};
int16_t global_empty_page_ring_index = 0;
SlotSpan* global_empty_slot_span_ring[kMaxFreeableSpans] = {};
int16_t global_empty_slot_span_ring_index = 0;

// Integrity check = ~reinterpret_cast<uintptr_t>(this).
uintptr_t inverted_self = 0;
@ -119,16 +119,16 @@ struct BASE_EXPORT PartitionRoot {
//
// Allocates out of the given bucket. Properly, this function should probably
// be in PartitionBucket, but because the implementation needs to be inlined
// for performance, and because it needs to inspect PartitionPage,
// for performance, and because it needs to inspect SlotSpanMetadata,
// it becomes impossible to have it in PartitionBucket as this causes a
// cyclical dependency on PartitionPage function implementations.
// cyclical dependency on SlotSpanMetadata function implementations.
//
// Moving it a layer lower couples PartitionRoot and PartitionBucket, but
// preserves the layering of the includes.
void Init(PartitionOptions);

ALWAYS_INLINE static bool IsValidPage(Page* page);
ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);

ALWAYS_INLINE void IncreaseCommittedPages(size_t len)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
@ -181,14 +181,14 @@ struct BASE_EXPORT PartitionRoot {
// Same as |Free()|, bypasses the allocator hooks.
ALWAYS_INLINE static void FreeNoHooks(void* ptr);
// Immediately frees the pointer bypassing the quarantine.
ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, Page* page);
ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, SlotSpan* slot_span);

ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
ALWAYS_INLINE size_t GetSize(void* ptr) const;
ALWAYS_INLINE size_t ActualSize(size_t size);

// Frees memory from this partition, if possible, by decommitting pages.
// |flags| is an OR of base::PartitionPurgeFlags.
// Frees memory from this partition, if possible, by decommitting pages or
// even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
void PurgeMemory(int flags);
|
||||
|
||||
void DumpStats(const char* partition_name,
|
||||
@ -198,7 +198,7 @@ struct BASE_EXPORT PartitionRoot {
|
||||
static uint16_t SizeToBucketIndex(size_t size);
|
||||
|
||||
// Frees memory, with |ptr| as returned by |RawAlloc()|.
|
||||
ALWAYS_INLINE void RawFree(void* ptr, Page* page);
|
||||
ALWAYS_INLINE void RawFree(void* ptr, SlotSpan* slot_span);
|
||||
static void RawFreeStatic(void* ptr);
|
||||
|
||||
internal::ThreadCache* thread_cache_for_testing() const {
|
||||
@ -241,10 +241,10 @@ struct BASE_EXPORT PartitionRoot {
|
||||
bool* is_already_zeroed)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
|
||||
bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
|
||||
size_t requested_size)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
bool ReallocDirectMappedInPlace(
|
||||
internal::SlotSpanMetadata<thread_safe>* slot_span,
|
||||
size_t requested_size) EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
void DecommitEmptySlotSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
|
||||
friend class internal::ThreadCache;
|
||||
};
|
||||
|
@ -91,7 +91,7 @@ class PCScan<thread_safe>::PCScanTask final {
|
||||
void RunOnce() &&;
|
||||
|
||||
private:
|
||||
using Page = PartitionPage<thread_safe>;
|
||||
using SlotSpan = SlotSpanMetadata<thread_safe>;
|
||||
|
||||
struct ScanArea {
|
||||
uintptr_t* begin = nullptr;
|
||||
@ -134,7 +134,7 @@ template <bool thread_safe>
|
||||
QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
|
||||
uintptr_t maybe_ptr) const {
|
||||
// TODO(bikineev): Consider using the bitset in AddressPoolManager::Pool to
|
||||
// quickly find a super-page.
|
||||
// quickly find a super page.
|
||||
const auto super_page_base = maybe_ptr & kSuperPageBaseMask;
|
||||
|
||||
auto it = super_pages_.lower_bound(super_page_base);
|
||||
@ -145,7 +145,7 @@ QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
|
||||
reinterpret_cast<void*>(maybe_ptr)))
|
||||
return nullptr;
|
||||
|
||||
// We are certain here that |maybe_ptr| points to the superpage payload.
|
||||
// We are certain here that |maybe_ptr| points to the super page payload.
|
||||
return QuarantineBitmapFromPointer(QuarantineBitmapType::kScanner,
|
||||
pcscan_.quarantine_data_.epoch(),
|
||||
reinterpret_cast<char*>(maybe_ptr));
|
||||
@ -176,12 +176,13 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
|
||||
|
||||
PA_DCHECK((maybe_ptr & kSuperPageBaseMask) == (base & kSuperPageBaseMask));
|
||||
|
||||
auto target_page =
|
||||
Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
|
||||
PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));
|
||||
auto target_slot_span =
|
||||
SlotSpan::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
|
||||
PA_DCHECK(&root_ ==
|
||||
PartitionRoot<thread_safe>::FromSlotSpan(target_slot_span));
|
||||
|
||||
const size_t usable_size = PartitionSizeAdjustSubtract(
|
||||
root_.allow_extras, target_page->GetUtilizedSlotSize());
|
||||
root_.allow_extras, target_slot_span->GetUtilizedSlotSize());
|
||||
// Range check for inner pointers.
|
||||
if (maybe_ptr >= base + usable_size)
|
||||
return 0;
|
||||
@ -193,7 +194,7 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
|
||||
pcscan_.quarantine_data_.epoch(),
|
||||
reinterpret_cast<char*>(base))
|
||||
->SetBit(base);
|
||||
return target_page->bucket->slot_size;
|
||||
return target_slot_span->bucket->slot_size;
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
@ -205,12 +206,12 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
|
||||
reinterpret_cast<char*>(super_page));
|
||||
bitmap->Iterate([allow_extras](uintptr_t ptr) {
|
||||
auto* object = reinterpret_cast<void*>(ptr);
|
||||
auto* page = Page::FromPointerNoAlignmentCheck(object);
|
||||
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
|
||||
// Use zero as a zapping value to speed up the fast bailout check in
|
||||
// ScanPartition.
|
||||
memset(object, 0,
|
||||
PartitionSizeAdjustSubtract(allow_extras,
|
||||
page->GetUtilizedSlotSize()));
|
||||
slot_span->GetUtilizedSlotSize()));
|
||||
});
|
||||
}
|
||||
}
|
||||
@ -234,7 +235,7 @@ size_t PCScan<thread_safe>::PCScanTask::ScanPartition() NO_SANITIZE("thread") {
|
||||
// implemented.
|
||||
#if defined(PA_HAS_64_BITS_POINTERS)
|
||||
// On partitions without extras (partitions with aligned allocations),
|
||||
// pages are not allocated from the GigaCage.
|
||||
// memory is not allocated from the GigaCage.
|
||||
if (features::IsPartitionAllocGigaCageEnabled() && root_.allow_extras) {
|
||||
// With GigaCage, we first do a fast bitmask check to see if the pointer
|
||||
// points to the normal bucket pool.
|
||||
@ -267,9 +268,9 @@ size_t PCScan<thread_safe>::PCScanTask::SweepQuarantine() {
|
||||
reinterpret_cast<char*>(super_page));
|
||||
bitmap->Iterate([this, &swept_bytes](uintptr_t ptr) {
|
||||
auto* object = reinterpret_cast<void*>(ptr);
|
||||
auto* page = Page::FromPointerNoAlignmentCheck(object);
|
||||
swept_bytes += page->bucket->slot_size;
|
||||
root_.FreeNoHooksImmediate(object, page);
|
||||
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
|
||||
swept_bytes += slot_span->bucket->slot_size;
|
||||
root_.FreeNoHooksImmediate(object, slot_span);
|
||||
});
|
||||
bitmap->Clear();
|
||||
}
|
||||
@ -293,25 +294,27 @@ PCScan<thread_safe>::PCScanTask::PCScanTask(PCScan& pcscan, Root& root)
|
||||
}
|
||||
}
|
||||
|
||||
// Take a snapshot of all active pages.
|
||||
// Take a snapshot of all active slot spans.
|
||||
static constexpr size_t kScanAreasReservationSlack = 10;
|
||||
const size_t kScanAreasReservationSize = root_.total_size_of_committed_pages /
|
||||
PartitionPageSize() /
|
||||
kScanAreasReservationSlack;
|
||||
scan_areas_.reserve(kScanAreasReservationSize);
|
||||
{
|
||||
// TODO(bikineev): Scan full pages.
|
||||
// TODO(bikineev): Scan full slot spans.
|
||||
for (const auto& bucket : root_.buckets) {
|
||||
for (auto* page = bucket.active_pages_head;
|
||||
page && page != page->get_sentinel_page(); page = page->next_page) {
|
||||
for (auto* slot_span = bucket.active_slot_spans_head;
|
||||
slot_span && slot_span != slot_span->get_sentinel_slot_span();
|
||||
slot_span = slot_span->next_slot_span) {
|
||||
// The active list may contain false positives, skip them.
|
||||
if (page->is_empty() || page->is_decommitted())
|
||||
if (slot_span->is_empty() || slot_span->is_decommitted())
|
||||
continue;
|
||||
|
||||
auto* payload_begin = static_cast<uintptr_t*>(Page::ToPointer(page));
|
||||
auto* payload_begin =
|
||||
static_cast<uintptr_t*>(SlotSpan::ToPointer(slot_span));
|
||||
auto* payload_end =
|
||||
payload_begin +
|
||||
(page->bucket->get_bytes_per_span() / sizeof(uintptr_t));
|
||||
(slot_span->bucket->get_bytes_per_span() / sizeof(uintptr_t));
|
||||
scan_areas_.push_back({payload_begin, payload_end});
|
||||
}
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ template <bool thread_safe>
|
||||
class BASE_EXPORT PCScan final {
|
||||
public:
|
||||
using Root = PartitionRoot<thread_safe>;
|
||||
using Page = PartitionPage<thread_safe>;
|
||||
using SlotSpan = SlotSpanMetadata<thread_safe>;
|
||||
|
||||
explicit PCScan(Root* root) : root_(root) {}
|
||||
|
||||
@ -45,7 +45,7 @@ class BASE_EXPORT PCScan final {
|
||||
|
||||
~PCScan();
|
||||
|
||||
ALWAYS_INLINE void MoveToQuarantine(void* ptr, Page* page);
|
||||
ALWAYS_INLINE void MoveToQuarantine(void* ptr, SlotSpan* slot_span);
|
||||
|
||||
private:
|
||||
class PCScanTask;
|
||||
@ -113,15 +113,15 @@ void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded() {
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE void PCScan<thread_safe>::MoveToQuarantine(void* ptr,
|
||||
Page* page) {
|
||||
PA_DCHECK(!page->bucket->is_direct_mapped());
|
||||
SlotSpan* slot_span) {
|
||||
PA_DCHECK(!slot_span->bucket->is_direct_mapped());
|
||||
|
||||
QuarantineBitmapFromPointer(QuarantineBitmapType::kMutator,
|
||||
quarantine_data_.epoch(), ptr)
|
||||
->SetBit(reinterpret_cast<uintptr_t>(ptr));
|
||||
|
||||
const bool is_limit_reached =
|
||||
quarantine_data_.Account(page->bucket->slot_size);
|
||||
quarantine_data_.Account(slot_span->bucket->slot_size);
|
||||
if (is_limit_reached) {
|
||||
// Post a background task to not block the current thread.
|
||||
ScheduleTask(TaskType::kNonBlocking);
|
||||
|
@ -21,7 +21,7 @@ class PCScanTest : public testing::Test {
|
||||
PartitionOptions::PCScan::kEnabled});
|
||||
}
|
||||
~PCScanTest() override {
|
||||
allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
|
||||
allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
|
||||
PartitionPurgeDiscardUnusedSystemPages);
|
||||
PartitionAllocGlobalUninitForTesting();
|
||||
}
|
||||
@ -47,17 +47,17 @@ class PCScanTest : public testing::Test {
|
||||
|
||||
namespace {
|
||||
|
||||
using Page = ThreadSafePartitionRoot::Page;
|
||||
using SlotSpan = ThreadSafePartitionRoot::SlotSpan;
|
||||
|
||||
struct FullPageAllocation {
|
||||
Page* page;
|
||||
struct FullSlotSpanAllocation {
|
||||
SlotSpan* slot_span;
|
||||
void* first;
|
||||
void* last;
|
||||
};
|
||||
|
||||
// Assumes heap is purged.
|
||||
FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
|
||||
size_t object_size) {
|
||||
FullSlotSpanAllocation GetFullSlotSpan(ThreadSafePartitionRoot& root,
|
||||
size_t object_size) {
|
||||
CHECK_EQ(0u, root.total_size_of_committed_pages_for_testing());
|
||||
|
||||
const size_t size_with_extra = PartitionSizeAdjustAdd(true, object_size);
|
||||
@ -76,24 +76,24 @@ FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
|
||||
last = PartitionPointerAdjustSubtract(true, ptr);
|
||||
}
|
||||
|
||||
EXPECT_EQ(ThreadSafePartitionRoot::Page::FromPointer(first),
|
||||
ThreadSafePartitionRoot::Page::FromPointer(last));
|
||||
EXPECT_EQ(SlotSpan::FromPointer(first), SlotSpan::FromPointer(last));
|
||||
if (bucket.num_system_pages_per_slot_span == NumSystemPagesPerPartitionPage())
|
||||
EXPECT_EQ(reinterpret_cast<size_t>(first) & PartitionPageBaseMask(),
|
||||
reinterpret_cast<size_t>(last) & PartitionPageBaseMask());
|
||||
EXPECT_EQ(num_slots,
|
||||
static_cast<size_t>(bucket.active_pages_head->num_allocated_slots));
|
||||
EXPECT_EQ(nullptr, bucket.active_pages_head->freelist_head);
|
||||
EXPECT_TRUE(bucket.active_pages_head);
|
||||
EXPECT_TRUE(bucket.active_pages_head != Page::get_sentinel_page());
|
||||
EXPECT_EQ(num_slots, static_cast<size_t>(
|
||||
bucket.active_slot_spans_head->num_allocated_slots));
|
||||
EXPECT_EQ(nullptr, bucket.active_slot_spans_head->freelist_head);
|
||||
EXPECT_TRUE(bucket.active_slot_spans_head);
|
||||
EXPECT_TRUE(bucket.active_slot_spans_head !=
|
||||
SlotSpan::get_sentinel_slot_span());
|
||||
|
||||
return {bucket.active_pages_head, PartitionPointerAdjustAdd(true, first),
|
||||
return {bucket.active_slot_spans_head, PartitionPointerAdjustAdd(true, first),
|
||||
PartitionPointerAdjustAdd(true, last)};
|
||||
}
|
||||
|
||||
bool IsInFreeList(void* object) {
|
||||
auto* page = Page::FromPointerNoAlignmentCheck(object);
|
||||
for (auto* entry = page->freelist_head; entry;
|
||||
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
|
||||
for (auto* entry = slot_span->freelist_head; entry;
|
||||
entry = EncodedPartitionFreelistEntry::Decode(entry->next)) {
|
||||
if (entry == object)
|
||||
return true;
|
||||
@ -138,21 +138,23 @@ TEST_F(PCScanTest, ArbitraryObjectInQuarantine) {
TEST_F(PCScanTest, FirstObjectInQuarantine) {
static constexpr size_t kAllocationSize = 16;

FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_page.first));
FullSlotSpanAllocation full_slot_span =
GetFullSlotSpan(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_slot_span.first));

root().FreeNoHooks(full_page.first);
EXPECT_TRUE(IsInQuarantine(full_page.first));
root().FreeNoHooks(full_slot_span.first);
EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
}

TEST_F(PCScanTest, LastObjectInQuarantine) {
static constexpr size_t kAllocationSize = 16;

FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_page.last));
FullSlotSpanAllocation full_slot_span =
GetFullSlotSpan(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_slot_span.last));

root().FreeNoHooks(full_page.last);
EXPECT_TRUE(IsInQuarantine(full_page.last));
root().FreeNoHooks(full_slot_span.last);
EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
}

namespace {
@ -217,23 +219,22 @@ TEST_F(PCScanTest, DanglingReferenceSameSlotSpanButDifferentPages) {
static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
static_cast<size_t>(PartitionPageSize() * 0.75);

FullPageAllocation full_page = GetFullPage(
FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
root(),
PartitionSizeAdjustSubtract(
true, kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));

// Assert that the first and the last objects are in the same slot span but on
// different partition pages.
ASSERT_EQ(ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
full_page.first),
ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
full_page.last));
ASSERT_NE(reinterpret_cast<size_t>(full_page.first) & PartitionPageBaseMask(),
reinterpret_cast<size_t>(full_page.last) & PartitionPageBaseMask());
ASSERT_EQ(SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.first),
SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.last));
ASSERT_NE(
reinterpret_cast<size_t>(full_slot_span.first) & PartitionPageBaseMask(),
reinterpret_cast<size_t>(full_slot_span.last) & PartitionPageBaseMask());

// Create two objects, on different partition pages.
auto* value = new (full_page.first) ValueList;
auto* source = new (full_page.last) SourceList;
auto* value = new (full_slot_span.first) ValueList;
auto* source = new (full_slot_span.last) SourceList;
source->next = value;

TestDanglingReference(*this, source, value);
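
A quick sanity check of the object size chosen above, assuming the common configuration of 4 KiB system pages and 16 KiB partition pages (these constants are not stated in this diff and vary by platform):

// Illustration only; kPartitionPageSize is an assumed value, not a constant
// taken from this CL.
constexpr size_t kPartitionPageSize = 16 * 1024;
constexpr size_t kObjectSize = kPartitionPageSize * 3 / 4;  // 12 KiB
// Two such slots already need 24 KiB, i.e. more than one partition page, so
// a full slot span for this bucket must straddle partition pages while all
// of its slots share a single slot span and its metadata.
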
third_party/blink/renderer/platform/instrumentation/partition_alloc_memory_dump_provider.cc
@ -134,16 +134,17 @@ void PartitionStatsDumperImpl::PartitionsDumpBucketStats(
memory_stats->decommittable_bytes);
allocator_dump->AddScalar("discardable_size", "bytes",
memory_stats->discardable_bytes);
// TODO(bartekn): Rename the scalar names.
allocator_dump->AddScalar("total_pages_size", "bytes",
memory_stats->allocated_page_size);
memory_stats->allocated_slot_span_size);
allocator_dump->AddScalar("active_pages", "objects",
memory_stats->num_active_pages);
memory_stats->num_active_slot_spans);
allocator_dump->AddScalar("full_pages", "objects",
memory_stats->num_full_pages);
memory_stats->num_full_slot_spans);
allocator_dump->AddScalar("empty_pages", "objects",
memory_stats->num_empty_pages);
memory_stats->num_empty_slot_spans);
allocator_dump->AddScalar("decommitted_pages", "objects",
memory_stats->num_decommitted_pages);
memory_stats->num_decommitted_slot_spans);
}

} // namespace