[PartitionAlloc] Transition to SlotSpanMetadata
Currently all metadata is stored in PartitionPage, which is confusing because the most commonly used metadata is related to slot spans, yet it is stored only in the PartitionPage object that corresponds to the first partition page of the slot span. This CL introduces SlotSpanMetadata to clear up that confusion.

Change-Id: Id8873dba1c9e3018a8643f4f9c93e694f2edb9c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2466007
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#817117}
Committed by: Commit Bot
Parent: 16d2ce3236
Commit: 22b2cdc0fa
Changed files:

base/allocator/partition_allocator/
  memory_reclaimer.cc
  partition_alloc.cc
  partition_alloc.h
  partition_alloc_forward.h
  partition_alloc_unittest.cc
  partition_bucket.cc
  partition_bucket.h
  partition_direct_map_extent.h
  partition_page.cc
  partition_page.h
  partition_ref_count.cc
  partition_root.h
  pcscan.cc
  pcscan.h
  pcscan_unittest.cc

third_party/blink/renderer/platform/instrumentation
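For orientation, here is a rough sketch of the layout change this CL makes. It is a simplified illustration that assumes the field names visible in the diff below; it is not the exact Chromium definition of either struct.

// Forward declarations just for this sketch.
struct PartitionFreelistEntry;
template <bool thread_safe>
struct PartitionBucket;

// After this CL, the metadata that describes a whole slot span lives in
// SlotSpanMetadata. Previously these fields sat directly in PartitionPage,
// but only the entry for the first partition page of the slot span held
// meaningful values.
template <bool thread_safe>
struct SlotSpanMetadata {
  PartitionFreelistEntry* freelist_head = nullptr;
  SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
  PartitionBucket<thread_safe>* bucket = nullptr;
  int num_allocated_slots = 0;  // signed, per the DCHECK(... >= 0) in the diff
  unsigned num_unprovisioned_slots = 0;
  int empty_cache_index = -1;   // InitializeSlotSpan() sets it to -1 below
};

// PartitionPage becomes a thin wrapper: the first page of a slot span embeds
// the metadata, and the remaining pages only record how far back it is.
template <bool thread_safe>
struct PartitionPage {
  SlotSpanMetadata<thread_safe> slot_span_metadata;
  unsigned slot_span_metadata_offset = 0;  // 0 for the first page of the span
};

Call sites that used to write page->num_allocated_slots now either hold a SlotSpanMetadata* directly (slot_span->num_allocated_slots) or go through page->slot_span_metadata, as the hunks below show.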
@@ -105,8 +105,8 @@ void PartitionAllocMemoryReclaimer::Reclaim() {
   AutoLock lock(lock_);  // Has to protect from concurrent (Un)Register calls.
   TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
 
-  constexpr int kFlags =
-      PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
+  constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
+                         PartitionPurgeDiscardUnusedSystemPages;
 
   for (auto* partition : thread_safe_partitions_)
     partition->PurgeMemory(kFlags);
@@ -46,11 +46,11 @@ NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(size_t size) {
 }
 
 template <bool thread_safe>
-void PartitionRoot<thread_safe>::DecommitEmptyPages() {
-  for (Page*& page : global_empty_page_ring) {
-    if (page)
-      page->DecommitIfPossible(this);
-    page = nullptr;
+void PartitionRoot<thread_safe>::DecommitEmptySlotSpans() {
+  for (SlotSpan*& slot_span : global_empty_slot_span_ring) {
+    if (slot_span)
+      slot_span->DecommitIfPossible(this);
+    slot_span = nullptr;
   }
 }
 
@@ -283,10 +283,10 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
   if (thread_safe && opts.pcscan == PartitionOptions::PCScan::kEnabled)
     pcscan.emplace(this);
 
-  // We mark the sentinel bucket/page as free to make sure it is skipped by our
-  // logic to find a new active page.
+  // We mark the sentinel slot span as free to make sure it is skipped by our
+  // logic to find a new active slot span.
   memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
-  sentinel_bucket.active_pages_head = Page::get_sentinel_page();
+  sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
 
   // This is a "magic" value so we can test if a root pointer is valid.
   inverted_self = ~reinterpret_cast<uintptr_t>(this);
@@ -306,7 +306,7 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
     bucket->Init(current_size);
     // Disable pseudo buckets so that touching them faults.
     if (current_size % kSmallestBucket)
-      bucket->active_pages_head = nullptr;
+      bucket->active_slot_spans_head = nullptr;
     current_size += current_increment;
     ++bucket;
   }
@@ -339,9 +339,9 @@ PartitionRoot<thread_safe>::~PartitionRoot() = default;
 
 template <bool thread_safe>
 bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
-    internal::PartitionPage<thread_safe>* page,
+    internal::SlotSpanMetadata<thread_safe>* slot_span,
     size_t requested_size) {
-  PA_DCHECK(page->bucket->is_direct_mapped());
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
 
   size_t raw_size =
       internal::PartitionSizeAdjustAdd(allow_extras, requested_size);
@@ -352,12 +352,12 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
     return false;
 
   // bucket->slot_size is the current size of the allocation.
-  size_t current_slot_size = page->bucket->slot_size;
-  char* char_ptr = static_cast<char*>(Page::ToPointer(page));
+  size_t current_slot_size = slot_span->bucket->slot_size;
+  char* char_ptr = static_cast<char*>(SlotSpan::ToPointer(slot_span));
   if (new_slot_size == current_slot_size) {
     // No need to move any memory around, but update size and cookie below.
   } else if (new_slot_size < current_slot_size) {
-    size_t map_size = DirectMapExtent::FromPage(page)->map_size;
+    size_t map_size = DirectMapExtent::FromSlotSpan(slot_span)->map_size;
 
     // Don't reallocate in-place if new size is less than 80 % of the full
     // map size, to avoid holding on to too much unused address space.
@@ -370,7 +370,8 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
     DecommitSystemPages(char_ptr + new_slot_size, decommit_size);
     SetSystemPagesAccess(char_ptr + new_slot_size, decommit_size,
                          PageInaccessible);
-  } else if (new_slot_size <= DirectMapExtent::FromPage(page)->map_size) {
+  } else if (new_slot_size <=
+             DirectMapExtent::FromSlotSpan(slot_span)->map_size) {
     // Grow within the actually allocated memory. Just need to make the
     // pages accessible again.
     size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
@@ -397,8 +398,8 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
   }
 #endif
 
-  page->SetRawSize(raw_size);
-  page->bucket->slot_size = new_slot_size;
+  slot_span->SetRawSize(raw_size);
+  slot_span->bucket->slot_size = new_slot_size;
   return true;
 }
 
@@ -438,19 +439,19 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
                                         &actual_old_size, ptr);
   }
   if (LIKELY(!overridden)) {
-    auto* page = Page::FromPointer(
+    auto* slot_span = SlotSpan::FromPointer(
         internal::PartitionPointerAdjustSubtract(allow_extras, ptr));
     bool success = false;
     {
       internal::ScopedGuard<thread_safe> guard{lock_};
       // TODO(palmer): See if we can afford to make this a CHECK.
-      PA_DCHECK(IsValidPage(page));
+      PA_DCHECK(IsValidSlotSpan(slot_span));
 
-      if (UNLIKELY(page->bucket->is_direct_mapped())) {
+      if (UNLIKELY(slot_span->bucket->is_direct_mapped())) {
         // We may be able to perform the realloc in place by changing the
         // accessibility of memory pages and, if reducing the size, decommitting
         // them.
-        success = ReallocDirectMappedInPlace(page, new_size);
+        success = ReallocDirectMappedInPlace(slot_span, new_size);
       }
     }
     if (success) {
@@ -471,10 +472,10 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
     // Trying to allocate a block of size |new_size| would give us a block of
     // the same size as the one we've already got, so re-use the allocation
     // after updating statistics (and cookies, if present).
-    if (page->CanStoreRawSize()) {
+    if (slot_span->CanStoreRawSize()) {
       size_t new_raw_size =
           internal::PartitionSizeAdjustAdd(allow_extras, new_size);
-      page->SetRawSize(new_raw_size);
+      slot_span->SetRawSize(new_raw_size);
 #if DCHECK_IS_ON()
       // Write a new trailing cookie only when it is possible to keep track
       // raw size (otherwise we wouldn't know where to look for it later).
@@ -508,23 +509,24 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
 }
 
 template <bool thread_safe>
-static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
-                                 bool discard) {
-  const internal::PartitionBucket<thread_safe>* bucket = page->bucket;
+static size_t PartitionPurgeSlotSpan(
+    internal::SlotSpanMetadata<thread_safe>* slot_span,
+    bool discard) {
+  const internal::PartitionBucket<thread_safe>* bucket = slot_span->bucket;
   size_t slot_size = bucket->slot_size;
-  if (slot_size < SystemPageSize() || !page->num_allocated_slots)
+  if (slot_size < SystemPageSize() || !slot_span->num_allocated_slots)
     return 0;
 
   size_t bucket_num_slots = bucket->get_slots_per_span();
   size_t discardable_bytes = 0;
 
-  if (page->CanStoreRawSize()) {
+  if (slot_span->CanStoreRawSize()) {
     uint32_t used_bytes =
-        static_cast<uint32_t>(RoundUpToSystemPage(page->GetRawSize()));
+        static_cast<uint32_t>(RoundUpToSystemPage(slot_span->GetRawSize()));
     discardable_bytes = bucket->slot_size - used_bytes;
     if (discardable_bytes && discard) {
       char* ptr = reinterpret_cast<char*>(
-          internal::PartitionPage<thread_safe>::ToPointer(page));
+          internal::SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
       ptr += used_bytes;
       DiscardSystemPages(ptr, discardable_bytes);
     }
@@ -545,8 +547,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
                           SystemPageSize());
 #endif
   PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
-  PA_DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
-  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
+  PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
+  size_t num_slots = bucket_num_slots - slot_span->num_unprovisioned_slots;
   char slot_usage[kMaxSlotCount];
 #if !defined(OS_WIN)
   // The last freelist entry should not be discarded when using OS_WIN.
@@ -555,10 +557,11 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
 #endif
   memset(slot_usage, 1, num_slots);
   char* ptr = reinterpret_cast<char*>(
-      internal::PartitionPage<thread_safe>::ToPointer(page));
-  // First, walk the freelist for this page and make a bitmap of which slots
-  // are not in use.
-  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
+      internal::SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
+  // First, walk the freelist for this slot span and make a bitmap of which
+  // slots are not in use.
+  for (internal::PartitionFreelistEntry* entry = slot_span->freelist_head;
+       entry;
        /**/) {
     size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
     PA_DCHECK(slot_index < num_slots);
@@ -603,7 +606,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
   if (unprovisioned_bytes && discard) {
     PA_DCHECK(truncated_slots > 0);
     size_t num_new_entries = 0;
-    page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+    slot_span->num_unprovisioned_slots +=
+        static_cast<uint16_t>(truncated_slots);
 
     // Rewrite the freelist.
     internal::PartitionFreelistEntry* head = nullptr;
@@ -627,11 +631,11 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
 #endif
     }
 
-    page->freelist_head = head;
+    slot_span->freelist_head = head;
     if (back)
       back->next = internal::PartitionFreelistEntry::Encode(nullptr);
 
-    PA_DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+    PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
     // Discard the memory.
     DiscardSystemPages(begin_ptr, unprovisioned_bytes);
   }
@@ -671,13 +675,15 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
 template <bool thread_safe>
 static void PartitionPurgeBucket(
     internal::PartitionBucket<thread_safe>* bucket) {
-  if (bucket->active_pages_head !=
-      internal::PartitionPage<thread_safe>::get_sentinel_page()) {
-    for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
-         page; page = page->next_page) {
-      PA_DCHECK(page !=
-                internal::PartitionPage<thread_safe>::get_sentinel_page());
-      PartitionPurgePage(page, true);
+  if (bucket->active_slot_spans_head !=
+      internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
+    for (internal::SlotSpanMetadata<thread_safe>* slot_span =
+             bucket->active_slot_spans_head;
+         slot_span; slot_span = slot_span->next_slot_span) {
+      PA_DCHECK(
+          slot_span !=
+          internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
+      PartitionPurgeSlotSpan(slot_span, true);
     }
   }
 }
@@ -686,8 +692,8 @@ template <bool thread_safe>
 void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
   {
     ScopedGuard guard{lock_};
-    if (flags & PartitionPurgeDecommitEmptyPages)
-      DecommitEmptyPages();
+    if (flags & PartitionPurgeDecommitEmptySlotSpans)
+      DecommitEmptySlotSpans();
     if (flags & PartitionPurgeDiscardUnusedSystemPages) {
       for (size_t i = 0; i < kNumBuckets; ++i) {
         Bucket* bucket = &buckets[i];
@@ -702,36 +708,37 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
 }
 
 template <bool thread_safe>
-static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
-                                   internal::PartitionPage<thread_safe>* page) {
-  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
+static void PartitionDumpSlotSpanStats(
+    PartitionBucketMemoryStats* stats_out,
+    internal::SlotSpanMetadata<thread_safe>* slot_span) {
+  uint16_t bucket_num_slots = slot_span->bucket->get_slots_per_span();
 
-  if (page->is_decommitted()) {
-    ++stats_out->num_decommitted_pages;
+  if (slot_span->is_decommitted()) {
+    ++stats_out->num_decommitted_slot_spans;
     return;
   }
 
-  stats_out->discardable_bytes += PartitionPurgePage(page, false);
+  stats_out->discardable_bytes += PartitionPurgeSlotSpan(slot_span, false);
 
-  if (page->CanStoreRawSize()) {
-    stats_out->active_bytes += static_cast<uint32_t>(page->GetRawSize());
+  if (slot_span->CanStoreRawSize()) {
+    stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
   } else {
     stats_out->active_bytes +=
-        (page->num_allocated_slots * stats_out->bucket_slot_size);
+        (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
   }
 
-  size_t page_bytes_resident =
-      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
-                          stats_out->bucket_slot_size);
-  stats_out->resident_bytes += page_bytes_resident;
-  if (page->is_empty()) {
-    stats_out->decommittable_bytes += page_bytes_resident;
-    ++stats_out->num_empty_pages;
-  } else if (page->is_full()) {
-    ++stats_out->num_full_pages;
+  size_t slot_span_bytes_resident = RoundUpToSystemPage(
+      (bucket_num_slots - slot_span->num_unprovisioned_slots) *
+      stats_out->bucket_slot_size);
+  stats_out->resident_bytes += slot_span_bytes_resident;
+  if (slot_span->is_empty()) {
+    stats_out->decommittable_bytes += slot_span_bytes_resident;
+    ++stats_out->num_empty_slot_spans;
+  } else if (slot_span->is_full()) {
+    ++stats_out->num_full_slot_spans;
   } else {
-    PA_DCHECK(page->is_active());
-    ++stats_out->num_active_pages;
+    PA_DCHECK(slot_span->is_active());
+    ++stats_out->num_active_slot_spans;
   }
 }
 
@@ -741,46 +748,51 @@ static void PartitionDumpBucketStats(
     const internal::PartitionBucket<thread_safe>* bucket) {
   PA_DCHECK(!bucket->is_direct_mapped());
   stats_out->is_valid = false;
-  // If the active page list is empty (==
-  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
-  // to be reported if it has a list of empty, decommitted or full pages.
-  if (bucket->active_pages_head ==
-          internal::PartitionPage<thread_safe>::get_sentinel_page() &&
-      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
-      !bucket->num_full_pages)
+  // If the active slot span list is empty (==
+  // internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
+  // still need to be reported if it has a list of empty, decommitted or full
+  // slot spans.
+  if (bucket->active_slot_spans_head ==
+          internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() &&
+      !bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
+      !bucket->num_full_slot_spans)
     return;
 
   memset(stats_out, '\0', sizeof(*stats_out));
   stats_out->is_valid = true;
   stats_out->is_direct_map = false;
-  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
+  stats_out->num_full_slot_spans =
+      static_cast<size_t>(bucket->num_full_slot_spans);
   stats_out->bucket_slot_size = bucket->slot_size;
   uint16_t bucket_num_slots = bucket->get_slots_per_span();
   size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
-  stats_out->allocated_page_size = bucket->get_bytes_per_span();
-  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
+  stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
+  stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
   stats_out->resident_bytes =
-      bucket->num_full_pages * stats_out->allocated_page_size;
+      bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
 
-  for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head;
-       page; page = page->next_page) {
-    PA_DCHECK(page->is_empty() || page->is_decommitted());
-    PartitionDumpPageStats(stats_out, page);
+  for (internal::SlotSpanMetadata<thread_safe>* slot_span =
+           bucket->empty_slot_spans_head;
+       slot_span; slot_span = slot_span->next_slot_span) {
+    PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
+    PartitionDumpSlotSpanStats(stats_out, slot_span);
   }
-  for (internal::PartitionPage<thread_safe>* page =
-           bucket->decommitted_pages_head;
-       page; page = page->next_page) {
-    PA_DCHECK(page->is_decommitted());
-    PartitionDumpPageStats(stats_out, page);
+  for (internal::SlotSpanMetadata<thread_safe>* slot_span =
+           bucket->decommitted_slot_spans_head;
+       slot_span; slot_span = slot_span->next_slot_span) {
+    PA_DCHECK(slot_span->is_decommitted());
+    PartitionDumpSlotSpanStats(stats_out, slot_span);
   }
 
-  if (bucket->active_pages_head !=
-      internal::PartitionPage<thread_safe>::get_sentinel_page()) {
-    for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
-         page; page = page->next_page) {
-      PA_DCHECK(page !=
-                internal::PartitionPage<thread_safe>::get_sentinel_page());
-      PartitionDumpPageStats(stats_out, page);
+  if (bucket->active_slot_spans_head !=
+      internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
+    for (internal::SlotSpanMetadata<thread_safe>* slot_span =
+             bucket->active_slot_spans_head;
+         slot_span; slot_span = slot_span->next_slot_span) {
+      PA_DCHECK(
+          slot_span !=
+          internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
+      PartitionDumpSlotSpanStats(stats_out, slot_span);
     }
   }
 }
@@ -818,7 +830,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
       // Don't report the pseudo buckets that the generic allocator sets up in
       // order to preserve a fast size->bucket map (see
      // PartitionRoot::Init() for details).
-      if (!bucket->active_pages_head)
+      if (!bucket->active_slot_spans_head)
         bucket_stats[i].is_valid = false;
       else
         PartitionDumpBucketStats(&bucket_stats[i], bucket);
@@ -867,8 +879,8 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
     PartitionBucketMemoryStats mapped_stats = {};
     mapped_stats.is_valid = true;
     mapped_stats.is_direct_map = true;
-    mapped_stats.num_full_pages = 1;
-    mapped_stats.allocated_page_size = size;
+    mapped_stats.num_full_slot_spans = 1;
+    mapped_stats.allocated_slot_span_size = size;
     mapped_stats.bucket_slot_size = size;
     mapped_stats.active_bytes = size;
     mapped_stats.resident_bytes = size;
@@ -937,17 +949,19 @@ BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr) {
   // The only allocations that don't use tag are allocated outside of GigaCage,
   // hence we'd never get here in the use_tag=false case.
   ptr = internal::PartitionPointerAdjustSubtract(true /* use_tag */, ptr);
-  auto* page =
-      internal::PartitionAllocGetPageForSize<internal::ThreadSafe>(ptr);
-  PA_DCHECK(PartitionRoot<internal::ThreadSafe>::FromPage(page)->allow_extras);
+  auto* slot_span =
+      internal::PartitionAllocGetSlotSpanForSizeQuery<internal::ThreadSafe>(
+          ptr);
+  PA_DCHECK(PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span)
+                ->allow_extras);
 
   // Get the offset from the beginning of the slot span.
   uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
   uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
-      internal::PartitionPage<internal::ThreadSafe>::ToPointer(page));
+      internal::SlotSpanMetadata<internal::ThreadSafe>::ToPointer(slot_span));
   size_t offset_in_slot_span = ptr_addr - slot_span_start;
 
-  return page->bucket->GetSlotOffset(offset_in_slot_span);
+  return slot_span->bucket->GetSlotOffset(offset_in_slot_span);
 }
 
 }  // namespace internal
@@ -226,12 +226,12 @@ static OomFunction g_oom_handling_function = nullptr;
 class PartitionStatsDumper;
 
 enum PartitionPurgeFlags {
-  // Decommitting the ring list of empty pages is reasonably fast.
-  PartitionPurgeDecommitEmptyPages = 1 << 0,
+  // Decommitting the ring list of empty slot spans is reasonably fast.
+  PartitionPurgeDecommitEmptySlotSpans = 1 << 0,
   // Discarding unused system pages is slower, because it involves walking all
-  // freelists in all active partition pages of all buckets >= system page
+  // freelists in all active slot spans of all buckets >= system page
   // size. It often frees a similar amount of memory to decommitting the empty
-  // pages, though.
+  // slot spans, though.
   PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
 };
 
@@ -256,20 +256,21 @@ struct PartitionBucketMemoryStats {
   bool is_valid;       // Used to check if the stats is valid.
   bool is_direct_map;  // True if this is a direct mapping; size will not be
                        // unique.
   uint32_t bucket_slot_size;     // The size of the slot in bytes.
-  uint32_t allocated_page_size;  // Total size the partition page allocated from
-                                 // the system.
+  uint32_t allocated_slot_span_size;  // Total size the slot span allocated
+                                      // from the system (committed pages).
   uint32_t active_bytes;         // Total active bytes used in the bucket.
   uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
   uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
   uint32_t discardable_bytes;    // Total bytes that could be discarded.
-  uint32_t num_full_pages;       // Number of pages with all slots allocated.
-  uint32_t num_active_pages;     // Number of pages that have at least one
-                                 // provisioned slot.
-  uint32_t num_empty_pages;      // Number of pages that are empty
-                                 // but not decommitted.
-  uint32_t num_decommitted_pages;  // Number of pages that are empty
-                                   // and decommitted.
+  uint32_t num_full_slot_spans;  // Number of slot spans with all slots
+                                 // allocated.
+  uint32_t num_active_slot_spans;  // Number of slot spans that have at least
+                                   // one provisioned slot.
+  uint32_t num_empty_slot_spans;  // Number of slot spans that are empty
+                                  // but not decommitted.
+  uint32_t num_decommitted_slot_spans;  // Number of slot spans that are empty
+                                        // and decommitted.
 };
 
 // Interface that is passed to PartitionDumpStats and
@@ -380,42 +381,43 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
     bool* is_already_zeroed) {
   *is_already_zeroed = false;
 
-  Page* page = bucket->active_pages_head;
-  // Check that this page is neither full nor freed.
-  PA_DCHECK(page);
-  PA_DCHECK(page->num_allocated_slots >= 0);
+  SlotSpan* slot_span = bucket->active_slot_spans_head;
+  // Check that this slot span is neither full nor freed.
+  PA_DCHECK(slot_span);
+  PA_DCHECK(slot_span->num_allocated_slots >= 0);
   *utilized_slot_size = bucket->slot_size;
 
-  void* ret = page->freelist_head;
+  void* ret = slot_span->freelist_head;
   if (LIKELY(ret)) {
     // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
     // if we can afford to make these CHECKs.
-    PA_DCHECK(IsValidPage(page));
+    PA_DCHECK(IsValidSlotSpan(slot_span));
 
     // All large allocations must go through the slow path to correctly update
     // the size metadata.
-    PA_DCHECK(!page->CanStoreRawSize());
+    PA_DCHECK(!slot_span->CanStoreRawSize());
     internal::PartitionFreelistEntry* new_head =
         internal::EncodedPartitionFreelistEntry::Decode(
-            page->freelist_head->next);
-    page->freelist_head = new_head;
-    page->num_allocated_slots++;
+            slot_span->freelist_head->next);
+    slot_span->freelist_head = new_head;
+    slot_span->num_allocated_slots++;
 
-    PA_DCHECK(page->bucket == bucket);
+    PA_DCHECK(slot_span->bucket == bucket);
   } else {
     ret = bucket->SlowPathAlloc(this, flags, raw_size, is_already_zeroed);
     // TODO(palmer): See if we can afford to make this a CHECK.
-    PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
+    PA_DCHECK(!ret || IsValidSlotSpan(SlotSpan::FromPointer(ret)));
 
     if (UNLIKELY(!ret))
       return nullptr;
 
-    page = Page::FromPointer(ret);
+    slot_span = SlotSpan::FromPointer(ret);
     // For direct mapped allocations, |bucket| is the sentinel.
-    PA_DCHECK((page->bucket == bucket) || (page->bucket->is_direct_mapped() &&
-                                           (bucket == &sentinel_bucket)));
+    PA_DCHECK((slot_span->bucket == bucket) ||
+              (slot_span->bucket->is_direct_mapped() &&
+               (bucket == &sentinel_bucket)));
 
-    *utilized_slot_size = page->GetUtilizedSlotSize();
+    *utilized_slot_size = slot_span->GetUtilizedSlotSize();
   }
 
   return ret;
@@ -447,25 +449,26 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
     return;
 
   // No check as the pointer hasn't been adjusted yet.
-  Page* page = Page::FromPointerNoAlignmentCheck(ptr);
+  SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
-  PA_DCHECK(IsValidPage(page));
-  auto* root = PartitionRoot<thread_safe>::FromPage(page);
+  PA_DCHECK(IsValidSlotSpan(slot_span));
+  auto* root = FromSlotSpan(slot_span);
 
   // TODO(bikineev): Change the first condition to LIKELY once PCScan is enabled
   // by default.
-  if (UNLIKELY(root->pcscan) && LIKELY(!page->bucket->is_direct_mapped())) {
-    root->pcscan->MoveToQuarantine(ptr, page);
+  if (UNLIKELY(root->pcscan) &&
+      LIKELY(!slot_span->bucket->is_direct_mapped())) {
+    root->pcscan->MoveToQuarantine(ptr, slot_span);
     return;
   }
 
-  root->FreeNoHooksImmediate(ptr, page);
+  root->FreeNoHooksImmediate(ptr, slot_span);
 }
 
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     void* ptr,
-    Page* page) {
+    SlotSpan* slot_span) {
   // The thread cache is added "in the middle" of the main allocator, that is:
   // - After all the cookie/tag/ref-count management
   // - Before the "raw" allocator.
@@ -476,11 +479,11 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   // a. Return to the thread cache if possible. If it succeeds, return.
   // b. Otherwise, call the "raw" allocator <-- Locking
   PA_DCHECK(ptr);
-  PA_DCHECK(page);
-  PA_DCHECK(IsValidPage(page));
+  PA_DCHECK(slot_span);
+  PA_DCHECK(IsValidSlotSpan(slot_span));
 
 #if DCHECK_IS_ON()
-  size_t utilized_slot_size = page->GetUtilizedSlotSize();
+  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
 #endif
 
   if (allow_extras) {
@@ -511,14 +514,14 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     internal::PartitionCookieCheckValue(end_cookie_ptr);
 #endif
 
-    if (!page->bucket->is_direct_mapped()) {
+    if (!slot_span->bucket->is_direct_mapped()) {
       // PartitionTagIncrementValue and PartitionTagClearValue require that the
       // size is tag_bitmap::kBytesPerPartitionTag-aligned (currently 16
      // bytes-aligned) when MTECheckedPtr is enabled. However,
       // utilized_slot_size may not be aligned for single-slot slot spans. So we
       // need the bucket's slot_size.
-      size_t slot_size_with_no_extras =
-          internal::PartitionSizeAdjustSubtract(true, page->bucket->slot_size);
+      size_t slot_size_with_no_extras = internal::PartitionSizeAdjustSubtract(
+          true, slot_span->bucket->slot_size);
 #if ENABLE_TAG_FOR_MTE_CHECKED_PTR && MTE_CHECKED_PTR_SET_TAG_AT_FREE
       internal::PartitionTagIncrementValue(ptr, slot_size_with_no_extras);
 #else
@@ -556,24 +559,26 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   //
   // Also the thread-unsafe variant doesn't have a use for a thread cache, so
   // make it statically known to the compiler.
-  if (thread_safe && with_thread_cache && !page->bucket->is_direct_mapped()) {
-    PA_DCHECK(page->bucket >= this->buckets &&
-              page->bucket <= &this->sentinel_bucket);
-    size_t bucket_index = page->bucket - this->buckets;
+  if (thread_safe && with_thread_cache &&
+      !slot_span->bucket->is_direct_mapped()) {
+    PA_DCHECK(slot_span->bucket >= this->buckets &&
+              slot_span->bucket <= &this->sentinel_bucket);
+    size_t bucket_index = slot_span->bucket - this->buckets;
     auto* thread_cache = internal::ThreadCache::Get();
     if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
       return;
   }
 
-  RawFree(ptr, page);
+  RawFree(ptr, slot_span);
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr, Page* page) {
+ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr,
+                                                       SlotSpan* slot_span) {
   internal::DeferredUnmap deferred_unmap;
   {
     ScopedGuard guard{lock_};
-    deferred_unmap = page->Free(ptr);
+    deferred_unmap = slot_span->Free(ptr);
   }
   deferred_unmap.Run();
 }
@@ -581,23 +586,24 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr, Page* page) {
 // static
 template <bool thread_safe>
 void PartitionRoot<thread_safe>::RawFreeStatic(void* ptr) {
-  Page* page = Page::FromPointerNoAlignmentCheck(ptr);
-  auto* root = PartitionRoot<thread_safe>::FromPage(page);
-  root->RawFree(ptr, page);
+  SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
+  auto* root = FromSlotSpan(slot_span);
+  root->RawFree(ptr, slot_span);
 }
 
 // static
 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidPage(Page* page) {
-  PartitionRoot* root = FromPage(page);
+ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
+    SlotSpan* slot_span) {
+  PartitionRoot* root = FromSlotSpan(slot_span);
   return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE PartitionRoot<thread_safe>* PartitionRoot<thread_safe>::FromPage(
-    Page* page) {
+ALWAYS_INLINE PartitionRoot<thread_safe>*
+PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
   auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
-      reinterpret_cast<uintptr_t>(page) & SystemPageBaseMask());
+      reinterpret_cast<uintptr_t>(slot_span) & SystemPageBaseMask());
   return extent_entry->root;
 }
 
@@ -637,20 +643,20 @@ BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
 BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
 
 namespace internal {
-// Gets the PartitionPage object for the first partition page of the slot span
-// that contains |ptr|. It's used with intention to do obtain the slot size.
-// CAUTION! It works well for normal buckets, but for direct-mapped allocations
-// it'll only work if |ptr| is in the first partition page of the allocation.
+// Gets the SlotSpanMetadata object of the slot span that contains |ptr|. It's
+// used with intention to do obtain the slot size. CAUTION! It works well for
+// normal buckets, but for direct-mapped allocations it'll only work if |ptr| is
+// in the first partition page of the allocation.
 template <bool thread_safe>
-ALWAYS_INLINE internal::PartitionPage<thread_safe>*
-PartitionAllocGetPageForSize(void* ptr) {
+ALWAYS_INLINE internal::SlotSpanMetadata<thread_safe>*
+PartitionAllocGetSlotSpanForSizeQuery(void* ptr) {
   // No need to lock here. Only |ptr| being freed by another thread could
   // cause trouble, and the caller is responsible for that not happening.
-  auto* page =
-      internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
+  auto* slot_span =
+      internal::SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
-  PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
-  return page;
+  PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
+  return slot_span;
 }
 }  // namespace internal
 
@@ -662,10 +668,10 @@ PartitionAllocGetPageForSize(void* ptr) {
 // Used as malloc_usable_size.
 template <bool thread_safe>
 ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
-  Page* page = Page::FromPointerNoAlignmentCheck(ptr);
-  auto* root = PartitionRoot<thread_safe>::FromPage(page);
+  SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
+  auto* root = FromSlotSpan(slot_span);
 
-  size_t size = page->GetUtilizedSlotSize();
+  size_t size = slot_span->GetUtilizedSlotSize();
   // Adjust back by subtracing extras (if any).
   size = internal::PartitionSizeAdjustSubtract(root->allow_extras, size);
   return size;
@@ -677,9 +683,10 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
 template <bool thread_safe>
 ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetSize(void* ptr) const {
   ptr = internal::PartitionPointerAdjustSubtract(allow_extras, ptr);
-  auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
-  size_t size = internal::PartitionSizeAdjustSubtract(allow_extras,
-                                                      page->bucket->slot_size);
+  auto* slot_span =
+      internal::PartitionAllocGetSlotSpanForSizeQuery<thread_safe>(ptr);
+  size_t size = internal::PartitionSizeAdjustSubtract(
+      allow_extras, slot_span->bucket->slot_size);
   return size;
 }
 
@@ -815,12 +822,12 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
     // Make sure that the allocated pointer comes from the same place it would
     // for a non-thread cache allocation.
     if (ret) {
-      Page* page = Page::FromPointerNoAlignmentCheck(ret);
+      SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ret);
       // All large allocations must go through the RawAlloc path to correctly
       // set |utilized_slot_size|.
-      PA_DCHECK(!page->CanStoreRawSize());
-      PA_DCHECK(IsValidPage(page));
-      PA_DCHECK(page->bucket == &buckets[bucket_index]);
+      PA_DCHECK(!slot_span->CanStoreRawSize());
+      PA_DCHECK(IsValidSlotSpan(slot_span));
+      PA_DCHECK(slot_span->bucket == &buckets[bucket_index]);
     }
 #endif
   }
@@ -11,7 +11,7 @@ namespace base {
 namespace internal {
 
 template <bool thread_safe>
-struct PartitionPage;
+struct SlotSpanMetadata;
 
 BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr);
 
File diff suppressed because it is too large.
@@ -26,7 +26,7 @@ namespace internal {
 namespace {
 
 template <bool thread_safe>
-ALWAYS_INLINE PartitionPage<thread_safe>*
+ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
 PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
     EXCLUSIVE_LOCKS_REQUIRED(root->lock_) {
   size_t size = PartitionBucket<thread_safe>::get_direct_map_size(raw_size);
@@ -86,22 +86,23 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
                          &metadata->page);
 
   auto* page = &metadata->page;
-  PA_DCHECK(!page->next_page);
-  PA_DCHECK(!page->num_allocated_slots);
-  PA_DCHECK(!page->num_unprovisioned_slots);
-  PA_DCHECK(!page->page_offset);
-  PA_DCHECK(!page->empty_cache_index);
-  page->bucket = &metadata->bucket;
-  page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
+  PA_DCHECK(!page->slot_span_metadata_offset);
+  PA_DCHECK(!page->slot_span_metadata.next_slot_span);
+  PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
+  PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
+  PA_DCHECK(!page->slot_span_metadata.empty_cache_index);
+  page->slot_span_metadata.bucket = &metadata->bucket;
+  page->slot_span_metadata.freelist_head =
+      reinterpret_cast<PartitionFreelistEntry*>(slot);
 
   auto* next_entry = reinterpret_cast<PartitionFreelistEntry*>(slot);
   next_entry->next = PartitionFreelistEntry::Encode(nullptr);
 
-  PA_DCHECK(!metadata->bucket.active_pages_head);
-  PA_DCHECK(!metadata->bucket.empty_pages_head);
-  PA_DCHECK(!metadata->bucket.decommitted_pages_head);
+  PA_DCHECK(!metadata->bucket.active_slot_spans_head);
+  PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
+  PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
   PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
-  PA_DCHECK(!metadata->bucket.num_full_pages);
+  PA_DCHECK(!metadata->bucket.num_full_slot_spans);
   metadata->bucket.slot_size = size;
 
   auto* map_extent = &metadata->direct_map_extent;
@@ -115,7 +116,7 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
   map_extent->prev_extent = nullptr;
   root->direct_map_list = map_extent;
 
-  return page;
+  return reinterpret_cast<SlotSpanMetadata<thread_safe>*>(page);
 }
 
 }  // namespace
@@ -188,10 +189,11 @@ template <bool thread_safe>
 void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
   slot_size = new_slot_size;
   slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
-  active_pages_head = PartitionPage<thread_safe>::get_sentinel_page();
-  empty_pages_head = nullptr;
-  decommitted_pages_head = nullptr;
-  num_full_pages = 0;
+  active_slot_spans_head =
+      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
+  empty_slot_spans_head = nullptr;
+  decommitted_slot_spans_head = nullptr;
+  num_full_slot_spans = 0;
   num_system_pages_per_slot_span = get_system_pages_per_slot_span();
 }
 
@@ -405,41 +407,40 @@ ALWAYS_INLINE uint16_t PartitionBucket<thread_safe>::get_pages_per_slot_span() {
 
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
-    PartitionPage<thread_safe>* page) {
+    SlotSpanMetadata<thread_safe>* slot_span) {
   // The bucket never changes. We set it up once.
-  page->bucket = this;
-  page->empty_cache_index = -1;
+  slot_span->bucket = this;
+  slot_span->empty_cache_index = -1;
 
-  page->Reset();
+  slot_span->Reset();
 
   uint16_t num_partition_pages = get_pages_per_slot_span();
-  char* page_char_ptr = reinterpret_cast<char*>(page);
+  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
   for (uint16_t i = 1; i < num_partition_pages; ++i) {
-    page_char_ptr += kPageMetadataSize;
-    auto* secondary_page =
-        reinterpret_cast<PartitionPage<thread_safe>*>(page_char_ptr);
-    secondary_page->page_offset = i;
+    auto* secondary_page = page + i;
+    secondary_page->slot_span_metadata_offset = i;
   }
 }
 
 template <bool thread_safe>
 ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
-    PartitionPage<thread_safe>* page) {
-  PA_DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
-  uint16_t num_slots = page->num_unprovisioned_slots;
+    SlotSpanMetadata<thread_safe>* slot_span) {
+  PA_DCHECK(slot_span !=
+            SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
+  uint16_t num_slots = slot_span->num_unprovisioned_slots;
   PA_DCHECK(num_slots);
   // We should only get here when _every_ slot is either used or unprovisioned.
   // (The third state is "on the freelist". If we have a non-empty freelist, we
   // should not get here.)
-  PA_DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
+  PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
   // Similarly, make explicitly sure that the freelist is empty.
-  PA_DCHECK(!page->freelist_head);
-  PA_DCHECK(page->num_allocated_slots >= 0);
+  PA_DCHECK(!slot_span->freelist_head);
+  PA_DCHECK(slot_span->num_allocated_slots >= 0);
 
   size_t size = slot_size;
-  char* base =
-      reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
-  char* return_object = base + (size * page->num_allocated_slots);
+  char* base = reinterpret_cast<char*>(
+      SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
+  char* return_object = base + (size * slot_span->num_allocated_slots);
   char* first_freelist_pointer = return_object + size;
   char* first_freelist_pointer_extent =
       first_freelist_pointer + sizeof(PartitionFreelistEntry*);
@@ -470,13 +471,13 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
   // sub page boundaries frequently for large bucket sizes.
   PA_DCHECK(num_new_freelist_entries + 1 <= num_slots);
   num_slots -= (num_new_freelist_entries + 1);
-  page->num_unprovisioned_slots = num_slots;
-  page->num_allocated_slots++;
+  slot_span->num_unprovisioned_slots = num_slots;
+  slot_span->num_allocated_slots++;
 
   if (LIKELY(num_new_freelist_entries)) {
     char* freelist_pointer = first_freelist_pointer;
     auto* entry = reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
-    page->freelist_head = entry;
+    slot_span->freelist_head = entry;
     while (--num_new_freelist_entries) {
       freelist_pointer += size;
       auto* next_entry =
@@ -486,56 +487,57 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
     }
     entry->next = PartitionFreelistEntry::Encode(nullptr);
   } else {
-    page->freelist_head = nullptr;
+    slot_span->freelist_head = nullptr;
   }
   return return_object;
 }

 template <bool thread_safe>
-bool PartitionBucket<thread_safe>::SetNewActivePage() {
-  PartitionPage<thread_safe>* page = active_pages_head;
-  if (page == PartitionPage<thread_safe>::get_sentinel_page())
+bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
+  SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
+  if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
     return false;

-  PartitionPage<thread_safe>* next_page;
+  SlotSpanMetadata<thread_safe>* next_slot_span;

-  for (; page; page = next_page) {
-    next_page = page->next_page;
-    PA_DCHECK(page->bucket == this);
-    PA_DCHECK(page != empty_pages_head);
-    PA_DCHECK(page != decommitted_pages_head);
+  for (; slot_span; slot_span = next_slot_span) {
+    next_slot_span = slot_span->next_slot_span;
+    PA_DCHECK(slot_span->bucket == this);
+    PA_DCHECK(slot_span != empty_slot_spans_head);
+    PA_DCHECK(slot_span != decommitted_slot_spans_head);

-    if (LIKELY(page->is_active())) {
-      // This page is usable because it has freelist entries, or has
+    if (LIKELY(slot_span->is_active())) {
+      // This slot span is usable because it has freelist entries, or has
       // unprovisioned slots we can create freelist entries from.
-      active_pages_head = page;
+      active_slot_spans_head = slot_span;
       return true;
     }

-    // Deal with empty and decommitted pages.
-    if (LIKELY(page->is_empty())) {
-      page->next_page = empty_pages_head;
-      empty_pages_head = page;
-    } else if (LIKELY(page->is_decommitted())) {
-      page->next_page = decommitted_pages_head;
-      decommitted_pages_head = page;
+    // Deal with empty and decommitted slot spans.
+    if (LIKELY(slot_span->is_empty())) {
+      slot_span->next_slot_span = empty_slot_spans_head;
+      empty_slot_spans_head = slot_span;
+    } else if (LIKELY(slot_span->is_decommitted())) {
+      slot_span->next_slot_span = decommitted_slot_spans_head;
+      decommitted_slot_spans_head = slot_span;
     } else {
-      PA_DCHECK(page->is_full());
-      // If we get here, we found a full page. Skip over it too, and also
+      PA_DCHECK(slot_span->is_full());
+      // If we get here, we found a full slot span. Skip over it too, and also
       // tag it as full (via a negative value). We need it tagged so that
-      // free'ing can tell, and move it back into the active page list.
-      page->num_allocated_slots = -page->num_allocated_slots;
-      ++num_full_pages;
-      // num_full_pages is a uint16_t for efficient packing so guard against
-      // overflow to be safe.
-      if (UNLIKELY(!num_full_pages))
+      // free'ing can tell, and move it back into the active list.
+      slot_span->num_allocated_slots = -slot_span->num_allocated_slots;
+      ++num_full_slot_spans;
+      // num_full_slot_spans is a uint16_t for efficient packing so guard
+      // against overflow to be safe.
+      if (UNLIKELY(!num_full_slot_spans))
         OnFull();
       // Not necessary but might help stop accidents.
-      page->next_page = nullptr;
+      slot_span->next_slot_span = nullptr;
     }
   }

-  active_pages_head = PartitionPage<thread_safe>::get_sentinel_page();
+  active_slot_spans_head =
+      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
   return false;
 }

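Illustrative aside on SetNewActiveSlotSpan(): the sketch below is a minimal, self-contained C++ rendering of the same list-maintenance idea. All names and the simplified state flags are hypothetical stand-ins, not PartitionAlloc's API; the real code additionally uses a sentinel span and encodes "full" as a negative num_allocated_slots.

// Standalone sketch: one scan of the singly-linked active list either finds a
// usable span or sweeps each visited span onto the list matching its state.
#include <cstdint>

struct Span {
  Span* next = nullptr;
  int16_t num_allocated_slots = 0;  // negated to tag "full", as in the CL
  bool has_free_slots = false;
  bool decommitted = false;
};

struct Bucket {
  Span* active_head = nullptr;
  Span* empty_head = nullptr;
  Span* decommitted_head = nullptr;

  bool SetNewActiveSpan() {
    Span* next;
    for (Span* s = active_head; s; s = next) {
      next = s->next;
      if (s->has_free_slots) {                   // usable right away
        active_head = s;
        return true;
      }
      if (s->decommitted) {                      // sweep onto decommitted list
        s->next = decommitted_head;
        decommitted_head = s;
      } else if (s->num_allocated_slots == 0) {  // empty
        s->next = empty_head;
        empty_head = s;
      } else {                                   // full: tag and unlink
        s->num_allocated_slots = -s->num_allocated_slots;
        s->next = nullptr;
      }
    }
    active_head = nullptr;  // PartitionAlloc points at a sentinel span instead
    return false;
  }
};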
@@ -546,14 +548,14 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
     size_t raw_size,
     bool* is_already_zeroed) {
   // The slow path is called when the freelist is empty.
-  PA_DCHECK(!active_pages_head->freelist_head);
+  PA_DCHECK(!active_slot_spans_head->freelist_head);

-  PartitionPage<thread_safe>* new_page = nullptr;
-  // |new_page->bucket| will always be |this|, except when |this| is the
+  SlotSpanMetadata<thread_safe>* new_slot_span = nullptr;
+  // |new_slot_span->bucket| will always be |this|, except when |this| is the
   // sentinel bucket, which is used to signal a direct mapped allocation. In
-  // this case |new_page_bucket| will be set properly later. This avoids a read
-  // for most allocations.
-  PartitionBucket* new_page_bucket = this;
+  // this case |new_bucket| will be set properly later. This avoids a read for
+  // most allocations.
+  PartitionBucket* new_bucket = this;
   *is_already_zeroed = false;

   // For the PartitionRoot::Alloc() API, we have a bunch of buckets
@@ -562,15 +564,15 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
   // branches.
   //
   // Note: The ordering of the conditionals matter! In particular,
-  // SetNewActivePage() has a side-effect even when returning
-  // false where it sweeps the active page list and may move things into
-  // the empty or decommitted lists which affects the subsequent conditional.
+  // SetNewActiveSlotSpan() has a side-effect even when returning
+  // false where it sweeps the active list and may move things into the empty or
+  // decommitted lists which affects the subsequent conditional.
   bool return_null = flags & PartitionAllocReturnNull;
   if (UNLIKELY(is_direct_mapped())) {
     PA_DCHECK(raw_size > kMaxBucketed);
     PA_DCHECK(this == &root->sentinel_bucket);
-    PA_DCHECK(active_pages_head ==
-              PartitionPage<thread_safe>::get_sentinel_page());
+    PA_DCHECK(active_slot_spans_head ==
+              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
     if (raw_size > MaxDirectMapped()) {
       if (return_null)
         return nullptr;
@@ -599,62 +601,65 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
       PartitionExcessiveAllocationSize(raw_size);
       IMMEDIATE_CRASH();  // Not required, kept as documentation.
     }
-    new_page = PartitionDirectMap(root, flags, raw_size);
-    if (new_page)
-      new_page_bucket = new_page->bucket;
-    // New pages from PageAllocator are always zeroed.
+    new_slot_span = PartitionDirectMap(root, flags, raw_size);
+    if (new_slot_span)
+      new_bucket = new_slot_span->bucket;
+    // Memory from PageAllocator is always zeroed.
     *is_already_zeroed = true;
-  } else if (LIKELY(SetNewActivePage())) {
-    // First, did we find an active page in the active pages list?
-    new_page = active_pages_head;
-    PA_DCHECK(new_page->is_active());
-  } else if (LIKELY(empty_pages_head != nullptr) ||
-             LIKELY(decommitted_pages_head != nullptr)) {
-    // Second, look in our lists of empty and decommitted pages.
-    // Check empty pages first, which are preferred, but beware that an
-    // empty page might have been decommitted.
-    while (LIKELY((new_page = empty_pages_head) != nullptr)) {
-      PA_DCHECK(new_page->bucket == this);
-      PA_DCHECK(new_page->is_empty() || new_page->is_decommitted());
-      empty_pages_head = new_page->next_page;
-      // Accept the empty page unless it got decommitted.
-      if (new_page->freelist_head) {
-        new_page->next_page = nullptr;
+  } else if (LIKELY(SetNewActiveSlotSpan())) {
+    // First, did we find an active slot span in the active list?
+    new_slot_span = active_slot_spans_head;
+    PA_DCHECK(new_slot_span->is_active());
+  } else if (LIKELY(empty_slot_spans_head != nullptr) ||
+             LIKELY(decommitted_slot_spans_head != nullptr)) {
+    // Second, look in our lists of empty and decommitted slot spans.
+    // Check empty slot spans first, which are preferred, but beware that an
+    // empty slot span might have been decommitted.
+    while (LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
+      PA_DCHECK(new_slot_span->bucket == this);
+      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
+      empty_slot_spans_head = new_slot_span->next_slot_span;
+      // Accept the empty slot span unless it got decommitted.
+      if (new_slot_span->freelist_head) {
+        new_slot_span->next_slot_span = nullptr;
         break;
       }
-      PA_DCHECK(new_page->is_decommitted());
-      new_page->next_page = decommitted_pages_head;
-      decommitted_pages_head = new_page;
+      PA_DCHECK(new_slot_span->is_decommitted());
+      new_slot_span->next_slot_span = decommitted_slot_spans_head;
+      decommitted_slot_spans_head = new_slot_span;
     }
-    if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
-      new_page = decommitted_pages_head;
-      PA_DCHECK(new_page->bucket == this);
-      PA_DCHECK(new_page->is_decommitted());
-      decommitted_pages_head = new_page->next_page;
-      void* addr = PartitionPage<thread_safe>::ToPointer(new_page);
-      root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
-      new_page->Reset();
+    if (UNLIKELY(!new_slot_span) &&
+        LIKELY(decommitted_slot_spans_head != nullptr)) {
+      new_slot_span = decommitted_slot_spans_head;
+      PA_DCHECK(new_slot_span->bucket == this);
+      PA_DCHECK(new_slot_span->is_decommitted());
+      decommitted_slot_spans_head = new_slot_span->next_slot_span;
+      void* addr = SlotSpanMetadata<thread_safe>::ToPointer(new_slot_span);
+      root->RecommitSystemPages(addr,
+                                new_slot_span->bucket->get_bytes_per_span());
+      new_slot_span->Reset();
       *is_already_zeroed = kDecommittedPagesAreAlwaysZeroed;
     }
-    PA_DCHECK(new_page);
+    PA_DCHECK(new_slot_span);
   } else {
-    // Third. If we get here, we need a brand new page.
+    // Third. If we get here, we need a brand new slot span.
     uint16_t num_partition_pages = get_pages_per_slot_span();
-    void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages,
-                                       get_bytes_per_span());
-    if (LIKELY(raw_pages != nullptr)) {
-      new_page =
-          PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(raw_pages);
-      InitializeSlotSpan(new_page);
-      // New pages from PageAllocator are always zeroed.
+    void* raw_memory = AllocNewSlotSpan(root, flags, num_partition_pages,
+                                        get_bytes_per_span());
+    if (LIKELY(raw_memory != nullptr)) {
+      new_slot_span =
+          SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(
+              raw_memory);
+      InitializeSlotSpan(new_slot_span);
+      // New memory from PageAllocator is always zeroed.
      *is_already_zeroed = true;
     }
   }

   // Bail if we had a memory allocation failure.
-  if (UNLIKELY(!new_page)) {
-    PA_DCHECK(active_pages_head ==
-              PartitionPage<thread_safe>::get_sentinel_page());
+  if (UNLIKELY(!new_slot_span)) {
+    PA_DCHECK(active_slot_spans_head ==
+              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
     if (return_null)
       return nullptr;
     // See comment above.
@@ -663,24 +668,24 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
     IMMEDIATE_CRASH();  // Not required, kept as documentation.
   }

-  PA_DCHECK(new_page_bucket != &root->sentinel_bucket);
-  new_page_bucket->active_pages_head = new_page;
-  if (new_page->CanStoreRawSize())
-    new_page->SetRawSize(raw_size);
+  PA_DCHECK(new_bucket != &root->sentinel_bucket);
+  new_bucket->active_slot_spans_head = new_slot_span;
+  if (new_slot_span->CanStoreRawSize())
+    new_slot_span->SetRawSize(raw_size);

-  // If we found an active page with free slots, or an empty page, we have a
-  // usable freelist head.
-  if (LIKELY(new_page->freelist_head != nullptr)) {
-    PartitionFreelistEntry* entry = new_page->freelist_head;
+  // If we found an active slot span with free slots, or an empty slot span, we
+  // have a usable freelist head.
+  if (LIKELY(new_slot_span->freelist_head != nullptr)) {
+    PartitionFreelistEntry* entry = new_slot_span->freelist_head;
     PartitionFreelistEntry* new_head =
         EncodedPartitionFreelistEntry::Decode(entry->next);
-    new_page->freelist_head = new_head;
-    new_page->num_allocated_slots++;
+    new_slot_span->freelist_head = new_head;
+    new_slot_span->num_allocated_slots++;
     return entry;
   }
   // Otherwise, we need to build the freelist.
-  PA_DCHECK(new_page->num_unprovisioned_slots);
-  return AllocAndFillFreelist(new_page);
+  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
+  return AllocAndFillFreelist(new_slot_span);
 }

 template struct PartitionBucket<ThreadSafe>;
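Illustrative aside on AllocAndFillFreelist(): a minimal, self-contained sketch of threading a freelist through a run of equally sized slots. Names are hypothetical; the real code also stops provisioning at system-page boundaries and stores encoded freelist pointers.

// Standalone sketch: carve |num_slots| slots of |slot_size| bytes out of a
// buffer, hand the first slot to the caller, and link the rest into a list.
#include <cstddef>

struct FreelistEntry {
  FreelistEntry* next;
};

// Returns the first slot; |head_out| receives the freelist of the remainder.
inline char* CarveSlots(char* base, size_t slot_size, size_t num_slots,
                        FreelistEntry** head_out) {
  char* returned = base;  // first slot goes straight to the caller
  FreelistEntry* head = nullptr;
  FreelistEntry** prev_next = &head;
  for (size_t i = 1; i < num_slots; ++i) {
    auto* entry = reinterpret_cast<FreelistEntry*>(base + i * slot_size);
    *prev_next = entry;        // link the previous entry to this one
    prev_next = &entry->next;
  }
  *prev_next = nullptr;        // terminate the list
  *head_out = head;
  return returned;
}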
base/allocator/partition_allocator/partition_bucket.h
@@ -22,13 +22,13 @@ namespace internal {
 template <bool thread_safe>
 struct PartitionBucket {
   // Accessed most in hot path => goes first.
-  PartitionPage<thread_safe>* active_pages_head;
+  SlotSpanMetadata<thread_safe>* active_slot_spans_head;

-  PartitionPage<thread_safe>* empty_pages_head;
-  PartitionPage<thread_safe>* decommitted_pages_head;
+  SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
+  SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
   uint32_t slot_size;
   uint32_t num_system_pages_per_slot_span : 8;
-  uint32_t num_full_pages : 24;
+  uint32_t num_full_slot_spans : 24;

   // `slot_size_reciprocal` is used to improve the performance of
   // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
@@ -57,7 +57,7 @@ struct PartitionBucket {
   // there is no need to call memset on fresh pages; the OS has already zeroed
   // them. (See |PartitionRoot::AllocFromBucket|.)
   //
-  // Note the matching Free() functions are in PartitionPage.
+  // Note the matching Free() functions are in SlotSpanMetadata.
   BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
                                            int flags,
                                            size_t raw_size,
@@ -86,17 +86,17 @@ struct PartitionBucket {
     return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
   }

-  // This helper function scans a bucket's active page list for a suitable new
-  // active page. When it finds a suitable new active page (one that has
-  // free slots and is not empty), it is set as the new active page. If there
-  // is no suitable new active page, the current active page is set to
-  // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
-  // are tidied up according to their state. Empty pages are swept on to the
-  // empty page list, decommitted pages on to the decommitted page list and full
-  // pages are unlinked from any list.
+  // This helper function scans a bucket's active slot span list for a suitable
+  // new active slot span. When it finds a suitable new active slot span (one
+  // that has free slots and is not empty), it is set as the new active slot
+  // span. If there is no suitable new active slot span, the current active slot
+  // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
+  // slot spans are scanned, they are tidied up according to their state. Empty
+  // slot spans are swept on to the empty list, decommitted slot spans on to the
+  // decommitted list and full slot spans are unlinked from any list.
   //
   // This is where the guts of the bucket maintenance is done!
-  bool SetNewActivePage();
+  bool SetNewActiveSlotSpan();

   // Returns an offset within an allocation slot.
   ALWAYS_INLINE size_t GetSlotOffset(size_t offset_in_slot_span) {
@@ -122,17 +122,17 @@ struct PartitionBucket {
  private:
   static NOINLINE void OnFull();

-  // Returns a natural number of PartitionPages (calculated by
+  // Returns a natural number of partition pages (calculated by
   // get_system_pages_per_slot_span()) to allocate from the current
-  // SuperPage when the bucket runs out of slots.
+  // super page when the bucket runs out of slots.
   ALWAYS_INLINE uint16_t get_pages_per_slot_span();

   // Returns the number of system pages in a slot span.
   //
-  // The calculation attemps to find the best number of System Pages to
+  // The calculation attempts to find the best number of system pages to
   // allocate for the given slot_size to minimize wasted space. It uses a
   // heuristic that looks at number of bytes wasted after the last slot and
-  // attempts to account for the PTE usage of each System Page.
+  // attempts to account for the PTE usage of each system page.
   uint8_t get_system_pages_per_slot_span();

   // Allocates a new slot span with size |num_partition_pages| from the
@@ -146,16 +146,19 @@ struct PartitionBucket {

   // Each bucket allocates a slot span when it runs out of slots.
   // A slot span's size is equal to get_pages_per_slot_span() number of
-  // PartitionPages. This function initializes all PartitionPage within the
+  // partition pages. This function initializes all PartitionPage within the
   // span to point to the first PartitionPage which holds all the metadata
-  // for the span and registers this bucket as the owner of the span. It does
-  // NOT put the slots into the bucket's freelist.
-  ALWAYS_INLINE void InitializeSlotSpan(PartitionPage<thread_safe>* page);
+  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
+  // as the owner of the span. It does NOT put the slots into the bucket's
+  // freelist.
+  ALWAYS_INLINE void InitializeSlotSpan(
+      SlotSpanMetadata<thread_safe>* slot_span);

-  // Allocates one slot from the given |page| and then adds the remainder to
-  // the current bucket. If the |page| was freshly allocated, it must have been
-  // passed through InitializeSlotSpan() first.
-  ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page);
+  // Allocates one slot from the given |slot_span| and then adds the remainder
+  // to the current bucket. If the |slot_span| was freshly allocated, it must
+  // have been passed through InitializeSlotSpan() first.
+  ALWAYS_INLINE char* AllocAndFillFreelist(
+      SlotSpanMetadata<thread_safe>* slot_span);
 };

 }  // namespace internal
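Illustrative aside on the `slot_size_reciprocal` comment above: it refers to the usual multiply-and-shift replacement for an integer division. A small worked sketch follows; the shift constant M and the helper names are hypothetical placeholders, not the values PartitionAlloc actually uses.

// Standalone sketch: divide by slot_size without a division on the hot path.
#include <cstdint>

constexpr int kReciprocalShift = 42;  // the "M" in the comment above

constexpr uint64_t ComputeReciprocal(uint64_t slot_size) {
  // ceil(2^M / slot_size), precomputed once per bucket.
  return ((uint64_t{1} << kReciprocalShift) + slot_size - 1) / slot_size;
}

inline uint64_t SlotIndex(uint64_t offset_in_slot_span, uint64_t reciprocal) {
  // Equivalent to offset_in_slot_span / slot_size for offsets well below 2^M.
  return (offset_in_slot_span * reciprocal) >> kReciprocalShift;
}

// Example: SlotIndex(100, ComputeReciprocal(48)) == 2, the same as 100 / 48.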
base/allocator/partition_allocator/partition_direct_map_extent.h
@@ -20,8 +20,8 @@ struct PartitionDirectMapExtent {
   PartitionBucket<thread_safe>* bucket;
   size_t map_size;  // Mapped size, not including guard pages and meta-data.

-  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromPage(
-      PartitionPage<thread_safe>* page);
+  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
+      SlotSpanMetadata<thread_safe>* slot_span);
 };

 // Metadata page for direct-mapped allocations.
@@ -39,9 +39,10 @@ struct PartitionDirectMapMetadata {

 template <bool thread_safe>
 ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
-PartitionDirectMapExtent<thread_safe>::FromPage(
-    PartitionPage<thread_safe>* page) {
-  PA_DCHECK(page->bucket->is_direct_mapped());
+PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
+    SlotSpanMetadata<thread_safe>* slot_span) {
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
   // The page passed here is always |page| in |PartitionDirectMapMetadata|
   // above. To get the metadata structure, need to get the invalid page address.
   auto* first_invalid_page = page - 1;
base/allocator/partition_allocator/partition_page.cc
@@ -9,6 +9,7 @@
 #include "base/allocator/partition_allocator/partition_alloc.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_features.h"
+#include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
 #include "base/check.h"
 #include "base/feature_list.h"
@@ -22,11 +23,10 @@ namespace {

 template <bool thread_safe>
 ALWAYS_INLINE DeferredUnmap
-PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
-  PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
+PartitionDirectUnmap(SlotSpanMetadata<thread_safe>* slot_span) {
+  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
   root->lock_.AssertAcquired();
-  const PartitionDirectMapExtent<thread_safe>* extent =
-      PartitionDirectMapExtent<thread_safe>::FromPage(page);
+  auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);
   size_t unmap_size = extent->map_size;

   // Maintain the doubly-linked list of all direct mappings.
@@ -45,15 +45,16 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
   // page.
   unmap_size += PartitionPageSize() + SystemPageSize();

-  size_t uncommitted_page_size = page->bucket->slot_size + SystemPageSize();
+  size_t uncommitted_page_size =
+      slot_span->bucket->slot_size + SystemPageSize();
   root->DecreaseCommittedPages(uncommitted_page_size);
   PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
   root->total_size_of_direct_mapped_pages -= uncommitted_page_size;

   PA_DCHECK(!(unmap_size & PageAllocationGranularityOffsetMask()));

-  char* ptr =
-      reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
+  char* ptr = reinterpret_cast<char*>(
+      SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
   // Account for the mapping starting a partition page before the actual
   // allocation address.
   ptr -= PartitionPageSize();
@@ -61,78 +62,82 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
 }

 template <bool thread_safe>
-ALWAYS_INLINE void PartitionRegisterEmptyPage(
-    PartitionPage<thread_safe>* page) {
-  PA_DCHECK(page->is_empty());
-  PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
+ALWAYS_INLINE void PartitionRegisterEmptySlotSpan(
+    SlotSpanMetadata<thread_safe>* slot_span) {
+  PA_DCHECK(slot_span->is_empty());
+  PartitionRoot<thread_safe>* root =
+      PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
   root->lock_.AssertAcquired();

-  // If the page is already registered as empty, give it another life.
-  if (page->empty_cache_index != -1) {
-    PA_DCHECK(page->empty_cache_index >= 0);
-    PA_DCHECK(static_cast<unsigned>(page->empty_cache_index) <
+  // If the slot span is already registered as empty, give it another life.
+  if (slot_span->empty_cache_index != -1) {
+    PA_DCHECK(slot_span->empty_cache_index >= 0);
+    PA_DCHECK(static_cast<unsigned>(slot_span->empty_cache_index) <
               kMaxFreeableSpans);
-    PA_DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
-    root->global_empty_page_ring[page->empty_cache_index] = nullptr;
+    PA_DCHECK(root->global_empty_slot_span_ring[slot_span->empty_cache_index] ==
+              slot_span);
+    root->global_empty_slot_span_ring[slot_span->empty_cache_index] = nullptr;
   }

-  int16_t current_index = root->global_empty_page_ring_index;
-  PartitionPage<thread_safe>* page_to_decommit =
-      root->global_empty_page_ring[current_index];
-  // The page might well have been re-activated, filled up, etc. before we get
-  // around to looking at it here.
-  if (page_to_decommit)
-    page_to_decommit->DecommitIfPossible(root);
+  int16_t current_index = root->global_empty_slot_span_ring_index;
+  SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
+      root->global_empty_slot_span_ring[current_index];
+  // The slot span might well have been re-activated, filled up, etc. before we
+  // get around to looking at it here.
+  if (slot_span_to_decommit)
+    slot_span_to_decommit->DecommitIfPossible(root);

-  // We put the empty slot span on our global list of "pages that were once
+  // We put the empty slot span on our global list of "slot spans that were once
   // empty". thus providing it a bit of breathing room to get re-used before
   // we really free it. This improves performance, particularly on Mac OS X
   // which has subpar memory management performance.
-  root->global_empty_page_ring[current_index] = page;
-  page->empty_cache_index = current_index;
+  root->global_empty_slot_span_ring[current_index] = slot_span;
+  slot_span->empty_cache_index = current_index;
   ++current_index;
   if (current_index == kMaxFreeableSpans)
     current_index = 0;
-  root->global_empty_page_ring_index = current_index;
+  root->global_empty_slot_span_ring_index = current_index;
 }

 }  // namespace

 // static
 template <bool thread_safe>
-PartitionPage<thread_safe> PartitionPage<thread_safe>::sentinel_page_;
+SlotSpanMetadata<thread_safe>
+    SlotSpanMetadata<thread_safe>::sentinel_slot_span_;

 // static
 template <bool thread_safe>
-PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() {
-  return &sentinel_page_;
+SlotSpanMetadata<thread_safe>*
+SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
+  return &sentinel_slot_span_;
 }

 template <bool thread_safe>
-DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
+DeferredUnmap SlotSpanMetadata<thread_safe>::FreeSlowPath() {
 #if DCHECK_IS_ON()
-  auto* root = PartitionRoot<thread_safe>::FromPage(this);
+  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
   root->lock_.AssertAcquired();
 #endif
-  PA_DCHECK(this != get_sentinel_page());
+  PA_DCHECK(this != get_sentinel_slot_span());
   if (LIKELY(num_allocated_slots == 0)) {
-    // Page became fully unused.
+    // Slot span became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
       return PartitionDirectUnmap(this);
     }
-    // If it's the current active page, change it. We bounce the page to
-    // the empty list as a force towards defragmentation.
-    if (LIKELY(this == bucket->active_pages_head))
-      bucket->SetNewActivePage();
-    PA_DCHECK(bucket->active_pages_head != this);
+    // If it's the current active slot span, change it. We bounce the slot span
+    // to the empty list as a force towards defragmentation.
+    if (LIKELY(this == bucket->active_slot_spans_head))
+      bucket->SetNewActiveSlotSpan();
+    PA_DCHECK(bucket->active_slot_spans_head != this);

     if (CanStoreRawSize())
       SetRawSize(0);

-    PartitionRegisterEmptyPage(this);
+    PartitionRegisterEmptySlotSpan(this);
   } else {
     PA_DCHECK(!bucket->is_direct_mapped());
-    // Ensure that the page is full. That's the only valid case if we
+    // Ensure that the slot span is full. That's the only valid case if we
     // arrive here.
     PA_DCHECK(num_allocated_slots < 0);
     // A transition of num_allocated_slots from 0 to -1 is not legal, and
@@ -140,16 +145,16 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
     PA_CHECK(num_allocated_slots != -1);
     num_allocated_slots = -num_allocated_slots - 2;
     PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
-    // Fully used page became partially used. It must be put back on the
-    // non-full page list. Also make it the current page to increase the
-    // chances of it being filled up again. The old current page will be
-    // the next page.
-    PA_DCHECK(!next_page);
-    if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
-      next_page = bucket->active_pages_head;
-    bucket->active_pages_head = this;
-    --bucket->num_full_pages;
-    // Special case: for a partition page with just a single slot, it may
+    // Fully used slot span became partially used. It must be put back on the
+    // non-full list. Also make it the current slot span to increase the
+    // chances of it being filled up again. The old current slot span will be
+    // the next slot span.
+    PA_DCHECK(!next_slot_span);
+    if (LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
+      next_slot_span = bucket->active_slot_spans_head;
+    bucket->active_slot_spans_head = this;
+    --bucket->num_full_slot_spans;
+    // Special case: for a partition slot span with just a single slot, it may
     // now be empty and we want to run it through the empty logic.
     if (UNLIKELY(num_allocated_slots == 0))
       return FreeSlowPath();
@@ -158,31 +163,30 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
 }

 template <bool thread_safe>
-void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
+void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
   root->lock_.AssertAcquired();
   PA_DCHECK(is_empty());
   PA_DCHECK(!bucket->is_direct_mapped());
-  void* addr = PartitionPage::ToPointer(this);
+  void* addr = SlotSpanMetadata::ToPointer(this);
   root->DecommitSystemPages(addr, bucket->get_bytes_per_span());

-  // We actually leave the decommitted page in the active list. We'll sweep
-  // it on to the decommitted page list when we next walk the active page
-  // list.
-  // Pulling this trick enables us to use a singly-linked page list for all
-  // cases, which is critical in keeping the page metadata structure down to
-  // 32 bytes in size.
+  // We actually leave the decommitted slot span in the active list. We'll sweep
+  // it on to the decommitted list when we next walk the active list.
+  // Pulling this trick enables us to use a singly-linked list for all
+  // cases, which is critical in keeping the slot span metadata structure down
+  // to 32 bytes in size.
   freelist_head = nullptr;
   num_unprovisioned_slots = 0;
   PA_DCHECK(is_decommitted());
 }

 template <bool thread_safe>
-void PartitionPage<thread_safe>::DecommitIfPossible(
+void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
     PartitionRoot<thread_safe>* root) {
   root->lock_.AssertAcquired();
   PA_DCHECK(empty_cache_index >= 0);
   PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
-  PA_DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index]);
   empty_cache_index = -1;
   if (is_empty())
     Decommit(root);
@@ -201,8 +205,8 @@ void DeferredUnmap::Unmap() {
   }
 }

-template struct PartitionPage<ThreadSafe>;
-template struct PartitionPage<NotThreadSafe>;
+template struct SlotSpanMetadata<ThreadSafe>;
+template struct SlotSpanMetadata<NotThreadSafe>;

 }  // namespace internal
 }  // namespace base
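Illustrative aside on the global empty-slot-span ring touched above: registering an empty span may push out, and finally decommit, the oldest cached span, which is what gives empty spans "a bit of breathing room". A minimal standalone sketch with hypothetical names (the real ring holds kMaxFreeableSpans entries and runs under the partition lock):

// Standalone sketch: a fixed-size ring of recently-emptied spans.
#include <cstddef>

constexpr size_t kRingSize = 16;  // stand-in for kMaxFreeableSpans

struct Span {
  int empty_cache_index = -1;  // -1 when not in the ring
  void DecommitIfPossible() { /* release backing memory if still empty */ }
};

struct EmptySpanRing {
  Span* slots[kRingSize] = {};
  size_t next = 0;

  void Register(Span* span) {
    if (span->empty_cache_index != -1)   // already cached: renew its slot
      slots[span->empty_cache_index] = nullptr;
    if (Span* victim = slots[next]) {    // displace the oldest entry...
      victim->empty_cache_index = -1;
      victim->DecommitIfPossible();      // ...which finally gets decommitted
    }
    slots[next] = span;
    span->empty_cache_index = static_cast<int>(next);
    next = (next + 1) % kRingSize;
  }
};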
base/allocator/partition_allocator/partition_page.h
@@ -37,8 +37,8 @@ static_assert(
     sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
     "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");

-// PartitionPage::Free() defers unmapping a large page until the lock is
-// released. Callers of PartitionPage::Free() must invoke Run().
+// SlotSpanMetadata::Free() defers unmapping a large page until the lock is
+// released. Callers of SlotSpanMetadata::Free() must invoke Run().
 // TODO(1061437): Reconsider once the new locking mechanism is implemented.
 struct DeferredUnmap {
   void* ptr = nullptr;
@@ -55,58 +55,46 @@ struct DeferredUnmap {
 using QuarantineBitmap =
     ObjectBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;

-// Some notes on page states. A page can be in one of four major states:
+// Metadata of the slot span.
+//
+// Some notes on slot span states. It can be in one of four major states:
 // 1) Active.
 // 2) Full.
 // 3) Empty.
 // 4) Decommitted.
-// An active page has available free slots. A full page has no free slots. An
-// empty page has no free slots, and a decommitted page is an empty page that
-// had its backing memory released back to the system.
-// There are two linked lists tracking the pages. The "active page" list is an
-// approximation of a list of active pages. It is an approximation because
-// full, empty and decommitted pages may briefly be present in the list until
-// we next do a scan over it.
-// The "empty page" list is an accurate list of pages which are either empty
-// or decommitted.
+// An active slot span has available free slots. A full slot span has no free
+// slots. An empty slot span has no free slots, and a decommitted slot span is
+// an empty one that had its backing memory released back to the system.
 //
-// The significant page transitions are:
-// - free() will detect when a full page has a slot free()'d and immediately
-// return the page to the head of the active list.
-// - free() will detect when a page is fully emptied. It _may_ add it to the
-// empty list or it _may_ leave it on the active list until a future list scan.
-// - malloc() _may_ scan the active page list in order to fulfil the request.
-// If it does this, full, empty and decommitted pages encountered will be
-// booted out of the active list. If there are no suitable active pages found,
-// an empty or decommitted page (if one exists) will be pulled from the empty
-// list on to the active list.
+// There are two linked lists tracking slot spans. The "active" list is an
+// approximation of a list of active slot spans. It is an approximation because
+// full, empty and decommitted slot spans may briefly be present in the list
+// until we next do a scan over it. The "empty" list is an accurate list of slot
+// spans which are either empty or decommitted.
 //
-// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
-// similar. If so, all uses of the term "page" in comments, member variables,
-// local variables, and documentation that refer to this concept should be
-// updated.
+// The significant slot span transitions are:
+// - Free() will detect when a full slot span has a slot freed and immediately
+// return the slot span to the head of the active list.
+// - Free() will detect when a slot span is fully emptied. It _may_ add it to
+// the empty list or it _may_ leave it on the active list until a future
+// list scan.
+// - Alloc() _may_ scan the active page list in order to fulfil the request.
+// If it does this, full, empty and decommitted slot spans encountered will be
+// booted out of the active list. If there are no suitable active slot spans
+// found, an empty or decommitted slot spans (if one exists) will be pulled
+// from the empty list on to the active list.
 template <bool thread_safe>
-struct PartitionPage {
-  union {
-    struct {
-      PartitionFreelistEntry* freelist_head;
-      PartitionPage<thread_safe>* next_page;
-      PartitionBucket<thread_safe>* bucket;
-      // Deliberately signed, 0 for empty or decommitted page, -n for full
-      // pages:
-      int16_t num_allocated_slots;
-      uint16_t num_unprovisioned_slots;
-      uint16_t page_offset;
-      int16_t empty_cache_index;  // -1 if not in the empty cache.
-    };
-
-    // sizeof(PartitionPage) must always be:
-    // - a power of 2 (for fast modulo operations)
-    // - below kPageMetadataSize
-    //
-    // This makes sure that this is respected no matter the architecture.
-    char optional_padding[kPageMetadataSize];
-  };
+struct __attribute__((packed)) SlotSpanMetadata {
+  PartitionFreelistEntry* freelist_head;
+  SlotSpanMetadata<thread_safe>* next_slot_span;
+  PartitionBucket<thread_safe>* bucket;
+
+  // Deliberately signed, 0 for empty or decommitted slot spans, -n for full
+  // slot spans:
+  int16_t num_allocated_slots;
+  uint16_t num_unprovisioned_slots;
+  int16_t empty_cache_index;  // -1 if not in the empty cache.
+
   // Public API
   // Note the matching Alloc() functions are in PartitionPage.
   // Callers must invoke DeferredUnmap::Run() after releasing the lock.
@@ -116,12 +104,13 @@ struct PartitionPage {
   void Decommit(PartitionRoot<thread_safe>* root);
   void DecommitIfPossible(PartitionRoot<thread_safe>* root);

-  // Pointer manipulation functions. These must be static as the input |page|
-  // pointer may be the result of an offset calculation and therefore cannot
-  // be trusted. The objective of these functions is to sanitize this input.
-  ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
-  ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
-  ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
+  // Pointer manipulation functions. These must be static as the input
+  // |slot_span| pointer may be the result of an offset calculation and
+  // therefore cannot be trusted. The objective of these functions is to
+  // sanitize this input.
+  ALWAYS_INLINE static void* ToPointer(const SlotSpanMetadata* slot_span);
+  ALWAYS_INLINE static SlotSpanMetadata* FromPointer(void* ptr);
+  ALWAYS_INLINE static SlotSpanMetadata* FromPointerNoAlignmentCheck(void* ptr);

   // Checks if it is feasible to store raw_size.
   ALWAYS_INLINE bool CanStoreRawSize() const;
@@ -146,7 +135,7 @@ struct PartitionPage {
   ALWAYS_INLINE void Reset();

   // TODO(ajwong): Can this be made private? https://crbug.com/787153
-  BASE_EXPORT static PartitionPage* get_sentinel_page();
+  BASE_EXPORT static SlotSpanMetadata* get_sentinel_slot_span();

   // Page State accessors.
   // Note that it's only valid to call these functions on pages found on one of
@@ -163,17 +152,74 @@ struct PartitionPage {
   ALWAYS_INLINE bool is_decommitted() const;

  private:
-  // g_sentinel_page is used as a sentinel to indicate that there is no page
-  // in the active page list. We can use nullptr, but in that case we need
-  // to add a null-check branch to the hot allocation path. We want to avoid
-  // that.
+  // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
+  // span in the active list. We could use nullptr, but in that case we need to
+  // add a null-check branch to the hot allocation path. We want to avoid that.
   //
   // Note, this declaration is kept in the header as opposed to an anonymous
   // namespace so the getter can be fully inlined.
-  static PartitionPage sentinel_page_;
+  static SlotSpanMetadata sentinel_slot_span_;
 };

+// Metadata of a non-first partition page in a slot span.
+struct SubsequentPageMetadata {
+  // Raw size is the size needed to satisfy the allocation (requested size +
+  // extras). If available, it can be used to report better statistics or to
+  // bring protective cookie closer to the allocated memory.
+  //
+  // It can be used only if:
+  // - there is no more than one slot in the slot span (otherwise we wouldn't
+  //   know which slot the raw size applies to)
+  // - there is more than one partition page in the slot span (the metadata of
+  //   the first one is used to store slot information, but the second one is
+  //   available for extra information)
+  size_t raw_size;
+};
+
+// Each partition page has metadata associated with it. The metadata of the
+// first page of a slot span, describes that slot span. If a slot span spans
+// more than 1 page, the page metadata may contain rudimentary additional
+// information.
+template <bool thread_safe>
+struct PartitionPage {
+  // "Pack" the union so that slot_span_metadata_offset still fits within
+  // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
+  union __attribute__((packed)) {
+    SlotSpanMetadata<thread_safe> slot_span_metadata;
+
+    SubsequentPageMetadata subsequent_page_metadata;
+
+    // sizeof(PartitionPageMetadata) must always be:
+    // - a power of 2 (for fast modulo operations)
+    // - below kPageMetadataSize
+    //
+    // This makes sure that this is respected no matter the architecture.
+    char optional_padding[kPageMetadataSize - sizeof(uint16_t)];
+  };
+
+  // The first PartitionPage of the slot span holds its metadata. This offset
+  // tells how many pages in from that first page we are.
+  uint16_t slot_span_metadata_offset;
+
+  ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
+};
+
 static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
               "PartitionPage must be able to fit in a metadata slot");
+static_assert(sizeof(PartitionPage<NotThreadSafe>) == kPageMetadataSize,
+              "PartitionPage must be able to fit in a metadata slot");
+
+// Certain functions rely on PartitionPage being either SlotSpanMetadata or
+// SubsequentPageMetadata, and therefore freely casting between each other.
+static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
+static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
+                  0,
+              "");
+static_assert(offsetof(PartitionPage<NotThreadSafe>, slot_span_metadata) == 0,
+              "");
+static_assert(offsetof(PartitionPage<NotThreadSafe>,
+                       subsequent_page_metadata) == 0,
+              "");

 ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
   uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
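Illustrative aside on the new metadata layout: a compact standalone sketch of how a non-first page's entry points back at the slot span's entry. Types, sizes and names are simplified stand-ins, not the real kPageMetadataSize arithmetic or packing.

// Standalone sketch: one fixed-size metadata entry per partition page; only
// the first entry of a slot span carries the span metadata, the rest carry an
// offset back to it.
#include <cstddef>
#include <cstdint>

constexpr size_t kEntrySize = 32;  // stand-in for kPageMetadataSize

struct SpanMeta {        // stand-in for SlotSpanMetadata
  void* freelist_head;
  int16_t num_allocated_slots;
};
struct SubsequentMeta {  // stand-in for SubsequentPageMetadata
  size_t raw_size;
};

struct PageMeta {        // stand-in for PartitionPage
  union {
    SpanMeta span;
    SubsequentMeta subsequent;
    char padding[16];    // illustrative; the real code sizes this carefully
  };
  uint16_t span_meta_offset;  // 0 on the first page of the span
};
static_assert(sizeof(PageMeta) <= kEntrySize, "must fit one metadata entry");

// Given any page's metadata entry, walk back to the span's metadata.
inline SpanMeta* GetSpanMeta(PageMeta* any_page) {
  return &(any_page - any_page->span_meta_offset)->span;
}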
@@ -195,6 +241,14 @@ ALWAYS_INLINE bool IsWithinSuperPagePayload(bool with_pcscan, void* ptr) {
   return ptr_as_uint >= payload_start && ptr_as_uint < payload_end;
 }

+// See the comment for |FromPointer|.
+template <bool thread_safe>
+ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
+SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
+  return reinterpret_cast<SlotSpanMetadata*>(
+      PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr));
+}
+
 // See the comment for |FromPointer|.
 template <bool thread_safe>
 ALWAYS_INLINE PartitionPage<thread_safe>*
@@ -209,24 +263,22 @@ PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
   // pages.
   PA_DCHECK(partition_page_index);
   PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
-  auto* page = reinterpret_cast<PartitionPage*>(
+  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(
       PartitionSuperPageToMetadataArea(super_page_ptr) +
       (partition_page_index << kPageMetadataShift));
-  // Partition pages in the same slot span share the same page object. Adjust
+  // Partition pages in the same slot span share the same slot span metadata
+  // object (located in the first PartitionPage object of that span). Adjust
   // for that.
-  size_t delta = page->page_offset << kPageMetadataShift;
-  page =
-      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
+  page -= page->slot_span_metadata_offset;
   return page;
 }

-// Converts from a pointer to the PartitionPage object (within super pages's
-// metadata) into a pointer to the beginning of the partition page.
-// This doesn't have to be the first page in the slot span.
+// Converts from a pointer to the SlotSpanMetadata object (within super pages's
+// metadata) into a pointer to the beginning of the slot span.
 template <bool thread_safe>
-ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
-    const PartitionPage<thread_safe>* page) {
-  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToPointer(
+    const SlotSpanMetadata* slot_span) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);

   uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

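Illustrative aside on FromPointerNoAlignmentCheck(): a worked standalone sketch of the address arithmetic (mask off the super page base, derive the partition page index, index into the metadata area). All constants below are illustrative placeholders; the real values come from partition_alloc_constants.h.

// Standalone sketch of mapping an allocation address to its metadata entry.
#include <cstdint>

constexpr uintptr_t kSuperPageSize = uintptr_t{1} << 21;  // 2 MiB, for example
constexpr uintptr_t kSuperPageBaseMask = ~(kSuperPageSize - 1);
constexpr uintptr_t kPartitionPageShift = 14;             // 16 KiB pages
constexpr uintptr_t kSystemPageSize = uintptr_t{1} << 12; // metadata lives here
constexpr uintptr_t kPageMetadataShift = 5;               // 32-byte entries

inline char* MetadataEntryForPointer(void* ptr) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t super_page_base = addr & kSuperPageBaseMask;
  uintptr_t page_index = (addr & (kSuperPageSize - 1)) >> kPartitionPageShift;
  // The metadata area sits one system page into the super page, after the
  // guard page; entry i describes partition page i.
  char* metadata_area =
      reinterpret_cast<char*>(super_page_base + kSystemPageSize);
  return metadata_area + (page_index << kPageMetadataShift);
}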
@@ -250,27 +302,23 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
   return ret;
 }

-// Converts from a pointer inside a partition page into a pointer to the
-// PartitionPage object (within super pages's metadata).
-// The first PartitionPage of the slot span will be returned, regardless where
-// inside of the slot span |ptr| points to.
+// Converts from a pointer inside a slot span into a pointer to the
+// SlotSpanMetadata object (within super pages's metadata).
 template <bool thread_safe>
-ALWAYS_INLINE PartitionPage<thread_safe>*
-PartitionPage<thread_safe>::FromPointer(void* ptr) {
-  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
-  // Checks that the pointer is a multiple of bucket size.
-  PA_DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
-               reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
-              page->bucket->slot_size));
-  return page;
+ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
+SlotSpanMetadata<thread_safe>::FromPointer(void* ptr) {
+  SlotSpanMetadata* slot_span =
+      SlotSpanMetadata::FromPointerNoAlignmentCheck(ptr);
+  // Checks that the pointer is a multiple of slot size.
+  PA_DCHECK(
+      !((reinterpret_cast<uintptr_t>(ptr) -
+         reinterpret_cast<uintptr_t>(SlotSpanMetadata::ToPointer(slot_span))) %
+        slot_span->bucket->slot_size));
+  return slot_span;
 }

 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionPage<thread_safe>::CanStoreRawSize() const {
-  // Raw size is the size needed to satisfy the allocation (requested size +
-  // extras). If available, it can be used to report better statistics or to
-  // bring protective cookie closer to the allocated memory.
-  //
+ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::CanStoreRawSize() const {
   // For direct-map as well as single-slot slot spans (recognized by checking
   // against |kMaxPartitionPagesPerSlotSpan|), we have some spare metadata space
   // in subsequent PartitionPage to store the raw size. It isn't only metadata
@@ -287,24 +335,24 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::CanStoreRawSize() const {
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE void PartitionPage<thread_safe>::SetRawSize(size_t raw_size) {
+ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(size_t raw_size) {
   PA_DCHECK(CanStoreRawSize());
-  PartitionPage* the_next_page = this + 1;
-  the_next_page->freelist_head =
-      reinterpret_cast<PartitionFreelistEntry*>(raw_size);
+  auto* the_next_page = reinterpret_cast<PartitionPage<thread_safe>*>(this) + 1;
+  the_next_page->subsequent_page_metadata.raw_size = raw_size;
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE size_t PartitionPage<thread_safe>::GetRawSize() const {
+ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
   PA_DCHECK(CanStoreRawSize());
-  const PartitionPage* the_next_page = this + 1;
-  return reinterpret_cast<size_t>(the_next_page->freelist_head);
+  auto* the_next_page =
+      reinterpret_cast<const PartitionPage<thread_safe>*>(this) + 1;
+  return the_next_page->subsequent_page_metadata.raw_size;
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
+ALWAYS_INLINE DeferredUnmap SlotSpanMetadata<thread_safe>::Free(void* ptr) {
 #if DCHECK_IS_ON()
-  auto* root = PartitionRoot<thread_safe>::FromPage(this);
+  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
   root->lock_.AssertAcquired();
 #endif
 
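Before this change the raw size was smuggled through the next page's freelist_head pointer; the new code gives it a named field in the subsequent PartitionPage's metadata. Below is a simplified standalone model of that layout; the struct names and fields are assumptions for illustration (the real code overlays slot span and subsequent-page metadata, which a plain struct avoids to keep the sketch well-defined).

#include <cstddef>
#include <cstdio>

struct SubsequentPageMetadata {
  size_t raw_size = 0;  // requested size + extras, when it fits here
};

struct PageMetadata {
  void* freelist_head = nullptr;
  SubsequentPageMetadata subsequent_page_metadata;
};

void SetRawSize(PageMetadata* first_page_of_span, size_t raw_size) {
  // The raw size lives in the metadata entry *after* the slot span's first
  // page, so it no longer has to be smuggled through freelist_head.
  (first_page_of_span + 1)->subsequent_page_metadata.raw_size = raw_size;
}

size_t GetRawSize(const PageMetadata* first_page_of_span) {
  return (first_page_of_span + 1)->subsequent_page_metadata.raw_size;
}

int main() {
  PageMetadata metadata[2] = {};  // first page of the span + the next page
  SetRawSize(&metadata[0], 4096 + 16);
  std::printf("raw size: %zu\n", GetRawSize(&metadata[0]));
}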
@@ -329,17 +377,15 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const {
-  PA_DCHECK(this != get_sentinel_page());
-  PA_DCHECK(!page_offset);
+ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
   return (num_allocated_slots > 0 &&
           (freelist_head || num_unprovisioned_slots));
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
-  PA_DCHECK(this != get_sentinel_page());
-  PA_DCHECK(!page_offset);
+ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
   bool ret = (num_allocated_slots == bucket->get_slots_per_span());
   if (ret) {
     PA_DCHECK(!freelist_head);
@@ -349,16 +395,14 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const {
-  PA_DCHECK(this != get_sentinel_page());
-  PA_DCHECK(!page_offset);
+ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
   return (!num_allocated_slots && freelist_head);
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
-  PA_DCHECK(this != get_sentinel_page());
-  PA_DCHECK(!page_offset);
+ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
   bool ret = (!num_allocated_slots && !freelist_head);
   if (ret) {
     PA_DCHECK(!num_unprovisioned_slots);
@@ -368,13 +412,13 @@ ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
 }
 
 template <bool thread_safe>
-ALWAYS_INLINE void PartitionPage<thread_safe>::Reset() {
+ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
   PA_DCHECK(is_decommitted());
 
   num_unprovisioned_slots = bucket->get_slots_per_span();
   PA_DCHECK(num_unprovisioned_slots);
 
-  next_page = nullptr;
+  next_slot_span = nullptr;
 }
 
 ALWAYS_INLINE void DeferredUnmap::Run() {
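The four predicates above classify a slot span purely from its counters and freelist. A compact standalone restatement of that state machine follows; it is a sketch with assumed field types, not the real class.

#include <cstdio>

struct SlotSpan {
  int num_allocated_slots = 0;
  int num_unprovisioned_slots = 0;
  bool has_freelist = false;  // stands in for freelist_head != nullptr
  int slots_per_span = 0;
};

const char* Classify(const SlotSpan& s) {
  if (s.num_allocated_slots == s.slots_per_span) return "full";
  if (s.num_allocated_slots > 0 &&
      (s.has_freelist || s.num_unprovisioned_slots))
    return "active";
  if (s.num_allocated_slots == 0 && s.has_freelist) return "empty";
  if (s.num_allocated_slots == 0 && !s.has_freelist) return "decommitted";
  return "unknown";
}

int main() {
  SlotSpan s{/*allocated=*/3, /*unprovisioned=*/1, /*freelist=*/true,
             /*slots_per_span=*/8};
  std::printf("%s\n", Classify(s));  // prints "active"
}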
@@ -17,11 +17,12 @@ namespace internal {
 // TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
 // is no longer used.
 void PartitionRefCount::Free() {
-  auto* page = PartitionPage<ThreadSafe>::FromPointerNoAlignmentCheck(this);
-  auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
+  auto* slot_span =
+      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(this);
+  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
 
 #ifdef ADDRESS_SANITIZER
-  size_t utilized_slot_size = page->GetUtilizedSlotSize();
+  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
   // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
   // supports extras.
   PA_DCHECK(root->allow_extras);
@@ -31,15 +32,15 @@ void PartitionRefCount::Free() {
 #endif
 
   if (root->is_thread_safe) {
-    root->RawFree(this, page);
+    root->RawFree(this, slot_span);
     return;
   }
 
-  auto* non_thread_safe_page =
-      reinterpret_cast<PartitionPage<NotThreadSafe>*>(page);
+  auto* non_thread_safe_slot_span =
+      reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
   auto* non_thread_safe_root =
       reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
-  non_thread_safe_root->RawFree(this, non_thread_safe_page);
+  non_thread_safe_root->RawFree(this, non_thread_safe_slot_span);
 }
 
 #endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
@@ -52,7 +52,7 @@ struct PartitionOptions {
 // PartitionAllocator.
 template <bool thread_safe>
 struct BASE_EXPORT PartitionRoot {
-  using Page = internal::PartitionPage<thread_safe>;
+  using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
   using Bucket = internal::PartitionBucket<thread_safe>;
   using SuperPageExtentEntry =
       internal::PartitionSuperPageExtentEntry<thread_safe>;
@@ -92,8 +92,8 @@ struct BASE_EXPORT PartitionRoot {
   SuperPageExtentEntry* current_extent = nullptr;
   SuperPageExtentEntry* first_extent = nullptr;
   DirectMapExtent* direct_map_list = nullptr;
-  Page* global_empty_page_ring[kMaxFreeableSpans] = {};
-  int16_t global_empty_page_ring_index = 0;
+  SlotSpan* global_empty_slot_span_ring[kMaxFreeableSpans] = {};
+  int16_t global_empty_slot_span_ring_index = 0;
 
   // Integrity check = ~reinterpret_cast<uintptr_t>(this).
   uintptr_t inverted_self = 0;
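global_empty_slot_span_ring is a fixed-size ring of recently emptied slot spans whose memory has not yet been released; DecommitEmptySlotSpans() (declared further down) presumably walks it on purge. Below is a rough standalone sketch of that bookkeeping; the ring capacity, the eviction-decommit behavior and all names are assumptions for illustration, not the real implementation.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kMaxFreeableSpans = 16;  // assumed ring capacity

struct SlotSpan {
  bool committed = true;
  void Decommit() { committed = false; }
};

struct EmptySpanRing {
  std::array<SlotSpan*, kMaxFreeableSpans> ring{};
  int16_t index = 0;

  // Called when a span becomes empty: remember it, and if an older entry gets
  // overwritten, release its memory now (assumed policy).
  void Register(SlotSpan* span) {
    if (SlotSpan* evicted = ring[index])
      evicted->Decommit();
    ring[index] = span;
    index = static_cast<int16_t>((index + 1) % kMaxFreeableSpans);
  }

  // Rough equivalent of PurgeMemory(PartitionPurgeDecommitEmptySlotSpans).
  void DecommitAll() {
    for (SlotSpan*& span : ring) {
      if (span)
        span->Decommit();
      span = nullptr;
    }
  }
};

int main() {
  EmptySpanRing ring;
  SlotSpan a, b;
  ring.Register(&a);
  ring.Register(&b);
  ring.DecommitAll();
  std::printf("a committed: %d, b committed: %d\n", a.committed, b.committed);
}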
@@ -119,16 +119,16 @@ struct BASE_EXPORT PartitionRoot {
   //
   // Allocates out of the given bucket. Properly, this function should probably
   // be in PartitionBucket, but because the implementation needs to be inlined
-  // for performance, and because it needs to inspect PartitionPage,
+  // for performance, and because it needs to inspect SlotSpanMetadata,
   // it becomes impossible to have it in PartitionBucket as this causes a
-  // cyclical dependency on PartitionPage function implementations.
+  // cyclical dependency on SlotSpanMetadata function implementations.
   //
   // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
   // preserves the layering of the includes.
   void Init(PartitionOptions);
 
-  ALWAYS_INLINE static bool IsValidPage(Page* page);
-  ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
+  ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
+  ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
 
   ALWAYS_INLINE void IncreaseCommittedPages(size_t len)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -181,14 +181,14 @@ struct BASE_EXPORT PartitionRoot {
   // Same as |Free()|, bypasses the allocator hooks.
   ALWAYS_INLINE static void FreeNoHooks(void* ptr);
   // Immediately frees the pointer bypassing the quarantine.
-  ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, Page* page);
+  ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, SlotSpan* slot_span);
 
   ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
   ALWAYS_INLINE size_t GetSize(void* ptr) const;
   ALWAYS_INLINE size_t ActualSize(size_t size);
 
-  // Frees memory from this partition, if possible, by decommitting pages.
-  // |flags| is an OR of base::PartitionPurgeFlags.
+  // Frees memory from this partition, if possible, by decommitting pages or
+  // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
   void PurgeMemory(int flags);
 
   void DumpStats(const char* partition_name,
@@ -198,7 +198,7 @@ struct BASE_EXPORT PartitionRoot {
   static uint16_t SizeToBucketIndex(size_t size);
 
   // Frees memory, with |ptr| as returned by |RawAlloc()|.
-  ALWAYS_INLINE void RawFree(void* ptr, Page* page);
+  ALWAYS_INLINE void RawFree(void* ptr, SlotSpan* slot_span);
   static void RawFreeStatic(void* ptr);
 
   internal::ThreadCache* thread_cache_for_testing() const {
@@ -241,10 +241,10 @@ struct BASE_EXPORT PartitionRoot {
                                 bool* is_already_zeroed)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
-                                  size_t requested_size)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  bool ReallocDirectMappedInPlace(
+      internal::SlotSpanMetadata<thread_safe>* slot_span,
+      size_t requested_size) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void DecommitEmptySlotSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   friend class internal::ThreadCache;
 };
@@ -91,7 +91,7 @@ class PCScan<thread_safe>::PCScanTask final {
   void RunOnce() &&;
 
  private:
-  using Page = PartitionPage<thread_safe>;
+  using SlotSpan = SlotSpanMetadata<thread_safe>;
 
   struct ScanArea {
     uintptr_t* begin = nullptr;
@@ -134,7 +134,7 @@ template <bool thread_safe>
 QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
     uintptr_t maybe_ptr) const {
   // TODO(bikineev): Consider using the bitset in AddressPoolManager::Pool to
-  // quickly find a super-page.
+  // quickly find a super page.
   const auto super_page_base = maybe_ptr & kSuperPageBaseMask;
 
   auto it = super_pages_.lower_bound(super_page_base);
@@ -145,7 +145,7 @@ QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
           reinterpret_cast<void*>(maybe_ptr)))
     return nullptr;
 
-  // We are certain here that |maybe_ptr| points to the superpage payload.
+  // We are certain here that |maybe_ptr| points to the super page payload.
   return QuarantineBitmapFromPointer(QuarantineBitmapType::kScanner,
                                      pcscan_.quarantine_data_.epoch(),
                                      reinterpret_cast<char*>(maybe_ptr));
@@ -176,12 +176,13 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
 
   PA_DCHECK((maybe_ptr & kSuperPageBaseMask) == (base & kSuperPageBaseMask));
 
-  auto target_page =
-      Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
-  PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));
+  auto target_slot_span =
+      SlotSpan::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
+  PA_DCHECK(&root_ ==
+            PartitionRoot<thread_safe>::FromSlotSpan(target_slot_span));
 
   const size_t usable_size = PartitionSizeAdjustSubtract(
-      root_.allow_extras, target_page->GetUtilizedSlotSize());
+      root_.allow_extras, target_slot_span->GetUtilizedSlotSize());
   // Range check for inner pointers.
   if (maybe_ptr >= base + usable_size)
     return 0;
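TryMarkObjectInNormalBucketPool only treats a candidate as a reference if it lands inside the usable part of a quarantined slot; anything at or past base + usable_size is rejected. Below is a toy version of that range check and marking step; it is self-contained, with assumed sizes and a plain bitset standing in for the quarantine bitmap, not the PCScan data structures.

#include <bitset>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kSlotSize = 64;    // assumed slot size for the bucket
constexpr uintptr_t kUsableSize = 48;  // slot size minus extras (assumed)
constexpr size_t kSlots = 1024;

std::bitset<kSlots> marked;            // stands in for the quarantine bitmap

// Returns the number of bytes newly marked, or 0 if |maybe_ptr| is not a
// pointer into the usable part of any slot.
size_t TryMark(uintptr_t region_start, uintptr_t maybe_ptr) {
  if (maybe_ptr < region_start || maybe_ptr >= region_start + kSlots * kSlotSize)
    return 0;
  uintptr_t offset = maybe_ptr - region_start;
  uintptr_t base = region_start + (offset / kSlotSize) * kSlotSize;
  // Range check for inner pointers: past the usable bytes means "not a ref".
  if (maybe_ptr >= base + kUsableSize)
    return 0;
  marked.set((base - region_start) / kSlotSize);
  return kSlotSize;
}

int main() {
  uintptr_t region = 0x100000;
  std::printf("inner pointer: %zu bytes marked\n", TryMark(region, region + 70));
  std::printf("padding pointer: %zu bytes marked\n", TryMark(region, region + 50));
}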
@@ -193,7 +194,7 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
                               pcscan_.quarantine_data_.epoch(),
                               reinterpret_cast<char*>(base))
       ->SetBit(base);
-  return target_page->bucket->slot_size;
+  return target_slot_span->bucket->slot_size;
 }
 
 template <bool thread_safe>
@@ -205,12 +206,12 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
         reinterpret_cast<char*>(super_page));
     bitmap->Iterate([allow_extras](uintptr_t ptr) {
       auto* object = reinterpret_cast<void*>(ptr);
-      auto* page = Page::FromPointerNoAlignmentCheck(object);
+      auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
       // Use zero as a zapping value to speed up the fast bailout check in
       // ScanPartition.
       memset(object, 0,
              PartitionSizeAdjustSubtract(allow_extras,
-                                         page->GetUtilizedSlotSize()));
+                                         slot_span->GetUtilizedSlotSize()));
     });
   }
 }
@@ -234,7 +235,7 @@ size_t PCScan<thread_safe>::PCScanTask::ScanPartition() NO_SANITIZE("thread") {
   // implemented.
 #if defined(PA_HAS_64_BITS_POINTERS)
       // On partitions without extras (partitions with aligned allocations),
-      // pages are not allocated from the GigaCage.
+      // memory is not allocated from the GigaCage.
       if (features::IsPartitionAllocGigaCageEnabled() && root_.allow_extras) {
         // With GigaCage, we first do a fast bitmask check to see if the pointer
         // points to the normal bucket pool.
@@ -267,9 +268,9 @@ size_t PCScan<thread_safe>::PCScanTask::SweepQuarantine() {
         reinterpret_cast<char*>(super_page));
     bitmap->Iterate([this, &swept_bytes](uintptr_t ptr) {
       auto* object = reinterpret_cast<void*>(ptr);
-      auto* page = Page::FromPointerNoAlignmentCheck(object);
-      swept_bytes += page->bucket->slot_size;
-      root_.FreeNoHooksImmediate(object, page);
+      auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
+      swept_bytes += slot_span->bucket->slot_size;
+      root_.FreeNoHooksImmediate(object, slot_span);
     });
     bitmap->Clear();
   }
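ClearQuarantinedObjects and SweepQuarantine both walk the quarantine bitmap: the first zaps quarantined objects with zeroes so the scan can bail out quickly on them, the second frees whatever was never re-marked and counts the reclaimed bytes. Below is a compact sketch of that iterate-then-act pattern, collapsing the two passes into one loop for brevity; the bitmap and slot sizes are simplified stand-ins under assumed names, not the PCScan classes.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <functional>
#include <vector>

struct QuarantineBitmap {
  std::vector<uintptr_t> quarantined;  // stand-in: explicit list of offsets
  void Iterate(const std::function<void(uintptr_t)>& callback) const {
    for (uintptr_t ptr : quarantined)
      callback(ptr);
  }
  void Clear() { quarantined.clear(); }
};

constexpr size_t kSlotSize = 32;  // assumed; the real value comes from the bucket

size_t Sweep(QuarantineBitmap& bitmap, std::vector<char>& heap) {
  size_t swept_bytes = 0;
  bitmap.Iterate([&](uintptr_t offset) {
    // "Clear" phase equivalent: zap the object so stale reads see zeroes.
    std::memset(heap.data() + offset, 0, kSlotSize);
    // "Sweep" phase equivalent: account the slot as reclaimed.
    swept_bytes += kSlotSize;
  });
  bitmap.Clear();
  return swept_bytes;
}

int main() {
  std::vector<char> heap(4 * kSlotSize, 0x7f);
  QuarantineBitmap bitmap;
  bitmap.quarantined = {0, 2 * kSlotSize};
  std::printf("swept %zu bytes\n", Sweep(bitmap, heap));
}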
@@ -293,25 +294,27 @@ PCScan<thread_safe>::PCScanTask::PCScanTask(PCScan& pcscan, Root& root)
     }
   }
 
-  // Take a snapshot of all active pages.
+  // Take a snapshot of all active slot spans.
   static constexpr size_t kScanAreasReservationSlack = 10;
   const size_t kScanAreasReservationSize = root_.total_size_of_committed_pages /
                                            PartitionPageSize() /
                                            kScanAreasReservationSlack;
   scan_areas_.reserve(kScanAreasReservationSize);
   {
-    // TODO(bikineev): Scan full pages.
+    // TODO(bikineev): Scan full slot spans.
     for (const auto& bucket : root_.buckets) {
-      for (auto* page = bucket.active_pages_head;
-           page && page != page->get_sentinel_page(); page = page->next_page) {
+      for (auto* slot_span = bucket.active_slot_spans_head;
+           slot_span && slot_span != slot_span->get_sentinel_slot_span();
+           slot_span = slot_span->next_slot_span) {
         // The active list may contain false positives, skip them.
-        if (page->is_empty() || page->is_decommitted())
+        if (slot_span->is_empty() || slot_span->is_decommitted())
           continue;
 
-        auto* payload_begin = static_cast<uintptr_t*>(Page::ToPointer(page));
+        auto* payload_begin =
+            static_cast<uintptr_t*>(SlotSpan::ToPointer(slot_span));
         auto* payload_end =
             payload_begin +
-            (page->bucket->get_bytes_per_span() / sizeof(uintptr_t));
+            (slot_span->bucket->get_bytes_per_span() / sizeof(uintptr_t));
         scan_areas_.push_back({payload_begin, payload_end});
       }
     }
@@ -36,7 +36,7 @@ template <bool thread_safe>
 class BASE_EXPORT PCScan final {
  public:
   using Root = PartitionRoot<thread_safe>;
-  using Page = PartitionPage<thread_safe>;
+  using SlotSpan = SlotSpanMetadata<thread_safe>;
 
   explicit PCScan(Root* root) : root_(root) {}
 
@@ -45,7 +45,7 @@ class BASE_EXPORT PCScan final {
 
   ~PCScan();
 
-  ALWAYS_INLINE void MoveToQuarantine(void* ptr, Page* page);
+  ALWAYS_INLINE void MoveToQuarantine(void* ptr, SlotSpan* slot_span);
 
  private:
   class PCScanTask;
@@ -113,15 +113,15 @@ void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded() {
 
 template <bool thread_safe>
 ALWAYS_INLINE void PCScan<thread_safe>::MoveToQuarantine(void* ptr,
-                                                         Page* page) {
-  PA_DCHECK(!page->bucket->is_direct_mapped());
+                                                         SlotSpan* slot_span) {
+  PA_DCHECK(!slot_span->bucket->is_direct_mapped());
 
   QuarantineBitmapFromPointer(QuarantineBitmapType::kMutator,
                               quarantine_data_.epoch(), ptr)
       ->SetBit(reinterpret_cast<uintptr_t>(ptr));
 
   const bool is_limit_reached =
-      quarantine_data_.Account(page->bucket->slot_size);
+      quarantine_data_.Account(slot_span->bucket->slot_size);
   if (is_limit_reached) {
     // Post a background task to not block the current thread.
     ScheduleTask(TaskType::kNonBlocking);
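MoveToQuarantine sets the mutator bitmap bit for the freed slot and adds the slot size to the quarantine's byte budget; once the budget crosses its limit, a scan task is scheduled instead of blocking the caller. Below is a minimal sketch of that accounting; the starting budget and reset policy are assumptions standing in for QuarantineData, not the real implementation.

#include <cstddef>
#include <cstdio>

struct QuarantineData {
  size_t current_bytes = 0;
  size_t limit_bytes = 1024;  // assumed starting budget

  // Returns true when the accumulated quarantine size crosses the limit.
  bool Account(size_t bytes) {
    current_bytes += bytes;
    return current_bytes >= limit_bytes;
  }

  void ResetAfterScan(size_t surviving_bytes) { current_bytes = surviving_bytes; }
};

void MoveToQuarantine(QuarantineData& quarantine, size_t slot_size) {
  if (quarantine.Account(slot_size)) {
    // The real code posts a non-blocking PCScan task here.
    std::puts("limit reached: schedule a PCScan task");
    quarantine.ResetAfterScan(0);  // pretend the scan emptied the quarantine
  }
}

int main() {
  QuarantineData quarantine;
  for (int i = 0; i < 20; ++i)
    MoveToQuarantine(quarantine, 64);  // triggers once 1024 bytes accumulate
}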
@@ -21,7 +21,7 @@ class PCScanTest : public testing::Test {
                                        PartitionOptions::PCScan::kEnabled});
   }
   ~PCScanTest() override {
-    allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
+    allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
                                    PartitionPurgeDiscardUnusedSystemPages);
     PartitionAllocGlobalUninitForTesting();
   }
@@ -47,17 +47,17 @@ class PCScanTest : public testing::Test {
 
 namespace {
 
-using Page = ThreadSafePartitionRoot::Page;
+using SlotSpan = ThreadSafePartitionRoot::SlotSpan;
 
-struct FullPageAllocation {
-  Page* page;
+struct FullSlotSpanAllocation {
+  SlotSpan* slot_span;
   void* first;
   void* last;
 };
 
 // Assumes heap is purged.
-FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
-                               size_t object_size) {
+FullSlotSpanAllocation GetFullSlotSpan(ThreadSafePartitionRoot& root,
+                                       size_t object_size) {
   CHECK_EQ(0u, root.total_size_of_committed_pages_for_testing());
 
   const size_t size_with_extra = PartitionSizeAdjustAdd(true, object_size);
@@ -76,24 +76,24 @@ FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
     last = PartitionPointerAdjustSubtract(true, ptr);
   }
 
-  EXPECT_EQ(ThreadSafePartitionRoot::Page::FromPointer(first),
-            ThreadSafePartitionRoot::Page::FromPointer(last));
+  EXPECT_EQ(SlotSpan::FromPointer(first), SlotSpan::FromPointer(last));
   if (bucket.num_system_pages_per_slot_span == NumSystemPagesPerPartitionPage())
     EXPECT_EQ(reinterpret_cast<size_t>(first) & PartitionPageBaseMask(),
               reinterpret_cast<size_t>(last) & PartitionPageBaseMask());
-  EXPECT_EQ(num_slots,
-            static_cast<size_t>(bucket.active_pages_head->num_allocated_slots));
-  EXPECT_EQ(nullptr, bucket.active_pages_head->freelist_head);
-  EXPECT_TRUE(bucket.active_pages_head);
-  EXPECT_TRUE(bucket.active_pages_head != Page::get_sentinel_page());
+  EXPECT_EQ(num_slots, static_cast<size_t>(
+                           bucket.active_slot_spans_head->num_allocated_slots));
+  EXPECT_EQ(nullptr, bucket.active_slot_spans_head->freelist_head);
+  EXPECT_TRUE(bucket.active_slot_spans_head);
+  EXPECT_TRUE(bucket.active_slot_spans_head !=
+              SlotSpan::get_sentinel_slot_span());
 
-  return {bucket.active_pages_head, PartitionPointerAdjustAdd(true, first),
+  return {bucket.active_slot_spans_head, PartitionPointerAdjustAdd(true, first),
           PartitionPointerAdjustAdd(true, last)};
 }
 
 bool IsInFreeList(void* object) {
-  auto* page = Page::FromPointerNoAlignmentCheck(object);
-  for (auto* entry = page->freelist_head; entry;
+  auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
+  for (auto* entry = slot_span->freelist_head; entry;
       entry = EncodedPartitionFreelistEntry::Decode(entry->next)) {
     if (entry == object)
       return true;
@@ -138,21 +138,23 @@ TEST_F(PCScanTest, ArbitraryObjectInQuarantine) {
 TEST_F(PCScanTest, FirstObjectInQuarantine) {
   static constexpr size_t kAllocationSize = 16;
 
-  FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_page.first));
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.first));
 
-  root().FreeNoHooks(full_page.first);
-  EXPECT_TRUE(IsInQuarantine(full_page.first));
+  root().FreeNoHooks(full_slot_span.first);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
 }
 
 TEST_F(PCScanTest, LastObjectInQuarantine) {
   static constexpr size_t kAllocationSize = 16;
 
-  FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_page.last));
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
 
-  root().FreeNoHooks(full_page.last);
-  EXPECT_TRUE(IsInQuarantine(full_page.last));
+  root().FreeNoHooks(full_slot_span.last);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
 }
 
 namespace {
@@ -217,23 +219,22 @@ TEST_F(PCScanTest, DanglingReferenceSameSlotSpanButDifferentPages) {
   static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
       static_cast<size_t>(PartitionPageSize() * 0.75);
 
-  FullPageAllocation full_page = GetFullPage(
+  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
       root(),
       PartitionSizeAdjustSubtract(
           true, kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
 
   // Assert that the first and the last objects are in the same slot span but on
   // different partition pages.
-  ASSERT_EQ(ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
-                full_page.first),
-            ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
-                full_page.last));
-  ASSERT_NE(reinterpret_cast<size_t>(full_page.first) & PartitionPageBaseMask(),
-            reinterpret_cast<size_t>(full_page.last) & PartitionPageBaseMask());
+  ASSERT_EQ(SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.first),
+            SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.last));
+  ASSERT_NE(
+      reinterpret_cast<size_t>(full_slot_span.first) & PartitionPageBaseMask(),
+      reinterpret_cast<size_t>(full_slot_span.last) & PartitionPageBaseMask());
 
   // Create two objects, on different partition pages.
-  auto* value = new (full_page.first) ValueList;
-  auto* source = new (full_page.last) SourceList;
+  auto* value = new (full_slot_span.first) ValueList;
+  auto* source = new (full_slot_span.last) SourceList;
   source->next = value;
 
   TestDanglingReference(*this, source, value);
third_party/blink/renderer/platform/instrumentation/partition_alloc_memory_dump_provider.cc
@@ -134,16 +134,17 @@ void PartitionStatsDumperImpl::PartitionsDumpBucketStats(
                             memory_stats->decommittable_bytes);
   allocator_dump->AddScalar("discardable_size", "bytes",
                             memory_stats->discardable_bytes);
+  // TODO(bartekn): Rename the scalar names.
   allocator_dump->AddScalar("total_pages_size", "bytes",
-                            memory_stats->allocated_page_size);
+                            memory_stats->allocated_slot_span_size);
   allocator_dump->AddScalar("active_pages", "objects",
-                            memory_stats->num_active_pages);
+                            memory_stats->num_active_slot_spans);
   allocator_dump->AddScalar("full_pages", "objects",
-                            memory_stats->num_full_pages);
+                            memory_stats->num_full_slot_spans);
   allocator_dump->AddScalar("empty_pages", "objects",
-                            memory_stats->num_empty_pages);
+                            memory_stats->num_empty_slot_spans);
   allocator_dump->AddScalar("decommitted_pages", "objects",
-                            memory_stats->num_decommitted_pages);
+                            memory_stats->num_decommitted_slot_spans);
 }
 
 }  // namespace