0

[PA] Obliterate secondary aligned allocator support

This was there in case the primary allocator can't support aligned
allocations. The only reason we kept this option open was because of
BRP "before allocation" mode, which is now gone (as of
crrev.com/c/5117767). And we aren't planning to go back that route.

This is a no-op.

Change-Id: Iea8cccfa94e4fd93ba8c2da3c2194d7e14351cd4
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5119422
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Siddhartha S <ssid@chromium.org>
Reviewed-by: Benoit Lize <lizeb@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1237899}
This commit is contained in:
Bartek Nowierski
2023-12-15 03:18:26 +00:00
committed by Chromium LUCI CQ
parent d74737a7b4
commit 37ba7ea764
16 changed files with 16 additions and 132 deletions

@ -1232,8 +1232,6 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableLargeEmptySlotSpanRing();
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator()
->EnableLargeEmptySlotSpanRing();
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

@ -40,14 +40,8 @@ void EnablePartitionAllocThreadCacheForRootIfDisabled(PartitionRoot* root) {
void DisablePartitionAllocThreadCacheForProcess() {
PA_CHECK(allocator_shim::internal::PartitionAllocMalloc::
AllocatorConfigurationFinalized());
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator) {
DisableThreadCacheForRootIfEnabled(aligned_allocator);
}
DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::Allocator());
DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
}

@ -28,7 +28,6 @@ TEST(HardeningTest, PartialCorruption) {
char* to_corrupt = const_cast<char*>(important_data.c_str());
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
@ -53,7 +52,6 @@ TEST(HardeningTest, OffHeapPointerCrashing) {
char* to_corrupt = const_cast<char*>(important_data.c_str());
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
@ -74,7 +72,6 @@ TEST(HardeningTest, OffHeapPointerCrashing) {
TEST(HardeningTest, MetadataPointerCrashing) {
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
@ -101,7 +98,6 @@ TEST(HardeningTest, MetadataPointerCrashing) {
TEST(HardeningTest, SuccessfulCorruption) {
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();

@ -379,7 +379,6 @@ class PartitionAllocTest
}
PartitionOptions pkey_opts = GetCommonPartitionOptions();
pkey_opts.aligned_alloc = PartitionOptions::kAllowed;
pkey_opts.thread_isolation = ThreadIsolationOption(pkey_);
// We always want to have a pkey allocator initialized to make sure that the
// other pools still work. As part of the initialization, we tag some memory
@ -400,7 +399,6 @@ class PartitionAllocTest
#endif // BUILDFLAG(ENABLE_PKEYS)
PartitionOptions opts = GetCommonPartitionOptions();
opts.aligned_alloc = PartitionOptions::kAllowed;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
opts.backup_ref_ptr = enable_backup_ref_ptr;
#endif
@ -421,14 +419,6 @@ class PartitionAllocTest
PartitionTestOptions{.use_memory_reclaimer = true,
.uncap_empty_slot_span_memory = true,
.set_bucket_distribution = true});
PartitionOptions aligned_opts = GetCommonPartitionOptions();
aligned_opts.aligned_alloc = PartitionOptions::kAllowed;
InitializeTestRoot(
aligned_allocator.root(), aligned_opts,
PartitionTestOptions{.use_memory_reclaimer = true,
.uncap_empty_slot_span_memory = true,
.set_bucket_distribution = true});
}
size_t RealAllocSize() const {
@ -635,9 +625,6 @@ class PartitionAllocTest
bool UseBRPPool() const { return allocator.root()->brp_enabled(); }
partition_alloc::PartitionAllocatorForTesting allocator;
// TODO(bartekn): Remove. We no longer have partitions that don't support
// aligned alloc, so no need for a special aligned allocator.
partition_alloc::PartitionAllocatorForTesting aligned_allocator;
#if BUILDFLAG(ENABLE_PKEYS)
partition_alloc::PartitionAllocatorForTesting pkey_allocator;
#endif
@ -3852,7 +3839,6 @@ TEST_P(PartitionAllocTest, AlignedAllocations) {
for (size_t alloc_size : alloc_sizes) {
for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
alignment <<= 1) {
VerifyAlignment(aligned_allocator.root(), alloc_size, alignment);
VerifyAlignment(allocator.root(), alloc_size, alignment);
}
}

@ -398,6 +398,7 @@ static_assert(kAlignment % alignof(PartitionRefCount) == 0,
// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
// TODO(bartekn): Remove, as we no longer support non-zero offsets.
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;

@ -972,8 +972,6 @@ void PartitionRoot::Init(PartitionOptions opts) {
ReserveBackupRefPtrGuardRegionIfNeeded();
#endif
settings.allow_aligned_alloc =
opts.aligned_alloc == PartitionOptions::kAllowed;
#if BUILDFLAG(PA_DCHECK_IS_ON)
settings.use_cookie = true;
#else
@ -1064,10 +1062,9 @@ void PartitionRoot::Init(PartitionOptions opts) {
}
#endif // PA_CONFIG(EXTRAS_REQUIRED)
// Re-confirm the above PA_CHECKs, by making sure there are no
// pre-allocation extras when AlignedAlloc is allowed. Post-allocation
// extras are ok.
PA_CHECK(!settings.allow_aligned_alloc || !settings.extras_offset);
// Make sure there are no pre-allocation extras as they'd interfere with
// AlignedAlloc. Post-allocation extras are ok.
PA_CHECK(!settings.extras_offset);
settings.quarantine_mode =
#if BUILDFLAG(USE_STARSCAN)

@ -166,16 +166,6 @@ struct PartitionOptions {
static constexpr auto kDisabled = EnableToggle::kDisabled;
static constexpr auto kEnabled = EnableToggle::kEnabled;
// By default all allocations will be aligned to `kAlignment`,
// likely to be 8B or 16B depending on platforms and toolchains.
// AlignedAlloc() allows to enforce higher alignment.
// This option determines whether it is supported for the partition.
// Allowing AlignedAlloc() comes at a cost of disallowing extras in front
// of the allocation.
// TODO(bartekn): Remove. We no longer have a need for partitions that don't
// support aligned alloc.
AllowToggle aligned_alloc = kDisallowed;
EnableToggle thread_cache = kDisabled;
AllowToggle star_scan_quarantine = kDisallowed;
EnableToggle backup_ref_ptr = kDisabled;
@ -265,7 +255,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool with_thread_cache = false;
bool allow_aligned_alloc = false;
#if BUILDFLAG(PA_DCHECK_IS_ON)
bool use_cookie = false;
#else
@ -299,6 +288,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#if PA_CONFIG(EXTRAS_REQUIRED)
uint32_t extras_size = 0;
// TODO(bartekn): Remove, as we no longer support non-zero offsets.
uint32_t extras_offset = 0;
#else
// Teach the compiler that code can be optimized in builds that use no
@ -2319,7 +2309,6 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
// allocation from the beginning of the slot, thus messing up alignment.
// Extras after the allocation are acceptable, but they have to be taken into
// account in the request size calculation to avoid crbug.com/1185484.
PA_DCHECK(settings.allow_aligned_alloc);
PA_DCHECK(!settings.extras_offset);
// This is mandated by |posix_memalign()|, so should never fire.
PA_CHECK(std::has_single_bit(alignment));

@ -155,7 +155,6 @@ class MainPartitionConstructor {
partition_alloc::PartitionOptions::kDisabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::PartitionOptions opts;
opts.aligned_alloc = partition_alloc::PartitionOptions::kAllowed;
opts.thread_cache = thread_cache;
opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
opts.backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled;
@ -176,26 +175,10 @@ std::atomic<partition_alloc::PartitionRoot*> g_original_root(nullptr);
std::atomic<bool> g_roots_finalized = false;
class AlignedPartitionConstructor {
public:
static partition_alloc::PartitionRoot* New(void* buffer) {
return g_root.Get();
}
};
// TODO(bartekn): Remove. Main partition now always supports aligned alloc, so
// no need for dedicated partition.
LeakySingleton<partition_alloc::PartitionRoot, AlignedPartitionConstructor>
g_aligned_root PA_CONSTINIT = {};
partition_alloc::PartitionRoot* OriginalAllocator() {
return g_original_root.load(std::memory_order_relaxed);
}
partition_alloc::PartitionRoot* AlignedAllocator() {
return g_aligned_root.Get();
}
bool AllocatorConfigurationFinalized() {
return g_roots_finalized.load();
}
@ -203,21 +186,12 @@ bool AllocatorConfigurationFinalized() {
void* AllocateAlignedMemory(size_t alignment, size_t size) {
// Memory returned by the regular allocator *always* respects |kAlignment|,
// which is a power of two, and any valid alignment is also a power of two. So
// we can directly fulfill these requests with the main allocator.
//
// This has several advantages:
// - The thread cache is supported on the main partition
// - Reduced fragmentation
// - Better coverage for MiraclePtr variants requiring extras
// we can directly fulfill these requests with the regular Alloc function.
//
// There are several call sites in Chromium where base::AlignedAlloc is called
// with a small alignment. Some may be due to overly-careful code, some are
// because the client code doesn't know the required alignment at compile
// time.
//
// Note that all "AlignedFree()" variants (_aligned_free() on Windows for
// instance) directly call PartitionFree(), so there is no risk of
// mismatch. (see below the default_dispatch definition).
if (alignment <= partition_alloc::internal::kAlignment) {
// This is mandated by |posix_memalign()| and friends, so should never fire.
PA_CHECK(std::has_single_bit(alignment));
@ -227,9 +201,8 @@ void* AllocateAlignedMemory(size_t alignment, size_t size) {
size);
}
return AlignedAllocator()
->AlignedAllocInline<partition_alloc::AllocFlags::kNoHooks>(alignment,
size);
return Allocator()->AlignedAllocInline<partition_alloc::AllocFlags::kNoHooks>(
alignment, size);
}
} // namespace
@ -500,11 +473,6 @@ partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
return ::OriginalAllocator();
}
// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
return ::AlignedAllocator();
}
} // namespace allocator_shim::internal
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@ -524,7 +492,6 @@ void EnablePartitionAllocMemoryReclaimer() {
// registered for memory reclaimer there.
PA_DCHECK(!AllocatorConfigurationFinalized());
PA_DCHECK(OriginalAllocator() == nullptr);
PA_DCHECK(AlignedAllocator() == Allocator());
}
void ConfigurePartitions(
@ -543,12 +510,10 @@ void ConfigurePartitions(
// out the aligned partition.
PA_CHECK(!enable_brp || split_main_partition);
// Calling Get() is actually important, even if the return values weren't
// used, because it has a side effect of initializing the variables, if they
// weren't already.
// Calling Get() is actually important, even if the return value isn't
// used, because it has a side effect of initializing the variable, if it
// wasn't already.
auto* current_root = g_root.Get();
auto* current_aligned_root = g_aligned_root.Get();
PA_DCHECK(current_root == current_aligned_root);
if (!split_main_partition) {
switch (distribution) {
@ -575,7 +540,6 @@ void ConfigurePartitions(
partition_alloc::PartitionAllocator>
new_main_allocator([&]() {
partition_alloc::PartitionOptions opts;
opts.aligned_alloc = partition_alloc::PartitionOptions::kAllowed;
opts.thread_cache = partition_alloc::PartitionOptions::kDisabled;
opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
opts.backup_ref_ptr =
@ -603,16 +567,10 @@ void ConfigurePartitions(
}());
partition_alloc::PartitionRoot* new_root = new_main_allocator->root();
// Now switch traffic to the new partitions.
// The new main root can also support AlignedAlloc.
// Now switch traffic to the new partition.
g_original_root = current_root;
g_aligned_root.Replace(new_root);
g_root.Replace(new_root);
// No need for g_original_aligned_root, because in cases where g_aligned_root
// is replaced, it must've been g_original_root.
PA_CHECK(current_aligned_root == g_original_root);
// Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory(
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
@ -681,10 +639,6 @@ void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
partition_alloc::internal::PCScan::RegisterScannableRoot(
OriginalAllocator());
}
if (Allocator() != AlignedAllocator()) {
partition_alloc::internal::PCScan::RegisterScannableRoot(
AlignedAllocator());
}
allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
@ -754,13 +708,6 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
partition_alloc::SimplePartitionStatsDumper allocator_dumper;
Allocator()->DumpStats("malloc", true, &allocator_dumper);
// TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
if (AlignedAllocator() != Allocator()) {
AlignedAllocator()->DumpStats("posix_memalign", true,
&aligned_allocator_dumper);
}
// Dump stats for nonscannable and nonquarantinable allocators.
auto& nonscannable_allocator =
allocator_shim::NonScannableAllocator::Instance();
@ -784,21 +731,18 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
info.hblks =
partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
// Resident bytes.
info.hblkhd =
partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes);
// Allocated bytes.
info.uordblks =
partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes);

@ -23,8 +23,6 @@ class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) PartitionAllocMalloc {
static partition_alloc::PartitionRoot* Allocator();
// May return |nullptr|, will never return the same pointer as |Allocator()|.
static partition_alloc::PartitionRoot* OriginalAllocator();
// May return the same pointer as |Allocator()|.
static partition_alloc::PartitionRoot* AlignedAllocator();
};
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)

@ -184,9 +184,6 @@ TEST(PartitionAllocAsMalloc, Alignment) {
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
PartitionAllocMalloc::OriginalAllocator()) %
alignof(partition_alloc::PartitionRoot));
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
PartitionAllocMalloc::AlignedAllocator()) %
alignof(partition_alloc::PartitionRoot));
}
#if BUILDFLAG(IS_APPLE) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

@ -53,7 +53,6 @@ class PartitionAllocPCScanTestBase : public testing::Test {
PartitionAllocPCScanTestBase()
: allocator_([]() {
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
opts.star_scan_quarantine = PartitionOptions::kAllowed;
opts.memory_tagging = {
.enabled = base::CPU::GetInstanceNoAllocation().has_mte()

@ -61,7 +61,6 @@ class DeltaCounter {
// Forbid extras, since they make finding out which bucket is used harder.
std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator() {
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
opts.thread_cache = PartitionOptions::kEnabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@ -275,7 +274,6 @@ TEST_P(PartitionAllocThreadCacheTest, Purge) {
TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
opts.star_scan_quarantine = PartitionOptions::kAllowed;
PartitionAllocatorForTesting allocator(opts);

@ -113,7 +113,6 @@ class PkeyTest : public testing::Test {
isolated_globals.allocator->init([]() {
partition_alloc::PartitionOptions opts;
opts.aligned_alloc = PartitionOptions::kAllowed;
opts.thread_isolation = ThreadIsolationOption(isolated_globals.pkey);
return opts;
}());

@ -119,11 +119,6 @@ ALWAYS_INLINE partition_alloc::PartitionRoot*
GetPartitionRootForMemorySafetyCheckedAllocation() {
return allocator_shim::internal::PartitionAllocMalloc::Allocator();
}
ALWAYS_INLINE partition_alloc::PartitionRoot*
GetAlignedPartitionRootForMemorySafetyCheckedAllocation() {
return allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
template <MemorySafetyCheck checks>
@ -145,7 +140,7 @@ NOINLINE void* HandleMemorySafetyCheckedOperatorNew(
std::align_val_t alignment) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
if constexpr (ShouldUsePartitionAlloc(checks)) {
return GetAlignedPartitionRootForMemorySafetyCheckedAllocation()
return GetPartitionRootForMemorySafetyCheckedAllocation()
->AlignedAlloc<GetAllocFlags(checks)>(static_cast<size_t>(alignment),
count);
} else
@ -174,7 +169,7 @@ NOINLINE void HandleMemorySafetyCheckedOperatorDelete(
std::align_val_t alignment) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
if constexpr (ShouldUsePartitionAlloc(checks)) {
GetAlignedPartitionRootForMemorySafetyCheckedAllocation()
GetPartitionRootForMemorySafetyCheckedAllocation()
->Free<GetFreeFlags(checks)>(ptr);
} else
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

@ -133,12 +133,6 @@ void ReportPartitionAllocStats(ProcessMemoryDump* pmd,
original_allocator->DumpStats("original", is_light_dump,
&partition_stats_dumper);
}
auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
if (aligned_allocator != allocator) {
aligned_allocator->DumpStats("aligned", is_light_dump,
&partition_stats_dumper);
}
auto& nonscannable_allocator =
allocator_shim::NonScannableAllocator::Instance();
if (auto* root = nonscannable_allocator.root())

@ -27,7 +27,6 @@ ThreadIsolatedAllocator::~ThreadIsolatedAllocator() = default;
void ThreadIsolatedAllocator::Initialize(int pkey) {
pkey_ = pkey;
partition_alloc::PartitionOptions opts;
opts.aligned_alloc = partition_alloc::PartitionOptions::kAllowed;
opts.thread_isolation = partition_alloc::ThreadIsolationOption(pkey_);
allocator_.init(opts);
}