0

[discardable] Release Discardable Memory from Freelists

We now release memory going into the freelists of
`ClientDiscardableSharedMemoryManager`. This is done using
`madvise(MADV_REMOVE)` or similar.

We hide these changes behind a feature flag which is disabled by default,
so this CL doesn't have any effect on current functionality.

Bug: 1142593
Change-Id: I302ddf8506480f656454a08fe6b016db66c68c8f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2412833
Commit-Queue: Thiabaud Engelbrecht <thiabaud@google.com>
Reviewed-by: Wez <wez@chromium.org>
Reviewed-by: Steven Holte <holte@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: ssid <ssid@chromium.org>
Reviewed-by: Peng Huang <penghuang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#838087}
This commit is contained in:
Thiabaud Engelbrecht
2020-12-17 15:56:58 +00:00
committed by Chromium LUCI CQ
parent cc898ba6b7
commit a66cf2e479
13 changed files with 143 additions and 19 deletions

@ -8,6 +8,7 @@
#include <algorithm>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
@ -460,6 +461,35 @@ bool DiscardableSharedMemory::Purge(Time current_time) {
return true;
}
// Releases the physical pages backing [offset, offset + length) back to the
// OS where the platform supports it. The virtual address range stays mapped
// and reserved; subsequent reads succeed but may observe zero-filled pages.
// |offset| and |length| are in bytes and (per the header declaration) must be
// page aligned.
void DiscardableSharedMemory::ReleaseMemoryIfPossible(size_t offset,
                                                      size_t length) {
#if defined(OS_POSIX) && !defined(OS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
// provide MADV_FREE which has the same result but memory is purged lazily.
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif defined(OS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else  // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) + offset,
              length, MADV_PURGE_ARGUMENT)) {
    // Best-effort: failure only costs memory, not correctness, so log in
    // debug builds rather than crash.
    DPLOG(ERROR) << "madvise() failed";
  }
#else   // defined(OS_POSIX) && !defined(OS_NACL)
  // Non-POSIX (and NaCl) fallback: use the allocator's page-discard primitive.
  DiscardSystemPages(
      static_cast<char*>(shared_memory_mapping_.memory()) + offset, length);
#endif  // defined(OS_POSIX) && !defined(OS_NACL)
}
bool DiscardableSharedMemory::IsMemoryResident() const {
DCHECK(shared_memory_mapping_.IsValid());

@ -116,6 +116,13 @@ class BASE_EXPORT DiscardableSharedMemory {
// different process. Returns NULL time if purged.
Time last_known_usage() const { return last_known_usage_; }
// Releases any allocated pages in the specified range, if supported by the
// platform. Address space in the specified range continues to be reserved.
// The memory is not guaranteed to be released immediately.
// |offset| and |length| are both in bytes. |offset| and |length| must both be
// page aligned.
void ReleaseMemoryIfPossible(size_t offset, size_t length);
// This returns true and sets |last_known_usage_| to 0 if
// DiscardableSharedMemory object was successfully purged. Purging can fail
// for two reasons; object might be locked or our last known usage timestamp

@ -81,8 +81,9 @@ size_t ProcessMemoryDump::GetSystemPageSize() {
}
// static
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
size_t mapped_size) {
base::Optional<size_t> ProcessMemoryDump::CountResidentBytes(
void* start_address,
size_t mapped_size) {
const size_t page_size = GetSystemPageSize();
const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
DCHECK_EQ(0u, start_pointer % page_size);
@ -160,8 +161,8 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
DCHECK(!failure);
if (failure) {
total_resident_pages = 0;
LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
return base::nullopt;
}
return total_resident_pages;
}

@ -76,7 +76,8 @@ class BASE_EXPORT ProcessMemoryDump {
// |start_address| and |mapped_size|. |mapped_size| is specified in bytes. The
// value returned is valid only if the given range is currently mmapped by the
// process. The |start_address| must be page-aligned.
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
static base::Optional<size_t> CountResidentBytes(void* start_address,
size_t mapped_size);
// The same as above, but the given mapped range should belong to the
// shared_memory's mapped region.

@ -486,17 +486,20 @@ TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
const size_t size1 = 5 * page_size;
void* memory1 = Map(size1);
memset(memory1, 0, size1);
size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1, size1);
ASSERT_EQ(res1, size1);
base::Optional<size_t> res1 =
ProcessMemoryDump::CountResidentBytes(memory1, size1);
ASSERT_TRUE(res1.has_value());
ASSERT_EQ(res1.value(), size1);
Unmap(memory1, size1);
// Allocate a large memory segment (> 8Mib).
const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
void* memory2 = Map(kVeryLargeMemorySize);
memset(memory2, 0, kVeryLargeMemorySize);
size_t res2 =
base::Optional<size_t> res2 =
ProcessMemoryDump::CountResidentBytes(memory2, kVeryLargeMemorySize);
ASSERT_EQ(res2, kVeryLargeMemorySize);
ASSERT_TRUE(res2.has_value());
ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
Unmap(memory2, kVeryLargeMemorySize);
}

@ -173,6 +173,8 @@ const Metric kAllocatorDumpNamesForMetrics[] = {
EmitTo::kSizeInUkmAndUma, &Memory_Experimental::SetDiscardable},
{"discardable", "Discardable.FreelistSize", MetricSize::kSmall,
"freelist_size", EmitTo::kSizeInUmaOnly, nullptr},
{"discardable", "Discardable.ResidentSize", MetricSize::kSmall,
"resident_size", EmitTo::kSizeInUmaOnly, nullptr},
{"discardable", "Discardable.VirtualSize", MetricSize::kSmall,
"virtual_size", EmitTo::kSizeInUmaOnly, nullptr},
{"extensions/functions", "ExtensionFunctions", MetricSize::kLarge,

@ -112,10 +112,8 @@ ClientDiscardableSharedMemoryManager::DiscardableMemoryImpl::
DCHECK(!is_locked());
return;
}
if (is_locked())
manager_->UnlockSpan(span_.get());
manager_->ReleaseMemory(this, std::move(span_));
manager_->UnlockAndReleaseMemory(this, std::move(span_));
}
bool ClientDiscardableSharedMemoryManager::DiscardableMemoryImpl::Lock() {
@ -537,9 +535,15 @@ void ClientDiscardableSharedMemoryManager::UnlockSpan(
return span->shared_memory()->Unlock(offset, length);
}
void ClientDiscardableSharedMemoryManager::ReleaseMemory(
void ClientDiscardableSharedMemoryManager::UnlockAndReleaseMemory(
DiscardableMemoryImpl* memory,
std::unique_ptr<DiscardableSharedMemoryHeap::Span> span) {
memory->manager_->lock_.AssertAcquired();
// lock_.AssertAcquired();
if (memory->is_locked()) {
UnlockSpan(span.get());
}
DCHECK(span);
auto removed = allocated_memory_.erase(memory);
DCHECK_EQ(removed, 1u);

@ -114,6 +114,7 @@ class DISCARDABLE_MEMORY_EXPORT ClientDiscardableSharedMemoryManager
bool is_purge_scheduled_ GUARDED_BY(lock_) = false;
private:
friend class TestClientDiscardableSharedMemoryManager;
class DiscardableMemoryImpl : public base::DiscardableMemory {
public:
DiscardableMemoryImpl(
@ -174,8 +175,9 @@ class DISCARDABLE_MEMORY_EXPORT ClientDiscardableSharedMemoryManager
// Releases all unlocked memory that was last locked at least |min_age| ago.
void PurgeUnlockedMemory(base::TimeDelta min_age);
void ReleaseFreeMemoryImpl();
void ReleaseMemory(DiscardableMemoryImpl* memory,
std::unique_ptr<DiscardableSharedMemoryHeap::Span> span)
void UnlockAndReleaseMemory(
DiscardableMemoryImpl* memory,
std::unique_ptr<DiscardableSharedMemoryHeap::Span> span)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void ReleaseSpan(std::unique_ptr<DiscardableSharedMemoryHeap::Span> span)
EXCLUSIVE_LOCKS_REQUIRED(lock_);

@ -10,15 +10,21 @@
#include <utility>
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/format_macros.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/discardable_shared_memory.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
namespace discardable_memory {
const base::Feature kReleaseDiscardableFreeListPages{
"ReleaseDiscardableFreeListPages", base::FEATURE_DISABLED_BY_DEFAULT};
namespace {
bool IsInFreeList(DiscardableSharedMemoryHeap::Span* span) {
@ -142,6 +148,25 @@ void DiscardableSharedMemoryHeap::MergeIntoFreeLists(
// First add length of |span| to |num_free_blocks_|.
num_free_blocks_ += span->length_;
if (base::FeatureList::IsEnabled(kReleaseDiscardableFreeListPages)) {
SCOPED_UMA_HISTOGRAM_SHORT_TIMER("Memory.Discardable.FreeListReleaseTime");
// Release as much memory as possible before putting it into the freelists
// in order to reduce their size. Getting this memory back is still much
// cheaper than an IPC, while also saving us space in the freelists.
//
// The "+ 1" in the offset is for the SharedState that's at the start of
// the DiscardableSharedMemory. See DiscardableSharedMemory for details on
// what this is used for. We don't want to remove it, so we offset by an
// extra page.
size_t offset = (1 + span->start_) * base::GetPageSize() -
reinterpret_cast<size_t>(span->shared_memory()->memory());
// Since we always offset by at least one page because of the SharedState,
// our offset should never be 0.
DCHECK_GT(offset, 0u);
span->shared_memory()->ReleaseMemoryIfPossible(
offset, span->length_ * base::GetPageSize());
}
// Merge with previous span if possible.
auto prev_it = spans_.find(span->start_ - 1);
if (prev_it != spans_.end() && IsInFreeList(prev_it->second)) {
@ -250,6 +275,29 @@ size_t DiscardableSharedMemoryHeap::GetFreelistSize() const {
return num_free_blocks_ * block_size_;
}
// Returns the number of bytes in this heap's freelists that are currently
// resident in physical memory, or base::nullopt if residency could not be
// determined for any span (i.e. CountResidentBytes failed).
base::Optional<size_t> DiscardableSharedMemoryHeap::GetResidentSize() const {
  size_t resident_size = 0;
  // Each member of |free_spans_| is a LinkedList of Spans. We need to iterate
  // over each of these.
  for (const base::LinkedList<Span>& span_list : free_spans_) {
    for (base::LinkNode<Span>* curr = span_list.head(); curr != span_list.end();
         curr = curr->next()) {
      Span* free_span = curr->value();
      // A given span over a piece of Shared Memory (which we will call
      // |shared_memory|) has Span::start_ initialized to a value equivalent
      // to reinterpret_cast<size_t>(shared_memory->memory()) / block_size_,
      // so multiplying by |block_size_| recovers the span's start address.
      void* mem = reinterpret_cast<void*>(free_span->start() * block_size_);
      base::Optional<size_t> resident_in_span =
          base::trace_event::ProcessMemoryDump::CountResidentBytes(
              mem, free_span->length() * base::GetPageSize());
      // Propagate failure instead of reporting a misleading partial total.
      if (!resident_in_span)
        return base::nullopt;
      resident_size += resident_in_span.value();
    }
  }
  return resident_size;
}
bool DiscardableSharedMemoryHeap::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
@ -275,6 +323,12 @@ bool DiscardableSharedMemoryHeap::OnMemoryDump(
total_dump->AddScalar("virtual_size",
base::trace_event::MemoryAllocatorDump::kUnitsBytes,
total_size);
auto resident_size = GetResidentSize();
if (resident_size) {
total_dump->AddScalar("resident_size",
base::trace_event::MemoryAllocatorDump::kUnitsBytes,
resident_size.value());
}
} else {
// This iterates over all the memory allocated by the heap, and calls
// |OnMemoryDump| for each. It does not contain any information about the
@ -292,6 +346,7 @@ void DiscardableSharedMemoryHeap::InsertIntoFreeList(
std::unique_ptr<DiscardableSharedMemoryHeap::Span> span) {
DCHECK(!IsInFreeList(span.get()));
size_t index = std::min(span->length_, base::size(free_spans_)) - 1;
free_spans_[index].Append(span.release());
}

@ -150,6 +150,8 @@ class DISCARDABLE_MEMORY_EXPORT DiscardableSharedMemoryHeap {
void ReleaseMemory(const base::DiscardableSharedMemory* shared_memory,
size_t size);
base::Optional<size_t> GetResidentSize() const;
// Dumps memory statistics about a memory segment for chrome://tracing.
void OnMemoryDump(const base::DiscardableSharedMemory* shared_memory,
size_t size,

@ -205,17 +205,21 @@ ResultExpr EvaluateSyscallImpl(int fs_denied_errno,
}
if (sysno == __NR_madvise) {
// Only allow MADV_DONTNEED, MADV_RANDOM, MADV_NORMAL and MADV_FREE.
// Only allow MADV_DONTNEED, MADV_RANDOM, MADV_REMOVE, MADV_NORMAL and
// MADV_FREE.
const Arg<int> advice(2);
return If(AnyOf(advice == MADV_DONTNEED,
advice == MADV_RANDOM,
return If(AnyOf(advice == MADV_DONTNEED, advice == MADV_RANDOM,
advice == MADV_REMOVE,
advice == MADV_NORMAL
#if defined(MADV_FREE)
// MADV_FREE was introduced in Linux 4.5 and started being
// defined in glibc 2.24.
, advice == MADV_FREE
,
advice == MADV_FREE
#endif
), Allow()).Else(Error(EPERM));
),
Allow())
.Else(Error(EPERM));
}
#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \

@ -14669,6 +14669,9 @@ reviews. Googlers can read more about this at go/gwsq-gerrit.
<histogram_suffixes name="ProcessMemoryAllocatorSmall2" separator=".">
<suffix name="Discardable.FreelistSize"
label="Freelist size used by ClientDiscardableMemoryManager."/>
<suffix name="Discardable.ResidentSize"
label="Amount of resident memory held by
ClientDiscardableSharedMemoryManager."/>
<suffix name="Discardable.VirtualSize"
label="Virtual memory used by ClientDiscardableMemoryManager."/>
<suffix name="DownloadService"

@ -388,6 +388,16 @@ reviews. Googlers can read more about this at go/gwsq-gerrit.
<summary>TBD.</summary>
</histogram>
<histogram name="Memory.Discardable.FreeListReleaseTime" units="ms"
expires_after="2021-08-01">
<owner>thiabaud@google.com</owner>
<owner>lizeb@chromium.org</owner>
<summary>
Records how long it takes for memory to be released from the freelist of the
discardable shared memory allocator with |madvise|.
</summary>
</histogram>
<histogram name="Memory.Discardable.FreelistSize.Foreground" units="KiB"
expires_after="2021-05-30">
<owner>thiabaud@google.com</owner>