
Clean up ThreadLocalStorage::HasBeenDestroyed.

This CL has no intended behavior change. It addresses some comments from gab@ on
another CL that already landed:
https://chromium-review.googlesource.com/c/chromium/src/+/978393.

Bug: 825218
Change-Id: I78fee61253c6b885350774b774477060580984be
Reviewed-on: https://chromium-review.googlesource.com/998313
Commit-Queue: Erik Chen <erikchen@chromium.org>
Reviewed-by: Gabriel Charette <gab@chromium.org>
Cr-Commit-Position: refs/heads/master@{#552172}
Author:    Erik Chen
Date:      2018-04-19 21:51:45 +00:00 (committed by Commit Bot)
Parent:    241f17e583
Commit:    5d005e82a9
3 changed files with 69 additions and 112 deletions:
  base/threading
  components/services/heap_profiling/public/cpp

@@ -90,7 +90,7 @@ base::subtle::Atomic32 g_native_tls_key =
 //      kDestroyed.
 //   b) All calls to Slot::Get() DCHECK that the state is not kDestroyed, and
 //      any system which might potentially invoke Slot::Get() after destruction
-//      of TLS must check ThreadLOcalStorage::ThreadIsBeingDestroyed().
+//      of TLS must check ThreadLocalStorage::ThreadIsBeingDestroyed().
 //   c) After a full pass of the pthread_keys, on the next invocation of
 //      ConstructTlsVector(), we'll then set the key to nullptr.
 //   d) At this stage, the TLS system is back in its uninitialized state.
@@ -106,7 +106,7 @@ base::subtle::Atomic32 g_native_tls_key =
 void* const kUninitialized = nullptr;
 // A sentinel value to indicate that the TLS system has been destroyed.
-void* const kDestroyed = reinterpret_cast<void*>(-3);
+void* const kDestroyed = reinterpret_cast<void*>(1);
 // The maximum number of slots in our thread local storage stack.
 constexpr int kThreadLocalStorageSize = 256;
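
[Editor's note] The state-machine comment in the first hunk above is what gives this sentinel its meaning: during thread teardown the per-thread TLS state is marked kDestroyed so that a late Slot::Get()-style caller can tell "being destroyed" apart from "never initialized" (and, per step c, the key is later reset to nullptr). A minimal standalone sketch of that idea, not the Chromium implementation; the thread_local pointer and the names kDestroyedSentinel/OnThreadExit are invented for illustration:

    #include <cassert>

    namespace tls_sketch {

    // nullptr means "never initialized"; the sentinel means "being torn down".
    void* const kDestroyedSentinel = reinterpret_cast<void*>(1);
    thread_local void* g_tls_vector = nullptr;

    bool ThreadIsBeingDestroyed() {
      return g_tls_vector == kDestroyedSentinel;
    }

    void OnThreadExit() {
      // During the destructor pass, park the per-thread state on the sentinel
      // so a late caller detects shutdown instead of re-creating TLS state.
      g_tls_vector = kDestroyedSentinel;
    }

    }  // namespace tls_sketch

    int main() {
      assert(!tls_sketch::ThreadIsBeingDestroyed());
      tls_sketch::OnThreadExit();
      assert(tls_sketch::ThreadIsBeingDestroyed());
    }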

@@ -19,7 +19,7 @@
 #endif
 namespace heap_profiling {
-class MemlogAllocatorShimInternal;
+class ScopedAllowLogging;
 }  // namespace heap_profiling
 namespace base {
@@ -156,7 +156,7 @@ class BASE_EXPORT ThreadLocalStorage {
   friend class base::SamplingHeapProfiler;
   friend class base::internal::ThreadLocalStorageTestInternal;
   friend class base::trace_event::MallocDumpProvider;
-  friend class heap_profiling::MemlogAllocatorShimInternal;
+  friend class heap_profiling::ScopedAllowLogging;
   static bool HasBeenDestroyed();

   DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);

@@ -45,35 +45,47 @@ using CaptureMode = base::trace_event::AllocationContextTracker::CaptureMode;
 namespace heap_profiling {
-namespace {
-// In the very unlikely scenario where a thread has grabbed the SendBuffer lock,
-// and then performs a heap allocation/free, ignore the allocation. Failing to
-// do so will cause non-deterministic deadlock, depending on whether the
-// allocation is dispatched to the same SendBuffer.
+// A ScopedAllowLogging instance must be instantiated in the scope of all hooks.
+// AllocatorShimLogAlloc/AllocatorShimLogFree must only be called if it
+// evaluates to true.
 //
-// On macOS, this flag is also used to prevent double-counting during sampling.
+// There are two reasons why logging may be disabled.
+//   1) To prevent reentrancy from logging code.
+//   2) During thread destruction, Chrome TLS has been destroyed and it can no
+//      longer be used to determine if reentrancy is occurring. Attempting to
+//      access Chrome TLS after it has been destroyed is disallowed.
+//
+// Failure to prevent reentrancy can cause non-deterministic deadlock. This
+// happens if a thread has grabbed the SendBuffer lock, then performs a heap
+// allocation/free, which in turn tries to grab the SendBuffer lock.
+//
+// On macOS, this guard is also used to prevent double-counting during sampling.
 // The implementation of libmalloc will sometimes call malloc [from
-// one zone to another] - without this flag, the allocation would get two
+// one zone to another] - without this guard, the allocation would get two
 // chances of being sampled.
-base::LazyInstance<base::ThreadLocalBoolean>::Leaky g_prevent_reentrancy =
-    LAZY_INSTANCE_INITIALIZER;
-}  // namespace
 // This class is friended by ThreadLocalStorage.
-class MemlogAllocatorShimInternal {
+class ScopedAllowLogging {
  public:
-  static bool ShouldLogAllocationOnCurrentThread() {
-    // Thread is being destroyed and TLS is no longer available.
-    if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
-      return false;
-    // Prevent re-entrancy.
-    return !g_prevent_reentrancy.Pointer()->Get();
+  ScopedAllowLogging()
+      : allowed_(LIKELY(!base::ThreadLocalStorage::HasBeenDestroyed()) &&
+                 LIKELY(!prevent_reentrancy_.Pointer()->Get())) {
+    if (allowed_)
+      prevent_reentrancy_.Pointer()->Set(true);
   }
+  ~ScopedAllowLogging() {
+    if (allowed_)
+      prevent_reentrancy_.Pointer()->Set(false);
+  }
+
+  explicit operator bool() const { return allowed_; }
+
+ private:
+  const bool allowed_;
+  static base::LazyInstance<base::ThreadLocalBoolean>::Leaky
+      prevent_reentrancy_;
 };
+
+base::LazyInstance<base::ThreadLocalBoolean>::Leaky
+    ScopedAllowLogging::prevent_reentrancy_;

 namespace {
 using base::allocator::AllocatorDispatch;
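
[Editor's note] For readers outside Chromium, here is a self-contained sketch of the RAII pattern ScopedAllowLogging implements. Plain thread_local bools stand in for base::LazyInstance<base::ThreadLocalBoolean> and for the ThreadLocalStorage::HasBeenDestroyed() check, the LIKELY/UNLIKELY annotations are dropped, and the *Sketch names and HookSketch function are invented for illustration:

    #include <cstdio>

    // Simplified stand-ins for the real per-thread state.
    thread_local bool tls_destroyed = false;       // ~ HasBeenDestroyed()
    thread_local bool prevent_reentrancy = false;  // ~ prevent_reentrancy_

    class ScopedAllowLoggingSketch {
     public:
      ScopedAllowLoggingSketch()
          : allowed_(!tls_destroyed && !prevent_reentrancy) {
        if (allowed_)
          prevent_reentrancy = true;  // block nested hooks on this thread
      }
      ~ScopedAllowLoggingSketch() {
        if (allowed_)
          prevent_reentrancy = false;
      }
      explicit operator bool() const { return allowed_; }

     private:
      const bool allowed_;
    };

    // Mirrors the shape of the allocator hooks in the hunks below: log only if
    // the guard evaluates to true; anything the logging path allocates
    // re-enters the hook and sees allowed_ == false.
    void HookSketch(const char* what) {
      ScopedAllowLoggingSketch allow_logging;
      if (allow_logging) {
        std::printf("logging %s\n", what);
        HookSketch("a nested allocation made while logging");  // skipped
      } else {
        std::printf("skipping %s\n", what);
      }
    }

    int main() {
      HookSketch("an ordinary allocation");                 // logged
      tls_destroyed = true;
      HookSketch("an allocation during thread teardown");   // skipped
    }

The key property is the one the comment above spells out: the guard is cheap, evaluates to false both for reentrant calls and after TLS teardown, and always restores exactly the flag it set.
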
@@ -151,15 +163,14 @@ void DestructShimState(void* shim_state) {
 // Technically, this code could be called after Thread destruction and we would
 // need to guard this with ThreadLocalStorage::HasBeenDestroyed(), but all calls
-// to this are guarded behind ShouldLogAllocationOnCurrentThread, which already
-// makes the check.
+// to this are guarded behind ScopedAllowLogging, which already makes the check.
 base::ThreadLocalStorage::Slot& ShimStateTLS() {
   static base::NoDestructor<base::ThreadLocalStorage::Slot> shim_state_tls(
       &DestructShimState);
   return *shim_state_tls;
 }

-// We don't need to worry about re-entrancy because g_prevent_reentrancy
+// We don't need to worry about re-entrancy because ScopedAllowLogging
 // already guards against that.
 ShimState* GetShimState() {
   ShimState* state = static_cast<ShimState*>(ShimStateTLS().Get());
@@ -302,19 +313,13 @@ void DoSend(const void* address,
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
 void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
   void* ptr = next->alloc_function(next, size, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, size, nullptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
   return ptr;
@@ -324,19 +329,13 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self,
                         size_t n,
                         size_t size,
                         void* context) {
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
   void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, n * size, nullptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
   return ptr;
 }
@@ -345,19 +344,13 @@ void* HookAllocAligned(const AllocatorDispatch* self,
                        size_t alignment,
                        size_t size,
                        void* context) {
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
   void* ptr = next->alloc_aligned_function(next, alignment, size, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, size, nullptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
   return ptr;
 }
@@ -366,39 +359,28 @@ void* HookRealloc(const AllocatorDispatch* self,
                   void* address,
                   size_t size,
                   void* context) {
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
   void* ptr = next->realloc_function(next, address, size, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
     if (size > 0)  // realloc(size == 0) means free()
       AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, size, nullptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
   return ptr;
 }

 void HookFree(const AllocatorDispatch* self, void* address, void* context) {
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->free_function(next, address, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }
@@ -414,20 +396,15 @@ unsigned HookBatchMalloc(const AllocatorDispatch* self,
                          void** results,
                          unsigned num_requested,
                          void* context) {
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
   unsigned count =
       next->batch_malloc_function(next, size, results, num_requested, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     for (unsigned i = 0; i < count; ++i)
       AllocatorShimLogAlloc(AllocatorType::kMalloc, results[i], size, nullptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
   return count;
 }
@@ -436,19 +413,14 @@ void HookBatchFree(const AllocatorDispatch* self,
                    void** to_be_freed,
                    unsigned num_to_be_freed,
                    void* context) {
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     for (unsigned i = 0; i < num_to_be_freed; ++i)
       AllocatorShimLogFree(to_be_freed[i]);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }
@@ -456,18 +428,13 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
                           void* ptr,
                           size_t size,
                           void* context) {
-  // If this is our first time passing through, set the reentrancy bit.
-  bool should_log =
-      MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread();
-  if (LIKELY(should_log))
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->free_definite_size_function(next, ptr, size, context);
-  if (LIKELY(should_log)) {
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(ptr);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }
@@ -486,40 +453,30 @@ AllocatorDispatch g_hooks = {
 #endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

 void HookPartitionAlloc(void* address, size_t size, const char* type) {
-  // If this is our first time passing through, set the reentrancy bit.
-  if (LIKELY(
-          MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread())) {
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kPartitionAlloc, address, size, type);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }

 void HookPartitionFree(void* address) {
-  // If this is our first time passing through, set the reentrancy bit.
-  if (LIKELY(
-          MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread())) {
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }

 void HookGCAlloc(uint8_t* address, size_t size, const char* type) {
-  if (LIKELY(
-          MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread())) {
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kOilpan, address, size, type);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }

 void HookGCFree(uint8_t* address) {
-  if (LIKELY(
-          MemlogAllocatorShimInternal::ShouldLogAllocationOnCurrentThread())) {
-    g_prevent_reentrancy.Pointer()->Set(true);
+  ScopedAllowLogging allow_logging;
+  if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
-    g_prevent_reentrancy.Pointer()->Set(false);
   }
 }
@@ -624,7 +581,7 @@ class FrameSerializer {
 }  // namespace

 void InitTLSSlot() {
-  ignore_result(g_prevent_reentrancy.Pointer()->Get());
+  { ScopedAllowLogging allow_logging; }
   ignore_result(ShimStateTLS());
 }
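
[Editor's note] On this last hunk: constructing and immediately destroying the guard inside the braces touches its lazily-created thread-local state, just as the old code touched g_prevent_reentrancy, and the next line does the same for the shim-state TLS slot; presumably the point is to pay that initialization cost up front rather than from inside an allocator hook. Continuing the simplified sketch above (not the Chromium code), the idiom is just:

    void InitTLSSlotSketch() {
      // Construct and destroy the guard once so its thread-local state exists
      // before the allocator hooks start firing on this thread.
      { ScopedAllowLoggingSketch allow_logging; }
    }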