author     Kostya Serebryany <kcc@google.com>  2013-10-17 11:18:11 +0000
committer  Kostya Serebryany <kcc@google.com>  2013-10-17 11:18:11 +0000
commit     8fa4edcb169ab77355c77e065fc5387085787a8a (patch)
tree       1a506894ffecdc14fe7b1053e1b6ec28eac13a5e
parent     fc81346321671489e2864f0614bf44b6af5d9bdb (diff)
[asan] Fix a deadlock between asan's allocator and lsan
Summary: This fixes a deadlock which happens in lsan on a large
memalign-allocated chunk that resides in lsan's root set.

Reviewers: samsonov, earthdok

Reviewed By: earthdok

CC: llvm-commits

Differential Revision: http://llvm-reviews.chandlerc.com/D1957

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@192885 91177308-0d34-0410-b5e6-96231b3b80d8
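For context on the failure mode: lsan runs its leak check with the allocator
lock already held (it force-locks the allocators before scanning), so when the
root-set scan reached a large memalign-allocated chunk, the old call chain
PointsIntoChunk -> AddrIsInside -> UsedSize -> AllocBeg -> GetBlockBegin tried
to acquire that same non-reentrant lock a second time and self-deadlocked.
Below is a minimal sketch of the shape of the bug and the fix, with
illustrative names only (std::mutex stands in for the allocator's spin mutex;
this is not the sanitizer source):

#include <mutex>

class Allocator {
 public:
  void ForceLock() { mu_.lock(); }     // lsan takes this before scanning
  void ForceUnlock() { mu_.unlock(); }

  // Pre-fix path: locks internally, so calling it while ForceLock()
  // is held deadlocks (std::mutex, like the spin mutex, is non-reentrant).
  void *GetBlockBegin(void *p) {
    std::lock_guard<std::mutex> g(mu_);
    return Lookup(p);
  }

  // Post-fix path: assumes the caller already holds the lock.
  void *GetBlockBeginFastLocked(void *p) { return Lookup(p); }

 private:
  void *Lookup(void *p) { return p; }  // stand-in for the real chunk search
  std::mutex mu_;
};

int main() {
  Allocator a;
  a.ForceLock();
  // a.GetBlockBegin(&a);              // would self-deadlock here
  a.GetBlockBeginFastLocked(&a);       // fine: the lock is already held
  a.ForceUnlock();
}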
-rw-r--r--  lib/asan/asan_allocator2.cc                                     24
-rw-r--r--  lib/asan/lit_tests/TestCases/assign_large_valloc_to_global.cc    5
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h                       1
-rw-r--r--  lib/sanitizer_common/sanitizer_mutex.h                           4
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc           2
5 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
index 4fd9d5a6..162a4262 100644
--- a/lib/asan/asan_allocator2.cc
+++ b/lib/asan/asan_allocator2.cc
@@ -186,14 +186,19 @@ COMPILER_CHECK(kChunkHeader2Size <= 16);
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize() {
+ uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
- return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
- }
- void *AllocBeg() {
- if (from_memalign)
+ return *reinterpret_cast<uptr *>(
+ allocator.GetMetaData(AllocBeg(locked_version)));
+ }
+ void *AllocBeg(bool locked_version = false) {
+ if (from_memalign) {
+ if (locked_version)
+ return allocator.GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(this));
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+ }
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
// If we don't use stack depot, we store the alloc/free stack traces
@@ -213,8 +218,8 @@ struct AsanChunk: ChunkBase {
uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
return (available - kChunkHeader2Size) / sizeof(u32);
}
- bool AddrIsInside(uptr addr) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize());
+ bool AddrIsInside(uptr addr, bool locked_version = false) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
@@ -722,7 +727,8 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
+ m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
return 0;
}
@@ -755,7 +761,7 @@ void LsanMetadata::set_tag(ChunkTag value) {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize();
+ return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {
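The change above threads a locked_version flag through the chunk helpers,
defaulting to false so every existing call site keeps the locking behavior;
only the lsan-side callers (PointsIntoChunk and LsanMetadata::requested_size),
which run with the allocator already locked, pass true to select the
lock-free lookup. A standalone sketch of that flag-threading pattern, with
hypothetical stand-in functions:

#include <cstdio>

// Hypothetical stand-ins for the two lookup variants:
void *LookupLocking(void *p) { return p; }  // acquires the lock itself
void *LookupLocked(void *p) { return p; }   // caller must already hold it

void *AllocBeg(void *p, bool locked_version = false) {
  // The default keeps the old behavior for all existing callers.
  return locked_version ? LookupLocked(p) : LookupLocking(p);
}

bool AddrIsInside(void *p, bool locked_version = false) {
  return AllocBeg(p, locked_version) == p;  // forward the flag unchanged
}

int main() {
  int x = 0;
  std::printf("%d\n", AddrIsInside(&x));                           // locking path
  std::printf("%d\n", AddrIsInside(&x, /*locked_version=*/true));  // lsan path
}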
diff --git a/lib/asan/lit_tests/TestCases/assign_large_valloc_to_global.cc b/lib/asan/lit_tests/TestCases/assign_large_valloc_to_global.cc
new file mode 100644
index 00000000..5275dd16
--- /dev/null
+++ b/lib/asan/lit_tests/TestCases/assign_large_valloc_to_global.cc
@@ -0,0 +1,5 @@
+// Make sure we neither report a leak nor hang.
+// RUN: %clangxx_asan -O3 %s -o %t && %t
+#include <malloc.h>
+int *p = (int*)valloc(1 << 20);
+int main() { }
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 9fdc39ba..0e99d298 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -1051,6 +1051,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.AssertHeld();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return 0;
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index 469981c3..0f05f67f 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -40,6 +40,10 @@ class StaticSpinMutex {
atomic_store(&state_, 0, memory_order_release);
}
+ void AssertHeld() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
+ }
+
private:
atomic_uint8_t state_;
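The new AssertHeld() turns the "must be called with the allocator locked"
comment on GetBlockBeginFastLocked into a runtime check. Since the spin
mutex's state is a single byte with no owner field, the check can only verify
that the lock is held by someone, not that the caller itself holds it. A
simplified standalone model of the check (the real StaticSpinMutex uses the
sanitizer's own atomics and CHECK_EQ, not <atomic> and assert):

#include <atomic>
#include <cassert>
#include <cstdint>

class SpinMutex {
 public:
  void Lock() {
    // Spin until we observe the unlocked state and claim it.
    while (state_.exchange(1, std::memory_order_acquire) != 0) {}
  }
  void Unlock() { state_.store(0, std::memory_order_release); }

  // Cheap precondition check for the *Locked entry points.
  void AssertHeld() {
    assert(state_.load(std::memory_order_relaxed) == 1);
  }

 private:
  std::atomic<uint8_t> state_{0};
};

int main() {
  SpinMutex mu;
  mu.Lock();
  mu.AssertHeld();  // passes while the lock is held
  mu.Unlock();
}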
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 3541d439..d92a07fe 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -742,6 +742,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
allocated[i] = (char *)a.Allocate(&stats, size, 1);
}
+ a.ForceLock();
for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
// if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
char *p1 = allocated[i % kNumAllocs];
@@ -757,6 +758,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
p = reinterpret_cast<void *>(~0L - (i % 1024));
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
}
+ a.ForceUnlock();
for (uptr i = 0; i < kNumAllocs; i++)
a.Deallocate(&stats, allocated[i]);