[libsanitizer] merge from upstream r169371
From-SVN: r194221
parent cc4d934fa0, commit a040845458
44 changed files with 858 additions and 339 deletions
@@ -1,3 +1,7 @@
+2012-12-05  Kostya Serebryany  <kcc@google.com>
+
+        * All files: Merge from upstream r169371.
+
 2012-12-04  Kostya Serebryany  <kcc@google.com>
             Jack Howarth  <howarth@bromo.med.uc.edu>
@@ -1,4 +1,4 @@
-168699
+169371
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -177,6 +177,8 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+  Printf("__asan's __cxa_throw %p; REAL(__cxa_throw) %p PLAIN %p\n",
+         __interceptor___cxa_throw, REAL(__cxa_throw), __cxa_throw);
   CHECK(REAL(__cxa_throw));
   __asan_handle_no_return();
   REAL(__cxa_throw)(a, b, c);
@@ -159,6 +159,7 @@ const int kAsanStackPartialRedzoneMagic = 0xf4;
 const int kAsanStackAfterReturnMagic = 0xf5;
 const int kAsanInitializationOrderMagic = 0xf6;
 const int kAsanUserPoisonedMemoryMagic = 0xf7;
+const int kAsanStackUseAfterScopeMagic = 0xf8;
 const int kAsanGlobalRedzoneMagic = 0xf9;
 const int kAsanInternalHeapMagic = 0xfe;
 
@@ -156,7 +156,9 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
   stack->trace[0] = pc;
   if ((max_s) > 1) {
     stack->max_size = max_s;
-#if defined(__arm__) || defined(__powerpc__) || defined(__powerpc64__)
+#if defined(__arm__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__sparc__)
     _Unwind_Backtrace(Unwind_Trace, stack);
     // Pop off the two ASAN functions from the backtrace.
     stack->PopStackFrames(2);
@@ -149,3 +149,40 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
 bool __asan_address_is_poisoned(void const volatile *addr) {
   return __asan::AddressIsPoisoned((uptr)addr);
 }
+
+// This is a simplified version of __asan_(un)poison_memory_region, which
+// assumes that left border of region to be poisoned is properly aligned.
+static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
+  if (size == 0) return;
+  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+  PoisonShadow(addr, aligned_size,
+               do_poison ? kAsanStackUseAfterScopeMagic : 0);
+  if (size == aligned_size)
+    return;
+  s8 end_offset = (s8)(size - aligned_size);
+  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
+  s8 end_value = *shadow_end;
+  if (do_poison) {
+    // If possible, mark all the bytes mapping to last shadow byte as
+    // unaddressable.
+    if (end_value > 0 && end_value <= end_offset)
+      *shadow_end = kAsanStackUseAfterScopeMagic;
+  } else {
+    // If necessary, mark few first bytes mapping to last shadow byte
+    // as addressable
+    if (end_value != 0)
+      *shadow_end = Max(end_value, end_offset);
+  }
+}
+
+void __asan_poison_stack_memory(uptr addr, uptr size) {
+  if (flags()->verbosity > 0)
+    Report("poisoning: %p %zx\n", (void*)addr, size);
+  PoisonAlignedStackMemory(addr, size, true);
+}
+
+void __asan_unpoison_stack_memory(uptr addr, uptr size) {
+  if (flags()->verbosity > 0)
+    Report("unpoisoning: %p %zx\n", (void*)addr, size);
+  PoisonAlignedStackMemory(addr, size, false);
+}
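Context for the hunk above: in ASan's shadow encoding each shadow byte covers SHADOW_GRANULARITY (8) application bytes, and a positive shadow value k means only the first k bytes of that granule are addressable. The tail handling in PoisonAlignedStackMemory therefore only poisons the partial granule when nothing beyond the poisoned region is still live. A minimal standalone sketch of just that tail logic (simplified names, not the library's actual code):

  #include <cstdint>
  #include <cstdio>

  const uintptr_t kGranularity = 8;    // stands in for SHADOW_GRANULARITY
  const int8_t kPoison = (int8_t)0xf8; // stands in for kAsanStackUseAfterScopeMagic

  // 'shadow' models the shadow byte of the trailing partial granule.
  void poison_tail(uintptr_t size, int8_t *shadow) {
    uintptr_t aligned = size & ~(kGranularity - 1);
    int8_t tail = (int8_t)(size - aligned);  // region bytes in the last granule
    // Poison the granule only if no byte past 'tail' is addressable.
    if (*shadow > 0 && *shadow <= tail)
      *shadow = kPoison;
  }

  int main() {
    int8_t shadow = 5;         // 5 addressable bytes in the last granule
    poison_tail(13, &shadow);  // 13 = 8 aligned + 5 tail: granule now dead
    printf("%d\n", shadow);    // prints -8 (0xf8 as a signed byte)
  }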
@@ -455,6 +455,9 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
     case kAsanUserPoisonedMemoryMagic:
       bug_descr = "use-after-poison";
       break;
+    case kAsanStackUseAfterScopeMagic:
+      bug_descr = "stack-use-after-scope";
+      break;
     case kAsanGlobalRedzoneMagic:
       bug_descr = "global-buffer-overflow";
       break;
@@ -246,6 +246,8 @@ static NOINLINE void force_interface_symbols() {
     case 34: __asan_malloc_hook(0, 0); break;
     case 35: __asan_free_hook(0); break;
     case 36: __asan_symbolize(0, 0, 0); break;
+    case 37: __asan_poison_stack_memory(0, 0); break;
+    case 38: __asan_unpoison_stack_memory(0, 0); break;
   }
 }
 
@@ -1,7 +1,5 @@
 //===-- asan_interceptors_dynamic.cc --------------------------------------===//
 //
-// The LLVM Compiler Infrastructure
-//
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
@@ -64,6 +64,15 @@ extern "C" {
   void __asan_stack_free(uptr ptr, uptr size, uptr real_stack)
       SANITIZER_INTERFACE_ATTRIBUTE;
 
+  // These two functions are used by instrumented code in the
+  // use-after-scope mode. They mark memory for local variables as
+  // unaddressable when they leave scope and addressable before the
+  // function exits.
+  void __asan_poison_stack_memory(uptr addr, uptr size)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+  void __asan_unpoison_stack_memory(uptr addr, uptr size)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+
   // Marks memory region [addr, addr+size) as unaddressable.
   // This memory must be previously allocated by the user program. Accessing
   // addresses in this region from instrumented code is forbidden until
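For orientation: these two entry points are emitted by the compiler around a local variable's lifetime, not called by hand. A hedged sketch of what instrumented code conceptually does with them (the call sites and sizes below are illustrative, not what any particular compiler emits):

  extern "C" {
  void __asan_poison_stack_memory(unsigned long addr, unsigned long size);
  void __asan_unpoison_stack_memory(unsigned long addr, unsigned long size);
  }

  int use_after_scope() {
    int *p = 0;
    {
      char buf[32];
      p = (int *)buf;
    }  // here the compiler would insert:
       //   __asan_poison_stack_memory((unsigned long)buf, 32);
    return *p;  // detected and reported as stack-use-after-scope
  }  // before returning, the frame is made addressable again:
     //   __asan_unpoison_stack_memory(frame_base, frame_size);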
@@ -47,13 +47,6 @@ void InternalFree(void *addr) {
   LIBC_FREE(addr);
 }
 
-void *InternalAllocBlock(void *p) {
-  CHECK_NE(p, (void*)0);
-  u64 *pp = (u64*)((uptr)p & ~0x7);
-  for (; pp[0] != kBlockMagic; pp--) {}
-  return pp + 1;
-}
-
 // LowLevelAllocator
 static LowLevelAllocateCallback low_level_alloc_callback;
 
@@ -1,25 +1,18 @@
-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
-//
-// Status: not yet ready.
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef SANITIZER_ALLOCATOR_H
 #define SANITIZER_ALLOCATOR_H
 
 #include "sanitizer_internal_defs.h"
-#if SANITIZER_WORDSIZE != 64
-# error "sanitizer_allocator64.h can only be used on 64-bit platforms"
-#endif
-
 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_list.h"
@@ -28,7 +21,10 @@
 namespace __sanitizer {
 
 // Maps size class id to size and back.
-class DefaultSizeClassMap {
+template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
+          uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
+          uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
+class SplineSizeClassMap {
  private:
   // Here we use a spline composed of 5 polynomials of oder 1.
   // The first size class is l0, then the classes go with step s0
@@ -36,38 +32,20 @@ class DefaultSizeClassMap {
   // Steps should be powers of two for cheap division.
   // The size of the last size class should be a power of two.
   // There should be at most 256 size classes.
-  static const uptr l0 = 1 << 4;
-  static const uptr l1 = 1 << 9;
-  static const uptr l2 = 1 << 12;
-  static const uptr l3 = 1 << 15;
-  static const uptr l4 = 1 << 18;
-  static const uptr l5 = 1 << 21;
-
-  static const uptr s0 = 1 << 4;
-  static const uptr s1 = 1 << 6;
-  static const uptr s2 = 1 << 9;
-  static const uptr s3 = 1 << 12;
-  static const uptr s4 = 1 << 15;
 
   static const uptr u0 = 0  + (l1 - l0) / s0;
   static const uptr u1 = u0 + (l2 - l1) / s1;
   static const uptr u2 = u1 + (l3 - l2) / s2;
   static const uptr u3 = u2 + (l4 - l3) / s3;
   static const uptr u4 = u3 + (l5 - l4) / s4;
 
-  // Max cached in local cache blocks.
-  static const uptr c0 = 256;
-  static const uptr c1 = 64;
-  static const uptr c2 = 16;
-  static const uptr c3 = 4;
-  static const uptr c4 = 1;
 
  public:
   // The number of size classes should be a power of two for fast division.
   static const uptr kNumClasses = u4 + 1;
   static const uptr kMaxSize = l5;
   static const uptr kMinSize = l0;
 
   COMPILER_CHECK(kNumClasses <= 256);
   COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
   COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
 
   static uptr Size(uptr class_id) {
@@ -97,13 +75,30 @@ class DefaultSizeClassMap {
   }
 };
 
+class DefaultSizeClassMap: public SplineSizeClassMap<
+  /* l: */1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
+  /* s: */1 << 4, 1 << 6, 1 << 9,  1 << 12, 1 << 15,
+  /* c: */256,    64,     16,      4,       1> {
+ private:
+  COMPILER_CHECK(kNumClasses == 256);
+};
+
+class CompactSizeClassMap: public SplineSizeClassMap<
+  /* l: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
+  /* s: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12,
+  /* c: */256,    64,     16,     4,      1> {
+ private:
+  COMPILER_CHECK(kNumClasses <= 32);
+};
+
 struct AllocatorListNode {
   AllocatorListNode *next;
 };
 
 typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
 
 // SizeClassAllocator64 -- allocator for 64-bit address space.
 //
 // Space: a portion of address space of kSpaceSize bytes starting at
 // a fixed address (kSpaceBeg). Both constants are powers of two and
 // kSpaceBeg is kSpaceSize-aligned.
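The spline map is easier to see with numbers: a size in [l_i, l_{i+1}) is rounded up to a multiple of the step s_i, and u_i accumulates how many classes the first i segments contribute. A small model using the DefaultSizeClassMap parameters (an approximation for illustration; the real Size()/ClassID() may round slightly differently at segment borders):

  #include <cstdio>

  const unsigned long l[] = {1ul << 4, 1ul << 9, 1ul << 12, 1ul << 15, 1ul << 18, 1ul << 21};
  const unsigned long s[] = {1ul << 4, 1ul << 6, 1ul << 9, 1ul << 12, 1ul << 15};

  unsigned long class_for_size(unsigned long size) {
    unsigned long id = 0;
    for (int i = 0; i < 5; i++) {
      if (size <= l[i + 1])
        return id + (size - l[i] + s[i] - 1) / s[i];  // round up within segment i
      id += (l[i + 1] - l[i]) / s[i];  // this running total is u_i above
    }
    return ~0ul;  // beyond kMaxSize: served by the large-object allocator
  }

  int main() {
    printf("%lu %lu %lu\n",
           class_for_size(16),    // 0
           class_for_size(24),    // 1 (rounded up to 32)
           class_for_size(512));  // 31 (end of the first segment)
  }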
@@ -217,14 +212,15 @@ class SizeClassAllocator64 {
   static uptr AllocBeg()  { return kSpaceBeg; }
   static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
 
-  static const uptr kNumClasses = 256;  // Power of two <= 256
+  typedef SizeClassMap SizeClassMapT;
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256
 
  private:
-  static const uptr kRegionSize = kSpaceSize / kNumClasses;
   COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
-  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
+  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
+  static const uptr kRegionSize = kSpaceSize / kNumClasses;
+  // kRegionSize must be >= 2^32.
+  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
   // Populate the free list with at most this number of bytes at once
   // or with one element if its size is greater.
   static const uptr kPopulateSize = 1 << 18;
@@ -239,8 +235,9 @@ class SizeClassAllocator64 {
   COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
 
   static uptr AdditionalSize() {
-    uptr res = sizeof(RegionInfo) * kNumClasses;
-    CHECK_EQ(res % GetPageSizeCached(), 0);
+    uptr PageSize = GetPageSizeCached();
+    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
+    CHECK_EQ(res % PageSize, 0);
     return res;
   }
 
@@ -305,8 +302,10 @@
 // Objects of this type should be used as local caches for SizeClassAllocator64.
 // Since the typical use of this class is to have one object per thread in TLS,
 // is has to be POD.
-template<const uptr kNumClasses, class SizeClassAllocator>
+template<class SizeClassAllocator>
 struct SizeClassAllocatorLocalCache {
+  typedef SizeClassAllocator Allocator;
+  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
   // Don't need to call Init if the object is a global (i.e. zero-initialized).
   void Init() {
     internal_memset(this, 0, sizeof(*this));
@@ -458,11 +457,13 @@ class LargeMmapAllocator {
   };
 
   Header *GetHeader(uptr p) {
+    CHECK_EQ(p % page_size_, 0);
     return reinterpret_cast<Header*>(p - page_size_);
   }
   Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
 
   void *GetUser(Header *h) {
+    CHECK_EQ((uptr)h % page_size_, 0);
     return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
@@ -49,9 +49,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
 // Internal allocator
 void *InternalAlloc(uptr size);
 void InternalFree(void *p);
-// Given the pointer p into a valid allocated block,
-// returns a pointer to the beginning of the block.
-void *InternalAllocBlock(void *p);
 
 // InternalScopedBuffer can be used instead of large stack arrays to
 // keep frame size low.
@@ -14,6 +14,7 @@
 #include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
 #include "sanitizer_placement_new.h"
 #include "sanitizer_procmaps.h"
 
@@ -215,21 +216,60 @@ void ReExec() {
 }
 
 // ----------------- sanitizer_procmaps.h
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.
+
 MemoryMappingLayout::MemoryMappingLayout() {
-  proc_self_maps_buff_len_ =
-      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
-                       &proc_self_maps_buff_mmaped_size_, 1 << 26);
-  CHECK_GT(proc_self_maps_buff_len_, 0);
-  // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
+  proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+                       &proc_self_maps_.mmaped_size, 1 << 26);
+  if (proc_self_maps_.mmaped_size == 0) {
+    LoadFromCache();
+    CHECK_GT(proc_self_maps_.len, 0);
+  }
+  // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
   Reset();
+  // FIXME: in the future we may want to cache the mappings on demand only.
+  CacheMemoryMappings();
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
-  UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+  // Only unmap the buffer if it is different from the cached one. Otherwise
+  // it will be unmapped when the cache is refreshed.
+  if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+    UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+  }
 }
 
 void MemoryMappingLayout::Reset() {
-  current_ = proc_self_maps_buff_;
+  current_ = proc_self_maps_.data;
 }
 
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  SpinMutexLock l(&cache_lock_);
+  // Don't invalidate the cache if the mappings are unavailable.
+  ProcSelfMapsBuff old_proc_self_maps;
+  old_proc_self_maps = cached_proc_self_maps_;
+  cached_proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+                       &cached_proc_self_maps_.mmaped_size, 1 << 26);
+  if (cached_proc_self_maps_.mmaped_size == 0) {
+    cached_proc_self_maps_ = old_proc_self_maps;
+  } else {
+    if (old_proc_self_maps.mmaped_size) {
+      UnmapOrDie(old_proc_self_maps.data,
+                 old_proc_self_maps.mmaped_size);
+    }
+  }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  SpinMutexLock l(&cache_lock_);
+  if (cached_proc_self_maps_.data) {
+    proc_self_maps_ = cached_proc_self_maps_;
+  }
+}
+
 // Parse a hex value in str and update str.
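The caching scheme above has one invariant worth stating: a failed re-read must never clobber a good snapshot, because a sandboxed process cannot get another one. A reduced sketch of that refresh rule (simplified types, no locking):

  struct Buf { char *data; unsigned long mmaped_size; unsigned long len; };

  Buf cached;  // guarded by cache_lock_ in the real code

  void refresh(Buf fresh) {       // fresh.mmaped_size == 0 means the read failed
    if (fresh.mmaped_size == 0)
      return;                     // keep the previous snapshot
    Buf old = cached;
    cached = fresh;
    // the old snapshot may be released now (UnmapOrDie in the real code)
    (void)old;
  }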
@@ -263,7 +303,7 @@ static bool IsDecimal(char c) {
 
 bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                                char filename[], uptr filename_size) {
-  char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+  char *last = proc_self_maps_.data + proc_self_maps_.len;
   if (current_ >= last) return false;
   uptr dummy;
   if (!start) start = &dummy;
@@ -160,6 +160,15 @@ void MemoryMappingLayout::Reset() {
   current_filetype_ = 0;
 }
 
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  // No-op on Mac for now.
+}
+
 // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
 // Google Perftools, http://code.google.com/p/google-perftools.
 
@@ -167,7 +167,10 @@ void SetStackSizeLimitInBytes(uptr limit) {
   struct rlimit rlim;
   rlim.rlim_cur = limit;
   rlim.rlim_max = limit;
-  CHECK_EQ(0, setrlimit(RLIMIT_STACK, &rlim));
+  if (setrlimit(RLIMIT_STACK, &rlim)) {
+    Report("setrlimit() failed %d\n", errno);
+    Die();
+  }
   CHECK(!StackSizeIsUnlimited());
 }
 
@@ -13,6 +13,7 @@
 #define SANITIZER_PROCMAPS_H
 
 #include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
 
 namespace __sanitizer {
 
@@ -27,6 +28,14 @@ class MemoryMappingLayout {
 };
 
 #else  // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+  char *data;
+  uptr mmaped_size;
+  uptr len;
+};
+#endif  // defined(__linux__)
+
 class MemoryMappingLayout {
  public:
   MemoryMappingLayout();
@@ -37,9 +46,14 @@ class MemoryMappingLayout {
   // address 'addr'. Returns true on success.
   bool GetObjectNameAndOffset(uptr addr, uptr *offset,
                               char filename[], uptr filename_size);
+  // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+  // to obtain the memory mappings. It should fall back to pre-cached data
+  // instead of aborting.
+  static void CacheMemoryMappings();
   ~MemoryMappingLayout();
 
  private:
+  void LoadFromCache();
   // Default implementation of GetObjectNameAndOffset.
   // Quite slow, because it iterates through the whole process map for each
   // lookup.
@@ -71,10 +85,12 @@ class MemoryMappingLayout {
   }
 
 # if defined __linux__
-  char *proc_self_maps_buff_;
-  uptr proc_self_maps_buff_mmaped_size_;
-  uptr proc_self_maps_buff_len_;
+  ProcSelfMapsBuff proc_self_maps_;
   char *current_;
+
+  // Static mappings cache.
+  static ProcSelfMapsBuff cached_proc_self_maps_;
+  static StaticSpinMutex cache_lock_;  // protects cached_proc_self_maps_.
 # elif defined __APPLE__
   template<u32 kLCSegment, typename SegmentCommand>
   bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
@@ -34,6 +34,8 @@ static uptr patch_pc(uptr pc) {
 #if defined(__powerpc__) || defined(__powerpc64__)
   // PCs are always 4 byte aligned.
   return pc - 4;
+#elif defined(__sparc__)
+  return pc - 8;
 #else
   return pc - 1;
 #endif
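Why the subtraction: a return address points just past the call instruction, but reports should symbolize the call itself. On fixed-width ISAs the call site is a known distance back (4 bytes on PowerPC; 8 on SPARC, where a delay slot follows the call); on variable-length x86, pc - 1 is guaranteed to land inside the call instruction. A toy illustration (arch selected at run time here purely for demonstration; the real code decides at compile time):

  #include <cstdio>
  typedef unsigned long uptr;

  uptr patched_pc(uptr ret_addr, char arch) {
    if (arch == 'p') return ret_addr - 4;  // powerpc/powerpc64
    if (arch == 's') return ret_addr - 8;  // sparc: call insn + delay slot
    return ret_addr - 1;                   // default: any byte inside the call
  }

  int main() {
    printf("%#lx\n", patched_pc(0x1008, 's'));  // 0x1000, the call site
  }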
@@ -254,6 +254,17 @@ class Symbolizer {
     // Otherwise, the data was filled by external symbolizer.
     return actual_frames;
   }
 
+  bool SymbolizeData(uptr addr, AddressInfo *frame) {
+    LoadedModule *module = FindModuleForAddress(addr);
+    if (module == 0)
+      return false;
+    const char *module_name = module->full_name();
+    uptr module_offset = addr - module->base_address();
+    frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
+    return true;
+  }
+
   bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
     int input_fd, output_fd;
     if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
@@ -305,6 +316,10 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
   return symbolizer.SymbolizeCode(address, frames, max_frames);
 }
 
+bool SymbolizeData(uptr address, AddressInfo *frame) {
+  return symbolizer.SymbolizeData(address, frame);
+}
+
 bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
   return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
 }
@@ -56,6 +56,7 @@ struct AddressInfo {
 // of descriptions actually filled.
 // This function should NOT be called from two threads simultaneously.
 uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+bool SymbolizeData(uptr address, AddressInfo *frame);
 
 // Starts external symbolizer program in a subprocess. Sanitizer communicates
 // with external symbolizer via pipes.
@@ -23,8 +23,12 @@
 namespace __tsan {
 
 #ifdef TSAN_GO
+const bool kGoMode = true;
+const bool kCppMode = false;
 const char *const kTsanOptionsEnv = "GORACE";
 #else
+const bool kGoMode = false;
+const bool kCppMode = true;
 const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
 #endif
 
@@ -122,11 +126,17 @@ T max(T a, T b) {
 }
 
 template<typename T>
-T RoundUp(T p, int align) {
+T RoundUp(T p, u64 align) {
   DCHECK_EQ(align & (align - 1), 0);
   return (T)(((u64)p + align - 1) & ~(align - 1));
 }
 
+template<typename T>
+T RoundDown(T p, u64 align) {
+  DCHECK_EQ(align & (align - 1), 0);
+  return (T)((u64)p & ~(align - 1));
+}
+
 struct MD5Hash {
   u64 hash[2];
   bool operator==(const MD5Hash &other) const;
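Both helpers depend on align being a power of two (the DCHECK), so align - 1 is a mask of the low bits: adding align - 1 before masking rounds up, masking alone rounds down. A quick check of the arithmetic:

  #include <cstdio>

  unsigned long RoundUp(unsigned long p, unsigned long align) {
    return (p + align - 1) & ~(align - 1);  // align must be a power of two
  }
  unsigned long RoundDown(unsigned long p, unsigned long align) {
    return p & ~(align - 1);
  }

  int main() {
    printf("%lu %lu\n", RoundUp(13, 8), RoundDown(13, 8));  // 16 8
    printf("%lu %lu\n", RoundUp(16, 8), RoundDown(16, 8));  // 16 16
  }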
@@ -38,6 +38,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->enable_annotations = true;
   f->suppress_equal_stacks = true;
   f->suppress_equal_addresses = true;
+  f->suppress_java = false;
   f->report_bugs = true;
   f->report_thread_leaks = true;
   f->report_destroy_locked = true;
@@ -46,7 +47,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->strip_path_prefix = "";
   f->suppressions = "";
   f->exitcode = 66;
-  f->log_fileno = kStderrFd;
+  f->log_path = "stderr";
   f->atexit_sleep_ms = 1000;
   f->verbosity = 0;
   f->profile_memory = "";
@@ -54,6 +55,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->stop_on_start = false;
   f->running_on_valgrind = false;
   f->external_symbolizer_path = "";
+  f->history_size = kGoMode ? 1 : 2;  // There are a lot of goroutines in Go.
 
   // Let a frontend override.
   OverrideFlags(f);
@@ -62,6 +64,7 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->enable_annotations, "enable_annotations");
   ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
   ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
+  ParseFlag(env, &f->suppress_java, "suppress_java");
   ParseFlag(env, &f->report_bugs, "report_bugs");
   ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
   ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
@@ -70,19 +73,26 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
   ParseFlag(env, &f->suppressions, "suppressions");
   ParseFlag(env, &f->exitcode, "exitcode");
-  ParseFlag(env, &f->log_fileno, "log_fileno");
+  ParseFlag(env, &f->log_path, "log_path");
   ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
   ParseFlag(env, &f->verbosity, "verbosity");
   ParseFlag(env, &f->profile_memory, "profile_memory");
   ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
   ParseFlag(env, &f->stop_on_start, "stop_on_start");
   ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
+  ParseFlag(env, &f->history_size, "history_size");
 
   if (!f->report_bugs) {
     f->report_thread_leaks = false;
     f->report_destroy_locked = false;
     f->report_signal_unsafe = false;
   }
+
+  if (f->history_size < 0 || f->history_size > 7) {
+    Printf("ThreadSanitizer: incorrect value for history_size"
+           " (must be [0..7])\n");
+    Die();
+  }
 }
 
 }  // namespace __tsan
@@ -29,6 +29,9 @@ struct Flags {
   // Supress a race report if we've already output another race report
   // on the same address.
   bool suppress_equal_addresses;
+  // Suppress weird race reports that can be seen if JVM is embed
+  // into the process.
+  bool suppress_java;
   // Turns off bug reporting entirely (useful for benchmarking).
   bool report_bugs;
   // Report thread leaks at exit?
@@ -47,8 +50,10 @@ struct Flags {
   const char *suppressions;
   // Override exit status if something was reported.
   int exitcode;
-  // Log fileno (1 - stdout, 2 - stderr).
-  int log_fileno;
+  // Write logs to "log_path.pid".
+  // The special values are "stdout" and "stderr".
+  // The default is "stderr".
+  const char *log_path;
   // Sleep in main thread before exiting for that many ms
   // (useful to catch "at exit" races).
   int atexit_sleep_ms;
@@ -64,6 +69,12 @@ struct Flags {
   bool running_on_valgrind;
   // Path to external symbolizer.
   const char *external_symbolizer_path;
+  // Per-thread history size, controls how many previous memory accesses
+  // are remembered per thread.  Possible values are [0..7].
+  // history_size=0 amounts to 32K memory accesses.  Each next value doubles
+  // the amount of memory accesses, up to history_size=7 that amounts to
+  // 4M memory accesses.  The default value is 2 (128K memory accesses).
+  int history_size;
 };
 
 Flags *flags();
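Per the comment above, the per-thread trace capacity is 32K events at history_size=0 and doubles with each step, i.e. 32K << history_size. A one-liner to see the whole range:

  #include <cstdio>

  int main() {
    for (int history_size = 0; history_size <= 7; history_size++)
      printf("history_size=%d -> %dK memory accesses\n",
             history_size, 32 << history_size);  // 32K up to 4096K (4M)
  }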
@@ -13,7 +13,7 @@
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interceptors.h"
+#include "interception/interception.h"
 #include "tsan_interface.h"
 #include "tsan_platform.h"
 #include "tsan_rtl.h"
@@ -135,6 +135,15 @@ static SignalContext *SigCtx(ThreadState *thr) {
 
 static unsigned g_thread_finalize_key;
 
+class ScopedInterceptor {
+ public:
+  ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+  ~ScopedInterceptor();
+ private:
+  ThreadState *const thr_;
+  const int in_rtl_;
+};
+
 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                      uptr pc)
     : thr_(thr)
@@ -158,6 +167,30 @@ ScopedInterceptor::~ScopedInterceptor() {
   CHECK_EQ(in_rtl_, thr_->in_rtl);
 }
 
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+    ThreadState *thr = cur_thread(); \
+    StatInc(thr, StatInterceptor); \
+    StatInc(thr, StatInt_##func); \
+    const uptr caller_pc = GET_CALLER_PC(); \
+    ScopedInterceptor si(thr, #func, caller_pc); \
+    /* Subtract one from pc as we need current instruction address */ \
+    const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
+    (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+    SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+    if (REAL(func) == 0) { \
+      Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+      Die(); \
+    } \
+    if (thr->in_rtl > 1) \
+      return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+
 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
 
 struct BlockingCall {
@@ -259,7 +292,6 @@ static void finalize(void *arg) {
 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
   SCOPED_TSAN_INTERCEPTOR(atexit, f);
   return atexit_ctx->atexit(thr, pc, f);
-  return 0;
 }
 
 TSAN_INTERCEPTOR(void, longjmp, void *env, int val) {
@@ -308,6 +340,11 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
   return p;
 }
 
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+  SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+  return user_alloc(thr, pc, sz, align);
+}
+
 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
   void *p = 0;
   {
@@ -1347,6 +1384,35 @@ TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
   return REAL(gettimeofday)(tv, tz);
 }
 
+// Linux kernel has a bug that leads to kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+  static atomic_uint8_t printed;
+  if (atomic_exchange(&printed, 1, memory_order_relaxed))
+    return;
+  Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
+}
+
+TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, mlockall, int flags) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlockall, void) {
+  MlockIsUnsupported();
+  return 0;
+}
+
 namespace __tsan {
 
 void ProcessPendingSignals(ThreadState *thr) {
@@ -1396,6 +1462,11 @@ void ProcessPendingSignals(ThreadState *thr) {
   thr->in_signal_handler = false;
 }
 
+static void unreachable() {
+  Printf("FATAL: ThreadSanitizer: unreachable called\n");
+  Die();
+}
+
 void InitializeInterceptors() {
   CHECK_GT(cur_thread()->in_rtl, 0);
 
@@ -1408,6 +1479,7 @@ void InitializeInterceptors() {
   TSAN_INTERCEPT(siglongjmp);
 
   TSAN_INTERCEPT(malloc);
+  TSAN_INTERCEPT(__libc_memalign);
   TSAN_INTERCEPT(calloc);
   TSAN_INTERCEPT(realloc);
   TSAN_INTERCEPT(free);
@@ -1524,6 +1596,14 @@ void InitializeInterceptors() {
   TSAN_INTERCEPT(nanosleep);
   TSAN_INTERCEPT(gettimeofday);
 
+  TSAN_INTERCEPT(mlock);
+  TSAN_INTERCEPT(munlock);
+  TSAN_INTERCEPT(mlockall);
+  TSAN_INTERCEPT(munlockall);
+
+  // Need to setup it, because interceptors check that the function is resolved.
+  // But atexit is emitted directly into the module, so can't be resolved.
+  REAL(atexit) = (int(*)(void(*)()))unreachable;
   atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
       AtExitContext();
 
@@ -1,52 +0,0 @@
-//===-- tsan_interceptors.h -------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TSAN_INTERCEPTORS_H
-#define TSAN_INTERCEPTORS_H
-
-#include "interception/interception.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-class ScopedInterceptor {
- public:
-  ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
-  ~ScopedInterceptor();
- private:
-  ThreadState *const thr_;
-  const int in_rtl_;
-};
-
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
-    ThreadState *thr = cur_thread(); \
-    StatInc(thr, StatInterceptor); \
-    StatInc(thr, StatInt_##func); \
-    const uptr caller_pc = GET_CALLER_PC(); \
-    ScopedInterceptor si(thr, #func, caller_pc); \
-    /* Subtract one from pc as we need current instruction address */ \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
-    (void)pc; \
-/**/
-
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
-    SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
-    if (thr->in_rtl > 1) \
-      return REAL(func)(__VA_ARGS__); \
-/**/
-
-#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
-#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-
-}  // namespace __tsan
-
-#endif  // TSAN_INTERCEPTORS_H
@@ -14,6 +14,8 @@
 #ifndef TSAN_INTERFACE_H
 #define TSAN_INTERFACE_H
 
+#include <sanitizer/common_interface_defs.h>
+
 // This header should NOT include any other headers.
 // All functions in this header are extern "C" and start with __tsan_.
 
@@ -23,27 +25,30 @@ extern "C" {
 
 // This function should be called at the very beginning of the process,
 // before any instrumented code is executed and before any call to malloc.
-void __tsan_init();
+void __tsan_init() SANITIZER_INTERFACE_ATTRIBUTE;
 
-void __tsan_read1(void *addr);
-void __tsan_read2(void *addr);
-void __tsan_read4(void *addr);
-void __tsan_read8(void *addr);
-void __tsan_read16(void *addr);
+void __tsan_read1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
 
-void __tsan_write1(void *addr);
-void __tsan_write2(void *addr);
-void __tsan_write4(void *addr);
-void __tsan_write8(void *addr);
-void __tsan_write16(void *addr);
+void __tsan_write1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
 
-void __tsan_vptr_update(void **vptr_p, void *new_val);
+void __tsan_vptr_update(void **vptr_p, void *new_val)
+    SANITIZER_INTERFACE_ATTRIBUTE;
 
-void __tsan_func_entry(void *call_pc);
-void __tsan_func_exit();
+void __tsan_func_entry(void *call_pc) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_func_exit() SANITIZER_INTERFACE_ATTRIBUTE;
 
-void __tsan_read_range(void *addr, unsigned long size);  // NOLINT
-void __tsan_write_range(void *addr, unsigned long size);  // NOLINT
+void __tsan_read_range(void *addr, unsigned long size)  // NOLINT
+    SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write_range(void *addr, unsigned long size)  // NOLINT
+    SANITIZER_INTERFACE_ATTRIBUTE;
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -9,6 +9,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "tsan_interface_ann.h"
 #include "tsan_mutex.h"
@@ -157,48 +158,50 @@ bool IsExpectedReport(uptr addr, uptr size) {
 using namespace __tsan;  // NOLINT
 
 extern "C" {
-void AnnotateHappensBefore(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensBefore);
   Release(cur_thread(), CALLERPC, addr);
 }
 
-void AnnotateHappensAfter(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensAfter);
   Acquire(cur_thread(), CALLERPC, addr);
 }
 
-void AnnotateCondVarSignal(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
   SCOPED_ANNOTATION(AnnotateCondVarSignal);
 }
 
-void AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
   SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
 }
 
-void AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
   SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
 }
 
-void AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+                                             uptr lock) {
   SCOPED_ANNOTATION(AnnotateCondVarWait);
 }
 
-void AnnotateRWLockCreate(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockCreate);
   MutexCreate(thr, pc, m, true, true, false);
 }
 
-void AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
   MutexCreate(thr, pc, m, true, true, true);
 }
 
-void AnnotateRWLockDestroy(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockDestroy);
   MutexDestroy(thr, pc, m);
 }
 
-void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+                                                uptr is_w) {
   SCOPED_ANNOTATION(AnnotateRWLockAcquired);
   if (is_w)
     MutexLock(thr, pc, m);
@@ -206,7 +209,8 @@ void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
     MutexReadLock(thr, pc, m);
 }
 
-void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+                                                uptr is_w) {
   SCOPED_ANNOTATION(AnnotateRWLockReleased);
   if (is_w)
     MutexUnlock(thr, pc, m);
@@ -214,19 +218,20 @@ void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
     MutexReadUnlock(thr, pc, m);
 }
 
-void AnnotateTraceMemory(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
   SCOPED_ANNOTATION(AnnotateTraceMemory);
 }
 
-void AnnotateFlushState(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateFlushState);
 }
 
-void AnnotateNewMemory(char *f, int l, uptr mem, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+                                           uptr size) {
   SCOPED_ANNOTATION(AnnotateNewMemory);
 }
 
-void AnnotateNoOp(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
   SCOPED_ANNOTATION(AnnotateNoOp);
 }
 
@@ -238,7 +243,7 @@ static void ReportMissedExpectedRace(ExpectRace *race) {
   Printf("==================\n");
 }
 
-void AnnotateFlushExpectedRaces(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
   Lock lock(&dyn_ann_ctx->mtx);
   while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
@@ -253,32 +258,39 @@ void AnnotateFlushExpectedRaces(char *f, int l) {
   }
 }
 
-void AnnotateEnableRaceDetection(char *f, int l, int enable) {
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+    char *f, int l, int enable) {
   SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
   // FIXME: Reconsider this functionality later. It may be irrelevant.
 }
 
-void AnnotateMutexIsUsedAsCondVar(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+    char *f, int l, uptr mu) {
   SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
 }
 
-void AnnotatePCQGet(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+    char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQGet);
 }
 
-void AnnotatePCQPut(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+    char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQPut);
 }
 
-void AnnotatePCQDestroy(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+    char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQDestroy);
 }
 
-void AnnotatePCQCreate(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+    char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQCreate);
 }
 
-void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+    char *f, int l, uptr mem, char *desc) {
   SCOPED_ANNOTATION(AnnotateExpectRace);
   Lock lock(&dyn_ann_ctx->mtx);
   AddExpectRace(&dyn_ann_ctx->expect,
@@ -286,7 +298,8 @@ void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
   DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
 }
 
-static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
+static void BenignRaceImpl(
+    char *f, int l, uptr mem, uptr size, char *desc) {
   Lock lock(&dyn_ann_ctx->mtx);
   AddExpectRace(&dyn_ann_ctx->benign,
                 f, l, mem, size, desc);
@@ -294,69 +307,76 @@ static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
 }
 
 // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
-void AnnotateBenignRaceSized(char *f, int l, uptr mem, uptr size, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+    char *f, int l, uptr mem, uptr size, char *desc) {
   SCOPED_ANNOTATION(AnnotateBenignRaceSized);
   BenignRaceImpl(f, l, mem, size, desc);
 }
 
-void AnnotateBenignRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+    char *f, int l, uptr mem, char *desc) {
   SCOPED_ANNOTATION(AnnotateBenignRace);
   BenignRaceImpl(f, l, mem, 1, desc);
 }
 
-void AnnotateIgnoreReadsBegin(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
   IgnoreCtl(cur_thread(), false, true);
 }
 
-void AnnotateIgnoreReadsEnd(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
   IgnoreCtl(cur_thread(), false, false);
 }
 
-void AnnotateIgnoreWritesBegin(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
   IgnoreCtl(cur_thread(), true, true);
 }
 
-void AnnotateIgnoreWritesEnd(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
-  IgnoreCtl(cur_thread(), true, false);
+  IgnoreCtl(thr, true, false);
 }
 
-void AnnotatePublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+    char *f, int l, uptr addr, uptr size) {
   SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
 }
 
-void AnnotateUnpublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+    char *f, int l, uptr addr, uptr size) {
   SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
 }
 
-void AnnotateThreadName(char *f, int l, char *name) {
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+    char *f, int l, char *name) {
   SCOPED_ANNOTATION(AnnotateThreadName);
   ThreadSetName(thr, name);
 }
 
-void WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensBefore);
 }
 
-void WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensAfter);
 }
 
-void WTFAnnotateBenignRaceSized(char *f, int l, uptr mem, uptr sz, char *desc) {
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+    char *f, int l, uptr mem, uptr sz, char *desc) {
   SCOPED_ANNOTATION(AnnotateBenignRaceSized);
 }
 
-int RunningOnValgrind() {
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
   return flags()->running_on_valgrind;
 }
 
-double __attribute__((weak)) ValgrindSlowdown(void) {
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
   return 10.0;
 }
 
-const char *ThreadSanitizerQuery(const char *query) {
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
   if (internal_strcmp(query, "pure_happens_before") == 0)
     return "1";
   else
@@ -12,6 +12,8 @@
 #ifndef TSAN_INTERFACE_ANN_H
 #define TSAN_INTERFACE_ANN_H
 
+#include <sanitizer/common_interface_defs.h>
+
 // This header should NOT include any other headers.
 // All functions in this header are extern "C" and start with __tsan_.
 
@@ -19,8 +21,8 @@
 extern "C" {
 #endif
 
-void __tsan_acquire(void *addr);
-void __tsan_release(void *addr);
+void __tsan_acquire(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_release(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -112,34 +112,101 @@ static morder ConvertOrder(morder mo) {
   return mo;
 }
 
-template<typename T> T func_xchg(T v, T op) {
-  return op;
+template<typename T> T func_xchg(volatile T *v, T op) {
+  return __sync_lock_test_and_set(v, op);
 }
 
-template<typename T> T func_add(T v, T op) {
-  return v + op;
+template<typename T> T func_add(volatile T *v, T op) {
+  return __sync_fetch_and_add(v, op);
 }
 
-template<typename T> T func_sub(T v, T op) {
-  return v - op;
+template<typename T> T func_sub(volatile T *v, T op) {
+  return __sync_fetch_and_sub(v, op);
 }
 
-template<typename T> T func_and(T v, T op) {
-  return v & op;
+template<typename T> T func_and(volatile T *v, T op) {
+  return __sync_fetch_and_and(v, op);
 }
 
-template<typename T> T func_or(T v, T op) {
-  return v | op;
+template<typename T> T func_or(volatile T *v, T op) {
+  return __sync_fetch_and_or(v, op);
 }
 
-template<typename T> T func_xor(T v, T op) {
-  return v ^ op;
+template<typename T> T func_xor(volatile T *v, T op) {
+  return __sync_fetch_and_xor(v, op);
 }
 
-template<typename T> T func_nand(T v, T op) {
-  return ~v & op;
+template<typename T> T func_nand(volatile T *v, T op) {
+  // clang does not support __sync_fetch_and_nand.
+  T cmp = *v;
+  for (;;) {
+    T newv = ~(cmp & op);
+    T cur = __sync_val_compare_and_swap(v, cmp, newv);
+    if (cmp == cur)
+      return cmp;
+    cmp = cur;
+  }
+}
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+  return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// Atomic ops are executed under tsan internal mutex,
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+a128 func_xchg(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = op;
+  return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp + op;
+  return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp - op;
+  return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp & op;
+  return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp | op;
+  return cmp;
+}
+
+a128 func_xor(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp ^ op;
+  return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = ~(cmp & op);
+  return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+  a128 cur = *v;
+  if (cur == cmp)
+    *v = xch;
+  return cur;
+}
+#endif
 
 #define SCOPED_ATOMIC(func, ...) \
     mo = ConvertOrder(mo); \
     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
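The func_nand retry loop is the standard way to synthesize a missing fetch-op from compare-and-swap: read the value, compute the update, and CAS; if the CAS reports a different current value, retry with it. The same pattern in portable C++11, for comparison (std::atomic instead of the __sync builtins used above):

  #include <atomic>
  #include <cstdio>

  template <typename T>
  T fetch_nand(std::atomic<T> &v, T op) {
    T cmp = v.load();
    // On failure, compare_exchange_weak reloads 'cmp' with the current value.
    while (!v.compare_exchange_weak(cmp, (T)~(cmp & op))) {
    }
    return cmp;  // the value observed immediately before the update
  }

  int main() {
    std::atomic<unsigned> x(0xFFu);
    unsigned old = fetch_nand(x, 0x0Fu);
    printf("%x %x\n", old, x.load());  // ff fffffff0
  }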
@@ -164,6 +231,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
     thr->clock.acquire(&s->clock);
   T v = *a;
   s->mtx.ReadUnlock();
+  __sync_synchronize();
   return v;
 }
 
@@ -179,6 +247,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     *a = v;
     return;
   }
+  __sync_synchronize();
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.ReleaseStore(&s->clock);
@@ -186,7 +255,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
   s->mtx.Unlock();
 }
 
-template<typename T, T (*F)(T v, T op)>
+template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
@@ -196,10 +265,9 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
     thr->clock.release(&s->clock);
   else if (IsAcquireOrder(mo))
     thr->clock.acquire(&s->clock);
-  T c = *a;
-  *a = F(c, v);
+  v = F(a, v);
   s->mtx.Unlock();
-  return c;
+  return v;
 }
 
 template<typename T>
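The rewrite above matters for correctness: the old code did a separate non-atomic read and write, while the new one funnels the update through a single real atomic builtin and returns what that builtin observed. All the func_* helpers return the pre-update value, matching the fetch_* contract:

  #include <cstdio>

  int main() {
    unsigned v = 40;
    unsigned old = __sync_fetch_and_add(&v, 2);  // the pattern func_add now uses
    printf("%u %u\n", old, v);  // 40 42: the caller sees the value before the add
  }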
@@ -256,16 +324,13 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
     thr->clock.release(&s->clock);
   else if (IsAcquireOrder(mo))
     thr->clock.acquire(&s->clock);
-  T cur = *a;
-  bool res = false;
-  if (cur == *c) {
-    *a = v;
-    res = true;
-  } else {
-    *c = cur;
-  }
+  T cc = *c;
+  T pr = func_cas(a, cc, v);
   s->mtx.Unlock();
-  return res;
+  if (pr == cc)
+    return true;
+  *c = pr;
+  return false;
 }
 
 template<typename T>
@@ -11,6 +11,10 @@
 #ifndef TSAN_INTERFACE_ATOMIC_H
 #define TSAN_INTERFACE_ATOMIC_H
 
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
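INTERFACE_ATTRIBUTE pins these declarations to default ELF visibility, so they stay exported even if the runtime is compiled with -fvisibility=hidden. A minimal demonstration of the effect:

  // g++ -fvisibility=hidden -shared -fPIC iface.cc -o libiface.so
  // nm -D libiface.so    # lists exported_fn, but not internal_fn
  #define DEMO_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))

  extern "C" void DEMO_INTERFACE_ATTRIBUTE exported_fn() {}
  extern "C" void internal_fn() {}  // hidden under -fvisibility=hidden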
@@ -41,157 +45,159 @@ typedef enum {
 } __tsan_memory_order;
 
 __tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
+    __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 
 __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
 __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
|
||||
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
|
||||
void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#undef INTERFACE_ATTRIBUTE
|
||||
|
||||
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
|
||||
|
|
|
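The whole hunk above is one mechanical change: every declaration in tsan_interface_atomic.h gains INTERFACE_ATTRIBUTE so the symbols are exported from the runtime. For orientation, a hedged sketch of how instrumented code ends up calling this interface; the enum value name and header path are assumed from context, and the compiler normally emits these calls itself, so invoking them by hand is illustrative only:

// Illustrative sketch only -- not part of this commit.  Assumes the header
// above is available as "tsan_interface_atomic.h" and that the
// __tsan_memory_order enum defines __tsan_memory_order_acq_rel.
#include "tsan_interface_atomic.h"

__tsan_atomic32 increment(volatile __tsan_atomic32 *counter) {
  // Roughly what the compiler emits for
  // __atomic_fetch_add(counter, 1, __ATOMIC_ACQ_REL) under -fsanitize=thread.
  return __tsan_atomic32_fetch_add(counter, 1, __tsan_memory_order_acq_rel);
}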
@@ -10,10 +10,53 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//

/*
C++ linux memory layout:
0000 0000 0000 - 03c0 0000 0000: protected
03c0 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 6000 0000 0000: protected
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack

C++ COMPAT linux memory layout:
0000 0000 0000 - 0400 0000 0000: protected
0400 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 2900 0000 0000: protected
2900 0000 0000 - 2c00 0000 0000: modules
2c00 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7f00 0000 0000: -
7f00 0000 0000 - 7fff ffff ffff: main thread stack

Go linux and darwin memory layout:
0000 0000 0000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00f8 0000 0000 - 0118 0000 0000: heap
0118 0000 0000 - 1000 0000 0000: -
1000 0000 0000 - 1460 0000 0000: shadow
1460 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7fff ffff ffff: -

Go windows memory layout:
0000 0000 0000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00f8 0000 0000 - 0118 0000 0000: heap
0118 0000 0000 - 0100 0000 0000: -
0100 0000 0000 - 0560 0000 0000: shadow
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07ff ffff ffff: -
*/

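Not part of the commit: the "C++ linux memory layout" table above can be restated as a small classifier, which makes the shadow and trace placement easy to sanity-check. A hedged sketch; the boundary values are copied directly from the comment:

#include <cstdint>
#include <cstdio>

// Classify an address against the C++ linux layout documented above.
static const char *Classify(uint64_t a) {
  if (a < 0x03c000000000ull) return "protected";
  if (a < 0x100000000000ull) return "shadow";
  if (a < 0x600000000000ull) return "protected";
  if (a < 0x620000000000ull) return "traces";
  if (a < 0x7d0000000000ull) return "-";
  if (a < 0x7e0000000000ull) return "heap";
  return "modules and main thread stack";  // up to 7fff ffff ffff
}

int main() {
  std::printf("%s\n", Classify(0x7d0000001000ull));  // prints: heap
  return 0;
}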
#ifndef TSAN_PLATFORM_H
#define TSAN_PLATFORM_H

#include "tsan_rtl.h"
#include "tsan_defs.h"
#include "tsan_trace.h"

#if defined(__LP64__) || defined(_WIN64)
namespace __tsan {
@@ -39,6 +82,13 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;

static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;

#if defined(_WIN32)
const uptr kTraceMemBegin = 0x056000000000ULL;
#else
const uptr kTraceMemBegin = 0x600000000000ULL;
#endif
const uptr kTraceMemSize = 0x020000000000ULL;

// This has to be a macro to allow constant initialization of constants below.
#ifndef TSAN_GO
#define MemToShadow(addr) \
@@ -85,6 +135,12 @@ void FlushShadowMemory();
const char *InitializePlatform();
void FinalizePlatform();
void MapThreadTrace(uptr addr, uptr size);
uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
  uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
  DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
  return p;
}

void internal_start_thread(void(*func)(void*), void *arg);

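Not part of the commit: GetThreadTrace() above is plain arithmetic, with each thread owning a fixed slot at kTraceMemBegin + tid * kTraceSize * sizeof(Event). A hedged check using the Linux constants from this diff; kTraceSize here uses the static 4M-event maximum from tsan_trace.h and Event is assumed to be a u64:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kTraceMemBegin = 0x600000000000ull;  // Linux value above
  const uint64_t kTraceMemSize  = 0x020000000000ull;  // 2 TB of trace space
  const uint64_t kTraceSize     = 4ull << 20;         // events per thread
  const uint64_t kEventSize     = 8;                  // sizeof(Event), assumed
  // 2 TB of trace space / 32 MB per thread leaves room for 65536 slots.
  assert(kTraceMemSize / (kTraceSize * kEventSize) == 65536);
  for (uint64_t tid = 0; tid < 4; tid++) {
    uint64_t p = kTraceMemBegin + tid * kTraceSize * kEventSize;
    assert(p < kTraceMemBegin + kTraceMemSize);  // mirrors the DCHECK_LT
  }
  return 0;
}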
@@ -101,7 +101,7 @@ void InitializeShadowMemory() {
  const uptr kClosedLowBeg = 0x200000;
  const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
  const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
  const uptr kClosedMidEnd = kLinuxAppMemBeg - 1;
  const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
  ProtectRange(kClosedLowBeg, kClosedLowEnd);
  ProtectRange(kClosedMidBeg, kClosedMidEnd);
  DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
@@ -118,6 +118,16 @@ void InitializeShadowMemory() {
}
#endif

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

static uptr g_data_start;
static uptr g_data_end;

@@ -190,28 +200,50 @@ static int InitTlsSize() {
}
#endif  // #ifndef TSAN_GO

static rlim_t getlim(int res) {
  rlimit rlim;
  CHECK_EQ(0, getrlimit(res, &rlim));
  return rlim.rlim_cur;
}

static void setlim(int res, rlim_t lim) {
  // The following magic is to prevent clang from replacing it with memset.
  volatile rlimit rlim;
  rlim.rlim_cur = lim;
  rlim.rlim_max = lim;
  setrlimit(res, (rlimit*)&rlim);
}

const char *InitializePlatform() {
  void *p = 0;
  if (sizeof(p) == 8) {
    // Disable core dumps, dumping of 16TB usually takes a bit long.
    // The following magic is to prevent clang from replacing it with memset.
    volatile rlimit lim;
    lim.rlim_cur = 0;
    lim.rlim_max = 0;
    setrlimit(RLIMIT_CORE, (rlimit*)&lim);
    setlim(RLIMIT_CORE, 0);
  }
  bool reexec = false;
  // TSan doesn't play well with unlimited stack size (as stack
  // overlaps with shadow memory). If we detect unlimited stack size,
  // we re-exec the program with limited stack size as a best effort.
  if (StackSizeIsUnlimited()) {
    const uptr kMaxStackSize = 32 * 1024 * 1024;  // 32 Mb
  if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
    const uptr kMaxStackSize = 32 * 1024 * 1024;
    Report("WARNING: Program is run with unlimited stack size, which "
           "wouldn't work with ThreadSanitizer.\n");
    Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
    SetStackSizeLimitInBytes(kMaxStackSize);
    ReExec();
    reexec = true;
  }

  if (getlim(RLIMIT_AS) != (rlim_t)-1) {
    Report("WARNING: Program is run with limited virtual address space, which "
           "wouldn't work with ThreadSanitizer.\n");
    Report("Re-execing with unlimited virtual address space.\n");
    setlim(RLIMIT_AS, -1);
    reexec = true;
  }

  if (reexec)
    ReExec();

#ifndef TSAN_GO
  CheckPIE();
  g_tls_size = (uptr)InitTlsSize();

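Not part of the commit: the getlim()/setlim() pair above is the standard POSIX rlimit pattern, where a soft limit of (rlim_t)-1 (RLIM_INFINITY on Linux) means "unlimited". A standalone sketch of the same detection logic, with minimal error handling:

#include <sys/resource.h>
#include <cstdio>

// Same query pattern as getlim() above.
static rlim_t getlim(int res) {
  rlimit r;
  if (getrlimit(res, &r) != 0)
    return 0;
  return r.rlim_cur;
}

int main() {
  if (getlim(RLIMIT_STACK) == (rlim_t)-1)  // RLIM_INFINITY on Linux
    std::printf("unlimited stack: tsan would cap it at 32 MB and re-exec\n");
  if (getlim(RLIMIT_AS) != (rlim_t)-1)
    std::printf("limited address space: tsan would lift it and re-exec\n");
  return 0;
}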
@@ -24,6 +24,7 @@ ReportDesc::ReportDesc()
}

ReportDesc::~ReportDesc() {
  // FIXME(dvyukov): it must be leaking a lot of memory.
}

#ifndef TSAN_GO
@@ -78,8 +79,9 @@ static void PrintMop(const ReportMop *mop, bool first) {

static void PrintLocation(const ReportLocation *loc) {
  if (loc->type == ReportLocationGlobal) {
    Printf("  Location is global '%s' of size %zu at %zx %s:%d\n",
           loc->name, loc->size, loc->addr, loc->file, loc->line);
    Printf("  Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
           loc->name, loc->size, loc->addr, loc->file, loc->line,
           loc->module, loc->offset);
  } else if (loc->type == ReportLocationHeap) {
    Printf("  Location is heap block of size %zu at %p allocated",
           loc->size, loc->addr);
@@ -89,7 +91,7 @@ static void PrintLocation(const ReportLocation *loc) {
    Printf(" by thread %d:\n", loc->tid);
    PrintStack(loc->stack);
  } else if (loc->type == ReportLocationStack) {
    Printf("  Location is stack of thread %d:\n", loc->tid);
    Printf("  Location is stack of thread %d:\n\n", loc->tid);
  }
}

@@ -149,6 +151,10 @@ void PrintReport(const ReportDesc *rep) {
#else

void PrintStack(const ReportStack *ent) {
  if (ent == 0) {
    Printf("  [failed to restore the stack]\n\n");
    return;
  }
  for (int i = 0; ent; ent = ent->next, i++) {
    Printf("  %s()\n      %s:%d +0x%zx\n",
        ent->func, ent->file, ent->line, (void*)ent->offset);
@@ -56,6 +56,8 @@ struct ReportLocation {
  ReportLocationType type;
  uptr addr;
  uptr size;
  char *module;
  uptr offset;
  int tid;
  char *name;
  char *file;

@@ -82,7 +82,8 @@ ThreadContext::ThreadContext(int tid)
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
  , dead_next()
  , name() {
}

static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
@@ -189,7 +190,12 @@ void Initialize(ThreadState *thr) {
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_fd(flags()->log_fileno);
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
@@ -279,13 +285,27 @@ void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  thr->nomalloc--;
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
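Not part of the commit: the new TraceSize()/TraceParts() pair above makes the trace length a runtime function of the history_size flag. The arithmetic: with kTracePartSizeBits == 14, a history_size of hs yields 2^(14+hs+1) events split into 2^(hs+1) parts of 16K events each, which is also why GetTracePos() in tsan_rtl.h notes that hs == 0 means a two-part trace. A quick check:

#include <cassert>

int main() {
  const unsigned kTracePartSizeBits = 14;
  const unsigned long long kTracePartSize = 1ull << kTracePartSizeBits;
  for (unsigned hs = 0; hs <= 7; hs++) {  // flag range enforced by CHECK_LE
    unsigned long long size  = 1ull << (kTracePartSizeBits + hs + 1);
    unsigned long long parts = size / kTracePartSize;
    assert(parts == (2ull << hs));        // hs == 0 -> 2 parts, doubling after
  }
  return 0;
}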
@@ -342,7 +362,7 @@ static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
@@ -451,7 +471,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
      shadow_mem, cur);
@@ -502,6 +522,7 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
@@ -510,6 +531,7 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
@@ -521,7 +543,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
@@ -551,7 +573,7 @@ void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO

@@ -25,7 +25,7 @@
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_allocator64.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
@@ -33,6 +33,11 @@
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

@@ -55,8 +60,7 @@ const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
    PrimaryAllocator> AllocatorCache;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
@@ -67,18 +71,19 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

// FastState (from most significant bit):
//   unused       : 1
//   ignore       : 1
//   tid          : kTidBits
//   epoch        : kClkBits
//   unused       : -
//   ignore_bit   : 1
//   history_size : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
@@ -90,6 +95,11 @@ class FastState {
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }
@@ -108,13 +118,34 @@ class FastState {

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};

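Not part of the commit: the FastState rework above moves the ignore bit from bit 0 to bit 63 (so GetIgnoreBit() compiles down to a sign test) and parks the 3-bit history_size in the low bits. A hedged toy model of just those two fields, not the tid/epoch packing:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t x = 0;
  x |= 1ull << 63;            // SetIgnoreBit(): kIgnoreBit is now 1ull << 63
  assert((int64_t)x < 0);     // GetIgnoreBit() as a sign test
  x = (x & ~7ull) | 5;        // SetHistorySize(5): low 3 bits
  assert((int)(x & 7) == 5);  // GetHistorySize()
  x &= ~7ull;                 // ClearHistorySize()
  assert((x & 7) == 0);
  return 0;
}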
@@ -128,9 +159,14 @@ class FastState {
//   addr0 : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }
  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
@@ -152,7 +188,7 @@ class Shadow : public FastState {

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

@@ -335,6 +371,7 @@ struct ThreadContext {
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In dead thread list.
  char *name;  // As annotated by user.

  explicit ThreadContext(int tid);
};
@@ -491,6 +528,7 @@ int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

@@ -531,19 +569,24 @@ void AfterSleep(ThreadState *thr, uptr pc);
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
  if (UNLIKELY((epoch % kTracePartSize) == 0)) {
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *evp = &thr->trace.events[epoch % kTraceSize];
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

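Not part of the commit: TraceAddEvent() above packs each trace event into a single u64, with the event type in the top 3 bits and the PC or address in the rest, and RestoreStack() in tsan_rtl_report.cc undoes it with the matching shifts. A self-contained round-trip of that encoding; the concrete EventType value is assumed:

#include <cassert>
#include <cstdint>

typedef uint64_t Event;

int main() {
  const uint64_t typ  = 1;                   // e.g. an EventType (value assumed)
  const uint64_t addr = 0x00007f1234567890ull;
  Event ev = addr | (typ << 61);             // encode, as in TraceAddEvent
  assert((ev >> 61) == typ);                 // decode type, as in RestoreStack
  assert((ev & 0xffffffffffffull) == addr);  // decode pc (low 48 bits kept)
  return 0;
}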
@@ -73,7 +73,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
@@ -105,7 +105,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->recursion == 0) {
    if (!s->is_broken) {
@@ -142,7 +142,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
@@ -162,7 +162,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write "
@@ -186,7 +186,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
    // Seems to be read unlock.
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
@@ -203,7 +203,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
    // First, it's a bug to increment the epoch w/o writing to the trace.
    // Then, the acquire/release logic can be factored out as well.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.ReleaseStore(&s->clock);

@@ -123,8 +123,7 @@ ScopedReport::ScopedReport(ReportType typ) {

ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  rep_->~ReportDesc();
  internal_free(rep_);
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
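Not part of the commit: DestroyAndFree(rep_) above folds the old two-step teardown (explicit destructor call, then internal_free()) into one helper. A hedged sketch of the idiom, with standard allocation standing in for the runtime's internal allocator and an illustrative helper name:

#include <cstdlib>
#include <new>

template <typename T>
void DestroyAndFreeSketch(T *p) {
  p->~T();       // run the destructor, as rep_->~ReportDesc() did
  std::free(p);  // release the raw storage, as internal_free() did
}

struct ReportLike { ~ReportLike() {} };

int main() {
  void *mem = std::malloc(sizeof(ReportLike));
  ReportLike *r = new (mem) ReportLike();
  DestroyAndFreeSketch(r);
  return 0;
}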
@@ -156,6 +155,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->stack = SymbolizeStack(tctx->creation_stack);
}

@@ -218,9 +218,11 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
    loc->type = ReportLocationGlobal;
    loc->addr = addr;
    loc->size = size;
    loc->module = symb->module ? internal_strdup(symb->module) : 0;
    loc->offset = symb->offset;
    loc->tid = 0;
    loc->name = symb->func;
    loc->file = symb->file;
    loc->name = symb->func ? internal_strdup(symb->func) : 0;
    loc->file = symb->file ? internal_strdup(symb->file) : 0;
    loc->line = symb->line;
    loc->stack = 0;
    internal_free(symb);
@@ -261,12 +263,12 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
    return;
  }
  Lock l(&trace->mtx);
  const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 eend = epoch % kTraceSize;
  const u64 ebegin = eend / kTracePartSize * kTracePartSize;
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
@@ -275,8 +277,9 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = trace->events[i];
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & 0xffffffffffffull);
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
@@ -382,6 +385,39 @@ bool IsFiredSuppression(Context *ctx,
  return false;
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame != 0 && frame->func != 0
        && (internal_strcmp(frame->func, "memset") == 0
            || internal_strcmp(frame->func, "memcpy") == 0
            || internal_strcmp(frame->func, "strcmp") == 0
            || internal_strcmp(frame->func, "strncpy") == 0
            || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
              && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
  return false;
}

void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
@@ -414,8 +450,7 @@ void ReportRace(ThreadState *thr) {
  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = thr->trace.events[thr->fast_state.epoch() % kTraceSize]
      & ((1ull << 61) - 1);
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
@@ -430,6 +465,9 @@ void ReportRace(ThreadState *thr) {
    rep.AddMemoryAccess(addr, s, &traces[i]);
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];

@@ -96,6 +96,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  ThreadContext *tctx = 0;
  if (ctx->dead_list_size > kThreadQuarantineSize
      || ctx->thread_seq >= kMaxTid) {
    // Reusing old thread descriptor and tid.
    if (ctx->dead_list_size == 0) {
      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
             kMaxTid);
@@ -115,12 +116,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
    tctx->sync.Reset();
    tid = tctx->tid;
    DestroyAndFree(tctx->dead_info);
    if (tctx->name) {
      internal_free(tctx->name);
      tctx->name = 0;
    }
  } else {
    // Allocating new thread descriptor and tid.
    StatInc(thr, StatThreadMaxTid);
    tid = ctx->thread_seq++;
    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
    tctx = new(mem) ThreadContext(tid);
    ctx->threads[tid] = tctx;
    MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  }
  CHECK_NE(tctx, 0);
  CHECK_GE(tid, 0);
@@ -141,12 +148,11 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  if (tid) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);

    tctx->creation_stack.ObtainCurrent(thr, pc);
  }
  return tid;
@@ -185,7 +191,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_EQ(tctx->status, ThreadStatusCreated);
  tctx->status = ThreadStatusRunning;
  tctx->os_id = os_id;
  tctx->epoch0 = tctx->epoch1 + 1;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
  tctx->epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, tctx->unique_id,
      tctx->epoch0, stk_addr, stk_size,
@@ -202,6 +210,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  thr->fast_synch_epoch = tctx->epoch0;
  thr->clock.set(tid, tctx->epoch0);
  thr->clock.acquire(&tctx->sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
  thr->trace.headers[trace].epoch0 = tctx->epoch0;
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
@@ -236,7 +247,7 @@ void ThreadFinish(ThreadState *thr) {
  } else {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
@@ -247,9 +258,8 @@ void ThreadFinish(ThreadState *thr) {
  // Save from info about the thread.
  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
  internal_memcpy(&tctx->dead_info->trace.events[0],
      &thr->trace.events[0], sizeof(thr->trace.events));
  for (int i = 0; i < kTraceParts; i++) {
  for (uptr i = 0; i < TraceParts(); i++) {
    tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
@@ -318,6 +328,20 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  }
}

void ThreadSetName(ThreadState *thr, const char *name) {
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  if (tctx->name) {
    internal_free(tctx->name);
    tctx->name = 0;
  }
  if (name)
    tctx->name = internal_strdup(name);
}

void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
@@ -356,7 +380,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

@@ -98,6 +98,7 @@ void StatOutput(u64 *stat) {
  name[StatInt_longjmp] = " longjmp ";
  name[StatInt_siglongjmp] = " siglongjmp ";
  name[StatInt_malloc] = " malloc ";
  name[StatInt___libc_memalign] = " __libc_memalign ";
  name[StatInt_calloc] = " calloc ";
  name[StatInt_realloc] = " realloc ";
  name[StatInt_free] = " free ";

@@ -95,6 +95,7 @@ enum StatType {
  StatInt_longjmp,
  StatInt_siglongjmp,
  StatInt_malloc,
  StatInt___libc_memalign,
  StatInt_calloc,
  StatInt_realloc,
  StatInt_free,

@@ -25,7 +25,7 @@ static char *ReadFile(const char *filename) {
  if (filename == 0 || filename[0] == 0)
    return 0;
  InternalScopedBuffer<char> tmp(4*1024);
  if (filename[0] == '/')
  if (filename[0] == '/' || GetPwd() == 0)
    internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
  else
    internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);

@@ -50,7 +50,7 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) {
}

ReportStack *SymbolizeCode(uptr addr) {
  if (0 != internal_strcmp(flags()->external_symbolizer_path, "")) {
  if (flags()->external_symbolizer_path[0]) {
    static const uptr kMaxAddrFrames = 16;
    InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
    for (uptr i = 0; i < kMaxAddrFrames; i++)
@@ -77,6 +77,12 @@ ReportStack *SymbolizeCode(uptr addr) {
}

ReportStack *SymbolizeData(uptr addr) {
  if (flags()->external_symbolizer_path[0]) {
    AddressInfo frame;
    if (!__sanitizer::SymbolizeData(addr, &frame))
      return 0;
    return NewReportStackEntry(frame);
  }
  return SymbolizeDataAddr2Line(addr);
}

@@ -17,12 +17,9 @@

namespace __tsan {

#ifndef TSAN_HISTORY_SIZE  // in kibitraces
#define TSAN_HISTORY_SIZE 128
#endif

const int kTracePartSize = 16 * 1024;
const int kTraceParts = TSAN_HISTORY_SIZE * 1024 / kTracePartSize;
const int kTracePartSizeBits = 14;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;

// Must fit into 3 bits.

@@ -59,7 +56,6 @@ struct TraceHeader {
};

struct Trace {
  Event events[kTraceSize];
  TraceHeader headers[kTraceParts];
  Mutex mtx;