libsanitizer: merge from upstream (c425db2eb558c263)
The following patch is the result of running libsanitizer/merge.sh from c425db2eb558c263 (yesterday evening). Bootstrapped/regtested on x86_64-linux and i686-linux (together with the 3 follow-up patches I'm about to post). BTW, it seems upstream has added riscv64 support, I think for lsan/tsan, so if anyone is willing to try it there, it would be a matter of copying e.g. the s390*-*-linux* libsanitizer/configure.tgt entry to riscv64-*-linux* with the obvious s/s390x/riscv64/ change in it.
This commit is contained in:
parent
4d86dc51e3
commit
28219f7f99
192 changed files with 6794 additions and 2817 deletions
|
@ -1,4 +1,4 @@
|
|||
87e6e490e79384a523bc7f0216c3db60227d6d58
|
||||
c425db2eb558c26377edc04e062c0c1f999b2770
|
||||
|
||||
The first line of this file holds the git revision number of the
|
||||
last merge done from the master library sources.
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
|
||||
#include "asan_allocator.h"
|
||||
|
||||
#include "asan_internal.h"
|
||||
#include "asan_mapping.h"
|
||||
#include "asan_poisoning.h"
|
||||
#include "asan_report.h"
|
||||
|
@ -24,6 +25,7 @@
|
|||
#include "lsan/lsan_common.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_checks.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_interface.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
|
@ -190,28 +192,56 @@ class LargeChunkHeader {
|
|||
}
|
||||
};
|
||||
|
||||
static void FillChunk(AsanChunk *m) {
|
||||
// FIXME: Use ReleaseMemoryPagesToOS.
|
||||
Flags &fl = *flags();
|
||||
|
||||
if (fl.max_free_fill_size > 0) {
|
||||
// We have to skip the chunk header, it contains free_context_id.
|
||||
uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
|
||||
if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
|
||||
uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
|
||||
size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
|
||||
REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct QuarantineCallback {
|
||||
QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
|
||||
: cache_(cache),
|
||||
stack_(stack) {
|
||||
}
|
||||
|
||||
void Recycle(AsanChunk *m) {
|
||||
void *p = get_allocator().GetBlockBegin(m);
|
||||
if (p != m) {
|
||||
// Clear the magic value, as allocator internals may overwrite the
|
||||
// contents of deallocated chunk, confusing GetAsanChunk lookup.
|
||||
reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
|
||||
}
|
||||
|
||||
u8 old_chunk_state = CHUNK_QUARANTINE;
|
||||
if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
|
||||
CHUNK_INVALID, memory_order_acquire)) {
|
||||
CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
|
||||
}
|
||||
|
||||
void PreQuarantine(AsanChunk *m) const {
|
||||
FillChunk(m);
|
||||
// Poison the region.
|
||||
PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
|
||||
kAsanHeapLeftRedzoneMagic);
|
||||
kAsanHeapFreeMagic);
|
||||
}
|
||||
|
||||
void Recycle(AsanChunk *m) const {
|
||||
void *p = get_allocator().GetBlockBegin(m);
|
||||
|
||||
// The secondary will immediately unpoison and unmap the memory, so this
|
||||
// branch is unnecessary.
|
||||
if (get_allocator().FromPrimary(p)) {
|
||||
if (p != m) {
|
||||
// Clear the magic value, as allocator internals may overwrite the
|
||||
// contents of deallocated chunk, confusing GetAsanChunk lookup.
|
||||
reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
|
||||
}
|
||||
|
||||
u8 old_chunk_state = CHUNK_QUARANTINE;
|
||||
if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
|
||||
CHUNK_INVALID,
|
||||
memory_order_acquire)) {
|
||||
CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
|
||||
}
|
||||
|
||||
PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
|
||||
kAsanHeapLeftRedzoneMagic);
|
||||
}
|
||||
|
||||
// Statistics.
|
||||
AsanStats &thread_stats = GetCurrentThreadStats();
|
||||
|
@ -221,7 +251,17 @@ struct QuarantineCallback {
|
|||
get_allocator().Deallocate(cache_, p);
|
||||
}
|
||||
|
||||
void *Allocate(uptr size) {
|
||||
void RecyclePassThrough(AsanChunk *m) const {
|
||||
// Recycle for the secondary will immediately unpoison and unmap the
|
||||
// memory, so quarantine preparation is unnecessary.
|
||||
if (get_allocator().FromPrimary(m)) {
|
||||
// The primary allocation may need pattern fill if enabled.
|
||||
FillChunk(m);
|
||||
}
|
||||
Recycle(m);
|
||||
}
|
||||
|
||||
void *Allocate(uptr size) const {
|
||||
void *res = get_allocator().Allocate(cache_, size, 1);
|
||||
// TODO(alekseys): Consider making quarantine OOM-friendly.
|
||||
if (UNLIKELY(!res))
|
||||
|
@ -229,9 +269,7 @@ struct QuarantineCallback {
|
|||
return res;
|
||||
}
|
||||
|
||||
void Deallocate(void *p) {
|
||||
get_allocator().Deallocate(cache_, p);
|
||||
}
|
||||
void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
|
||||
|
||||
private:
|
||||
AllocatorCache* const cache_;
|
||||
|
@ -248,6 +286,22 @@ void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
|
|||
thread_stats.mmaps++;
|
||||
thread_stats.mmaped += size;
|
||||
}
|
||||
|
||||
void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
|
||||
uptr user_size) const {
|
||||
uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
|
||||
user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
|
||||
// The secondary mapping will be immediately returned to user, no value
|
||||
// poisoning that with non-zero just before unpoisoning by Allocate(). So just
|
||||
// poison head/tail invisible to Allocate().
|
||||
PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
|
||||
PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
|
||||
// Statistics.
|
||||
AsanStats &thread_stats = GetCurrentThreadStats();
|
||||
thread_stats.mmaps++;
|
||||
thread_stats.mmaped += size;
|
||||
}
|
||||
|
||||
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
|
||||
PoisonShadow(p, size, 0);
|
||||
// We are about to unmap a chunk of user memory.
|
||||
|
@ -387,8 +441,9 @@ struct Allocator {
|
|||
}
|
||||
|
||||
void GetOptions(AllocatorOptions *options) const {
|
||||
options->quarantine_size_mb = quarantine.GetSize() >> 20;
|
||||
options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
|
||||
options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
|
||||
options->thread_local_quarantine_size_kb =
|
||||
quarantine.GetMaxCacheSize() >> 10;
|
||||
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
|
||||
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
|
||||
options->may_return_null = AllocatorMayReturnNull();
|
||||
|
@ -472,7 +527,7 @@ struct Allocator {
|
|||
// -------------------- Allocation/Deallocation routines ---------------
|
||||
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
|
||||
AllocType alloc_type, bool can_fill) {
|
||||
if (UNLIKELY(!asan_inited))
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
AsanInitFromRtl();
|
||||
if (UNLIKELY(IsRssLimitExceeded())) {
|
||||
if (AllocatorMayReturnNull())
|
||||
|
@ -502,9 +557,10 @@ struct Allocator {
|
|||
uptr needed_size = rounded_size + rz_size;
|
||||
if (alignment > min_alignment)
|
||||
needed_size += alignment;
|
||||
bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
|
||||
// If we are allocating from the secondary allocator, there will be no
|
||||
// automatic right redzone, so add the right redzone manually.
|
||||
if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
|
||||
if (!from_primary)
|
||||
needed_size += rz_size;
|
||||
CHECK(IsAligned(needed_size, min_alignment));
|
||||
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
|
||||
|
@ -536,15 +592,6 @@ struct Allocator {
|
|||
ReportOutOfMemory(size, stack);
|
||||
}
|
||||
|
||||
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
|
||||
// Heap poisoning is enabled, but the allocator provides an unpoisoned
|
||||
// chunk. This is possible if CanPoisonMemory() was false for some
|
||||
// time, for example, due to flags()->start_disabled.
|
||||
// Anyway, poison the block before using it for anything else.
|
||||
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
|
||||
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
|
||||
}
|
||||
|
||||
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
|
||||
uptr alloc_end = alloc_beg + needed_size;
|
||||
uptr user_beg = alloc_beg + rz_size;
|
||||
|
@ -561,6 +608,17 @@ struct Allocator {
|
|||
|
||||
m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
|
||||
|
||||
if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
|
||||
// The allocator provides an unpoisoned chunk. This is possible for the
|
||||
// secondary allocator, or if CanPoisonMemory() was false for some time,
|
||||
// for example, due to flags()->start_disabled. Anyway, poison left and
|
||||
// right of the block before using it for anything else.
|
||||
uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
|
||||
uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
|
||||
PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
|
||||
PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
|
||||
}
|
||||
|
||||
uptr size_rounded_down_to_granularity =
|
||||
RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
|
||||
// Unpoison the bulk of the memory region.
|
||||
|
@ -628,25 +686,6 @@ struct Allocator {
|
|||
AsanThread *t = GetCurrentThread();
|
||||
m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
|
||||
|
||||
Flags &fl = *flags();
|
||||
if (fl.max_free_fill_size > 0) {
|
||||
// We have to skip the chunk header, it contains free_context_id.
|
||||
uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
|
||||
if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
|
||||
uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
|
||||
size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
|
||||
REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
|
||||
}
|
||||
}
|
||||
|
||||
// Poison the region.
|
||||
PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
|
||||
kAsanHeapFreeMagic);
|
||||
|
||||
AsanStats &thread_stats = GetCurrentThreadStats();
|
||||
thread_stats.frees++;
|
||||
thread_stats.freed += m->UsedSize();
|
||||
|
||||
// Push into quarantine.
|
||||
if (t) {
|
||||
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
|
||||
|
@ -699,6 +738,10 @@ struct Allocator {
|
|||
}
|
||||
}
|
||||
|
||||
AsanStats &thread_stats = GetCurrentThreadStats();
|
||||
thread_stats.frees++;
|
||||
thread_stats.freed += m->UsedSize();
|
||||
|
||||
QuarantineChunk(m, ptr, stack);
|
||||
}
|
||||
|
||||
|
@ -798,6 +841,10 @@ struct Allocator {
|
|||
return m->UsedSize();
|
||||
}
|
||||
|
||||
uptr AllocationSizeFast(uptr p) {
|
||||
return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
|
||||
}
|
||||
|
||||
AsanChunkView FindHeapChunkByAddress(uptr addr) {
|
||||
AsanChunk *m1 = GetAsanChunkByAddr(addr);
|
||||
sptr offset = 0;
|
||||
|
@ -1198,6 +1245,13 @@ uptr __sanitizer_get_allocated_size(const void *p) {
|
|||
return allocated_size;
|
||||
}
|
||||
|
||||
uptr __sanitizer_get_allocated_size_fast(const void *p) {
|
||||
DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
|
||||
uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
|
||||
DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
|
||||
return ret;
|
||||
}
|
||||
|
||||
const void *__sanitizer_get_allocated_begin(const void *p) {
|
||||
return AllocationBegin(p);
|
||||
}
|
||||
|
|
|
@ -114,32 +114,98 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
|
|||
|
||||
struct AsanMapUnmapCallback {
|
||||
void OnMap(uptr p, uptr size) const;
|
||||
void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
|
||||
void OnUnmap(uptr p, uptr size) const;
|
||||
};
|
||||
|
||||
#if SANITIZER_CAN_USE_ALLOCATOR64
|
||||
# if SANITIZER_FUCHSIA
|
||||
// This is a sentinel indicating we do not want the primary allocator arena to
|
||||
// be placed at a fixed address. It will be anonymously mmap'd.
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
# if SANITIZER_RISCV64
|
||||
|
||||
// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
|
||||
// Fuchsia to run with asan-instrumented. That is, we can run bringup, e2e,
|
||||
// libc, and scudo tests with this configuration.
|
||||
//
|
||||
// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
|
||||
// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
|
||||
// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
|
||||
// supported, we'd need a way of dynamically checking what the VMA size is and
|
||||
// determining optimal configuration.
|
||||
|
||||
// This indicates the total amount of space dedicated for the primary allocator
|
||||
// during initialization. This is roughly proportional to the size set by the
|
||||
// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
|
||||
// lead to some failures in sanitized bringup tests where we can't allocate new
|
||||
// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
|
||||
// we re-evaluate the SizeClassMap settings.
|
||||
const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
|
||||
|
||||
// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
|
||||
// but has fewer size classes (ideally at most 32). Fewer class sizes means the
|
||||
// region size for each class is larger, thus less chances of running out of
|
||||
// space for each region. The main differences are the MidSizeLog (which is
|
||||
// smaller) and the MaxSizeLog (which is larger).
|
||||
//
|
||||
// - The MaxSizeLog is higher to allow some of the largest allocations I've
|
||||
// observed to be placed in the primary allocator's arena as opposed to being
|
||||
// mmap'd by the secondary allocator. This helps reduce fragmentation from
|
||||
// large classes. A huge example of this the scudo allocator tests (and its
|
||||
// testing infrastructure) which malloc's/new's objects on the order of
|
||||
// hundreds of kilobytes which normally would not be in the primary allocator
|
||||
// arena with the default VeryDenseSizeClassMap.
|
||||
// - The MidSizeLog is reduced to help shrink the number of size classes and
|
||||
// increase region size. Without this, we'd see ASan complain many times about
|
||||
// a region running out of available space.
|
||||
//
|
||||
// This differs a bit from the fuchsia config in scudo, mainly from the NumBits,
|
||||
// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
|
||||
// for scudo at 45 and some large objects allocated by this config would be
|
||||
// placed in the arena whereas scudo would mmap them. The asan allocator needs
|
||||
// to have a number of classes that are a power of 2 for various internal things
|
||||
// to work, so we can't match the scudo settings to a tee. The sanitizer
|
||||
// allocator is slightly slower than scudo's but this is enough to get
|
||||
// memory-intensive scudo tests to run with asan instrumentation.
|
||||
typedef SizeClassMap</*kNumBits=*/2,
|
||||
/*kMinSizeLog=*/5,
|
||||
/*kMidSizeLog=*/8,
|
||||
/*kMaxSizeLog=*/18,
|
||||
/*kNumCachedHintT=*/8,
|
||||
/*kMaxBytesCachedLog=*/10>
|
||||
SizeClassMap;
|
||||
static_assert(SizeClassMap::kNumClassesRounded <= 32,
|
||||
"The above tunings were specifically selected to ensure there "
|
||||
"would be at most 32 size classes. This restriction could be "
|
||||
"loosened to 64 size classes if we can find a configuration of "
|
||||
"allocator size and SizeClassMap tunings that allows us to "
|
||||
"reliably run all bringup tests in a sanitized environment.");
|
||||
|
||||
# else
|
||||
// These are the default allocator tunings for non-RISCV environments where the
|
||||
// VMA is usually 48 bits and we have lots of space.
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
# elif defined(__powerpc64__)
|
||||
# endif
|
||||
# elif defined(__powerpc64__)
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
# elif defined(__aarch64__) && SANITIZER_ANDROID
|
||||
# elif defined(__aarch64__) && SANITIZER_ANDROID
|
||||
// Android needs to support 39, 42 and 48 bit VMA.
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
|
||||
typedef VeryCompactSizeClassMap SizeClassMap;
|
||||
#elif SANITIZER_RISCV64
|
||||
# elif SANITIZER_RISCV64
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
|
||||
typedef VeryDenseSizeClassMap SizeClassMap;
|
||||
#elif defined(__sparc__)
|
||||
# elif defined(__sparc__)
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
# elif SANITIZER_WINDOWS
|
||||
# elif SANITIZER_WINDOWS
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
|
|
|
@ -49,14 +49,14 @@ void DescribeThread(AsanThreadContext *context) {
|
|||
}
|
||||
context->announced = true;
|
||||
InternalScopedString str;
|
||||
str.append("Thread %s", AsanThreadIdAndName(context).c_str());
|
||||
str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
|
||||
if (context->parent_tid == kInvalidTid) {
|
||||
str.append(" created by unknown thread\n");
|
||||
str.Append(" created by unknown thread\n");
|
||||
Printf("%s", str.data());
|
||||
return;
|
||||
}
|
||||
str.append(" created by %s here:\n",
|
||||
AsanThreadIdAndName(context->parent_tid).c_str());
|
||||
str.AppendF(" created by %s here:\n",
|
||||
AsanThreadIdAndName(context->parent_tid).c_str());
|
||||
Printf("%s", str.data());
|
||||
StackDepotGet(context->stack_id).Print();
|
||||
// Recursively described parent thread if needed.
|
||||
|
@ -126,29 +126,29 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
|
|||
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
|
||||
Decorator d;
|
||||
InternalScopedString str;
|
||||
str.append("%s", d.Location());
|
||||
str.Append(d.Location());
|
||||
switch (descr.access_type) {
|
||||
case kAccessTypeLeft:
|
||||
str.append("%p is located %zd bytes before",
|
||||
(void *)descr.bad_addr, descr.offset);
|
||||
str.AppendF("%p is located %zd bytes before", (void *)descr.bad_addr,
|
||||
descr.offset);
|
||||
break;
|
||||
case kAccessTypeRight:
|
||||
str.append("%p is located %zd bytes after",
|
||||
(void *)descr.bad_addr, descr.offset);
|
||||
str.AppendF("%p is located %zd bytes after", (void *)descr.bad_addr,
|
||||
descr.offset);
|
||||
break;
|
||||
case kAccessTypeInside:
|
||||
str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
|
||||
descr.offset);
|
||||
str.AppendF("%p is located %zd bytes inside of", (void *)descr.bad_addr,
|
||||
descr.offset);
|
||||
break;
|
||||
case kAccessTypeUnknown:
|
||||
str.append(
|
||||
str.AppendF(
|
||||
"%p is located somewhere around (this is AddressSanitizer bug!)",
|
||||
(void *)descr.bad_addr);
|
||||
}
|
||||
str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
|
||||
(void *)descr.chunk_begin,
|
||||
(void *)(descr.chunk_begin + descr.chunk_size));
|
||||
str.append("%s", d.Default());
|
||||
str.AppendF(" %zu-byte region [%p,%p)\n", descr.chunk_size,
|
||||
(void *)descr.chunk_begin,
|
||||
(void *)(descr.chunk_begin + descr.chunk_size));
|
||||
str.Append(d.Default());
|
||||
Printf("%s", str.data());
|
||||
}
|
||||
|
||||
|
@ -243,24 +243,24 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
|
|||
pos_descr = "underflows";
|
||||
}
|
||||
InternalScopedString str;
|
||||
str.append(" [%zd, %zd)", var.beg, var_end);
|
||||
str.AppendF(" [%zd, %zd)", var.beg, var_end);
|
||||
// Render variable name.
|
||||
str.append(" '");
|
||||
str.AppendF(" '");
|
||||
for (uptr i = 0; i < var.name_len; ++i) {
|
||||
str.append("%c", var.name_pos[i]);
|
||||
str.AppendF("%c", var.name_pos[i]);
|
||||
}
|
||||
str.append("'");
|
||||
str.AppendF("'");
|
||||
if (var.line > 0) {
|
||||
str.append(" (line %zd)", var.line);
|
||||
str.AppendF(" (line %zd)", var.line);
|
||||
}
|
||||
if (pos_descr) {
|
||||
Decorator d;
|
||||
// FIXME: we may want to also print the size of the access here,
|
||||
// but in case of accesses generated by memset it may be confusing.
|
||||
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
|
||||
d.Location(), addr, pos_descr, d.Default());
|
||||
str.AppendF("%s <== Memory access at offset %zd %s this variable%s\n",
|
||||
d.Location(), addr, pos_descr, d.Default());
|
||||
} else {
|
||||
str.append("\n");
|
||||
str.AppendF("\n");
|
||||
}
|
||||
Printf("%s", str.data());
|
||||
}
|
||||
|
@ -277,23 +277,23 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
|
|||
const __asan_global &g) {
|
||||
InternalScopedString str;
|
||||
Decorator d;
|
||||
str.append("%s", d.Location());
|
||||
str.Append(d.Location());
|
||||
if (addr < g.beg) {
|
||||
str.append("%p is located %zd bytes before", (void *)addr,
|
||||
g.beg - addr);
|
||||
str.AppendF("%p is located %zd bytes before", (void *)addr, g.beg - addr);
|
||||
} else if (addr + access_size > g.beg + g.size) {
|
||||
if (addr < g.beg + g.size) addr = g.beg + g.size;
|
||||
str.append("%p is located %zd bytes after", (void *)addr,
|
||||
addr - (g.beg + g.size));
|
||||
str.AppendF("%p is located %zd bytes after", (void *)addr,
|
||||
addr - (g.beg + g.size));
|
||||
} else {
|
||||
// Can it happen?
|
||||
str.append("%p is located %zd bytes inside of", (void *)addr, addr - g.beg);
|
||||
str.AppendF("%p is located %zd bytes inside of", (void *)addr,
|
||||
addr - g.beg);
|
||||
}
|
||||
str.append(" global variable '%s' defined in '",
|
||||
MaybeDemangleGlobalName(g.name));
|
||||
PrintGlobalLocation(&str, g);
|
||||
str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
|
||||
str.append("%s", d.Default());
|
||||
str.AppendF(" global variable '%s' defined in '",
|
||||
MaybeDemangleGlobalName(g.name));
|
||||
PrintGlobalLocation(&str, g, /*print_module_name=*/false);
|
||||
str.AppendF("' (0x%zx) of size %zu\n", g.beg, g.size);
|
||||
str.Append(d.Default());
|
||||
PrintGlobalNameIfASCII(&str, g);
|
||||
Printf("%s", str.data());
|
||||
}
|
||||
|
|
|
@ -362,8 +362,8 @@ void ErrorODRViolation::Print() {
|
|||
Printf("%s", d.Default());
|
||||
InternalScopedString g1_loc;
|
||||
InternalScopedString g2_loc;
|
||||
PrintGlobalLocation(&g1_loc, global1);
|
||||
PrintGlobalLocation(&g2_loc, global2);
|
||||
PrintGlobalLocation(&g1_loc, global1, /*print_module_name=*/true);
|
||||
PrintGlobalLocation(&g2_loc, global2, /*print_module_name=*/true);
|
||||
Printf(" [1] size=%zd '%s' %s\n", global1.size,
|
||||
MaybeDemangleGlobalName(global1.name), g1_loc.data());
|
||||
Printf(" [2] size=%zd '%s' %s\n", global2.size,
|
||||
|
@ -379,8 +379,8 @@ void ErrorODRViolation::Print() {
|
|||
"HINT: if you don't care about these errors you may set "
|
||||
"ASAN_OPTIONS=detect_odr_violation=0\n");
|
||||
InternalScopedString error_msg;
|
||||
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
|
||||
MaybeDemangleGlobalName(global1.name), g1_loc.data());
|
||||
error_msg.AppendF("%s: global '%s' at %s", scariness.GetDescription(),
|
||||
MaybeDemangleGlobalName(global1.name), g1_loc.data());
|
||||
ReportErrorSummary(error_msg.data());
|
||||
}
|
||||
|
||||
|
@ -517,15 +517,15 @@ static void PrintShadowByte(InternalScopedString *str, const char *before,
|
|||
}
|
||||
|
||||
static void PrintLegend(InternalScopedString *str) {
|
||||
str->append(
|
||||
str->AppendF(
|
||||
"Shadow byte legend (one shadow byte represents %d "
|
||||
"application bytes):\n",
|
||||
(int)ASAN_SHADOW_GRANULARITY);
|
||||
PrintShadowByte(str, " Addressable: ", 0);
|
||||
str->append(" Partially addressable: ");
|
||||
str->AppendF(" Partially addressable: ");
|
||||
for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++)
|
||||
PrintShadowByte(str, "", i, " ");
|
||||
str->append("\n");
|
||||
str->AppendF("\n");
|
||||
PrintShadowByte(str, " Heap left redzone: ",
|
||||
kAsanHeapLeftRedzoneMagic);
|
||||
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
|
||||
|
@ -559,8 +559,8 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
|
|||
u8 *bytes, u8 *guilty, uptr n) {
|
||||
Decorator d;
|
||||
if (before)
|
||||
str->append("%s%p:", before,
|
||||
(void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
|
||||
str->AppendF("%s%p:", before,
|
||||
(void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
|
||||
for (uptr i = 0; i < n; i++) {
|
||||
u8 *p = bytes + i;
|
||||
const char *before =
|
||||
|
@ -568,7 +568,7 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
|
|||
const char *after = p == guilty ? "]" : "";
|
||||
PrintShadowByte(str, before, *p, after);
|
||||
}
|
||||
str->append("\n");
|
||||
str->AppendF("\n");
|
||||
}
|
||||
|
||||
static void PrintShadowMemoryForAddress(uptr addr) {
|
||||
|
@ -577,7 +577,7 @@ static void PrintShadowMemoryForAddress(uptr addr) {
|
|||
const uptr n_bytes_per_row = 16;
|
||||
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
|
||||
InternalScopedString str;
|
||||
str.append("Shadow bytes around the buggy address:\n");
|
||||
str.AppendF("Shadow bytes around the buggy address:\n");
|
||||
for (int i = -5; i <= 5; i++) {
|
||||
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
|
||||
// Skip rows that would be outside the shadow range. This can happen when
|
||||
|
|
|
@ -68,8 +68,8 @@ void FakeStack::Destroy(int tid) {
|
|||
if (Verbosity() >= 2) {
|
||||
InternalScopedString str;
|
||||
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
|
||||
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
|
||||
NumberOfFrames(stack_size_log(), class_id));
|
||||
str.AppendF("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
|
||||
NumberOfFrames(stack_size_log(), class_id));
|
||||
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
|
||||
}
|
||||
uptr size = RequiredSize(stack_size_log_);
|
||||
|
@ -133,6 +133,12 @@ void FakeStack::HandleNoReturn() {
|
|||
needs_gc_ = true;
|
||||
}
|
||||
|
||||
// Hack: The statement below is not true if we take into account sigaltstack or
|
||||
// makecontext. It should be possible to make GC to discard wrong stack frame if
|
||||
// we use these tools. For now, let's support the simplest case and allow GC to
|
||||
// discard only frames from the default stack, assuming there is no buffer on
|
||||
// the stack which is used for makecontext or sigaltstack.
|
||||
//
|
||||
// When throw, longjmp or some such happens we don't call OnFree() and
|
||||
// as the result may leak one or more fake frames, but the good news is that
|
||||
// we are notified about all such events by HandleNoReturn().
|
||||
|
@ -140,6 +146,14 @@ void FakeStack::HandleNoReturn() {
|
|||
// We do it based on their 'real_stack' values -- everything that is lower
|
||||
// than the current real_stack is garbage.
|
||||
NOINLINE void FakeStack::GC(uptr real_stack) {
|
||||
AsanThread *curr_thread = GetCurrentThread();
|
||||
if (!curr_thread)
|
||||
return; // Try again when we have a thread.
|
||||
auto top = curr_thread->stack_top();
|
||||
auto bottom = curr_thread->stack_bottom();
|
||||
if (real_stack < bottom || real_stack > top)
|
||||
return; // Not the default stack.
|
||||
|
||||
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
|
||||
u8 *flags = GetFlags(stack_size_log(), class_id);
|
||||
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
|
||||
|
@ -147,8 +161,12 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
|
|||
if (flags[i] == 0) continue; // not allocated.
|
||||
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
|
||||
GetFrame(stack_size_log(), class_id, i));
|
||||
if (ff->real_stack < real_stack) {
|
||||
// GC only on the default stack.
|
||||
if (bottom < ff->real_stack && ff->real_stack < real_stack) {
|
||||
flags[i] = 0;
|
||||
// Poison the frame, so the any access will be reported as UAR.
|
||||
SetShadow(reinterpret_cast<uptr>(ff), BytesInSizeClass(class_id),
|
||||
class_id, kMagic8);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -205,11 +223,12 @@ static FakeStack *GetFakeStackFastAlways() {
|
|||
|
||||
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
|
||||
FakeStack *fs = GetFakeStackFast();
|
||||
if (!fs) return 0;
|
||||
uptr local_stack;
|
||||
uptr real_stack = reinterpret_cast<uptr>(&local_stack);
|
||||
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
|
||||
if (!ff) return 0; // Out of fake stack.
|
||||
if (!fs)
|
||||
return 0;
|
||||
FakeFrame *ff =
|
||||
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
|
||||
if (!ff)
|
||||
return 0; // Out of fake stack.
|
||||
uptr ptr = reinterpret_cast<uptr>(ff);
|
||||
SetShadow(ptr, size, class_id, 0);
|
||||
return ptr;
|
||||
|
@ -219,9 +238,8 @@ static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
|
|||
FakeStack *fs = GetFakeStackFastAlways();
|
||||
if (!fs)
|
||||
return 0;
|
||||
uptr local_stack;
|
||||
uptr real_stack = reinterpret_cast<uptr>(&local_stack);
|
||||
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
|
||||
FakeFrame *ff =
|
||||
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
|
||||
if (!ff)
|
||||
return 0; // Out of fake stack.
|
||||
uptr ptr = reinterpret_cast<uptr>(ff);
|
||||
|
|
|
@ -36,7 +36,6 @@ struct ListOfGlobals {
|
|||
};
|
||||
|
||||
static Mutex mu_for_globals;
|
||||
static LowLevelAllocator allocator_for_globals;
|
||||
static ListOfGlobals *list_of_all_globals;
|
||||
|
||||
static const int kDynamicInitGlobalsInitialCapacity = 512;
|
||||
|
@ -81,18 +80,19 @@ static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
|
|||
}
|
||||
|
||||
static void ReportGlobal(const Global &g, const char *prefix) {
|
||||
DataInfo info;
|
||||
bool symbolized = Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
|
||||
Report(
|
||||
"%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu "
|
||||
"%s Global[%p]: beg=%p size=%zu/%zu name=%s source=%s module=%s "
|
||||
"dyn_init=%zu "
|
||||
"odr_indicator=%p\n",
|
||||
prefix, (void *)&g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
|
||||
g.module_name, g.has_dynamic_init, (void *)g.odr_indicator);
|
||||
g.module_name, (symbolized ? info.module : "?"), g.has_dynamic_init,
|
||||
(void *)g.odr_indicator);
|
||||
|
||||
DataInfo info;
|
||||
Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
|
||||
if (info.line != 0) {
|
||||
if (symbolized && info.line != 0) {
|
||||
Report(" location: name=%s, %d\n", info.file, static_cast<int>(info.line));
|
||||
}
|
||||
else if (g.gcc_location != 0) {
|
||||
} else if (g.gcc_location != 0) {
|
||||
// Fallback to Global::gcc_location
|
||||
Report(" location: name=%s, %d\n", g.gcc_location->filename, g.gcc_location->line_no);
|
||||
}
|
||||
|
@ -158,6 +158,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
|
|||
}
|
||||
}
|
||||
|
||||
// Check ODR violation for given global G by checking if it's already poisoned.
|
||||
// We use this method in case compiler doesn't use private aliases for global
|
||||
// variables.
|
||||
static void CheckODRViolationViaPoisoning(const Global *g) {
|
||||
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
|
||||
// This check may not be enough: if the first global is much larger
|
||||
// the entire redzone of the second global may be within the first global.
|
||||
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
|
||||
if (g->beg == l->g->beg &&
|
||||
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
|
||||
!IsODRViolationSuppressed(g->name))
|
||||
ReportODRViolation(g, FindRegistrationSite(g),
|
||||
l->g, FindRegistrationSite(l->g));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clang provides two different ways for global variables protection:
|
||||
// it can poison the global itself or its private alias. In former
|
||||
// case we may poison same symbol multiple times, that can help us to
|
||||
|
@ -182,7 +199,7 @@ static inline bool UseODRIndicator(const Global *g) {
|
|||
// This function may be called more than once for every global
|
||||
// so we store the globals in a map.
|
||||
static void RegisterGlobal(const Global *g) {
|
||||
CHECK(asan_inited);
|
||||
CHECK(AsanInited());
|
||||
if (flags()->report_globals >= 2)
|
||||
ReportGlobal(*g, "Added");
|
||||
CHECK(flags()->report_globals);
|
||||
|
@ -203,16 +220,18 @@ static void RegisterGlobal(const Global *g) {
|
|||
// where two globals with the same name are defined in different modules.
|
||||
if (UseODRIndicator(g))
|
||||
CheckODRViolationViaIndicator(g);
|
||||
else
|
||||
CheckODRViolationViaPoisoning(g);
|
||||
}
|
||||
if (CanPoisonMemory())
|
||||
PoisonRedZones(*g);
|
||||
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
|
||||
ListOfGlobals *l = new (GetGlobalLowLevelAllocator()) ListOfGlobals;
|
||||
l->g = g;
|
||||
l->next = list_of_all_globals;
|
||||
list_of_all_globals = l;
|
||||
if (g->has_dynamic_init) {
|
||||
if (!dynamic_init_globals) {
|
||||
dynamic_init_globals = new (allocator_for_globals) VectorOfGlobals;
|
||||
dynamic_init_globals = new (GetGlobalLowLevelAllocator()) VectorOfGlobals;
|
||||
dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
|
||||
}
|
||||
DynInitGlobal dyn_global = { *g, false };
|
||||
|
@ -221,7 +240,7 @@ static void RegisterGlobal(const Global *g) {
|
|||
}
|
||||
|
||||
static void UnregisterGlobal(const Global *g) {
|
||||
CHECK(asan_inited);
|
||||
CHECK(AsanInited());
|
||||
if (flags()->report_globals >= 2)
|
||||
ReportGlobal(*g, "Removed");
|
||||
CHECK(flags()->report_globals);
|
||||
|
@ -277,24 +296,28 @@ void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
|
|||
if (c == '\0' || !IsASCII(c)) return;
|
||||
}
|
||||
if (*(char *)(g.beg + g.size - 1) != '\0') return;
|
||||
str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
|
||||
(char *)g.beg);
|
||||
str->AppendF(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
|
||||
(char *)g.beg);
|
||||
}
|
||||
|
||||
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
|
||||
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
|
||||
bool print_module_name) {
|
||||
DataInfo info;
|
||||
Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
|
||||
|
||||
if (info.line != 0) {
|
||||
str->append("%s:%d", info.file, static_cast<int>(info.line));
|
||||
if (Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info) && info.line != 0) {
|
||||
str->AppendF("%s:%d", info.file, static_cast<int>(info.line));
|
||||
} else if (g.gcc_location != 0) {
|
||||
// Fallback to Global::gcc_location
|
||||
str->append("%s", g.gcc_location->filename ? g.gcc_location->filename : g.module_name);
|
||||
if (g.gcc_location->line_no) str->append(":%d", g.gcc_location->line_no);
|
||||
if (g.gcc_location->column_no) str->append(":%d", g.gcc_location->column_no);
|
||||
str->AppendF("%s", g.gcc_location->filename ? g.gcc_location->filename
|
||||
: g.module_name);
|
||||
if (g.gcc_location->line_no)
|
||||
str->AppendF(":%d", g.gcc_location->line_no);
|
||||
if (g.gcc_location->column_no)
|
||||
str->AppendF(":%d", g.gcc_location->column_no);
|
||||
} else {
|
||||
str->append("%s", g.module_name);
|
||||
str->AppendF("%s", g.module_name);
|
||||
}
|
||||
if (print_module_name && info.module)
|
||||
str->AppendF(" in %s", info.module);
|
||||
}
|
||||
|
||||
} // namespace __asan
|
||||
|
@ -348,7 +371,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
|
|||
Lock lock(&mu_for_globals);
|
||||
if (!global_registration_site_vector) {
|
||||
global_registration_site_vector =
|
||||
new (allocator_for_globals) GlobalRegistrationSiteVector;
|
||||
new (GetGlobalLowLevelAllocator()) GlobalRegistrationSiteVector;
|
||||
global_registration_site_vector->reserve(128);
|
||||
}
|
||||
GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
|
||||
|
@ -410,7 +433,7 @@ void __asan_before_dynamic_init(const char *module_name) {
|
|||
return;
|
||||
bool strict_init_order = flags()->strict_init_order;
|
||||
CHECK(module_name);
|
||||
CHECK(asan_inited);
|
||||
CHECK(AsanInited());
|
||||
Lock lock(&mu_for_globals);
|
||||
if (flags()->report_globals >= 3)
|
||||
Printf("DynInitPoison module: %s\n", module_name);
|
||||
|
@ -434,7 +457,7 @@ void __asan_after_dynamic_init() {
|
|||
!CanPoisonMemory() ||
|
||||
!dynamic_init_globals)
|
||||
return;
|
||||
CHECK(asan_inited);
|
||||
CHECK(AsanInited());
|
||||
Lock lock(&mu_for_globals);
|
||||
// FIXME: Optionally report that we're unpoisoning globals from a module.
|
||||
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "asan_interceptors.h"
|
||||
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_mapping.h"
|
||||
|
@ -20,7 +21,10 @@
|
|||
#include "asan_stack.h"
|
||||
#include "asan_stats.h"
|
||||
#include "asan_suppressions.h"
|
||||
#include "asan_thread.h"
|
||||
#include "lsan/lsan_common.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
|
||||
// There is no general interception at all on Fuchsia.
|
||||
|
@ -84,12 +88,6 @@ using namespace __asan;
|
|||
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
|
||||
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
||||
|
||||
#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
|
||||
AsanInterceptorContext _ctx = {#func}; \
|
||||
ctx = (void *)&_ctx; \
|
||||
(void) ctx; \
|
||||
|
||||
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
|
||||
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
|
||||
ASAN_INTERCEPT_FUNC_VER(name, ver)
|
||||
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
|
||||
|
@ -98,15 +96,15 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
|||
ASAN_WRITE_RANGE(ctx, ptr, size)
|
||||
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
|
||||
ASAN_READ_RANGE(ctx, ptr, size)
|
||||
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, func); \
|
||||
do { \
|
||||
if (asan_init_is_running) \
|
||||
return REAL(func)(__VA_ARGS__); \
|
||||
if (SANITIZER_APPLE && UNLIKELY(!asan_inited)) \
|
||||
return REAL(func)(__VA_ARGS__); \
|
||||
ENSURE_ASAN_INITED(); \
|
||||
} while (false)
|
||||
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, func); \
|
||||
do { \
|
||||
if (AsanInitIsRunning()) \
|
||||
return REAL(func)(__VA_ARGS__); \
|
||||
if (SANITIZER_APPLE && UNLIKELY(!AsanInited())) \
|
||||
return REAL(func)(__VA_ARGS__); \
|
||||
ENSURE_ASAN_INITED(); \
|
||||
} while (false)
|
||||
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
|
||||
do { \
|
||||
} while (false)
|
||||
|
@ -140,7 +138,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
|||
# define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
|
||||
# define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
|
||||
# define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
|
||||
# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
|
||||
# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!AsanInited())
|
||||
# define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
|
||||
if (AsanThread *t = GetCurrentThread()) { \
|
||||
*begin = t->tls_begin(); \
|
||||
|
@ -149,22 +147,46 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
|||
*begin = *end = 0; \
|
||||
}
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
|
||||
ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
|
||||
template <class Mmap>
|
||||
static void* mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
|
||||
int prot, int flags, int fd, OFF64_T offset) {
|
||||
void *res = real_mmap(addr, length, prot, flags, fd, offset);
|
||||
if (length && res != (void *)-1) {
|
||||
const uptr beg = reinterpret_cast<uptr>(res);
|
||||
DCHECK(IsAligned(beg, GetPageSize()));
|
||||
SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
|
||||
// Only unpoison shadow if it's an ASAN managed address.
|
||||
if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
|
||||
PoisonShadow(beg, RoundUpTo(length, GetPageSize()), 0);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
template <class Munmap>
|
||||
static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
|
||||
// We should not tag if munmap fail, but it's to late to tag after
|
||||
// real_munmap, as the pages could be mmaped by another thread.
|
||||
const uptr beg = reinterpret_cast<uptr>(addr);
|
||||
if (length && IsAligned(beg, GetPageSize())) {
|
||||
SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
|
||||
// Protect from unmapping the shadow.
|
||||
if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
|
||||
PoisonShadow(beg, rounded_length, 0);
|
||||
}
|
||||
return real_munmap(addr, length);
|
||||
}
|
||||
|
||||
# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
|
||||
fd, offset) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
return mmap_interceptor(REAL(mmap), addr, sz, prot, flags, fd, off); \
|
||||
} while (false)
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
|
||||
ASAN_MEMCPY_IMPL(ctx, to, from, size); \
|
||||
} while (false)
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memset); \
|
||||
ASAN_MEMSET_IMPL(ctx, block, c, size); \
|
||||
# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
return munmap_interceptor(REAL(munmap), addr, sz); \
|
||||
} while (false)
|
||||
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
|
@ -172,6 +194,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
|||
__lsan::ScopedInterceptorDisabler disabler
|
||||
#endif
|
||||
|
||||
#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_ASAN_INITED()
|
||||
|
||||
#include "sanitizer_common/sanitizer_common_interceptors.inc"
|
||||
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
|
||||
|
||||
|
@ -196,23 +220,44 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
|||
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
|
||||
AsanThread *t = (AsanThread *)arg;
|
||||
SetCurrentThread(t);
|
||||
return t->ThreadStart(GetTid());
|
||||
auto self = GetThreadSelf();
|
||||
auto args = asanThreadArgRetval().GetArgs(self);
|
||||
t->ThreadStart(GetTid());
|
||||
|
||||
# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
|
||||
SANITIZER_SOLARIS
|
||||
__sanitizer_sigset_t sigset;
|
||||
t->GetStartData(sigset);
|
||||
SetSigProcMask(&sigset, nullptr);
|
||||
# endif
|
||||
|
||||
thread_return_t retval = (*args.routine)(args.arg_retval);
|
||||
asanThreadArgRetval().Finish(self, retval);
|
||||
return retval;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_create, void *thread,
|
||||
void *attr, void *(*start_routine)(void*), void *arg) {
|
||||
INTERCEPTOR(int, pthread_create, void *thread, void *attr,
|
||||
void *(*start_routine)(void *), void *arg) {
|
||||
EnsureMainThreadIDIsCorrect();
|
||||
// Strict init-order checking is thread-hostile.
|
||||
if (flags()->strict_init_order)
|
||||
StopInitOrderChecking();
|
||||
GET_STACK_TRACE_THREAD;
|
||||
int detached = 0;
|
||||
if (attr)
|
||||
REAL(pthread_attr_getdetachstate)(attr, &detached);
|
||||
bool detached = [attr]() {
|
||||
int d = 0;
|
||||
return attr && !REAL(pthread_attr_getdetachstate)(attr, &d) &&
|
||||
IsStateDetached(d);
|
||||
}();
|
||||
|
||||
u32 current_tid = GetCurrentTidOrInvalid();
|
||||
AsanThread *t =
|
||||
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
|
||||
|
||||
__sanitizer_sigset_t sigset = {};
|
||||
# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
|
||||
SANITIZER_SOLARIS
|
||||
ScopedBlockSignals block(&sigset);
|
||||
# endif
|
||||
|
||||
AsanThread *t = AsanThread::Create(sigset, current_tid, &stack, detached);
|
||||
|
||||
int result;
|
||||
{
|
||||
|
@ -220,10 +265,13 @@ INTERCEPTOR(int, pthread_create, void *thread,
|
|||
// stored by pthread for future reuse even after thread destruction, and
|
||||
// the linked list it's stored in doesn't even hold valid pointers to the
|
||||
// objects, the latter are calculated by obscure pointer arithmetic.
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
# if CAN_SANITIZE_LEAKS
|
||||
__lsan::ScopedInterceptorDisabler disabler;
|
||||
#endif
|
||||
result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
|
||||
# endif
|
||||
asanThreadArgRetval().Create(detached, {start_routine, arg}, [&]() -> uptr {
|
||||
result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
|
||||
return result ? 0 : *(uptr *)(thread);
|
||||
});
|
||||
}
|
||||
if (result != 0) {
|
||||
// If the thread didn't start delete the AsanThread to avoid leaking it.
|
||||
|
@ -234,10 +282,52 @@ INTERCEPTOR(int, pthread_create, void *thread,
|
|||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
|
||||
return real_pthread_join(t, arg);
|
||||
INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
|
||||
int result;
|
||||
asanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_join)(thread, retval);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_detach, void *thread) {
|
||||
int result;
|
||||
asanThreadArgRetval().Detach((uptr)thread, [&]() {
|
||||
result = REAL(pthread_detach)(thread);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, pthread_exit, void *retval) {
|
||||
asanThreadArgRetval().Finish(GetThreadSelf(), retval);
|
||||
REAL(pthread_exit)(retval);
|
||||
}
|
||||
|
||||
# if ASAN_INTERCEPT_TRYJOIN
|
||||
INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
|
||||
int result;
|
||||
asanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_tryjoin_np)(thread, ret);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
# endif
|
||||
|
||||
# if ASAN_INTERCEPT_TIMEDJOIN
|
||||
INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
|
||||
const struct timespec *abstime) {
|
||||
int result;
|
||||
asanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
# endif
|
||||
|
||||
DEFINE_REAL_PTHREAD_FUNCTIONS
|
||||
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
|
||||
|
||||
|
@ -388,7 +478,7 @@ INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
|
|||
#if ASAN_INTERCEPT_INDEX
|
||||
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
|
||||
INTERCEPTOR(char*, index, const char *string, int c)
|
||||
ALIAS(WRAPPER_NAME(strchr));
|
||||
ALIAS(WRAP(strchr));
|
||||
# else
|
||||
# if SANITIZER_APPLE
|
||||
DECLARE_REAL(char*, index, const char *string, int c)
|
||||
|
@ -445,12 +535,12 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
|
|||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, strcpy);
|
||||
#if SANITIZER_APPLE
|
||||
if (UNLIKELY(!asan_inited))
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return REAL(strcpy)(to, from);
|
||||
#endif
|
||||
// strcpy is called from malloc_default_purgeable_zone()
|
||||
// in __asan::ReplaceSystemAlloc() on Mac.
|
||||
if (asan_init_is_running) {
|
||||
if (AsanInitIsRunning()) {
|
||||
return REAL(strcpy)(to, from);
|
||||
}
|
||||
ENSURE_ASAN_INITED();
|
||||
|
@ -466,7 +556,8 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
|
|||
INTERCEPTOR(char*, strdup, const char *s) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
|
||||
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return internal_strdup(s);
|
||||
ENSURE_ASAN_INITED();
|
||||
uptr length = internal_strlen(s);
|
||||
if (flags()->replace_str) {
|
||||
|
@ -484,7 +575,8 @@ INTERCEPTOR(char*, strdup, const char *s) {
|
|||
INTERCEPTOR(char*, __strdup, const char *s) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
|
||||
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return internal_strdup(s);
|
||||
ENSURE_ASAN_INITED();
|
||||
uptr length = internal_strlen(s);
|
||||
if (flags()->replace_str) {
|
||||
|
@ -512,25 +604,41 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
|
|||
return REAL(strncpy)(to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, strtol);
|
||||
ENSURE_ASAN_INITED();
|
||||
if (!flags()->replace_str) {
|
||||
return REAL(strtol)(nptr, endptr, base);
|
||||
}
|
||||
template <typename Fn>
|
||||
static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
|
||||
char **endptr, int base)
|
||||
-> decltype(real(nullptr, nullptr, 0)) {
|
||||
if (!flags()->replace_str)
|
||||
return real(nptr, endptr, base);
|
||||
char *real_endptr;
|
||||
long result = REAL(strtol)(nptr, &real_endptr, base);
|
||||
auto res = real(nptr, &real_endptr, base);
|
||||
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
|
||||
return result;
|
||||
return res;
|
||||
}
|
||||
|
||||
# define INTERCEPTOR_STRTO_BASE(ret_type, func) \
|
||||
INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
|
||||
void *ctx; \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, func); \
|
||||
ENSURE_ASAN_INITED(); \
|
||||
return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \
|
||||
}
|
||||
|
||||
INTERCEPTOR_STRTO_BASE(long, strtol)
|
||||
INTERCEPTOR_STRTO_BASE(long long, strtoll)
|
||||
|
||||
# if SANITIZER_GLIBC
|
||||
INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol)
|
||||
INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
|
||||
# endif
|
||||
|
||||
INTERCEPTOR(int, atoi, const char *nptr) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
|
||||
#if SANITIZER_APPLE
|
||||
if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
|
||||
#endif
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return REAL(atoi)(nptr);
|
||||
# endif
|
||||
ENSURE_ASAN_INITED();
|
||||
if (!flags()->replace_str) {
|
||||
return REAL(atoi)(nptr);
|
||||
|
@ -550,8 +658,9 @@ INTERCEPTOR(long, atol, const char *nptr) {
|
|||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, atol);
|
||||
#if SANITIZER_APPLE
|
||||
if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
|
||||
#endif
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return REAL(atol)(nptr);
|
||||
# endif
|
||||
ENSURE_ASAN_INITED();
|
||||
if (!flags()->replace_str) {
|
||||
return REAL(atol)(nptr);
|
||||
|
@ -563,20 +672,6 @@ INTERCEPTOR(long, atol, const char *nptr) {
|
|||
return result;
|
||||
}
|
||||
|
||||
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
|
||||
INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
|
||||
ENSURE_ASAN_INITED();
|
||||
if (!flags()->replace_str) {
|
||||
return REAL(strtoll)(nptr, endptr, base);
|
||||
}
|
||||
char *real_endptr;
|
||||
long long result = REAL(strtoll)(nptr, &real_endptr, base);
|
||||
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(long long, atoll, const char *nptr) {
|
||||
void *ctx;
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
|
||||
|
@ -590,7 +685,6 @@ INTERCEPTOR(long long, atoll, const char *nptr) {
|
|||
ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
|
||||
return result;
|
||||
}
|
||||
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
|
||||
|
||||
#if ASAN_INTERCEPT___CXA_ATEXIT || ASAN_INTERCEPT_ATEXIT
|
||||
static void AtCxaAtexit(void *unused) {
|
||||
|
@ -603,8 +697,9 @@ static void AtCxaAtexit(void *unused) {
|
|||
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
|
||||
void *dso_handle) {
|
||||
#if SANITIZER_APPLE
|
||||
if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
|
||||
#endif
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return REAL(__cxa_atexit)(func, arg, dso_handle);
|
||||
# endif
|
||||
ENSURE_ASAN_INITED();
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
__lsan::ScopedInterceptorDisabler disabler;
|
||||
|
@ -656,6 +751,7 @@ void InitializeAsanInterceptors() {
|
|||
static bool was_called_once;
|
||||
CHECK(!was_called_once);
|
||||
was_called_once = true;
|
||||
InitializePlatformInterceptors();
|
||||
InitializeCommonInterceptors();
|
||||
InitializeSignalInterceptors();
|
||||
|
||||
|
@ -674,11 +770,13 @@ void InitializeAsanInterceptors() {
|
|||
|
||||
ASAN_INTERCEPT_FUNC(atoi);
|
||||
ASAN_INTERCEPT_FUNC(atol);
|
||||
ASAN_INTERCEPT_FUNC(strtol);
|
||||
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
|
||||
ASAN_INTERCEPT_FUNC(atoll);
|
||||
ASAN_INTERCEPT_FUNC(strtol);
|
||||
ASAN_INTERCEPT_FUNC(strtoll);
|
||||
#endif
|
||||
# if SANITIZER_GLIBC
|
||||
ASAN_INTERCEPT_FUNC(__isoc23_strtol);
|
||||
ASAN_INTERCEPT_FUNC(__isoc23_strtoll);
|
||||
# endif
|
||||
|
||||
// Intecept jump-related functions.
|
||||
ASAN_INTERCEPT_FUNC(longjmp);
|
||||
|
@ -722,6 +820,16 @@ void InitializeAsanInterceptors() {
|
|||
ASAN_INTERCEPT_FUNC(pthread_create);
|
||||
#endif
|
||||
ASAN_INTERCEPT_FUNC(pthread_join);
|
||||
ASAN_INTERCEPT_FUNC(pthread_detach);
|
||||
ASAN_INTERCEPT_FUNC(pthread_exit);
|
||||
# endif
|
||||
|
||||
# if ASAN_INTERCEPT_TIMEDJOIN
|
||||
ASAN_INTERCEPT_FUNC(pthread_timedjoin_np);
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT_TRYJOIN
|
||||
ASAN_INTERCEPT_FUNC(pthread_tryjoin_np);
|
||||
#endif
|
||||
|
||||
// Intercept atexit function.
|
||||
|
@ -741,8 +849,6 @@ void InitializeAsanInterceptors() {
|
|||
ASAN_INTERCEPT_FUNC(vfork);
|
||||
#endif
|
||||
|
||||
InitializePlatformInterceptors();
|
||||
|
||||
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
|
||||
}
|
||||
|
||||
|
|
|
@ -24,12 +24,12 @@ namespace __asan {
|
|||
void InitializeAsanInterceptors();
|
||||
void InitializePlatformInterceptors();
|
||||
|
||||
#define ENSURE_ASAN_INITED() \
|
||||
do { \
|
||||
CHECK(!asan_init_is_running); \
|
||||
if (UNLIKELY(!asan_inited)) { \
|
||||
AsanInitFromRtl(); \
|
||||
} \
|
||||
#define ENSURE_ASAN_INITED() \
|
||||
do { \
|
||||
CHECK(!AsanInitIsRunning()); \
|
||||
if (UNLIKELY(!AsanInited())) { \
|
||||
AsanInitFromRtl(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
} // namespace __asan
|
||||
|
@ -42,12 +42,10 @@ void InitializePlatformInterceptors();
|
|||
// Use macro to describe if specific function should be
|
||||
// intercepted on a given platform.
|
||||
#if !SANITIZER_WINDOWS
|
||||
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
|
||||
# define ASAN_INTERCEPT__LONGJMP 1
|
||||
# define ASAN_INTERCEPT_INDEX 1
|
||||
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
|
||||
#else
|
||||
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
|
||||
# define ASAN_INTERCEPT__LONGJMP 0
|
||||
# define ASAN_INTERCEPT_INDEX 0
|
||||
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
|
||||
|
@ -78,15 +76,10 @@ void InitializePlatformInterceptors();
|
|||
# define ASAN_INTERCEPT___LONGJMP_CHK 0
|
||||
#endif
|
||||
|
||||
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
|
||||
!SANITIZER_NETBSD
|
||||
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \
|
||||
(!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__)))
|
||||
# define ASAN_INTERCEPT___CXA_THROW 1
|
||||
# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
|
||||
|| ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
|
||||
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
|
||||
# else
|
||||
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
|
||||
# endif
|
||||
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
|
||||
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
|
||||
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
|
||||
# else
|
||||
|
@ -117,6 +110,14 @@ void InitializePlatformInterceptors();
|
|||
# define ASAN_INTERCEPT___STRDUP 0
|
||||
#endif
|
||||
|
||||
#if SANITIZER_GLIBC && ASAN_INTERCEPT_PTHREAD_CREATE
|
||||
# define ASAN_INTERCEPT_TIMEDJOIN 1
|
||||
# define ASAN_INTERCEPT_TRYJOIN 1
|
||||
#else
|
||||
# define ASAN_INTERCEPT_TIMEDJOIN 0
|
||||
# define ASAN_INTERCEPT_TRYJOIN 0
|
||||
#endif
|
||||
|
||||
#if SANITIZER_LINUX && \
|
||||
(defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
|
||||
defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
|
||||
|
@ -163,6 +164,12 @@ DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
|
|||
# define ASAN_INTERCEPT_FUNC(name)
|
||||
# endif // SANITIZER_APPLE
|
||||
|
||||
#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
|
||||
AsanInterceptorContext _ctx = {#func}; \
|
||||
ctx = (void *)&_ctx; \
|
||||
(void) ctx;
|
||||
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
|
||||
|
||||
#endif // !SANITIZER_FUCHSIA
|
||||
|
||||
#endif // ASAN_INTERCEPTORS_H
|
||||
|
|
|
@ -11,13 +11,54 @@
|
|||
// ASan versions of memcpy, memmove, and memset.
|
||||
//===---------------------------------------------------------------------===//
|
||||
|
||||
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
|
||||
|
||||
#include "asan_interceptors_memintrinsics.h"
|
||||
|
||||
#include "asan_interceptors.h"
|
||||
#include "asan_report.h"
|
||||
#include "asan_stack.h"
|
||||
#include "asan_suppressions.h"
|
||||
|
||||
using namespace __asan;
|
||||
|
||||
// memcpy is called during __asan_init() from the internals of printf(...).
|
||||
// We do not treat memcpy with to==from as a bug.
|
||||
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
|
||||
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
if (LIKELY(to != from)) { \
|
||||
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
|
||||
} \
|
||||
ASAN_READ_RANGE(ctx, from, size); \
|
||||
ASAN_WRITE_RANGE(ctx, to, size); \
|
||||
} else if (UNLIKELY(!AsanInited())) { \
|
||||
return internal_memcpy(to, from, size); \
|
||||
} \
|
||||
return REAL(memcpy)(to, from, size); \
|
||||
} while (0)
|
||||
|
||||
// memset is called inside Printf.
|
||||
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
ASAN_WRITE_RANGE(ctx, block, size); \
|
||||
} else if (UNLIKELY(!AsanInited())) { \
|
||||
return internal_memset(block, c, size); \
|
||||
} \
|
||||
return REAL(memset)(block, c, size); \
|
||||
} while (0)
|
||||
|
||||
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
ASAN_READ_RANGE(ctx, from, size); \
|
||||
ASAN_WRITE_RANGE(ctx, to, size); \
|
||||
} \
|
||||
return internal_memmove(to, from, size); \
|
||||
} while (0)
|
||||
|
||||
void *__asan_memcpy(void *to, const void *from, uptr size) {
|
||||
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
|
||||
}
|
||||
|
@ -40,4 +81,26 @@ extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
|
|||
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
|
||||
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
|
||||
|
||||
#else // SANITIZER_FUCHSIA
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
|
||||
ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
|
||||
} while (false)
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
|
||||
ASAN_MEMCPY_IMPL(ctx, to, from, size); \
|
||||
} while (false)
|
||||
|
||||
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
|
||||
do { \
|
||||
ASAN_INTERCEPTOR_ENTER(ctx, memset); \
|
||||
ASAN_MEMSET_IMPL(ctx, block, c, size); \
|
||||
} while (false)
|
||||
|
||||
#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
|
||||
|
||||
#endif // SANITIZER_FUCHSIA
|
||||
|
|
|
@ -79,43 +79,6 @@ struct AsanInterceptorContext {
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
// memcpy is called during __asan_init() from the internals of printf(...).
|
||||
// We do not treat memcpy with to==from as a bug.
|
||||
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
|
||||
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
if (LIKELY(to != from)) { \
|
||||
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
|
||||
} \
|
||||
ASAN_READ_RANGE(ctx, from, size); \
|
||||
ASAN_WRITE_RANGE(ctx, to, size); \
|
||||
} else if (UNLIKELY(!asan_inited)) { \
|
||||
return internal_memcpy(to, from, size); \
|
||||
} \
|
||||
return REAL(memcpy)(to, from, size); \
|
||||
} while (0)
|
||||
|
||||
// memset is called inside Printf.
|
||||
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
ASAN_WRITE_RANGE(ctx, block, size); \
|
||||
} else if (UNLIKELY(!asan_inited)) { \
|
||||
return internal_memset(block, c, size); \
|
||||
} \
|
||||
return REAL(memset)(block, c, size); \
|
||||
} while (0)
|
||||
|
||||
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
|
||||
do { \
|
||||
if (LIKELY(replace_intrin_cached)) { \
|
||||
ASAN_READ_RANGE(ctx, from, size); \
|
||||
ASAN_WRITE_RANGE(ctx, to, size); \
|
||||
} \
|
||||
return internal_memmove(to, from, size); \
|
||||
} while (0)
|
||||
|
||||
#define ASAN_READ_RANGE(ctx, offset, size) \
|
||||
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
|
||||
#define ASAN_WRITE_RANGE(ctx, offset, size) \
|
||||
|
|
|
@ -130,9 +130,8 @@ void InstallAtExitCheckLeaks();
|
|||
if (&__asan_on_error) \
|
||||
__asan_on_error()
|
||||
|
||||
extern int asan_inited;
|
||||
// Used to avoid infinite recursion in __asan_init().
|
||||
extern bool asan_init_is_running;
|
||||
bool AsanInited();
|
||||
bool AsanInitIsRunning(); // Used to avoid infinite recursion in __asan_init().
|
||||
extern bool replace_intrin_cached;
|
||||
extern void (*death_callback)(void);
|
||||
// These magic values are written to shadow for better error
|
||||
|
|
|
@ -130,6 +130,18 @@ typedef void* dispatch_source_t;
|
|||
typedef u64 dispatch_time_t;
|
||||
typedef void (*dispatch_function_t)(void *block);
|
||||
typedef void* (*worker_t)(void *block);
|
||||
typedef unsigned long dispatch_mach_reason;
|
||||
typedef void *dispatch_mach_msg_t;
|
||||
typedef int mach_error_t;
|
||||
typedef void *dispatch_mach_t;
|
||||
|
||||
typedef void (*dispatch_mach_handler_function_t)(void *context,
|
||||
dispatch_mach_reason reason,
|
||||
dispatch_mach_msg_t message,
|
||||
mach_error_t error);
|
||||
typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
|
||||
dispatch_mach_msg_t message,
|
||||
mach_error_t error);
|
||||
|
||||
// A wrapper for the ObjC blocks used to support libdispatch.
|
||||
typedef struct {
|
||||
|
@ -142,8 +154,7 @@ ALWAYS_INLINE
|
|||
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
|
||||
AsanThread *t = GetCurrentThread();
|
||||
if (!t) {
|
||||
t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
|
||||
parent_tid, stack, /* detached */ true);
|
||||
t = AsanThread::Create(parent_tid, stack, /* detached */ true);
|
||||
t->Init();
|
||||
asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker,
|
||||
nullptr);
|
||||
|
@ -160,7 +171,7 @@ void asan_dispatch_call_block_and_release(void *block) {
|
|||
VReport(2,
|
||||
"asan_dispatch_call_block_and_release(): "
|
||||
"context: %p, pthread_self: %p\n",
|
||||
block, pthread_self());
|
||||
block, (void*)pthread_self());
|
||||
asan_register_worker_thread(context->parent_tid, &stack);
|
||||
// Call the original dispatcher for the block.
|
||||
context->func(context->block);
|
||||
|
@ -193,7 +204,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
|
|||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
|
||||
if (Verbosity() >= 2) { \
|
||||
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
|
||||
asan_ctxt, pthread_self()); \
|
||||
(void*)asan_ctxt, (void*)pthread_self()); \
|
||||
PRINT_CURRENT_STACK(); \
|
||||
} \
|
||||
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
|
||||
|
@ -210,7 +221,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
|
|||
GET_STACK_TRACE_THREAD;
|
||||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
||||
if (Verbosity() >= 2) {
|
||||
Report("dispatch_after_f: %p\n", asan_ctxt);
|
||||
Report("dispatch_after_f: %p\n", (void*)asan_ctxt);
|
||||
PRINT_CURRENT_STACK();
|
||||
}
|
||||
return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
|
||||
|
@ -224,7 +235,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
|
|||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
||||
if (Verbosity() >= 2) {
|
||||
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
|
||||
asan_ctxt, pthread_self());
|
||||
(void*)asan_ctxt, (void*)pthread_self());
|
||||
PRINT_CURRENT_STACK();
|
||||
}
|
||||
REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
|
||||
|
@ -241,6 +252,8 @@ void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
|
|||
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
|
||||
void(^work)(void));
|
||||
void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
|
||||
dispatch_mach_t dispatch_mach_create(const char *label, dispatch_queue_t queue,
|
||||
dispatch_mach_handler_t handler);
|
||||
}
|
||||
|
||||
#define GET_ASAN_BLOCK(work) \
|
||||
|
@ -290,6 +303,34 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
|
|||
GET_ASAN_BLOCK(work);
|
||||
REAL(dispatch_source_set_event_handler)(ds, asan_block);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, dispatch_mach_create, const char *label,
|
||||
dispatch_queue_t dq, dispatch_mach_handler_t handler) {
|
||||
int parent_tid = GetCurrentTidOrInvalid();
|
||||
return REAL(dispatch_mach_create)(
|
||||
label, dq,
|
||||
^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
|
||||
mach_error_t error) {
|
||||
GET_STACK_TRACE_THREAD;
|
||||
asan_register_worker_thread(parent_tid, &stack);
|
||||
handler(reason, message, error);
|
||||
});
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, dispatch_mach_create_f, const char *label,
|
||||
dispatch_queue_t dq, void *ctxt,
|
||||
dispatch_mach_handler_function_t handler) {
|
||||
int parent_tid = GetCurrentTidOrInvalid();
|
||||
return REAL(dispatch_mach_create)(
|
||||
label, dq,
|
||||
^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
|
||||
mach_error_t error) {
|
||||
GET_STACK_TRACE_THREAD;
|
||||
asan_register_worker_thread(parent_tid, &stack);
|
||||
handler(ctxt, reason, message, error);
|
||||
});
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_APPLE
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
using namespace __asan;
|
||||
|
||||
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
|
||||
static bool UseImpl() { return asan_init_is_running; }
|
||||
static bool UseImpl() { return AsanInitIsRunning(); }
|
||||
static void OnAllocate(const void *ptr, uptr size) {
|
||||
# if CAN_SANITIZE_LEAKS
|
||||
// Suppress leaks from dlerror(). Previously dlsym hack on global array was
|
||||
|
|
|
@ -23,45 +23,44 @@
|
|||
using namespace __asan;
|
||||
#define COMMON_MALLOC_ZONE_NAME "asan"
|
||||
#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
|
||||
#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited
|
||||
#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
|
||||
#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
|
||||
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
|
||||
#define COMMON_MALLOC_MALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_malloc(size, &stack)
|
||||
#define COMMON_MALLOC_REALLOC(ptr, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_realloc(ptr, size, &stack);
|
||||
#define COMMON_MALLOC_CALLOC(count, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_calloc(count, size, &stack);
|
||||
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
int res = asan_posix_memalign(memptr, alignment, size, &stack);
|
||||
#define COMMON_MALLOC_VALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
|
||||
#define COMMON_MALLOC_FREE(ptr) \
|
||||
GET_STACK_TRACE_FREE; \
|
||||
asan_free(ptr, &stack, FROM_MALLOC);
|
||||
#define COMMON_MALLOC_SIZE(ptr) \
|
||||
uptr size = asan_mz_size(ptr);
|
||||
#define COMMON_MALLOC_FILL_STATS(zone, stats) \
|
||||
AsanMallocStats malloc_stats; \
|
||||
FillMallocStatistics(&malloc_stats); \
|
||||
CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
|
||||
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
|
||||
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
|
||||
GET_STACK_TRACE_FREE; \
|
||||
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
|
||||
#define COMMON_MALLOC_NAMESPACE __asan
|
||||
#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
|
||||
#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
|
||||
# define COMMON_MALLOC_SANITIZER_INITIALIZED AsanInited()
|
||||
# define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
|
||||
# define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
|
||||
# define COMMON_MALLOC_MEMALIGN(alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
|
||||
# define COMMON_MALLOC_MALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_malloc(size, &stack)
|
||||
# define COMMON_MALLOC_REALLOC(ptr, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_realloc(ptr, size, &stack);
|
||||
# define COMMON_MALLOC_CALLOC(count, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_calloc(count, size, &stack);
|
||||
# define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
int res = asan_posix_memalign(memptr, alignment, size, &stack);
|
||||
# define COMMON_MALLOC_VALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
|
||||
# define COMMON_MALLOC_FREE(ptr) \
|
||||
GET_STACK_TRACE_FREE; \
|
||||
asan_free(ptr, &stack, FROM_MALLOC);
|
||||
# define COMMON_MALLOC_SIZE(ptr) uptr size = asan_mz_size(ptr);
|
||||
# define COMMON_MALLOC_FILL_STATS(zone, stats) \
|
||||
AsanMallocStats malloc_stats; \
|
||||
FillMallocStatistics(&malloc_stats); \
|
||||
CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
|
||||
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
|
||||
# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
|
||||
GET_STACK_TRACE_FREE; \
|
||||
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
|
||||
# define COMMON_MALLOC_NAMESPACE __asan
|
||||
# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
|
||||
# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
|
||||
|
||||
#include "sanitizer_common/sanitizer_malloc_mac.inc"
|
||||
# include "sanitizer_common/sanitizer_malloc_mac.inc"
|
||||
|
||||
namespace COMMON_MALLOC_NAMESPACE {
|
||||
|
||||
|
|
|
@ -211,7 +211,7 @@ INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags,
|
|||
// interception takes place, so if it is not owned by the RTL heap we can
|
||||
// pass it to the ASAN heap for inspection.
|
||||
if (flags()->windows_hook_rtl_allocators) {
|
||||
if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem))
|
||||
if (!AsanInited() || OWNED_BY_RTL(hHeap, lpMem))
|
||||
return REAL(HeapSize)(hHeap, dwFlags, lpMem);
|
||||
} else {
|
||||
CHECK(dwFlags == 0 && "unsupported heap flags");
|
||||
|
@ -226,7 +226,7 @@ INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
|
|||
// If the ASAN runtime is not initialized, or we encounter an unsupported
|
||||
// flag, fall back to the original allocator.
|
||||
if (flags()->windows_hook_rtl_allocators) {
|
||||
if (UNLIKELY(!asan_inited ||
|
||||
if (UNLIKELY(!AsanInited() ||
|
||||
(dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
|
||||
return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes);
|
||||
}
|
||||
|
@ -297,7 +297,7 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc,
|
|||
|
||||
// If this heap block which was allocated before the ASAN
|
||||
// runtime came up, use the real HeapFree function.
|
||||
if (UNLIKELY(!asan_inited)) {
|
||||
if (UNLIKELY(!AsanInited())) {
|
||||
return reallocFunc(hHeap, dwFlags, lpMem, dwBytes);
|
||||
}
|
||||
bool only_asan_supported_flags =
|
||||
|
@ -420,7 +420,7 @@ size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress);
|
|||
INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags,
|
||||
void* BaseAddress) {
|
||||
if (!flags()->windows_hook_rtl_allocators ||
|
||||
UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
|
||||
UNLIKELY(!AsanInited() || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
|
||||
return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress);
|
||||
}
|
||||
GET_CURRENT_PC_BP_SP;
|
||||
|
@ -448,7 +448,7 @@ INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags,
|
|||
// If the ASAN runtime is not initialized, or we encounter an unsupported
|
||||
// flag, fall back to the original allocator.
|
||||
if (!flags()->windows_hook_rtl_allocators ||
|
||||
UNLIKELY(!asan_inited ||
|
||||
UNLIKELY(!AsanInited() ||
|
||||
(Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
|
||||
return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size);
|
||||
}
|
||||
|
|
|
@ -190,7 +190,7 @@
|
|||
# elif defined(__aarch64__)
|
||||
# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
|
||||
# elif defined(__powerpc64__)
|
||||
# define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
|
||||
# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
|
||||
# elif defined(__s390x__)
|
||||
# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
|
||||
# elif SANITIZER_FREEBSD
|
||||
|
|
|
@ -160,10 +160,6 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
|
|||
return;
|
||||
}
|
||||
CHECK_LT(beg.chunk, end.chunk);
|
||||
if (beg.offset > 0) {
|
||||
*beg.chunk = 0;
|
||||
beg.chunk++;
|
||||
}
|
||||
REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
|
||||
if (end.offset > 0 && end.value != 0) {
|
||||
*end.chunk = Max(end.value, end.offset);
|
||||
|
@ -449,11 +445,14 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
|
|||
// FIXME: Two of these three checks are disabled until we fix
|
||||
// https://github.com/google/sanitizers/issues/258.
|
||||
// if (d1 != d2)
|
||||
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
|
||||
if (a + granularity <= d1)
|
||||
CHECK_EQ(*(u8 *)MemToShadow(a), 0);
|
||||
// DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
|
||||
//
|
||||
// NOTE: curly brackets for the "if" below to silence a MSVC warning.
|
||||
if (a + granularity <= d1) {
|
||||
DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
|
||||
}
|
||||
// if (d2 + granularity <= c && c <= end)
|
||||
// CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
|
||||
// DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
|
||||
// kAsanContiguousContainerOOBMagic);
|
||||
|
||||
uptr b1 = RoundDownTo(new_end, granularity);
|
||||
|
|
|
@ -138,6 +138,12 @@ void PlatformTSDDtor(void *tsd) {
|
|||
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
|
||||
return;
|
||||
}
|
||||
# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
|
||||
SANITIZER_SOLARIS
|
||||
// After this point it's unsafe to execute signal handlers which may be
|
||||
// instrumented. It's probably not just a Linux issue.
|
||||
BlockSignals();
|
||||
# endif
|
||||
AsanThread::TSDDtor(tsd);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -60,9 +60,9 @@ void AppendToErrorMessageBuffer(const char *buffer) {
|
|||
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
|
||||
bool in_shadow, const char *after) {
|
||||
Decorator d;
|
||||
str->append("%s%s%x%x%s%s", before,
|
||||
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
|
||||
byte & 15, d.Default(), after);
|
||||
str->AppendF("%s%s%x%x%s%s", before,
|
||||
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
|
||||
byte & 15, d.Default(), after);
|
||||
}
|
||||
|
||||
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
|
||||
|
|
|
@ -35,7 +35,8 @@ int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
|
|||
|
||||
const char *MaybeDemangleGlobalName(const char *name);
|
||||
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
|
||||
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
|
||||
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
|
||||
bool print_module_name);
|
||||
|
||||
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
|
||||
bool in_shadow, const char *after = "\n");
|
||||
|
|
|
@ -71,8 +71,17 @@ static void CheckUnwind() {
|
|||
}
|
||||
|
||||
// -------------------------- Globals --------------------- {{{1
|
||||
int asan_inited;
|
||||
bool asan_init_is_running;
|
||||
static int asan_inited = 0;
|
||||
static int asan_init_is_running = 0;
|
||||
|
||||
void SetAsanInited(u32 val) { asan_inited = val; }
|
||||
|
||||
void SetAsanInitIsRunning(u32 val) { asan_init_is_running = val; }
|
||||
|
||||
bool AsanInited() { return asan_inited == 1; }
|
||||
|
||||
bool AsanInitIsRunning() { return asan_init_is_running == 1; }
|
||||
|
||||
bool replace_intrin_cached;
|
||||
|
||||
#if !ASAN_FIXED_MAPPING
|
||||
|
@ -382,10 +391,11 @@ void PrintAddressSpaceLayout() {
|
|||
}
|
||||
|
||||
static void AsanInitInternal() {
|
||||
if (LIKELY(asan_inited)) return;
|
||||
if (LIKELY(AsanInited()))
|
||||
return;
|
||||
SanitizerToolName = "AddressSanitizer";
|
||||
CHECK(!asan_init_is_running && "ASan init calls itself!");
|
||||
asan_init_is_running = true;
|
||||
CHECK(!AsanInitIsRunning() && "ASan init calls itself!");
|
||||
SetAsanInitIsRunning(1);
|
||||
|
||||
CacheBinaryName();
|
||||
|
||||
|
@ -398,7 +408,7 @@ static void AsanInitInternal() {
|
|||
// Stop performing init at this point if we are being loaded via
|
||||
// dlopen() and the platform supports it.
|
||||
if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) {
|
||||
asan_init_is_running = false;
|
||||
SetAsanInitIsRunning(0);
|
||||
VReport(1, "AddressSanitizer init is being performed for dlopen().\n");
|
||||
return;
|
||||
}
|
||||
|
@ -460,8 +470,8 @@ static void AsanInitInternal() {
|
|||
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
|
||||
// should be set to 1 prior to initializing the threads.
|
||||
replace_intrin_cached = flags()->replace_intrin;
|
||||
asan_inited = 1;
|
||||
asan_init_is_running = false;
|
||||
SetAsanInited(1);
|
||||
SetAsanInitIsRunning(0);
|
||||
|
||||
if (flags()->atexit)
|
||||
Atexit(asan_atexit);
|
||||
|
@ -583,7 +593,7 @@ static void UnpoisonFakeStack() {
|
|||
using namespace __asan;
|
||||
|
||||
void NOINLINE __asan_handle_no_return() {
|
||||
if (asan_init_is_running)
|
||||
if (AsanInitIsRunning())
|
||||
return;
|
||||
|
||||
if (!PlatformUnpoisonStacks())
|
||||
|
|
|
@ -35,35 +35,29 @@ RLABEL(reg, op, s, add): ;\
|
|||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \
|
||||
CLABEL(reg, op, 1, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
mov %##reg,%r11 ;\
|
||||
and $0x7,%r11d ;\
|
||||
cmp %r10d,%r11d ;\
|
||||
jl RLABEL(reg, op, 1, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##1_asm ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \
|
||||
CLABEL(reg, op, 2, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
add $0x1,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
mov %##reg,%r11 ;\
|
||||
and $0x7,%r11d ;\
|
||||
add $0x1,%r11d ;\
|
||||
cmp %r10d,%r11d ;\
|
||||
jl RLABEL(reg, op, 2, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##2_asm ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \
|
||||
CLABEL(reg, op, 4, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
add $0x3,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
mov %##reg,%r11 ;\
|
||||
and $0x7,%r11d ;\
|
||||
add $0x3,%r11d ;\
|
||||
cmp %r10d,%r11d ;\
|
||||
jl RLABEL(reg, op, 4, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##4_asm ;\
|
||||
|
|
|
@ -57,7 +57,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
|
|||
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
|
||||
using namespace __asan;
|
||||
size = 0;
|
||||
if (UNLIKELY(!asan_inited))
|
||||
if (UNLIKELY(!AsanInited()))
|
||||
return;
|
||||
request_fast = StackTrace::WillUseFastUnwind(request_fast);
|
||||
AsanThread *t = GetCurrentThread();
|
||||
|
|
|
@ -32,24 +32,24 @@ u32 GetMallocContextSize();
|
|||
// as early as possible (in functions exposed to the user), as we generally
|
||||
// don't want stack trace to contain functions from ASan internals.
|
||||
|
||||
#define GET_STACK_TRACE(max_size, fast) \
|
||||
BufferedStackTrace stack; \
|
||||
if (max_size <= 2) { \
|
||||
stack.size = max_size; \
|
||||
if (max_size > 0) { \
|
||||
stack.top_frame_bp = GET_CURRENT_FRAME(); \
|
||||
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
|
||||
if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
|
||||
} \
|
||||
} else { \
|
||||
stack.Unwind(StackTrace::GetCurrentPc(), \
|
||||
GET_CURRENT_FRAME(), nullptr, fast, max_size); \
|
||||
#define GET_STACK_TRACE(max_size, fast) \
|
||||
UNINITIALIZED BufferedStackTrace stack; \
|
||||
if (max_size <= 2) { \
|
||||
stack.size = max_size; \
|
||||
if (max_size > 0) { \
|
||||
stack.top_frame_bp = GET_CURRENT_FRAME(); \
|
||||
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
|
||||
if (max_size > 1) \
|
||||
stack.trace_buffer[1] = GET_CALLER_PC(); \
|
||||
} \
|
||||
} else { \
|
||||
stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
|
||||
fast, max_size); \
|
||||
}
|
||||
|
||||
#define GET_STACK_TRACE_FATAL(pc, bp) \
|
||||
BufferedStackTrace stack; \
|
||||
stack.Unwind(pc, bp, nullptr, \
|
||||
common_flags()->fast_unwind_on_fatal)
|
||||
#define GET_STACK_TRACE_FATAL(pc, bp) \
|
||||
UNINITIALIZED BufferedStackTrace stack; \
|
||||
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
|
||||
|
||||
#define GET_STACK_TRACE_FATAL_HERE \
|
||||
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
|
||||
|
|
|
@ -142,7 +142,7 @@ uptr __sanitizer_get_current_allocated_bytes() {
|
|||
uptr freed = stats.freed;
|
||||
// Return sane value if malloced < freed due to racy
|
||||
// way we update accumulated stats.
|
||||
return (malloced > freed) ? malloced - freed : 1;
|
||||
return (malloced > freed) ? malloced - freed : 0;
|
||||
}
|
||||
|
||||
uptr __sanitizer_get_heap_size() {
|
||||
|
@ -161,7 +161,7 @@ uptr __sanitizer_get_free_bytes() {
|
|||
+ stats.malloced_redzones;
|
||||
// Return sane value if total_free < total_used due to racy
|
||||
// way we update accumulated stats.
|
||||
return (total_free > total_used) ? total_free - total_used : 1;
|
||||
return (total_free > total_used) ? total_free - total_used : 0;
|
||||
}
|
||||
|
||||
uptr __sanitizer_get_unmapped_bytes() {
|
||||
|
|
|
@ -28,7 +28,7 @@ namespace __asan {
|
|||
// AsanThreadContext implementation.
|
||||
|
||||
void AsanThreadContext::OnCreated(void *arg) {
|
||||
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
|
||||
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
|
||||
if (args->stack)
|
||||
stack_id = StackDepotPut(*args->stack);
|
||||
thread = args->thread;
|
||||
|
@ -40,34 +40,49 @@ void AsanThreadContext::OnFinished() {
|
|||
thread = nullptr;
|
||||
}
|
||||
|
||||
// MIPS requires aligned address
|
||||
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
|
||||
static ThreadRegistry *asan_thread_registry;
|
||||
static ThreadArgRetval *thread_data;
|
||||
|
||||
static Mutex mu_for_thread_context;
|
||||
static LowLevelAllocator allocator_for_thread_context;
|
||||
|
||||
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
|
||||
Lock lock(&mu_for_thread_context);
|
||||
return new(allocator_for_thread_context) AsanThreadContext(tid);
|
||||
return new (GetGlobalLowLevelAllocator()) AsanThreadContext(tid);
|
||||
}
|
||||
|
||||
ThreadRegistry &asanThreadRegistry() {
|
||||
static void InitThreads() {
|
||||
static bool initialized;
|
||||
// Don't worry about thread_safety - this should be called when there is
|
||||
// a single thread.
|
||||
if (!initialized) {
|
||||
// Never reuse ASan threads: we store pointer to AsanThreadContext
|
||||
// in TSD and can't reliably tell when no more TSD destructors will
|
||||
// be called. It would be wrong to reuse AsanThreadContext for another
|
||||
// thread before all TSD destructors will be called for it.
|
||||
asan_thread_registry =
|
||||
new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
|
||||
initialized = true;
|
||||
}
|
||||
if (LIKELY(initialized))
|
||||
return;
|
||||
// Never reuse ASan threads: we store pointer to AsanThreadContext
|
||||
// in TSD and can't reliably tell when no more TSD destructors will
|
||||
// be called. It would be wrong to reuse AsanThreadContext for another
|
||||
// thread before all TSD destructors will be called for it.
|
||||
|
||||
// MIPS requires aligned address
|
||||
static ALIGNED(alignof(
|
||||
ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
|
||||
static ALIGNED(alignof(
|
||||
ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
|
||||
|
||||
asan_thread_registry =
|
||||
new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
|
||||
thread_data = new (thread_data_placeholder) ThreadArgRetval();
|
||||
initialized = true;
|
||||
}
|
||||
|
||||
ThreadRegistry &asanThreadRegistry() {
|
||||
InitThreads();
|
||||
return *asan_thread_registry;
|
||||
}
|
||||
|
||||
ThreadArgRetval &asanThreadArgRetval() {
|
||||
InitThreads();
|
||||
return *thread_data;
|
||||
}
|
||||
|
||||
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
|
||||
return static_cast<AsanThreadContext *>(
|
||||
asanThreadRegistry().GetThreadLocked(tid));
|
||||
|
@ -75,22 +90,29 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
|
|||
|
||||
// AsanThread implementation.
|
||||
|
||||
AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
|
||||
AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
|
||||
u32 parent_tid, StackTrace *stack,
|
||||
bool detached) {
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
|
||||
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
|
||||
thread->start_routine_ = start_routine;
|
||||
thread->arg_ = arg;
|
||||
AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
|
||||
if (data_size) {
|
||||
uptr availible_size = (uptr)thread + size - (uptr)(thread->start_data_);
|
||||
CHECK_LE(data_size, availible_size);
|
||||
internal_memcpy(thread->start_data_, start_data, data_size);
|
||||
}
|
||||
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
|
||||
asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
|
||||
|
||||
return thread;
|
||||
}
|
||||
|
||||
void AsanThread::GetStartData(void *out, uptr out_size) const {
|
||||
internal_memcpy(out, start_data_, out_size);
|
||||
}
|
||||
|
||||
void AsanThread::TSDDtor(void *tsd) {
|
||||
AsanThreadContext *context = (AsanThreadContext*)tsd;
|
||||
AsanThreadContext *context = (AsanThreadContext *)tsd;
|
||||
VReport(1, "T%d TSDDtor\n", context->tid);
|
||||
if (context->thread)
|
||||
context->thread->Destroy();
|
||||
|
@ -144,8 +166,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
|
|||
current_fake_stack->Destroy(this->tid());
|
||||
}
|
||||
|
||||
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
|
||||
uptr *bottom_old,
|
||||
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
|
||||
uptr *size_old) {
|
||||
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
|
||||
Report("ERROR: finishing a fiber switch that has not started\n");
|
||||
|
@ -171,7 +192,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
|
|||
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
|
||||
if (!atomic_load(&stack_switching_, memory_order_acquire)) {
|
||||
// Make sure the stack bounds are fully initialized.
|
||||
if (stack_bottom_ >= stack_top_) return {0, 0};
|
||||
if (stack_bottom_ >= stack_top_)
|
||||
return {0, 0};
|
||||
return {stack_bottom_, stack_top_};
|
||||
}
|
||||
char local;
|
||||
|
@ -184,13 +206,9 @@ inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
|
|||
return {stack_bottom_, stack_top_};
|
||||
}
|
||||
|
||||
uptr AsanThread::stack_top() {
|
||||
return GetStackBounds().top;
|
||||
}
|
||||
uptr AsanThread::stack_top() { return GetStackBounds().top; }
|
||||
|
||||
uptr AsanThread::stack_bottom() {
|
||||
return GetStackBounds().bottom;
|
||||
}
|
||||
uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }
|
||||
|
||||
uptr AsanThread::stack_size() {
|
||||
const auto bounds = GetStackBounds();
|
||||
|
@ -211,8 +229,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
|
|||
// This CAS checks if the state was 0 and if so changes it to state 1,
|
||||
// if that was successful, it initializes the pointer.
|
||||
if (atomic_compare_exchange_strong(
|
||||
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
|
||||
memory_order_relaxed)) {
|
||||
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
|
||||
memory_order_relaxed)) {
|
||||
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
|
||||
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
|
||||
stack_size_log =
|
||||
|
@ -261,36 +279,17 @@ void AsanThread::Init(const InitOptions *options) {
|
|||
// asan_fuchsia.c definies CreateMainThread and SetThreadStackAndTls.
|
||||
#if !SANITIZER_FUCHSIA
|
||||
|
||||
thread_return_t AsanThread::ThreadStart(tid_t os_id) {
|
||||
void AsanThread::ThreadStart(tid_t os_id) {
|
||||
Init();
|
||||
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
|
||||
|
||||
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
|
||||
|
||||
if (!start_routine_) {
|
||||
// start_routine_ == 0 if we're on the main thread or on one of the
|
||||
// OS X libdispatch worker threads. But nobody is supposed to call
|
||||
// ThreadStart() for the worker threads.
|
||||
CHECK_EQ(tid(), 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
thread_return_t res = start_routine_(arg_);
|
||||
|
||||
// On POSIX systems we defer this to the TSD destructor. LSan will consider
|
||||
// the thread's memory as non-live from the moment we call Destroy(), even
|
||||
// though that memory might contain pointers to heap objects which will be
|
||||
// cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
|
||||
// the TSD destructors have run might cause false positives in LSan.
|
||||
if (!SANITIZER_POSIX)
|
||||
this->Destroy();
|
||||
|
||||
return res;
|
||||
if (common_flags()->use_sigaltstack)
|
||||
SetAlternateSignalStack();
|
||||
}
|
||||
|
||||
AsanThread *CreateMainThread() {
|
||||
AsanThread *main_thread = AsanThread::Create(
|
||||
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
|
||||
/* parent_tid */ kMainTid,
|
||||
/* stack */ nullptr, /* detached */ true);
|
||||
SetCurrentThread(main_thread);
|
||||
main_thread->ThreadStart(internal_getpid());
|
||||
|
@ -341,14 +340,14 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
|
|||
bottom = fake_stack->AddrIsInFakeStack(addr);
|
||||
CHECK(bottom);
|
||||
access->offset = addr - bottom;
|
||||
access->frame_pc = ((uptr*)bottom)[2];
|
||||
access->frame_descr = (const char *)((uptr*)bottom)[1];
|
||||
access->frame_pc = ((uptr *)bottom)[2];
|
||||
access->frame_descr = (const char *)((uptr *)bottom)[1];
|
||||
return true;
|
||||
}
|
||||
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
|
||||
uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
|
||||
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
|
||||
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
|
||||
u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
|
||||
u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
|
||||
|
||||
while (shadow_ptr >= shadow_bottom &&
|
||||
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
|
||||
|
@ -370,7 +369,7 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
|
|||
CHECK(ptr[0] == kCurrentStackFrameMagic);
|
||||
access->offset = addr - (uptr)ptr;
|
||||
access->frame_pc = ptr[2];
|
||||
access->frame_descr = (const char*)ptr[1];
|
||||
access->frame_descr = (const char *)ptr[1];
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -388,8 +387,8 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
|
|||
}
|
||||
|
||||
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
|
||||
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
|
||||
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
|
||||
u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
|
||||
u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
|
||||
|
||||
while (shadow_ptr >= shadow_bottom &&
|
||||
(*shadow_ptr != kAsanStackLeftRedzoneMagic &&
|
||||
|
@ -473,16 +472,23 @@ void EnsureMainThreadIDIsCorrect() {
|
|||
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
|
||||
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
|
||||
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
|
||||
if (!context) return nullptr;
|
||||
if (!context)
|
||||
return nullptr;
|
||||
return context->thread;
|
||||
}
|
||||
} // namespace __asan
|
||||
} // namespace __asan
|
||||
|
||||
// --- Implementation of LSan-specific functions --- {{{1
|
||||
namespace __lsan {
|
||||
void LockThreadRegistry() { __asan::asanThreadRegistry().Lock(); }
|
||||
void LockThreads() {
|
||||
__asan::asanThreadRegistry().Lock();
|
||||
__asan::asanThreadArgRetval().Lock();
|
||||
}
|
||||
|
||||
void UnlockThreadRegistry() { __asan::asanThreadRegistry().Unlock(); }
|
||||
void UnlockThreads() {
|
||||
__asan::asanThreadArgRetval().Unlock();
|
||||
__asan::asanThreadRegistry().Unlock();
|
||||
}
|
||||
|
||||
static ThreadRegistry *GetAsanThreadRegistryLocked() {
|
||||
__asan::asanThreadRegistry().CheckLocked();
|
||||
|
@ -495,7 +501,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
|
|||
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
|
||||
uptr *cache_end, DTLS **dtls) {
|
||||
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
|
||||
if (!t) return false;
|
||||
if (!t)
|
||||
return false;
|
||||
*stack_begin = t->stack_bottom();
|
||||
*stack_end = t->stack_top();
|
||||
*tls_begin = t->tls_begin();
|
||||
|
@ -536,33 +543,7 @@ void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
|
|||
}
|
||||
|
||||
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
|
||||
GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
|
||||
[](ThreadContextBase *tctx, void *ptrs) {
|
||||
// Look for the arg pointer of threads that have been created or are
|
||||
// running. This is necessary to prevent false positive leaks due to the
|
||||
// AsanThread holding the only live reference to a heap object. This
|
||||
// can happen because the `pthread_create()` interceptor doesn't wait
|
||||
// for the child thread to start before returning and thus loosing the
|
||||
// the only live reference to the heap object on the stack.
|
||||
|
||||
__asan::AsanThreadContext *atctx =
|
||||
static_cast<__asan::AsanThreadContext *>(tctx);
|
||||
|
||||
// Note ThreadStatusRunning is required because there is a small window
|
||||
// where the thread status switches to `ThreadStatusRunning` but the
|
||||
// `arg` pointer still isn't on the stack yet.
|
||||
if (atctx->status != ThreadStatusCreated &&
|
||||
atctx->status != ThreadStatusRunning)
|
||||
return;
|
||||
|
||||
uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
|
||||
if (!thread_arg)
|
||||
return;
|
||||
|
||||
auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
|
||||
ptrsVec->push_back(thread_arg);
|
||||
},
|
||||
ptrs);
|
||||
__asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
|
||||
}
|
||||
|
||||
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
|
||||
|
@ -575,11 +556,7 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
|
|||
threads);
|
||||
}
|
||||
|
||||
void FinishThreadLocked(u32 tid) {
|
||||
GetAsanThreadRegistryLocked()->FinishThread(tid);
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
} // namespace __lsan
|
||||
|
||||
// ---------------------- Interface ---------------- {{{1
|
||||
using namespace __asan;
|
||||
|
@ -593,20 +570,18 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
|
|||
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
|
||||
return;
|
||||
}
|
||||
t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
|
||||
t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_finish_switch_fiber(void* fakestack,
|
||||
const void **bottom_old,
|
||||
void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
|
||||
uptr *size_old) {
|
||||
AsanThread *t = GetCurrentThread();
|
||||
if (!t) {
|
||||
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
|
||||
return;
|
||||
}
|
||||
t->FinishSwitchFiber((FakeStack*)fakestack,
|
||||
(uptr*)bottom_old,
|
||||
(uptr*)size_old);
|
||||
t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
|
||||
(uptr *)size_old);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,11 +15,12 @@
|
|||
#define ASAN_THREAD_H
|
||||
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_fake_stack.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_stats.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
|
||||
#include "sanitizer_common/sanitizer_thread_registry.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
@ -55,18 +56,32 @@ class AsanThreadContext final : public ThreadContextBase {
|
|||
// AsanThreadContext objects are never freed, so we need many of them.
|
||||
COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
|
||||
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
// MSVC raises a warning about a nonstandard extension being used for the 0
|
||||
// sized element in this array. Disable this for warn-as-error builds.
|
||||
# pragma warning(push)
|
||||
# pragma warning(disable : 4200)
|
||||
#endif
|
||||
|
||||
// AsanThread are stored in TSD and destroyed when the thread dies.
|
||||
class AsanThread {
|
||||
public:
|
||||
static AsanThread *Create(thread_callback_t start_routine, void *arg,
|
||||
u32 parent_tid, StackTrace *stack, bool detached);
|
||||
template <typename T>
|
||||
static AsanThread *Create(const T &data, u32 parent_tid, StackTrace *stack,
|
||||
bool detached) {
|
||||
return Create(&data, sizeof(data), parent_tid, stack, detached);
|
||||
}
|
||||
static AsanThread *Create(u32 parent_tid, StackTrace *stack, bool detached) {
|
||||
return Create(nullptr, 0, parent_tid, stack, detached);
|
||||
}
|
||||
static void TSDDtor(void *tsd);
|
||||
void Destroy();
|
||||
|
||||
struct InitOptions;
|
||||
void Init(const InitOptions *options = nullptr);
|
||||
|
||||
thread_return_t ThreadStart(tid_t os_id);
|
||||
void ThreadStart(tid_t os_id);
|
||||
thread_return_t RunThread();
|
||||
|
||||
uptr stack_top();
|
||||
uptr stack_bottom();
|
||||
|
@ -129,12 +144,18 @@ class AsanThread {
|
|||
|
||||
void *extra_spill_area() { return &extra_spill_area_; }
|
||||
|
||||
void *get_arg() { return arg_; }
|
||||
template <typename T>
|
||||
void GetStartData(T &data) const {
|
||||
GetStartData(&data, sizeof(data));
|
||||
}
|
||||
|
||||
private:
|
||||
// NOTE: There is no AsanThread constructor. It is allocated
|
||||
// via mmap() and *must* be valid in zero-initialized state.
|
||||
|
||||
static AsanThread *Create(const void *start_data, uptr data_size,
|
||||
u32 parent_tid, StackTrace *stack, bool detached);
|
||||
|
||||
void SetThreadStackAndTls(const InitOptions *options);
|
||||
|
||||
void ClearShadowForThreadStackAndTLS();
|
||||
|
@ -146,9 +167,9 @@ class AsanThread {
|
|||
};
|
||||
StackBounds GetStackBounds() const;
|
||||
|
||||
void GetStartData(void *out, uptr out_size) const;
|
||||
|
||||
AsanThreadContext *context_;
|
||||
thread_callback_t start_routine_;
|
||||
void *arg_;
|
||||
|
||||
uptr stack_top_;
|
||||
uptr stack_bottom_;
|
||||
|
@ -167,10 +188,17 @@ class AsanThread {
|
|||
AsanStats stats_;
|
||||
bool unwinding_;
|
||||
uptr extra_spill_area_;
|
||||
|
||||
char start_data_[];
|
||||
};
|
||||
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
# pragma warning(pop)
|
||||
#endif
|
||||
|
||||
// Returns a single instance of registry.
|
||||
ThreadRegistry &asanThreadRegistry();
|
||||
ThreadArgRetval &asanThreadArgRetval();
|
||||
|
||||
// Must be called under ThreadRegistryLock.
|
||||
AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
|
||||
|
|
|
@ -131,10 +131,22 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
|||
}
|
||||
#endif
|
||||
|
||||
struct ThreadStartParams {
|
||||
thread_callback_t start_routine;
|
||||
void *arg;
|
||||
};
|
||||
|
||||
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
|
||||
AsanThread *t = (AsanThread *)arg;
|
||||
SetCurrentThread(t);
|
||||
return t->ThreadStart(GetTid());
|
||||
t->ThreadStart(GetTid());
|
||||
|
||||
ThreadStartParams params;
|
||||
t->GetStartData(params);
|
||||
|
||||
auto res = (*params.start_routine)(params.arg);
|
||||
t->Destroy(); // POSIX calls this from TSD destructor.
|
||||
return res;
|
||||
}
|
||||
|
||||
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
|
||||
|
@ -148,8 +160,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
|
|||
// one. This is a bandaid fix for PR22025.
|
||||
bool detached = false; // FIXME: how can we determine it on Windows?
|
||||
u32 current_tid = GetCurrentTidOrInvalid();
|
||||
AsanThread *t =
|
||||
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
|
||||
ThreadStartParams params = {start_routine, arg};
|
||||
AsanThread *t = AsanThread::Create(params, current_tid, &stack, detached);
|
||||
return REAL(CreateThread)(security, stack_size, asan_thread_start, t,
|
||||
thr_flags, tid);
|
||||
}
|
||||
|
@ -159,6 +171,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
|
|||
namespace __asan {
|
||||
|
||||
void InitializePlatformInterceptors() {
|
||||
__interception::SetErrorReportCallback(Report);
|
||||
|
||||
// The interceptors were not designed to be removable, so we have to keep this
|
||||
// module alive for the life of the process.
|
||||
HMODULE pinned;
|
||||
|
@ -194,9 +208,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
|
|||
}
|
||||
|
||||
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
|
||||
// Only asan on 64-bit Windows supports committing shadow memory on demand.
|
||||
#if SANITIZER_WINDOWS64
|
||||
// Since asan's mapping is compacting, the shadow chunk may be
|
||||
// not page-aligned, so we only flush the page-aligned portion.
|
||||
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
|
||||
#endif
|
||||
}
|
||||
|
||||
// ---------------------- TSD ---------------- {{{
|
||||
|
|
|
@ -65,6 +65,7 @@ INTERCEPT_WRAP_W_W(_expand_dbg)
|
|||
|
||||
INTERCEPT_LIBRARY_FUNCTION(atoi);
|
||||
INTERCEPT_LIBRARY_FUNCTION(atol);
|
||||
INTERCEPT_LIBRARY_FUNCTION(atoll);
|
||||
INTERCEPT_LIBRARY_FUNCTION(frexp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(longjmp);
|
||||
#if SANITIZER_INTERCEPT_MEMCHR
|
||||
|
@ -91,6 +92,7 @@ INTERCEPT_LIBRARY_FUNCTION(strspn);
|
|||
INTERCEPT_LIBRARY_FUNCTION(strstr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtok);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtol);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtoll);
|
||||
INTERCEPT_LIBRARY_FUNCTION(wcslen);
|
||||
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
|
||||
|
||||
|
|
|
@ -86,9 +86,11 @@ static void InitializeFlags() {
|
|||
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
|
||||
// Sigtrap is used in error reporting.
|
||||
cf.handle_sigtrap = kHandleSignalExclusive;
|
||||
// For now only tested on Linux. Other plantforms can be turned on as they
|
||||
// become ready.
|
||||
cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;
|
||||
// For now only tested on Linux and Fuchsia. Other plantforms can be turned
|
||||
// on as they become ready.
|
||||
constexpr bool can_detect_leaks =
|
||||
(SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA;
|
||||
cf.detect_leaks = cf.detect_leaks && can_detect_leaks;
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
// Let platform handle other signals. It is better at reporting them then we
|
||||
|
@ -170,7 +172,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
|
|||
auto sds = StackDepotGetStats();
|
||||
AllocatorStatCounters asc;
|
||||
GetAllocatorStats(asc);
|
||||
s.append(
|
||||
s.AppendF(
|
||||
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
|
||||
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
|
||||
" heap: %zd",
|
||||
|
@ -290,14 +292,20 @@ static bool InitializeSingleGlobal(const hwasan_global &global) {
|
|||
}
|
||||
|
||||
static void InitLoadedGlobals() {
|
||||
dl_iterate_phdr(
|
||||
[](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
|
||||
for (const hwasan_global &global : HwasanGlobalsFor(
|
||||
info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
|
||||
InitializeSingleGlobal(global);
|
||||
return 0;
|
||||
},
|
||||
nullptr);
|
||||
// Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
|
||||
// the startup path which calls into __hwasan_library_loaded on all
|
||||
// initially loaded modules, so explicitly registering the globals here
|
||||
// isn't needed.
|
||||
if constexpr (!SANITIZER_FUCHSIA) {
|
||||
dl_iterate_phdr(
|
||||
[](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
|
||||
for (const hwasan_global &global : HwasanGlobalsFor(
|
||||
info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
|
||||
InitializeSingleGlobal(global);
|
||||
return 0;
|
||||
},
|
||||
nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare to run instrumented code on the main thread.
|
||||
|
@ -364,13 +372,7 @@ __attribute__((constructor(0))) void __hwasan_init() {
|
|||
DisableCoreDumperIfNecessary();
|
||||
|
||||
InitInstrumentation();
|
||||
if constexpr (!SANITIZER_FUCHSIA) {
|
||||
// Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
|
||||
// the startup path which calls into __hwasan_library_loaded on all
|
||||
// initially loaded modules, so explicitly registering the globals here
|
||||
// isn't needed.
|
||||
InitLoadedGlobals();
|
||||
}
|
||||
InitLoadedGlobals();
|
||||
|
||||
// Needs to be called here because flags()->random_tags might not have been
|
||||
// initialized when InitInstrumentation() was called.
|
||||
|
@ -530,6 +532,56 @@ void __hwasan_load16_noabort(uptr p) {
|
|||
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_loadN_match_all(uptr p, uptr sz, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
|
||||
}
|
||||
void __hwasan_load1_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
|
||||
}
|
||||
void __hwasan_load2_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
|
||||
}
|
||||
void __hwasan_load4_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
|
||||
}
|
||||
void __hwasan_load8_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
|
||||
}
|
||||
void __hwasan_load16_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_loadN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
|
||||
}
|
||||
void __hwasan_load1_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
|
||||
}
|
||||
void __hwasan_load2_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
|
||||
}
|
||||
void __hwasan_load4_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
|
||||
}
|
||||
void __hwasan_load8_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
|
||||
}
|
||||
void __hwasan_load16_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_storeN(uptr p, uptr sz) {
|
||||
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
|
||||
}
|
||||
|
@ -568,6 +620,56 @@ void __hwasan_store16_noabort(uptr p) {
|
|||
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_storeN_match_all(uptr p, uptr sz, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
|
||||
}
|
||||
void __hwasan_store1_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
|
||||
}
|
||||
void __hwasan_store2_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
|
||||
}
|
||||
void __hwasan_store4_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
|
||||
}
|
||||
void __hwasan_store8_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
|
||||
}
|
||||
void __hwasan_store16_match_all(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_storeN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
|
||||
}
|
||||
void __hwasan_store1_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
|
||||
}
|
||||
void __hwasan_store2_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
|
||||
}
|
||||
void __hwasan_store4_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
|
||||
}
|
||||
void __hwasan_store8_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
|
||||
}
|
||||
void __hwasan_store16_match_all_noabort(uptr p, u8 match_all_tag) {
|
||||
if (GetTagFromPointer(p) != match_all_tag)
|
||||
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
|
||||
}
|
||||
|
||||
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
|
||||
TagMemoryAligned(UntagAddr(p), sz, tag);
|
||||
}
|
||||
|
@ -579,7 +681,7 @@ uptr __hwasan_tag_pointer(uptr p, u8 tag) {
|
|||
void __hwasan_handle_longjmp(const void *sp_dst) {
|
||||
uptr dst = (uptr)sp_dst;
|
||||
// HWASan does not support tagged SP.
|
||||
CHECK(GetTagFromPointer(dst) == 0);
|
||||
CHECK_EQ(GetTagFromPointer(dst), 0);
|
||||
|
||||
uptr sp = (uptr)__builtin_frame_address(0);
|
||||
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
|
||||
|
|
|
@ -159,13 +159,13 @@ void *__sanitizer_malloc(uptr size) {
|
|||
// Fuchsia does not use WRAP/wrappers used for the interceptor infrastructure.
|
||||
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
|
||||
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
|
||||
ARGS) ALIAS("__sanitizer_" #FN)
|
||||
ARGS) ALIAS(__sanitizer_##FN)
|
||||
#else
|
||||
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
|
||||
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
|
||||
ALIAS("__sanitizer_" #FN); \
|
||||
ALIAS(__sanitizer_##FN); \
|
||||
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
|
||||
ARGS) ALIAS("__sanitizer_" #FN)
|
||||
ARGS) ALIAS(__sanitizer_##FN)
|
||||
#endif
|
||||
|
||||
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
|
||||
|
|
|
@ -149,8 +149,9 @@ void HwasanAllocatorInit() {
|
|||
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
|
||||
!flags()->disable_allocator_tagging);
|
||||
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
|
||||
allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
|
||||
GetAliasRegionStart());
|
||||
allocator.InitLinkerInitialized(
|
||||
common_flags()->allocator_release_to_os_interval_ms,
|
||||
GetAliasRegionStart());
|
||||
for (uptr i = 0; i < sizeof(tail_magic); i++)
|
||||
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
|
||||
if (common_flags()->max_allocation_size_mb) {
|
||||
|
@ -165,8 +166,11 @@ void HwasanAllocatorLock() { allocator.ForceLock(); }
|
|||
|
||||
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
|
||||
|
||||
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
|
||||
void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }
|
||||
|
||||
void AllocatorThreadFinish(AllocatorCache *cache) {
|
||||
allocator.SwallowCache(cache);
|
||||
allocator.DestroyCache(cache);
|
||||
}
|
||||
|
||||
static uptr TaggedSize(uptr size) {
|
||||
|
@ -230,28 +234,23 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
|
|||
}
|
||||
|
||||
void *user_ptr = allocated;
|
||||
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are
|
||||
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to
|
||||
// retag to 0.
|
||||
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
|
||||
(flags()->tag_in_malloc || flags()->tag_in_free) &&
|
||||
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
|
||||
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
|
||||
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
|
||||
uptr tag_size = orig_size ? orig_size : 1;
|
||||
uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
|
||||
user_ptr =
|
||||
(void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
|
||||
if (full_granule_size != tag_size) {
|
||||
u8 *short_granule =
|
||||
reinterpret_cast<u8 *>(allocated) + full_granule_size;
|
||||
TagMemoryAligned((uptr)short_granule, kShadowAlignment,
|
||||
tag_size % kShadowAlignment);
|
||||
short_granule[kShadowAlignment - 1] = tag;
|
||||
}
|
||||
} else {
|
||||
user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
|
||||
atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
|
||||
flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
|
||||
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
|
||||
uptr tag_size = orig_size ? orig_size : 1;
|
||||
uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
|
||||
user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
|
||||
if (full_granule_size != tag_size) {
|
||||
u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
|
||||
TagMemoryAligned((uptr)short_granule, kShadowAlignment,
|
||||
tag_size % kShadowAlignment);
|
||||
short_granule[kShadowAlignment - 1] = tag;
|
||||
}
|
||||
} else {
|
||||
// Tagging can not be completely skipped. If it's disabled, we need to tag
|
||||
// with zeros.
|
||||
user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
|
||||
}
|
||||
|
||||
Metadata *meta =
|
||||
|
@ -261,7 +260,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
|
|||
: __lsan::kDirectlyLeaked);
|
||||
#endif
|
||||
meta->SetAllocated(StackDepotPut(*stack), orig_size);
|
||||
RunMallocHooks(user_ptr, size);
|
||||
RunMallocHooks(user_ptr, orig_size);
|
||||
return user_ptr;
|
||||
}
|
||||
|
||||
|
@ -288,8 +287,6 @@ static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
|
|||
|
||||
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
|
||||
CHECK(tagged_ptr);
|
||||
RunFreeHooks(tagged_ptr);
|
||||
|
||||
void *untagged_ptr = UntagPtr(tagged_ptr);
|
||||
|
||||
if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
|
||||
|
@ -304,6 +301,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
|
|||
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
|
||||
return;
|
||||
}
|
||||
|
||||
RunFreeHooks(tagged_ptr);
|
||||
|
||||
uptr orig_size = meta->GetRequestedSize();
|
||||
u32 free_context_id = StackDepotPut(*stack);
|
||||
u32 alloc_context_id = meta->GetAllocStackId();
|
||||
|
@ -340,7 +340,8 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
|
|||
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
|
||||
}
|
||||
if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
|
||||
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
|
||||
atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
|
||||
allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
|
||||
// Always store full 8-bit tags on free to maximize UAF detection.
|
||||
tag_t tag;
|
||||
if (t) {
|
||||
|
@ -437,6 +438,15 @@ static uptr AllocationSize(const void *p) {
|
|||
return b->GetRequestedSize();
|
||||
}
|
||||
|
||||
static uptr AllocationSizeFast(const void *p) {
|
||||
const void *untagged_ptr = UntagPtr(p);
|
||||
void *aligned_ptr = reinterpret_cast<void *>(
|
||||
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
|
||||
Metadata *meta =
|
||||
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
|
||||
return meta->GetRequestedSize();
|
||||
}
|
||||
|
||||
void *hwasan_malloc(uptr size, StackTrace *stack) {
|
||||
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
|
||||
}
|
||||
|
@ -675,4 +685,11 @@ const void *__sanitizer_get_allocated_begin(const void *p) {
|
|||
|
||||
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
|
||||
|
||||
uptr __sanitizer_get_allocated_size_fast(const void *p) {
|
||||
DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
|
||||
uptr ret = AllocationSizeFast(p);
|
||||
DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
|
||||
|
|
|
@ -54,6 +54,10 @@ static_assert(sizeof(Metadata) == 16);
|
|||
|
||||
struct HwasanMapUnmapCallback {
|
||||
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
|
||||
void OnMapSecondary(uptr p, uptr size, uptr user_begin,
|
||||
uptr user_size) const {
|
||||
UpdateMemoryUsage();
|
||||
}
|
||||
void OnUnmap(uptr p, uptr size) const {
|
||||
// We are about to unmap a chunk of user memory.
|
||||
// It can return as user-requested mmap() or another thread stack.
|
||||
|
@ -88,7 +92,8 @@ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
|||
typedef CombinedAllocator<PrimaryAllocator> Allocator;
|
||||
typedef Allocator::AllocatorCache AllocatorCache;
|
||||
|
||||
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
|
||||
void AllocatorThreadStart(AllocatorCache *cache);
|
||||
void AllocatorThreadFinish(AllocatorCache *cache);
|
||||
|
||||
class HwasanChunkView {
|
||||
public:
|
||||
|
|
|
@ -62,7 +62,8 @@ __hwasan_personality_wrapper(int version, _Unwind_Action actions,
|
|||
#error Unsupported architecture
|
||||
#endif
|
||||
uptr sp = get_cfa(context);
|
||||
TagMemory(sp, fp - sp, 0);
|
||||
TagMemory(UntagAddr(sp), UntagAddr(fp) - UntagAddr(sp),
|
||||
GetTagFromPointer(sp));
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
|
|
@ -13,6 +13,8 @@
|
|||
|
||||
#include "hwasan_globals.h"
|
||||
|
||||
#include "sanitizer_common/sanitizer_array_ref.h"
|
||||
|
||||
namespace __hwasan {
|
||||
|
||||
enum { NT_LLVM_HWASAN_GLOBALS = 3 };
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
|
||||
#include <link.h>
|
||||
|
||||
#include "sanitizer_common/sanitizer_array_ref.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
|
||||
|
|
|
@ -14,10 +14,17 @@
|
|||
// sanitizer_common/sanitizer_common_interceptors.h
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
|
||||
|
||||
#include "hwasan.h"
|
||||
#include "hwasan_allocator.h"
|
||||
#include "hwasan_checks.h"
|
||||
#include "hwasan_mapping.h"
|
||||
#include "hwasan_platform_interceptors.h"
|
||||
#include "hwasan_thread.h"
|
||||
#include "hwasan_thread_list.h"
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_linux.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
|
||||
|
@ -25,21 +32,47 @@
|
|||
|
||||
using namespace __hwasan;
|
||||
|
||||
#if HWASAN_WITH_INTERCEPTORS
|
||||
|
||||
struct ThreadStartArg {
|
||||
thread_callback_t callback;
|
||||
void *param;
|
||||
__sanitizer_sigset_t starting_sigset_;
|
||||
struct HWAsanInterceptorContext {
|
||||
const char *interceptor_name;
|
||||
};
|
||||
|
||||
static void *HwasanThreadStartFunc(void *arg) {
|
||||
__hwasan_thread_enter();
|
||||
ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
|
||||
SetSigProcMask(&A.starting_sigset_, nullptr);
|
||||
UnmapOrDie(arg, GetPageSizeCached());
|
||||
return A.callback(A.param);
|
||||
}
|
||||
# define ACCESS_MEMORY_RANGE(ctx, offset, size, access) \
|
||||
do { \
|
||||
__hwasan::CheckAddressSized<ErrorAction::Abort, access>((uptr)offset, \
|
||||
size); \
|
||||
} while (0)
|
||||
|
||||
# define HWASAN_READ_RANGE(ctx, offset, size) \
|
||||
ACCESS_MEMORY_RANGE(ctx, offset, size, AccessType::Load)
|
||||
# define HWASAN_WRITE_RANGE(ctx, offset, size) \
|
||||
ACCESS_MEMORY_RANGE(ctx, offset, size, AccessType::Store)
|
||||
|
||||
# if !SANITIZER_APPLE
|
||||
# define HWASAN_INTERCEPT_FUNC(name) \
|
||||
do { \
|
||||
if (!INTERCEPT_FUNCTION(name)) \
|
||||
VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name); \
|
||||
} while (0)
|
||||
# define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
|
||||
do { \
|
||||
if (!INTERCEPT_FUNCTION_VER(name, ver)) \
|
||||
VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n", \
|
||||
#name, ver); \
|
||||
} while (0)
|
||||
# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
|
||||
do { \
|
||||
if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
|
||||
VReport( \
|
||||
1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
|
||||
#name, ver, #name); \
|
||||
} while (0)
|
||||
|
||||
# else
|
||||
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
|
||||
# define HWASAN_INTERCEPT_FUNC(name)
|
||||
# endif // SANITIZER_APPLE
|
||||
|
||||
# if HWASAN_WITH_INTERCEPTORS
|
||||
|
||||
# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) __hwasan_loadN((uptr)p, (uptr)s)
|
||||
# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
|
||||
|
@ -57,26 +90,251 @@ static void *HwasanThreadStartFunc(void *arg) {
|
|||
# include "sanitizer_common/sanitizer_common_syscalls.inc"
|
||||
# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
|
||||
|
||||
INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
|
||||
void * param) {
|
||||
# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
|
||||
HWASAN_WRITE_RANGE(ctx, ptr, size)
|
||||
|
||||
# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
|
||||
HWASAN_READ_RANGE(ctx, ptr, size)
|
||||
|
||||
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
|
||||
HWAsanInterceptorContext _ctx = {#func}; \
|
||||
ctx = (void *)&_ctx; \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(func); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(path); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(fd); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(fd); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(fd); \
|
||||
(void)(newfd); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(name); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
(void)(thread); \
|
||||
(void)(name); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
|
||||
do { \
|
||||
(void)(name); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
|
||||
return internal_memset(dst, v, size); \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
|
||||
if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) && \
|
||||
common_flags()->intercept_intrin) \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
return REAL(memset)(dst, v, size); \
|
||||
}
|
||||
|
||||
# define COMMON_INTERCEPTOR_STRERROR() \
|
||||
do { \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
|
||||
|
||||
# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)
|
||||
|
||||
// The main purpose of the mmap interceptor is to prevent the user from
|
||||
// allocating on top of shadow pages.
|
||||
//
|
||||
// For compatibility, it does not tag pointers, nor does it allow
|
||||
// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
|
||||
// will not return a tagged pointer, the tagged pointer must have come
|
||||
// from elsewhere, such as the secondary allocator, which makes it a
|
||||
// very odd usecase.)
|
||||
template <class Mmap>
|
||||
static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
|
||||
int prot, int flags, int fd, OFF64_T offset) {
|
||||
if (addr) {
|
||||
if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));
|
||||
|
||||
addr = UntagPtr(addr);
|
||||
}
|
||||
SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
|
||||
void *end_addr = (char *)addr + (rounded_length - 1);
|
||||
if (addr && length &&
|
||||
(!MemIsApp(reinterpret_cast<uptr>(addr)) ||
|
||||
!MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
|
||||
// User requested an address that is incompatible with HWASan's
|
||||
// memory layout. Use a different address if allowed, else fail.
|
||||
if (flags & map_fixed) {
|
||||
errno = errno_EINVAL;
|
||||
return (void *)-1;
|
||||
} else {
|
||||
addr = nullptr;
|
||||
}
|
||||
}
|
||||
void *res = real_mmap(addr, length, prot, flags, fd, offset);
|
||||
if (length && res != (void *)-1) {
|
||||
uptr beg = reinterpret_cast<uptr>(res);
|
||||
DCHECK(IsAligned(beg, GetPageSize()));
|
||||
if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
|
||||
// Application has attempted to map more memory than is supported by
|
||||
// HWASan. Act as if we ran out of memory.
|
||||
internal_munmap(res, length);
|
||||
errno = errno_ENOMEM;
|
||||
return (void *)-1;
|
||||
}
|
||||
__hwasan::TagMemoryAligned(beg, rounded_length, 0);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
template <class Munmap>
|
||||
static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
|
||||
// We should not tag if munmap fail, but it's to late to tag after
|
||||
// real_munmap, as the pages could be mmaped by another thread.
|
||||
uptr beg = reinterpret_cast<uptr>(addr);
|
||||
if (length && IsAligned(beg, GetPageSize())) {
|
||||
SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
|
||||
// Protect from unmapping the shadow.
|
||||
if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
|
||||
errno = errno_EINVAL;
|
||||
return -1;
|
||||
}
|
||||
__hwasan::TagMemoryAligned(beg, rounded_length, 0);
|
||||
}
|
||||
return real_munmap(addr, length);
|
||||
}
|
||||
|
||||
# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
|
||||
fd, offset) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
return mmap_interceptor(REAL(mmap), addr, sz, prot, flags, fd, off); \
|
||||
} while (false)
|
||||
|
||||
# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
|
||||
do { \
|
||||
(void)(ctx); \
|
||||
return munmap_interceptor(REAL(munmap), addr, sz); \
|
||||
} while (false)
|
||||
|
||||
# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
|
||||
# include "sanitizer_common/sanitizer_common_interceptors.inc"
|
||||
|
||||
struct ThreadStartArg {
|
||||
__sanitizer_sigset_t starting_sigset_;
|
||||
};
|
||||
|
||||
static void *HwasanThreadStartFunc(void *arg) {
|
||||
__hwasan_thread_enter();
|
||||
SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
|
||||
nullptr);
|
||||
InternalFree(arg);
|
||||
auto self = GetThreadSelf();
|
||||
auto args = hwasanThreadArgRetval().GetArgs(self);
|
||||
void *retval = (*args.routine)(args.arg_retval);
|
||||
hwasanThreadArgRetval().Finish(self, retval);
|
||||
return retval;
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
int pthread_attr_getdetachstate(void *attr, int *v);
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_create, void *thread, void *attr,
|
||||
void *(*callback)(void *), void *param) {
|
||||
EnsureMainThreadIDIsCorrect();
|
||||
ScopedTaggingDisabler tagging_disabler;
|
||||
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
|
||||
GetPageSizeCached(), "pthread_create"));
|
||||
A->callback = callback;
|
||||
A->param = param;
|
||||
bool detached = [attr]() {
|
||||
int d = 0;
|
||||
return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
|
||||
}();
|
||||
ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
|
||||
ScopedBlockSignals block(&A->starting_sigset_);
|
||||
// ASAN uses the same approach to disable leaks from pthread_create.
|
||||
# if CAN_SANITIZE_LEAKS
|
||||
__lsan::ScopedInterceptorDisabler lsan_disabler;
|
||||
# endif
|
||||
return REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
|
||||
|
||||
int result;
|
||||
hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
|
||||
result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
|
||||
return result ? 0 : *(uptr *)(thread);
|
||||
});
|
||||
if (result != 0)
|
||||
InternalFree(A);
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
|
||||
return REAL(pthread_join)(t, arg);
|
||||
INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
|
||||
int result;
|
||||
hwasanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_join)(thread, retval);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_detach, void *thread) {
|
||||
int result;
|
||||
hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
|
||||
result = REAL(pthread_detach)(thread);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, pthread_exit, void *retval) {
|
||||
hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
|
||||
REAL(pthread_exit)(retval);
|
||||
}
|
||||
|
||||
# if SANITIZER_GLIBC
|
||||
INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
|
||||
int result;
|
||||
hwasanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_tryjoin_np)(thread, ret);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
|
||||
const struct timespec *abstime) {
|
||||
int result;
|
||||
hwasanThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
# endif
|
||||
|
||||
DEFINE_REAL_PTHREAD_FUNCTIONS
|
||||
|
||||
DEFINE_REAL(int, vfork)
|
||||
|
@ -85,13 +343,13 @@ DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
|
|||
// Get and/or change the set of blocked signals.
|
||||
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
|
||||
__hw_sigset_t *__restrict __oset);
|
||||
#define SIG_BLOCK 0
|
||||
#define SIG_SETMASK 2
|
||||
# define SIG_BLOCK 0
|
||||
# define SIG_SETMASK 2
|
||||
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
|
||||
env[0].__magic = kHwJmpBufMagic;
|
||||
env[0].__mask_was_saved =
|
||||
(savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
|
||||
&env[0].__saved_mask) == 0);
|
||||
(savemask &&
|
||||
sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -120,26 +378,27 @@ InternalLongjmp(__hw_register_buf env, int retval) {
|
|||
# if defined(__aarch64__)
|
||||
register long int retval_tmp asm("x1") = retval;
|
||||
register void *env_address asm("x0") = &env[0];
|
||||
asm volatile("ldp x19, x20, [%0, #0<<3];"
|
||||
"ldp x21, x22, [%0, #2<<3];"
|
||||
"ldp x23, x24, [%0, #4<<3];"
|
||||
"ldp x25, x26, [%0, #6<<3];"
|
||||
"ldp x27, x28, [%0, #8<<3];"
|
||||
"ldp x29, x30, [%0, #10<<3];"
|
||||
"ldp d8, d9, [%0, #14<<3];"
|
||||
"ldp d10, d11, [%0, #16<<3];"
|
||||
"ldp d12, d13, [%0, #18<<3];"
|
||||
"ldp d14, d15, [%0, #20<<3];"
|
||||
"ldr x5, [%0, #13<<3];"
|
||||
"mov sp, x5;"
|
||||
// Return the value requested to return through arguments.
|
||||
// This should be in x1 given what we requested above.
|
||||
"cmp %1, #0;"
|
||||
"mov x0, #1;"
|
||||
"csel x0, %1, x0, ne;"
|
||||
"br x30;"
|
||||
: "+r"(env_address)
|
||||
: "r"(retval_tmp));
|
||||
asm volatile(
|
||||
"ldp x19, x20, [%0, #0<<3];"
|
||||
"ldp x21, x22, [%0, #2<<3];"
|
||||
"ldp x23, x24, [%0, #4<<3];"
|
||||
"ldp x25, x26, [%0, #6<<3];"
|
||||
"ldp x27, x28, [%0, #8<<3];"
|
||||
"ldp x29, x30, [%0, #10<<3];"
|
||||
"ldp d8, d9, [%0, #14<<3];"
|
||||
"ldp d10, d11, [%0, #16<<3];"
|
||||
"ldp d12, d13, [%0, #18<<3];"
|
||||
"ldp d14, d15, [%0, #20<<3];"
|
||||
"ldr x5, [%0, #13<<3];"
|
||||
"mov sp, x5;"
|
||||
// Return the value requested to return through arguments.
|
||||
// This should be in x1 given what we requested above.
|
||||
"cmp %1, #0;"
|
||||
"mov x0, #1;"
|
||||
"csel x0, %1, x0, ne;"
|
||||
"br x30;"
|
||||
: "+r"(env_address)
|
||||
: "r"(retval_tmp));
|
||||
# elif defined(__x86_64__)
|
||||
register long int retval_tmp asm("%rsi") = retval;
|
||||
register void *env_address asm("%rdi") = &env[0];
|
||||
|
@ -215,8 +474,7 @@ INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
|
|||
|
||||
if (env[0].__mask_was_saved)
|
||||
// Restore the saved signal mask.
|
||||
(void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
|
||||
(__hw_sigset_t *)0);
|
||||
(void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
|
||||
InternalLongjmp(env[0].__jmpbuf, val);
|
||||
}
|
||||
|
||||
|
@ -238,8 +496,8 @@ INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
|
|||
}
|
||||
InternalLongjmp(env[0].__jmpbuf, val);
|
||||
}
|
||||
#undef SIG_BLOCK
|
||||
#undef SIG_SETMASK
|
||||
# undef SIG_BLOCK
|
||||
# undef SIG_SETMASK
|
||||
|
||||
# endif // HWASAN_WITH_INTERCEPTORS
|
||||
|
||||
|
@ -254,7 +512,7 @@ int OnExit() {
|
|||
return 0;
|
||||
}
|
||||
|
||||
} // namespace __hwasan
|
||||
} // namespace __hwasan
|
||||
|
||||
namespace __hwasan {
|
||||
|
||||
|
@ -262,19 +520,30 @@ void InitializeInterceptors() {
|
|||
static int inited = 0;
|
||||
CHECK_EQ(inited, 0);
|
||||
|
||||
#if HWASAN_WITH_INTERCEPTORS
|
||||
#if defined(__linux__)
|
||||
# if HWASAN_WITH_INTERCEPTORS
|
||||
InitializeCommonInterceptors();
|
||||
|
||||
(void)(read_iovec);
|
||||
(void)(write_iovec);
|
||||
|
||||
# if defined(__linux__)
|
||||
INTERCEPT_FUNCTION(__libc_longjmp);
|
||||
INTERCEPT_FUNCTION(longjmp);
|
||||
INTERCEPT_FUNCTION(siglongjmp);
|
||||
INTERCEPT_FUNCTION(vfork);
|
||||
#endif // __linux__
|
||||
# endif // __linux__
|
||||
INTERCEPT_FUNCTION(pthread_create);
|
||||
INTERCEPT_FUNCTION(pthread_join);
|
||||
INTERCEPT_FUNCTION(pthread_detach);
|
||||
INTERCEPT_FUNCTION(pthread_exit);
|
||||
# if SANITIZER_GLIBC
|
||||
INTERCEPT_FUNCTION(pthread_tryjoin_np);
|
||||
INTERCEPT_FUNCTION(pthread_timedjoin_np);
|
||||
# endif
|
||||
# endif
|
||||
|
||||
inited = 1;
|
||||
}
|
||||
} // namespace __hwasan
|
||||
} // namespace __hwasan
|
||||
|
||||
#endif // #if !SANITIZER_FUCHSIA
|
||||
|
|
|
@ -76,6 +76,32 @@ void __hwasan_load8_noabort(uptr);
|
|||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load16_noabort(uptr);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_loadN_match_all(uptr, uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load1_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load2_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load4_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load8_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load16_match_all(uptr, u8);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_loadN_match_all_noabort(uptr, uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load1_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load2_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load4_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load8_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_load16_match_all_noabort(uptr, u8);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_storeN(uptr, uptr);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
|
@ -102,6 +128,32 @@ void __hwasan_store8_noabort(uptr);
|
|||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store16_noabort(uptr);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_storeN_match_all(uptr, uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store1_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store2_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store4_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store8_match_all(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store16_match_all(uptr, u8);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_storeN_match_all_noabort(uptr, uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store1_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store2_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store4_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store8_match_all_noabort(uptr, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_store16_match_all_noabort(uptr, u8);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
|
||||
|
||||
|
@ -183,6 +235,13 @@ void *__hwasan_memset(void *s, int c, uptr n);
|
|||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void *__hwasan_memmove(void *dest, const void *src, uptr n);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void *__hwasan_memcpy_match_all(void *dst, const void *src, uptr size, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void *__hwasan_memset_match_all(void *s, int c, uptr n, u8);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __hwasan_set_error_report_callback(void (*callback)(const char *));
|
||||
} // extern "C"
|
||||
|
|
|
@ -283,7 +283,7 @@ void InitThreads() {
|
|||
bool MemIsApp(uptr p) {
|
||||
// Memory outside the alias range has non-zero tags.
|
||||
# if !defined(HWASAN_ALIASING_MODE)
|
||||
CHECK(GetTagFromPointer(p) == 0);
|
||||
CHECK_EQ(GetTagFromPointer(p), 0);
|
||||
# endif
|
||||
|
||||
return (p >= kHighMemStart && p <= kHighMemEnd) ||
|
||||
|
@ -302,8 +302,15 @@ extern "C" void __hwasan_thread_exit() {
|
|||
Thread *t = GetCurrentThread();
|
||||
// Make sure that signal handler can not see a stale current thread pointer.
|
||||
atomic_signal_fence(memory_order_seq_cst);
|
||||
if (t)
|
||||
if (t) {
|
||||
// Block async signals on the thread as the handler can be instrumented.
|
||||
// After this point instrumented code can't access essential data from TLS
|
||||
// and will crash.
|
||||
// Bionic already calls __hwasan_thread_exit with blocked signals.
|
||||
if (SANITIZER_GLIBC)
|
||||
BlockSignals();
|
||||
hwasanThreadList().ReleaseThread(t);
|
||||
}
|
||||
}
|
||||
|
||||
# if HWASAN_WITH_INTERCEPTORS
|
||||
|
|
|
@ -42,3 +42,33 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) {
|
|||
reinterpret_cast<uptr>(from), size);
|
||||
return memmove(to, from, size);
|
||||
}
|
||||
|
||||
void *__hwasan_memset_match_all(void *block, int c, uptr size,
|
||||
u8 match_all_tag) {
|
||||
if (GetTagFromPointer(reinterpret_cast<uptr>(block)) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
|
||||
reinterpret_cast<uptr>(block), size);
|
||||
return memset(block, c, size);
|
||||
}
|
||||
|
||||
void *__hwasan_memcpy_match_all(void *to, const void *from, uptr size,
|
||||
u8 match_all_tag) {
|
||||
if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
|
||||
reinterpret_cast<uptr>(to), size);
|
||||
if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
|
||||
reinterpret_cast<uptr>(from), size);
|
||||
return memcpy(to, from, size);
|
||||
}
|
||||
|
||||
void *__hwasan_memmove_match_all(void *to, const void *from, uptr size,
|
||||
u8 match_all_tag) {
|
||||
if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
|
||||
reinterpret_cast<uptr>(to), size);
|
||||
if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
|
||||
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
|
||||
reinterpret_cast<uptr>(from), size);
|
||||
return memmove(to, from, size);
|
||||
}
|
||||
|
|
1001
libsanitizer/hwasan/hwasan_platform_interceptors.h
Normal file
1001
libsanitizer/hwasan/hwasan_platform_interceptors.h
Normal file
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -26,7 +26,7 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
|
|||
void ReportInvalidFree(StackTrace *stack, uptr addr);
|
||||
void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
|
||||
const u8 *expected);
|
||||
void ReportRegisters(uptr *registers_frame, uptr pc);
|
||||
void ReportRegisters(const uptr *registers_frame, uptr pc);
|
||||
void ReportAtExitStatistics();
|
||||
|
||||
|
||||
|
|
|
@ -31,33 +31,37 @@
|
|||
.section .text
|
||||
.file "hwasan_setjmp_aarch64.S"
|
||||
|
||||
.global __interceptor_setjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_setjmp)
|
||||
__interceptor_setjmp:
|
||||
.global ASM_WRAPPER_NAME(setjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
|
||||
ASM_WRAPPER_NAME(setjmp):
|
||||
CFI_STARTPROC
|
||||
BTI_C
|
||||
mov x1, #0
|
||||
b __interceptor_sigsetjmp
|
||||
b ASM_WRAPPER_NAME(sigsetjmp)
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_setjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
|
||||
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
|
||||
// current signal.
|
||||
.global __interceptor_setjmp_bionic
|
||||
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
|
||||
__interceptor_setjmp_bionic:
|
||||
.global ASM_WRAPPER_NAME(setjmp_bionic)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp_bionic))
|
||||
ASM_WRAPPER_NAME(setjmp_bionic):
|
||||
CFI_STARTPROC
|
||||
BTI_C
|
||||
mov x1, #1
|
||||
b __interceptor_sigsetjmp
|
||||
b ASM_WRAPPER_NAME(sigsetjmp)
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_setjmp_bionic)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(setjmp_bionic))
|
||||
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(setjmp_bionic)
|
||||
#endif
|
||||
|
||||
.global __interceptor_sigsetjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
|
||||
__interceptor_sigsetjmp:
|
||||
.global ASM_WRAPPER_NAME(sigsetjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
ASM_WRAPPER_NAME(sigsetjmp):
|
||||
CFI_STARTPROC
|
||||
BTI_C
|
||||
stp x19, x20, [x0, #0<<3]
|
||||
|
@ -77,22 +81,19 @@ __interceptor_sigsetjmp:
|
|||
// This function is defined in hwasan_interceptors.cc
|
||||
b __sigjmp_save
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_sigsetjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
|
||||
|
||||
.macro WEAK_ALIAS first second
|
||||
.weak \second
|
||||
.equ \second\(), \first
|
||||
.endm
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
WEAK_ALIAS __interceptor_sigsetjmp, sigsetjmp
|
||||
WEAK_ALIAS __interceptor_setjmp_bionic, setjmp
|
||||
ASM_TRAMPOLINE_ALIAS(sigsetjmp, sigsetjmp)
|
||||
ASM_TRAMPOLINE_ALIAS(setjmp, setjmp_bionic)
|
||||
#else
|
||||
WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
|
||||
ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
|
||||
#endif
|
||||
|
||||
WEAK_ALIAS __interceptor_setjmp, _setjmp
|
||||
ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
|
||||
#endif
|
||||
|
||||
// We do not need executable stack.
|
||||
|
|
|
@ -31,18 +31,18 @@
|
|||
.section .text
|
||||
.file "hwasan_setjmp_riscv64.S"
|
||||
|
||||
.global __interceptor_setjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_setjmp)
|
||||
__interceptor_setjmp:
|
||||
.global ASM_WRAPPER_NAME(setjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
|
||||
ASM_WRAPPER_NAME(setjmp):
|
||||
CFI_STARTPROC
|
||||
addi x11, x0, 0
|
||||
tail __interceptor_sigsetjmp
|
||||
tail ASM_WRAPPER_NAME(sigsetjmp)
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_setjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
|
||||
|
||||
.global __interceptor_sigsetjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
|
||||
__interceptor_sigsetjmp:
|
||||
.global ASM_WRAPPER_NAME(sigsetjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
ASM_WRAPPER_NAME(sigsetjmp):
|
||||
CFI_STARTPROC
|
||||
sd ra, 0<<3(x10)
|
||||
sd s0, 1<<3(x10)
|
||||
|
@ -80,17 +80,12 @@ __interceptor_sigsetjmp:
|
|||
// This function is defined in hwasan_interceptors.cc
|
||||
tail __sigjmp_save
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_sigsetjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
|
||||
|
||||
.macro WEAK_ALIAS first second
|
||||
.weak \second
|
||||
.equ \second\(), \first
|
||||
.endm
|
||||
|
||||
WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
|
||||
|
||||
WEAK_ALIAS __interceptor_setjmp, _setjmp
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
|
||||
ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
|
||||
ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
|
||||
#endif
|
||||
|
||||
// We do not need executable stack.
|
||||
|
|
|
@ -31,19 +31,19 @@
|
|||
.section .text
|
||||
.file "hwasan_setjmp_x86_64.S"
|
||||
|
||||
.global __interceptor_setjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_setjmp)
|
||||
__interceptor_setjmp:
|
||||
.global ASM_WRAPPER_NAME(setjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
|
||||
ASM_WRAPPER_NAME(setjmp):
|
||||
CFI_STARTPROC
|
||||
_CET_ENDBR
|
||||
xorl %esi, %esi
|
||||
jmp .Linterceptor_sigsetjmp
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_setjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
|
||||
|
||||
.global __interceptor_sigsetjmp
|
||||
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
|
||||
__interceptor_sigsetjmp:
|
||||
.global ASM_WRAPPER_NAME(sigsetjmp)
|
||||
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
ASM_WRAPPER_NAME(sigsetjmp):
|
||||
.Linterceptor_sigsetjmp:
|
||||
CFI_STARTPROC
|
||||
_CET_ENDBR
|
||||
|
@ -67,16 +67,12 @@ __interceptor_sigsetjmp:
|
|||
jmp __sigjmp_save
|
||||
|
||||
CFI_ENDPROC
|
||||
ASM_SIZE(__interceptor_sigsetjmp)
|
||||
ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
|
||||
|
||||
|
||||
.macro WEAK_ALIAS first second
|
||||
.weak \second
|
||||
.equ \second\(), \first
|
||||
.endm
|
||||
|
||||
WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
|
||||
WEAK_ALIAS __interceptor_setjmp, _setjmp
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
|
||||
ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
|
||||
ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
|
||||
#endif
|
||||
|
||||
// We do not need executable stack.
|
||||
|
|
|
@ -89,16 +89,16 @@ __hwasan_tag_mismatch:
|
|||
ubfx x16, x0, #4, #52
|
||||
ldrb w16, [x9, x16]
|
||||
cmp w16, #0xf
|
||||
b.hi __hwasan_tag_mismatch_v2
|
||||
b.hi mismatch
|
||||
cmp w16, w17
|
||||
b.lo __hwasan_tag_mismatch_v2
|
||||
b.lo mismatch
|
||||
|
||||
// Load the real tag from the last byte of the granule and compare against
|
||||
// the pointer tag.
|
||||
orr x16, x0, #0xf
|
||||
ldrb w16, [x16]
|
||||
cmp x16, x0, lsr #56
|
||||
b.ne __hwasan_tag_mismatch_v2
|
||||
b.ne mismatch
|
||||
|
||||
// Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
|
||||
// call and resume execution.
|
||||
|
@ -108,6 +108,8 @@ __hwasan_tag_mismatch:
|
|||
.global __hwasan_tag_mismatch_v2
|
||||
.type __hwasan_tag_mismatch_v2, %function
|
||||
__hwasan_tag_mismatch_v2:
|
||||
// Avoid using global label, to prevent "relocation out of range".
|
||||
mismatch:
|
||||
CFI_STARTPROC
|
||||
BTI_J
|
||||
|
||||
|
|
|
@ -58,6 +58,16 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
|
|||
#endif
|
||||
InitStackAndTls(state);
|
||||
dtls_ = DTLS_Get();
|
||||
AllocatorThreadStart(allocator_cache());
|
||||
|
||||
if (flags()->verbose_threads) {
|
||||
if (IsMainThread()) {
|
||||
Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
|
||||
sizeof(Thread), heap_allocations_->SizeInBytes(),
|
||||
stack_allocations_->size() * sizeof(uptr));
|
||||
}
|
||||
Print("Creating : ");
|
||||
}
|
||||
}
|
||||
|
||||
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
|
||||
|
@ -79,28 +89,23 @@ void Thread::InitStackRingBuffer(uptr stack_buffer_start,
|
|||
CHECK(MemIsApp(stack_bottom_));
|
||||
CHECK(MemIsApp(stack_top_ - 1));
|
||||
}
|
||||
|
||||
if (flags()->verbose_threads) {
|
||||
if (IsMainThread()) {
|
||||
Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
|
||||
sizeof(Thread), heap_allocations_->SizeInBytes(),
|
||||
stack_allocations_->size() * sizeof(uptr));
|
||||
}
|
||||
Print("Creating : ");
|
||||
}
|
||||
}
|
||||
|
||||
void Thread::ClearShadowForThreadStackAndTLS() {
|
||||
if (stack_top_ != stack_bottom_)
|
||||
TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
|
||||
TagMemory(UntagAddr(stack_bottom_),
|
||||
UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
|
||||
GetTagFromPointer(stack_top_));
|
||||
if (tls_begin_ != tls_end_)
|
||||
TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
|
||||
TagMemory(UntagAddr(tls_begin_),
|
||||
UntagAddr(tls_end_) - UntagAddr(tls_begin_),
|
||||
GetTagFromPointer(tls_begin_));
|
||||
}
|
||||
|
||||
void Thread::Destroy() {
|
||||
if (flags()->verbose_threads)
|
||||
Print("Destroying: ");
|
||||
AllocatorSwallowThreadLocalCache(allocator_cache());
|
||||
AllocatorThreadFinish(allocator_cache());
|
||||
ClearShadowForThreadStackAndTLS();
|
||||
if (heap_allocations_)
|
||||
heap_allocations_->Delete();
|
||||
|
@ -173,9 +178,15 @@ static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
|
|||
[os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
|
||||
}
|
||||
|
||||
void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }
|
||||
void LockThreads() {
|
||||
__hwasan::hwasanThreadList().Lock();
|
||||
__hwasan::hwasanThreadArgRetval().Lock();
|
||||
}
|
||||
|
||||
void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }
|
||||
void UnlockThreads() {
|
||||
__hwasan::hwasanThreadArgRetval().Unlock();
|
||||
__hwasan::hwasanThreadList().Unlock();
|
||||
}
|
||||
|
||||
void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
|
||||
|
||||
|
@ -202,7 +213,10 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
|
|||
InternalMmapVector<Range> *ranges) {}
|
||||
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
|
||||
|
||||
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
|
||||
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
|
||||
__hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
|
||||
}
|
||||
|
||||
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
|
||||
|
||||
} // namespace __lsan
|
||||
|
|
|
@ -1,15 +1,28 @@
|
|||
#include "hwasan_thread_list.h"
|
||||
|
||||
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
|
||||
|
||||
namespace __hwasan {
|
||||
static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
|
||||
|
||||
static HwasanThreadList *hwasan_thread_list;
|
||||
static ThreadArgRetval *thread_data;
|
||||
|
||||
HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
|
||||
ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
|
||||
|
||||
void InitThreadList(uptr storage, uptr size) {
|
||||
CHECK(hwasan_thread_list == nullptr);
|
||||
CHECK_EQ(hwasan_thread_list, nullptr);
|
||||
|
||||
static ALIGNED(alignof(
|
||||
HwasanThreadList)) char thread_list_placeholder[sizeof(HwasanThreadList)];
|
||||
hwasan_thread_list =
|
||||
new (thread_list_placeholder) HwasanThreadList(storage, size);
|
||||
|
||||
CHECK_EQ(thread_data, nullptr);
|
||||
|
||||
static ALIGNED(alignof(
|
||||
ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
|
||||
thread_data = new (thread_data_placeholder) ThreadArgRetval();
|
||||
}
|
||||
|
||||
} // namespace __hwasan
|
||||
} // namespace __hwasan
|
||||
|
|
|
@ -47,8 +47,8 @@
|
|||
#include "hwasan_allocator.h"
|
||||
#include "hwasan_flags.h"
|
||||
#include "hwasan_thread.h"
|
||||
|
||||
#include "sanitizer_common/sanitizer_placement_new.h"
|
||||
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
|
||||
|
||||
namespace __hwasan {
|
||||
|
||||
|
@ -131,9 +131,9 @@ class SANITIZER_MUTEX HwasanThreadList {
|
|||
|
||||
void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
|
||||
RemoveThreadStats(t);
|
||||
RemoveThreadFromLiveList(t);
|
||||
t->Destroy();
|
||||
DontNeedThread(t);
|
||||
RemoveThreadFromLiveList(t);
|
||||
SpinMutexLock l(&free_list_mutex_);
|
||||
free_list_.push_back(t);
|
||||
}
|
||||
|
@ -157,7 +157,7 @@ class SANITIZER_MUTEX HwasanThreadList {
|
|||
}
|
||||
|
||||
template <class CB>
|
||||
Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(stats_mutex_) {
|
||||
Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
|
||||
CheckLocked();
|
||||
for (Thread *t : live_list_)
|
||||
if (cb(t))
|
||||
|
@ -199,7 +199,7 @@ class SANITIZER_MUTEX HwasanThreadList {
|
|||
CHECK(IsAligned(free_space_, align));
|
||||
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
|
||||
free_space_ += thread_alloc_size_;
|
||||
CHECK(free_space_ <= free_space_end_ && "out of thread memory");
|
||||
CHECK_LE(free_space_, free_space_end_);
|
||||
return t;
|
||||
}
|
||||
|
||||
|
@ -222,5 +222,6 @@ class SANITIZER_MUTEX HwasanThreadList {
|
|||
|
||||
void InitThreadList(uptr storage, uptr size);
|
||||
HwasanThreadList &hwasanThreadList();
|
||||
ThreadArgRetval &hwasanThreadArgRetval();
|
||||
|
||||
} // namespace __hwasan
|
||||
|
|
|
@ -11,82 +11,89 @@
|
|||
#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
|
||||
#define SANITIZER_ALLOCATOR_INTERFACE_H
|
||||
|
||||
#include <sanitizer/common_interface_defs.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/* Returns the estimated number of bytes that will be reserved by allocator
|
||||
for request of "size" bytes. If allocator can't allocate that much
|
||||
memory, returns the maximal possible allocation size, otherwise returns
|
||||
"size". */
|
||||
size_t __sanitizer_get_estimated_allocated_size(size_t size);
|
||||
/* Returns the estimated number of bytes that will be reserved by allocator
|
||||
for request of "size" bytes. If allocator can't allocate that much
|
||||
memory, returns the maximal possible allocation size, otherwise returns
|
||||
"size". */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_estimated_allocated_size(size_t size);
|
||||
|
||||
/* Returns true if p was returned by the allocator and
|
||||
is not yet freed. */
|
||||
int __sanitizer_get_ownership(const volatile void *p);
|
||||
/* Returns true if p was returned by the allocator and
|
||||
is not yet freed. */
|
||||
int SANITIZER_CDECL __sanitizer_get_ownership(const volatile void *p);
|
||||
|
||||
/* If a pointer lies within an allocation, it will return the start address
|
||||
of the allocation. Otherwise, it returns nullptr. */
|
||||
const void *__sanitizer_get_allocated_begin(const void *p);
|
||||
/* If a pointer lies within an allocation, it will return the start address
|
||||
of the allocation. Otherwise, it returns nullptr. */
|
||||
const void *SANITIZER_CDECL __sanitizer_get_allocated_begin(const void *p);
|
||||
|
||||
/* Returns the number of bytes reserved for the pointer p.
|
||||
Requires (get_ownership(p) == true) or (p == 0). */
|
||||
size_t __sanitizer_get_allocated_size(const volatile void *p);
|
||||
/* Returns the number of bytes reserved for the pointer p.
|
||||
Requires (get_ownership(p) == true) or (p == 0). */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_allocated_size(const volatile void *p);
|
||||
|
||||
/* Number of bytes, allocated and not yet freed by the application. */
|
||||
size_t __sanitizer_get_current_allocated_bytes(void);
|
||||
/* Returns the number of bytes reserved for the pointer p.
|
||||
Requires __sanitizer_get_allocated_begin(p) == p. */
|
||||
size_t SANITIZER_CDECL
|
||||
__sanitizer_get_allocated_size_fast(const volatile void *p);
|
||||
|
||||
/* Number of bytes, mmaped by the allocator to fulfill allocation requests.
|
||||
Generally, for request of X bytes, allocator can reserve and add to free
|
||||
lists a large number of chunks of size X to use them for future requests.
|
||||
All these chunks count toward the heap size. Currently, allocator never
|
||||
releases memory to OS (instead, it just puts freed chunks to free
|
||||
lists). */
|
||||
size_t __sanitizer_get_heap_size(void);
|
||||
/* Number of bytes, allocated and not yet freed by the application. */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_current_allocated_bytes(void);
|
||||
|
||||
/* Number of bytes, mmaped by the allocator, which can be used to fulfill
|
||||
allocation requests. When a user program frees memory chunk, it can first
|
||||
fall into quarantine and will count toward __sanitizer_get_free_bytes()
|
||||
later. */
|
||||
size_t __sanitizer_get_free_bytes(void);
|
||||
/* Number of bytes, mmaped by the allocator to fulfill allocation requests.
|
||||
Generally, for request of X bytes, allocator can reserve and add to free
|
||||
lists a large number of chunks of size X to use them for future requests.
|
||||
All these chunks count toward the heap size. Currently, allocator never
|
||||
releases memory to OS (instead, it just puts freed chunks to free
|
||||
lists). */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_heap_size(void);
|
||||
|
||||
/* Number of bytes in unmapped pages, that are released to OS. Currently,
|
||||
always returns 0. */
|
||||
size_t __sanitizer_get_unmapped_bytes(void);
|
||||
/* Number of bytes, mmaped by the allocator, which can be used to fulfill
|
||||
allocation requests. When a user program frees memory chunk, it can first
|
||||
fall into quarantine and will count toward __sanitizer_get_free_bytes()
|
||||
later. */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_free_bytes(void);
|
||||
|
||||
/* Malloc hooks that may be optionally provided by user.
|
||||
__sanitizer_malloc_hook(ptr, size) is called immediately after
|
||||
allocation of "size" bytes, which returned "ptr".
|
||||
__sanitizer_free_hook(ptr) is called immediately before
|
||||
deallocation of "ptr". */
|
||||
void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
|
||||
void __sanitizer_free_hook(const volatile void *ptr);
|
||||
/* Number of bytes in unmapped pages, that are released to OS. Currently,
|
||||
always returns 0. */
|
||||
size_t SANITIZER_CDECL __sanitizer_get_unmapped_bytes(void);
|
||||
|
||||
/* Installs a pair of hooks for malloc/free.
|
||||
Several (currently, 5) hook pairs may be installed, they are executed
|
||||
in the order they were installed and after calling
|
||||
__sanitizer_malloc_hook/__sanitizer_free_hook.
|
||||
Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
|
||||
chained and do not rely on weak symbols working on the platform, but
|
||||
require __sanitizer_install_malloc_and_free_hooks to be called at startup
|
||||
and thus will not be called on malloc/free very early in the process.
|
||||
Returns the number of hooks currently installed or 0 on failure.
|
||||
Not thread-safe, should be called in the main thread before starting
|
||||
other threads.
|
||||
*/
|
||||
int __sanitizer_install_malloc_and_free_hooks(
|
||||
void (*malloc_hook)(const volatile void *, size_t),
|
||||
void (*free_hook)(const volatile void *));
|
||||
/* Malloc hooks that may be optionally provided by user.
|
||||
__sanitizer_malloc_hook(ptr, size) is called immediately after
|
||||
allocation of "size" bytes, which returned "ptr".
|
||||
__sanitizer_free_hook(ptr) is called immediately before
|
||||
deallocation of "ptr". */
|
||||
void SANITIZER_CDECL __sanitizer_malloc_hook(const volatile void *ptr,
|
||||
size_t size);
|
||||
void SANITIZER_CDECL __sanitizer_free_hook(const volatile void *ptr);
|
||||
|
||||
/* Drains allocator quarantines (calling thread's and global ones), returns
|
||||
freed memory back to OS and releases other non-essential internal allocator
|
||||
resources in attempt to reduce process RSS.
|
||||
Currently available with ASan only.
|
||||
*/
|
||||
void __sanitizer_purge_allocator(void);
|
||||
/* Installs a pair of hooks for malloc/free.
|
||||
Several (currently, 5) hook pairs may be installed, they are executed
|
||||
in the order they were installed and after calling
|
||||
__sanitizer_malloc_hook/__sanitizer_free_hook.
|
||||
Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
|
||||
chained and do not rely on weak symbols working on the platform, but
|
||||
require __sanitizer_install_malloc_and_free_hooks to be called at startup
|
||||
and thus will not be called on malloc/free very early in the process.
|
||||
Returns the number of hooks currently installed or 0 on failure.
|
||||
Not thread-safe, should be called in the main thread before starting
|
||||
other threads.
|
||||
*/
|
||||
int SANITIZER_CDECL __sanitizer_install_malloc_and_free_hooks(
|
||||
void(SANITIZER_CDECL *malloc_hook)(const volatile void *, size_t),
|
||||
void(SANITIZER_CDECL *free_hook)(const volatile void *));
|
||||
|
||||
/* Drains allocator quarantines (calling thread's and global ones), returns
|
||||
freed memory back to OS and releases other non-essential internal allocator
|
||||
resources in attempt to reduce process RSS.
|
||||
Currently available with ASan only.
|
||||
*/
|
||||
void SANITIZER_CDECL __sanitizer_purge_allocator(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -31,7 +31,8 @@ extern "C" {
|
|||
///
|
||||
/// \param addr Start of memory region.
|
||||
/// \param size Size of memory region.
|
||||
void __asan_poison_memory_region(void const volatile *addr, size_t size);
|
||||
void SANITIZER_CDECL __asan_poison_memory_region(void const volatile *addr,
|
||||
size_t size);
|
||||
|
||||
/// Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
|
||||
///
|
||||
|
@ -45,10 +46,19 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size);
|
|||
///
|
||||
/// \param addr Start of memory region.
|
||||
/// \param size Size of memory region.
|
||||
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
|
||||
void SANITIZER_CDECL __asan_unpoison_memory_region(void const volatile *addr,
|
||||
size_t size);
|
||||
|
||||
// Macros provided for convenience.
|
||||
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
|
||||
#ifdef __has_feature
|
||||
#if __has_feature(address_sanitizer)
|
||||
#define ASAN_DEFINE_REGION_MACROS
|
||||
#endif
|
||||
#elif defined(__SANITIZE_ADDRESS__)
|
||||
#define ASAN_DEFINE_REGION_MACROS
|
||||
#endif
|
||||
|
||||
#ifdef ASAN_DEFINE_REGION_MACROS
|
||||
/// Marks a memory region as unaddressable.
|
||||
///
|
||||
/// \note Macro provided for convenience; defined as a no-op if ASan is not
|
||||
|
@ -56,7 +66,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
|
|||
///
|
||||
/// \param addr Start of memory region.
|
||||
/// \param size Size of memory region.
|
||||
#define ASAN_POISON_MEMORY_REGION(addr, size) \
|
||||
#define ASAN_POISON_MEMORY_REGION(addr, size) \
|
||||
__asan_poison_memory_region((addr), (size))
|
||||
|
||||
/// Marks a memory region as addressable.
|
||||
|
@ -66,14 +76,13 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
|
|||
///
|
||||
/// \param addr Start of memory region.
|
||||
/// \param size Size of memory region.
|
||||
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
|
||||
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
|
||||
__asan_unpoison_memory_region((addr), (size))
|
||||
#else
|
||||
#define ASAN_POISON_MEMORY_REGION(addr, size) \
|
||||
((void)(addr), (void)(size))
|
||||
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
|
||||
((void)(addr), (void)(size))
|
||||
#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
|
||||
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
|
||||
#endif
|
||||
#undef ASAN_DEFINE_REGION_MACROS
|
||||
|
||||
/// Checks if an address is poisoned.
|
||||
///
|
||||
|
@ -85,7 +94,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
|
|||
///
|
||||
/// \retval 1 Address is poisoned.
|
||||
/// \retval 0 Address is not poisoned.
|
||||
int __asan_address_is_poisoned(void const volatile *addr);
|
||||
int SANITIZER_CDECL __asan_address_is_poisoned(void const volatile *addr);
|
||||
|
||||
/// Checks if a region is poisoned.
|
||||
///
|
||||
|
@ -95,14 +104,14 @@ int __asan_address_is_poisoned(void const volatile *addr);
|
|||
/// \param beg Start of memory region.
|
||||
/// \param size Start of memory region.
|
||||
/// \returns Address of first poisoned byte.
|
||||
void *__asan_region_is_poisoned(void *beg, size_t size);
|
||||
void *SANITIZER_CDECL __asan_region_is_poisoned(void *beg, size_t size);
|
||||
|
||||
/// Describes an address (useful for calling from the debugger).
|
||||
///
|
||||
/// Prints the description of <c><i>addr</i></c>.
|
||||
///
|
||||
/// \param addr Address to describe.
|
||||
void __asan_describe_address(void *addr);
|
||||
void SANITIZER_CDECL __asan_describe_address(void *addr);
|
||||
|
||||
/// Checks if an error has been or is being reported (useful for calling from
|
||||
/// the debugger to get information about an ASan error).
|
||||
|
@ -111,7 +120,7 @@ void __asan_describe_address(void *addr);
|
|||
///
|
||||
/// \returns 1 if an error has been (or is being) reported. Otherwise returns
|
||||
/// 0.
|
||||
int __asan_report_present(void);
|
||||
int SANITIZER_CDECL __asan_report_present(void);
|
||||
|
||||
/// Gets the PC (program counter) register value of an ASan error (useful for
|
||||
/// calling from the debugger).
|
||||
|
@ -120,7 +129,7 @@ int __asan_report_present(void);
|
|||
/// Otherwise returns 0.
|
||||
///
|
||||
/// \returns PC value.
|
||||
void *__asan_get_report_pc(void);
|
||||
void *SANITIZER_CDECL __asan_get_report_pc(void);
|
||||
|
||||
/// Gets the BP (base pointer) register value of an ASan error (useful for
|
||||
/// calling from the debugger).
|
||||
|
@ -129,7 +138,7 @@ void *__asan_get_report_pc(void);
|
|||
/// Otherwise returns 0.
|
||||
///
|
||||
/// \returns BP value.
|
||||
void *__asan_get_report_bp(void);
|
||||
void *SANITIZER_CDECL __asan_get_report_bp(void);
|
||||
|
||||
/// Gets the SP (stack pointer) register value of an ASan error (useful for
|
||||
/// calling from the debugger).
|
||||
|
@ -138,7 +147,7 @@ void *__asan_get_report_bp(void);
|
|||
/// Otherwise returns 0.
|
||||
///
|
||||
/// \returns SP value.
|
||||
void *__asan_get_report_sp(void);
|
||||
void *SANITIZER_CDECL __asan_get_report_sp(void);
|
||||
|
||||
/// Gets the address of the report buffer of an ASan error (useful for calling
|
||||
/// from the debugger).
|
||||
|
@ -147,7 +156,7 @@ void *__asan_get_report_sp(void);
|
|||
/// reported. Otherwise returns 0.
|
||||
///
|
||||
/// \returns Address of report buffer.
|
||||
void *__asan_get_report_address(void);
|
||||
void *SANITIZER_CDECL __asan_get_report_address(void);
|
||||
|
||||
/// Gets access type of an ASan error (useful for calling from the debugger).
|
||||
///
|
||||
|
@ -155,7 +164,7 @@ void *__asan_get_report_address(void);
|
|||
/// reported. Otherwise returns 0.
|
||||
///
|
||||
/// \returns Access type (0 = read, 1 = write).
|
||||
int __asan_get_report_access_type(void);
|
||||
int SANITIZER_CDECL __asan_get_report_access_type(void);
|
||||
|
||||
/// Gets access size of an ASan error (useful for calling from the debugger).
|
||||
///
|
||||
|
@ -163,7 +172,7 @@ int __asan_get_report_access_type(void);
|
|||
/// returns 0.
|
||||
///
|
||||
/// \returns Access size in bytes.
|
||||
size_t __asan_get_report_access_size(void);
|
||||
size_t SANITIZER_CDECL __asan_get_report_access_size(void);
|
||||
|
||||
/// Gets the bug description of an ASan error (useful for calling from a
|
||||
/// debugger).
|
||||
|
@ -171,7 +180,7 @@ size_t __asan_get_report_access_size(void);
|
|||
/// \returns Returns a bug description if an error has been (or is being)
|
||||
/// reported - for example, "heap-use-after-free". Otherwise returns an empty
|
||||
/// string.
|
||||
const char *__asan_get_report_description(void);
|
||||
const char *SANITIZER_CDECL __asan_get_report_description(void);
|
||||
|
||||
/// Gets information about a pointer (useful for calling from the debugger).
|
||||
///
|
||||
|
@ -192,8 +201,10 @@ const char *__asan_get_report_description(void);
|
|||
/// \param[out] region_size Size of the region in bytes.
|
||||
///
|
||||
/// \returns Returns the category of the given pointer as a constant string.
|
||||
const char *__asan_locate_address(void *addr, char *name, size_t name_size,
|
||||
void **region_address, size_t *region_size);
|
||||
const char *SANITIZER_CDECL __asan_locate_address(void *addr, char *name,
|
||||
size_t name_size,
|
||||
void **region_address,
|
||||
size_t *region_size);
|
||||
|
||||
/// Gets the allocation stack trace and thread ID for a heap address (useful
|
||||
/// for calling from the debugger).
|
||||
|
@ -207,8 +218,8 @@ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
|
|||
/// \param[out] thread_id The thread ID of the address.
|
||||
///
|
||||
/// \returns Returns the number of stored frames or 0 on error.
|
||||
size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
|
||||
int *thread_id);
|
||||
size_t SANITIZER_CDECL __asan_get_alloc_stack(void *addr, void **trace,
|
||||
size_t size, int *thread_id);
|
||||
|
||||
/// Gets the free stack trace and thread ID for a heap address (useful for
|
||||
/// calling from the debugger).
|
||||
|
@ -222,15 +233,16 @@ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
|
|||
/// \param[out] thread_id The thread ID of the address.
|
||||
///
|
||||
/// \returns Returns the number of stored frames or 0 on error.
|
||||
size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
|
||||
int *thread_id);
|
||||
size_t SANITIZER_CDECL __asan_get_free_stack(void *addr, void **trace,
|
||||
size_t size, int *thread_id);
|
||||
|
||||
/// Gets the current shadow memory mapping (useful for calling from the
|
||||
/// debugger).
|
||||
///
|
||||
/// \param[out] shadow_scale Shadow scale value.
|
||||
/// \param[out] shadow_offset Offset value.
|
||||
void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
|
||||
void SANITIZER_CDECL __asan_get_shadow_mapping(size_t *shadow_scale,
|
||||
size_t *shadow_offset);
|
||||
|
||||
/// This is an internal function that is called to report an error. However,
|
||||
/// it is still a part of the interface because you might want to set a
|
||||
|
@ -242,29 +254,31 @@ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
|
|||
/// \param addr Address of the ASan error.
|
||||
/// \param is_write True if the error is a write error; false otherwise.
|
||||
/// \param access_size Size of the memory access of the ASan error.
|
||||
void __asan_report_error(void *pc, void *bp, void *sp,
|
||||
void *addr, int is_write, size_t access_size);
|
||||
void SANITIZER_CDECL __asan_report_error(void *pc, void *bp, void *sp,
|
||||
void *addr, int is_write,
|
||||
size_t access_size);
|
||||
|
||||
// Deprecated. Call __sanitizer_set_death_callback instead.
|
||||
void __asan_set_death_callback(void (*callback)(void));
|
||||
void SANITIZER_CDECL __asan_set_death_callback(void (*callback)(void));
|
||||
|
||||
/// Sets the callback function to be called during ASan error reporting.
|
||||
///
|
||||
/// The callback provides a string pointer to the report.
|
||||
///
|
||||
/// \param callback User-provided function.
|
||||
void __asan_set_error_report_callback(void (*callback)(const char *));
|
||||
void SANITIZER_CDECL
|
||||
__asan_set_error_report_callback(void (*callback)(const char *));
|
||||
|
||||
/// User-provided callback on ASan errors.
|
||||
///
|
||||
/// You can provide a function that would be called immediately when ASan
|
||||
/// detects an error. This is useful in cases when ASan detects an error but
|
||||
/// your program crashes before the ASan report is printed.
|
||||
void __asan_on_error(void);
|
||||
void SANITIZER_CDECL __asan_on_error(void);
|
||||
|
||||
/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
|
||||
/// debugger).
|
||||
void __asan_print_accumulated_stats(void);
|
||||
void SANITIZER_CDECL __asan_print_accumulated_stats(void);
|
||||
|
||||
/// User-provided default option settings.
|
||||
///
|
||||
|
@ -273,7 +287,7 @@ void __asan_print_accumulated_stats(void);
|
|||
/// <c>verbosity=1:halt_on_error=0</c>).
|
||||
///
|
||||
/// \returns Default options string.
|
||||
const char* __asan_default_options(void);
|
||||
const char *SANITIZER_CDECL __asan_default_options(void);
|
||||
|
||||
// The following two functions facilitate garbage collection in presence of
|
||||
// ASan's fake stack.
|
||||
|
@ -285,7 +299,7 @@ const char* __asan_default_options(void);
|
|||
/// does not have a fake stack.
|
||||
///
|
||||
/// \returns An opaque handler to the fake stack or NULL.
|
||||
void *__asan_get_current_fake_stack(void);
|
||||
void *SANITIZER_CDECL __asan_get_current_fake_stack(void);
|
||||
|
||||
/// Checks if an address belongs to a given fake stack.
|
||||
///
|
||||
|
@ -305,22 +319,22 @@ void *__asan_get_current_fake_stack(void);
|
|||
/// \param[out] beg Beginning of fake frame.
|
||||
/// \param[out] end End of fake frame.
|
||||
/// \returns Stack address or NULL.
|
||||
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
|
||||
void **end);
|
||||
void *SANITIZER_CDECL __asan_addr_is_in_fake_stack(void *fake_stack, void *addr,
|
||||
void **beg, void **end);
|
||||
|
||||
/// Performs shadow memory cleanup of the current thread's stack before a
|
||||
/// function marked with the <c>[[noreturn]]</c> attribute is called.
|
||||
///
|
||||
/// To avoid false positives on the stack, must be called before no-return
|
||||
/// functions like <c>_exit()</c> and <c>execl()</c>.
|
||||
void __asan_handle_no_return(void);
|
||||
void SANITIZER_CDECL __asan_handle_no_return(void);
|
||||
|
||||
/// Update allocation stack trace for the given allocation to the current stack
|
||||
/// trace. Returns 1 if successful, 0 if not.
|
||||
int __asan_update_allocation_context(void* addr);
|
||||
int SANITIZER_CDECL __asan_update_allocation_context(void *addr);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_ASAN_INTERFACE_H
|
||||
#endif // SANITIZER_ASAN_INTERFACE_H
|
||||
|
|
|
@ -15,9 +15,12 @@
|
|||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// GCC does not understand __has_feature.
|
||||
#if !defined(__has_feature)
|
||||
#define __has_feature(x) 0
|
||||
// Windows allows a user to set their default calling convention, but we always
|
||||
// use __cdecl
|
||||
#ifdef _WIN32
|
||||
#define SANITIZER_CDECL __cdecl
|
||||
#else
|
||||
#define SANITIZER_CDECL
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
@ -39,71 +42,73 @@ typedef struct {
|
|||
} __sanitizer_sandbox_arguments;
|
||||
|
||||
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
|
||||
void __sanitizer_set_report_path(const char *path);
|
||||
void SANITIZER_CDECL __sanitizer_set_report_path(const char *path);
|
||||
// Tell the tools to write their reports to the provided file descriptor
|
||||
// (casted to void *).
|
||||
void __sanitizer_set_report_fd(void *fd);
|
||||
void SANITIZER_CDECL __sanitizer_set_report_fd(void *fd);
|
||||
// Get the current full report file path, if a path was specified by
|
||||
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
|
||||
const char *__sanitizer_get_report_path();
|
||||
const char *SANITIZER_CDECL __sanitizer_get_report_path();
|
||||
|
||||
// Notify the tools that the sandbox is going to be turned on. The reserved
|
||||
// parameter will be used in the future to hold a structure with functions
|
||||
// that the tools may call to bypass the sandbox.
|
||||
void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
|
||||
void SANITIZER_CDECL
|
||||
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
|
||||
|
||||
// This function is called by the tool when it has just finished reporting
|
||||
// an error. 'error_summary' is a one-line string that summarizes
|
||||
// the error message. This function can be overridden by the client.
|
||||
void __sanitizer_report_error_summary(const char *error_summary);
|
||||
void SANITIZER_CDECL
|
||||
__sanitizer_report_error_summary(const char *error_summary);
|
||||
|
||||
// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen
|
||||
// in unaligned loads/stores. To find such bugs reliably, you need to replace
|
||||
// plain unaligned loads/stores with these calls.
|
||||
|
||||
/// Loads a 16-bit unaligned value.
|
||||
///
|
||||
//
|
||||
/// \param p Pointer to unaligned memory.
|
||||
///
|
||||
/// \returns Loaded value.
|
||||
uint16_t __sanitizer_unaligned_load16(const void *p);
|
||||
uint16_t SANITIZER_CDECL __sanitizer_unaligned_load16(const void *p);
|
||||
|
||||
/// Loads a 32-bit unaligned value.
|
||||
///
|
||||
/// \param p Pointer to unaligned memory.
|
||||
///
|
||||
/// \returns Loaded value.
|
||||
uint32_t __sanitizer_unaligned_load32(const void *p);
|
||||
uint32_t SANITIZER_CDECL __sanitizer_unaligned_load32(const void *p);
|
||||
|
||||
/// Loads a 64-bit unaligned value.
|
||||
///
|
||||
/// \param p Pointer to unaligned memory.
|
||||
///
|
||||
/// \returns Loaded value.
|
||||
uint64_t __sanitizer_unaligned_load64(const void *p);
|
||||
uint64_t SANITIZER_CDECL __sanitizer_unaligned_load64(const void *p);
|
||||
|
||||
/// Stores a 16-bit unaligned value.
|
||||
///
|
||||
/// \param p Pointer to unaligned memory.
|
||||
/// \param x 16-bit value to store.
|
||||
void __sanitizer_unaligned_store16(void *p, uint16_t x);
|
||||
void SANITIZER_CDECL __sanitizer_unaligned_store16(void *p, uint16_t x);
|
||||
|
||||
/// Stores a 32-bit unaligned value.
|
||||
///
|
||||
/// \param p Pointer to unaligned memory.
|
||||
/// \param x 32-bit value to store.
|
||||
void __sanitizer_unaligned_store32(void *p, uint32_t x);
|
||||
void SANITIZER_CDECL __sanitizer_unaligned_store32(void *p, uint32_t x);
|
||||
|
||||
/// Stores a 64-bit unaligned value.
|
||||
///
|
||||
/// \param p Pointer to unaligned memory.
|
||||
/// \param x 64-bit value to store.
|
||||
void __sanitizer_unaligned_store64(void *p, uint64_t x);
|
||||
void SANITIZER_CDECL __sanitizer_unaligned_store64(void *p, uint64_t x);
|
||||
|
||||
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
|
||||
// to ensure only one report is printed when multiple errors occur
|
||||
// simultaneously.
|
||||
int __sanitizer_acquire_crash_state();
|
||||
int SANITIZER_CDECL __sanitizer_acquire_crash_state();
|
||||
|
||||
/// Annotates the current state of a contiguous container, such as
|
||||
/// <c>std::vector</c>, <c>std::string</c>, or similar.
|
||||
|
@ -129,35 +134,30 @@ int __sanitizer_acquire_crash_state();
|
|||
/// state <c>mid == end</c>, so that should be the final state when the
|
||||
/// container is destroyed or when the container reallocates the storage.
|
||||
///
|
||||
/// For ASan, <c><i>beg</i></c> should be 8-aligned and <c><i>end</i></c>
|
||||
/// should be either 8-aligned or it should point to the end of a separate
|
||||
/// heap-, stack-, or global-allocated buffer. So the following example will
|
||||
/// not work:
|
||||
/// For ASan, <c><i>beg</i></c> no longer needs to be 8-aligned,
|
||||
/// first and last granule may be shared with other objects
|
||||
/// and therefore the function can be used for any allocator.
|
||||
///
|
||||
/// The following example shows how to use the function:
|
||||
///
|
||||
/// \code
|
||||
/// int64_t x[2]; // 16 bytes, 8-aligned
|
||||
/// char *beg = (char *)&x[0];
|
||||
/// char *end = beg + 12; // Not 8-aligned, not the end of the buffer
|
||||
/// \endcode
|
||||
///
|
||||
/// The following, however, will work:
|
||||
/// \code
|
||||
/// int32_t x[3]; // 12 bytes, but 8-aligned under ASan.
|
||||
/// int32_t x[3]; // 12 bytes
|
||||
/// char *beg = (char*)&x[0];
|
||||
/// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer
|
||||
/// char *end = beg + 12;
|
||||
/// __sanitizer_annotate_contiguous_container(beg, end, beg, end);
|
||||
/// \endcode
|
||||
///
|
||||
/// \note Use this function with caution and do not use for anything other
|
||||
/// than vector-like classes.
|
||||
/// \note Unaligned <c><i>beg</i></c> or <c><i>end</i></c> may miss bugs in
|
||||
/// these granules.
|
||||
///
|
||||
/// \param beg Beginning of memory region.
|
||||
/// \param end End of memory region.
|
||||
/// \param old_mid Old middle of memory region.
|
||||
/// \param new_mid New middle of memory region.
|
||||
void __sanitizer_annotate_contiguous_container(const void *beg,
|
||||
const void *end,
|
||||
const void *old_mid,
|
||||
const void *new_mid);
|
||||
void SANITIZER_CDECL __sanitizer_annotate_contiguous_container(
|
||||
const void *beg, const void *end, const void *old_mid, const void *new_mid);
|
||||
|
||||
/// Similar to <c>__sanitizer_annotate_contiguous_container</c>.
|
||||
///
|
||||
|
@ -188,7 +188,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg,
|
|||
/// \param old_container_end End of used region.
|
||||
/// \param new_container_beg New beginning of used region.
|
||||
/// \param new_container_end New end of used region.
|
||||
void __sanitizer_annotate_double_ended_contiguous_container(
|
||||
void SANITIZER_CDECL __sanitizer_annotate_double_ended_contiguous_container(
|
||||
const void *storage_beg, const void *storage_end,
|
||||
const void *old_container_beg, const void *old_container_end,
|
||||
const void *new_container_beg, const void *new_container_end);
|
||||
|
@ -209,8 +209,9 @@ void __sanitizer_annotate_double_ended_contiguous_container(
|
|||
///
|
||||
/// \returns True if the contiguous container <c>[beg, end)</c> is properly
|
||||
/// poisoned.
|
||||
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
|
||||
const void *end);
|
||||
int SANITIZER_CDECL __sanitizer_verify_contiguous_container(const void *beg,
|
||||
const void *mid,
|
||||
const void *end);
|
||||
|
||||
/// Returns true if the double ended contiguous
|
||||
/// container <c>[storage_beg, storage_end)</c> is properly poisoned.
|
||||
|
@ -233,7 +234,7 @@ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
|
|||
/// \returns True if the double-ended contiguous container <c>[storage_beg,
|
||||
/// container_beg, container_end, end)</c> is properly poisoned - only
|
||||
/// [container_beg; container_end) is addressable.
|
||||
int __sanitizer_verify_double_ended_contiguous_container(
|
||||
int SANITIZER_CDECL __sanitizer_verify_double_ended_contiguous_container(
|
||||
const void *storage_beg, const void *container_beg,
|
||||
const void *container_end, const void *storage_end);
|
||||
|
||||
|
@ -247,9 +248,8 @@ int __sanitizer_verify_double_ended_contiguous_container(
|
|||
/// \param end Old end of memory region.
|
||||
///
|
||||
/// \returns The bad address or NULL.
|
||||
const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
|
||||
const void *mid,
|
||||
const void *end);
|
||||
const void *SANITIZER_CDECL __sanitizer_contiguous_container_find_bad_address(
|
||||
const void *beg, const void *mid, const void *end);
|
||||
|
||||
/// returns the address of the first improperly poisoned byte.
|
||||
///
|
||||
|
@ -261,13 +261,14 @@ const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
|
|||
/// \param storage_end End of memory region.
|
||||
///
|
||||
/// \returns The bad address or NULL.
|
||||
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
|
||||
const void *SANITIZER_CDECL
|
||||
__sanitizer_double_ended_contiguous_container_find_bad_address(
|
||||
const void *storage_beg, const void *container_beg,
|
||||
const void *container_end, const void *storage_end);
|
||||
|
||||
/// Prints the stack trace leading to this call (useful for calling from the
|
||||
/// debugger).
|
||||
void __sanitizer_print_stack_trace(void);
|
||||
void SANITIZER_CDECL __sanitizer_print_stack_trace(void);
|
||||
|
||||
// Symbolizes the supplied 'pc' using the format string 'fmt'.
|
||||
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
|
||||
|
@ -279,17 +280,20 @@ void __sanitizer_print_stack_trace(void);
|
|||
// Inlined frames can be removed with 'symbolize_inline_frames=0'.
|
||||
// The format syntax is described in
|
||||
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
|
||||
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
|
||||
size_t out_buf_size);
|
||||
void SANITIZER_CDECL __sanitizer_symbolize_pc(void *pc, const char *fmt,
|
||||
char *out_buf,
|
||||
size_t out_buf_size);
|
||||
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
|
||||
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
|
||||
char *out_buf, size_t out_buf_size);
|
||||
void SANITIZER_CDECL __sanitizer_symbolize_global(void *data_ptr,
|
||||
const char *fmt,
|
||||
char *out_buf,
|
||||
size_t out_buf_size);
|
||||
// Determine the return address.
|
||||
#if !defined(_MSC_VER) || defined(__clang__)
|
||||
#define __sanitizer_return_address() \
|
||||
__builtin_extract_return_addr(__builtin_return_address(0))
|
||||
#else
|
||||
extern "C" void *_ReturnAddress(void);
|
||||
void *SANITIZER_CDECL _ReturnAddress(void);
|
||||
#pragma intrinsic(_ReturnAddress)
|
||||
#define __sanitizer_return_address() _ReturnAddress()
|
||||
#endif
|
||||
|
@ -299,8 +303,7 @@ extern "C" void *_ReturnAddress(void);
|
|||
/// Passing 0 will unset the callback.
|
||||
///
|
||||
/// \param callback User-provided callback.
|
||||
void __sanitizer_set_death_callback(void (*callback)(void));
|
||||
|
||||
void SANITIZER_CDECL __sanitizer_set_death_callback(void (*callback)(void));
|
||||
|
||||
// Interceptor hooks.
|
||||
// Whenever a libc function interceptor is called, it checks if the
|
||||
|
@ -316,8 +319,10 @@ void __sanitizer_set_death_callback(void (*callback)(void));
|
|||
/// \param s2 Pointer to block of memory.
|
||||
/// \param n Number of bytes to compare.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
|
||||
const void *s2, size_t n, int result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_memcmp(void *called_pc,
|
||||
const void *s1,
|
||||
const void *s2, size_t n,
|
||||
int result);
|
||||
|
||||
/// Interceptor hook for <c>strncmp()</c>.
|
||||
///
|
||||
|
@ -326,8 +331,10 @@ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
|
|||
/// \param s2 Pointer to block of memory.
|
||||
/// \param n Number of bytes to compare.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
|
||||
const char *s2, size_t n, int result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strncmp(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2, size_t n,
|
||||
int result);
|
||||
|
||||
/// Interceptor hook for <c>strncasecmp()</c>.
|
||||
///
|
||||
|
@ -336,8 +343,10 @@ void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
|
|||
/// \param s2 Pointer to block of memory.
|
||||
/// \param n Number of bytes to compare.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
|
||||
const char *s2, size_t n, int result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strncasecmp(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2, size_t n,
|
||||
int result);
|
||||
|
||||
/// Interceptor hook for <c>strcmp()</c>.
|
||||
///
|
||||
|
@ -345,8 +354,9 @@ void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
|
|||
/// \param s1 Pointer to block of memory.
|
||||
/// \param s2 Pointer to block of memory.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
|
||||
const char *s2, int result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strcmp(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2, int result);
|
||||
|
||||
/// Interceptor hook for <c>strcasecmp()</c>.
|
||||
///
|
||||
|
@ -354,8 +364,10 @@ void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
|
|||
/// \param s1 Pointer to block of memory.
|
||||
/// \param s2 Pointer to block of memory.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
|
||||
const char *s2, int result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strcasecmp(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2,
|
||||
int result);
|
||||
|
||||
/// Interceptor hook for <c>strstr()</c>.
|
||||
///
|
||||
|
@ -363,23 +375,27 @@ void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
|
|||
/// \param s1 Pointer to block of memory.
|
||||
/// \param s2 Pointer to block of memory.
|
||||
/// \param result Value returned by the intercepted function.
|
||||
void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
|
||||
const char *s2, char *result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strstr(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2, char *result);
|
||||
|
||||
void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
|
||||
const char *s2, char *result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_strcasestr(void *called_pc,
|
||||
const char *s1,
|
||||
const char *s2,
|
||||
char *result);
|
||||
|
||||
void __sanitizer_weak_hook_memmem(void *called_pc,
|
||||
const void *s1, size_t len1,
|
||||
const void *s2, size_t len2, void *result);
|
||||
void SANITIZER_CDECL __sanitizer_weak_hook_memmem(void *called_pc,
|
||||
const void *s1, size_t len1,
|
||||
const void *s2, size_t len2,
|
||||
void *result);
|
||||
|
||||
// Prints stack traces for all live heap allocations ordered by total
|
||||
// allocation size until top_percent of total live heap is shown. top_percent
|
||||
// should be between 1 and 100. At most max_number_of_contexts contexts
|
||||
// (stack traces) are printed.
|
||||
// Experimental feature currently available only with ASan on Linux/x86_64.
|
||||
void __sanitizer_print_memory_profile(size_t top_percent,
|
||||
size_t max_number_of_contexts);
|
||||
void SANITIZER_CDECL __sanitizer_print_memory_profile(
|
||||
size_t top_percent, size_t max_number_of_contexts);
|
||||
|
||||
/// Notify ASan that a fiber switch has started (required only if implementing
|
||||
/// your own fiber library).
|
||||
|
@ -408,8 +424,9 @@ void __sanitizer_print_memory_profile(size_t top_percent,
|
|||
/// \param[out] fake_stack_save Fake stack save location.
|
||||
/// \param bottom Bottom address of stack.
|
||||
/// \param size Size of stack in bytes.
|
||||
void __sanitizer_start_switch_fiber(void **fake_stack_save,
|
||||
const void *bottom, size_t size);
|
||||
void SANITIZER_CDECL __sanitizer_start_switch_fiber(void **fake_stack_save,
|
||||
const void *bottom,
|
||||
size_t size);
|
||||
|
||||
/// Notify ASan that a fiber switch has completed (required only if
|
||||
/// implementing your own fiber library).
|
||||
|
@ -422,18 +439,17 @@ void __sanitizer_start_switch_fiber(void **fake_stack_save,
|
|||
/// \param fake_stack_save Fake stack save location.
|
||||
/// \param[out] bottom_old Bottom address of old stack.
|
||||
/// \param[out] size_old Size of old stack in bytes.
|
||||
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
|
||||
const void **bottom_old,
|
||||
size_t *size_old);
|
||||
void SANITIZER_CDECL __sanitizer_finish_switch_fiber(void *fake_stack_save,
|
||||
const void **bottom_old,
|
||||
size_t *size_old);
|
||||
|
||||
// Get full module name and calculate pc offset within it.
|
||||
// Returns 1 if pc belongs to some module, 0 if module was not found.
|
||||
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
|
||||
size_t module_path_len,
|
||||
void **pc_offset);
|
||||
int SANITIZER_CDECL __sanitizer_get_module_and_offset_for_pc(
|
||||
void *pc, char *module_path, size_t module_path_len, void **pc_offset);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
|
||||
#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
|
||||
|
|
|
@ -18,18 +18,19 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Record and dump coverage info.
|
||||
void __sanitizer_cov_dump(void);
|
||||
// Record and dump coverage info.
|
||||
void SANITIZER_CDECL __sanitizer_cov_dump(void);
|
||||
|
||||
// Clear collected coverage info.
|
||||
void __sanitizer_cov_reset(void);
|
||||
// Clear collected coverage info.
|
||||
void SANITIZER_CDECL __sanitizer_cov_reset(void);
|
||||
|
||||
// Dump collected coverage info. Sorts pcs by module into individual .sancov
|
||||
// files.
|
||||
void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
|
||||
// Dump collected coverage info. Sorts pcs by module into individual .sancov
|
||||
// files.
|
||||
void SANITIZER_CDECL __sanitizer_dump_coverage(const uintptr_t *pcs,
|
||||
uintptr_t len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_COVERAG_INTERFACE_H
|
||||
#endif // SANITIZER_COVERAG_INTERFACE_H
|
||||
|
|
|
@ -13,9 +13,9 @@
|
|||
#ifndef DFSAN_INTERFACE_H
|
||||
#define DFSAN_INTERFACE_H
|
||||
|
||||
#include <sanitizer/common_interface_defs.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <sanitizer/common_interface_defs.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -25,29 +25,30 @@ typedef uint8_t dfsan_label;
|
|||
typedef uint32_t dfsan_origin;
|
||||
|
||||
/// Signature of the callback argument to dfsan_set_write_callback().
|
||||
typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
|
||||
typedef void(SANITIZER_CDECL *dfsan_write_callback_t)(int fd, const void *buf,
|
||||
size_t count);
|
||||
|
||||
/// Signature of the callback argument to dfsan_set_conditional_callback().
|
||||
typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
|
||||
dfsan_origin origin);
|
||||
typedef void(SANITIZER_CDECL *dfsan_conditional_callback_t)(
|
||||
dfsan_label label, dfsan_origin origin);
|
||||
|
||||
/// Signature of the callback argument to dfsan_set_reaches_function_callback().
|
||||
/// The description is intended to hold the name of the variable.
|
||||
typedef void (*dfsan_reaches_function_callback_t)(dfsan_label label,
|
||||
dfsan_origin origin,
|
||||
const char *file,
|
||||
unsigned int line,
|
||||
const char *function);
|
||||
typedef void(SANITIZER_CDECL *dfsan_reaches_function_callback_t)(
|
||||
dfsan_label label, dfsan_origin origin, const char *file, unsigned int line,
|
||||
const char *function);
|
||||
|
||||
/// Computes the union of \c l1 and \c l2, resulting in a union label.
|
||||
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
|
||||
dfsan_label SANITIZER_CDECL dfsan_union(dfsan_label l1, dfsan_label l2);
|
||||
|
||||
/// Sets the label for each address in [addr,addr+size) to \c label.
|
||||
void dfsan_set_label(dfsan_label label, void *addr, size_t size);
|
||||
void SANITIZER_CDECL dfsan_set_label(dfsan_label label, void *addr,
|
||||
size_t size);
|
||||
|
||||
/// Sets the label for each address in [addr,addr+size) to the union of the
|
||||
/// current label for that address and \c label.
|
||||
void dfsan_add_label(dfsan_label label, void *addr, size_t size);
|
||||
void SANITIZER_CDECL dfsan_add_label(dfsan_label label, void *addr,
|
||||
size_t size);
|
||||
|
||||
/// Retrieves the label associated with the given data.
|
||||
///
|
||||
|
@ -55,23 +56,24 @@ void dfsan_add_label(dfsan_label label, void *addr, size_t size);
|
|||
/// which can be truncated or extended (implicitly or explicitly) as necessary.
|
||||
/// The truncation/extension operations will preserve the label of the original
|
||||
/// value.
|
||||
dfsan_label dfsan_get_label(long data);
|
||||
dfsan_label SANITIZER_CDECL dfsan_get_label(long data);
|
||||
|
||||
/// Retrieves the immediate origin associated with the given data. The returned
|
||||
/// origin may point to another origin.
|
||||
///
|
||||
/// The type of 'data' is arbitrary.
|
||||
dfsan_origin dfsan_get_origin(long data);
|
||||
dfsan_origin SANITIZER_CDECL dfsan_get_origin(long data);
|
||||
|
||||
/// Retrieves the label associated with the data at the given address.
|
||||
dfsan_label dfsan_read_label(const void *addr, size_t size);
|
||||
dfsan_label SANITIZER_CDECL dfsan_read_label(const void *addr, size_t size);
|
||||
|
||||
/// Return the origin associated with the first taint byte in the size bytes
|
||||
/// from the address addr.
|
||||
dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
|
||||
dfsan_origin SANITIZER_CDECL dfsan_read_origin_of_first_taint(const void *addr,
|
||||
size_t size);
|
||||
|
||||
/// Returns whether the given label label contains the label elem.
|
||||
int dfsan_has_label(dfsan_label label, dfsan_label elem);
|
||||
/// Returns whether the given label contains the label elem.
|
||||
int SANITIZER_CDECL dfsan_has_label(dfsan_label label, dfsan_label elem);
|
||||
|
||||
/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
|
||||
/// with the application memory. Use this call to start over the taint tracking
|
||||
|
@ -79,37 +81,39 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
|
|||
///
|
||||
/// Note: If another thread is working with tainted data during the flush, that
|
||||
/// taint could still be written to shadow after the flush.
|
||||
void dfsan_flush(void);
|
||||
void SANITIZER_CDECL dfsan_flush(void);
|
||||
|
||||
/// Sets a callback to be invoked on calls to write(). The callback is invoked
|
||||
/// before the write is done. The write is not guaranteed to succeed when the
|
||||
/// callback executes. Pass in NULL to remove any callback.
|
||||
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
|
||||
void SANITIZER_CDECL
|
||||
dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
|
||||
|
||||
/// Sets a callback to be invoked on any conditional expressions which have a
|
||||
/// taint label set. This can be used to find where tainted data influences
|
||||
/// the behavior of the program.
|
||||
/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
|
||||
void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
|
||||
void SANITIZER_CDECL
|
||||
dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
|
||||
|
||||
/// Conditional expressions occur during signal handlers.
|
||||
/// Making callbacks that handle signals well is tricky, so when
|
||||
/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
|
||||
/// handlers will add the labels they see into a global (bitwise-or together).
|
||||
/// This function returns all label bits seen in signal handler conditions.
|
||||
dfsan_label dfsan_get_labels_in_signal_conditional();
|
||||
dfsan_label SANITIZER_CDECL dfsan_get_labels_in_signal_conditional();
|
||||
|
||||
/// Sets a callback to be invoked when tainted data reaches a function.
|
||||
/// This could occur at function entry, or at a load instruction.
|
||||
/// These callbacks will only be added if -dfsan-reaches-function-callbacks=1.
|
||||
void dfsan_set_reaches_function_callback(
|
||||
dfsan_reaches_function_callback_t callback);
|
||||
void SANITIZER_CDECL
|
||||
dfsan_set_reaches_function_callback(dfsan_reaches_function_callback_t callback);
|
||||
|
||||
/// Making callbacks that handle signals well is tricky, so when
|
||||
/// -dfsan-reaches-function-callbacks=true, functions reached in signal
|
||||
/// handlers will add the labels they see into a global (bitwise-or together).
|
||||
/// This function returns all label bits seen during signal handlers.
|
||||
dfsan_label dfsan_get_labels_in_signal_reaches_function();
|
||||
dfsan_label SANITIZER_CDECL dfsan_get_labels_in_signal_reaches_function();
|
||||
|
||||
/// Interceptor hooks.
|
||||
/// Whenever a dfsan's custom function is called the corresponding
|
||||
|
@ -117,20 +121,25 @@ dfsan_label dfsan_get_labels_in_signal_reaches_function();
|
|||
/// The primary use case is taint-guided fuzzing, where the fuzzer
|
||||
/// needs to see the parameters of the function and the labels.
|
||||
/// FIXME: implement more hooks.
|
||||
void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
|
||||
size_t n, dfsan_label s1_label,
|
||||
dfsan_label s2_label, dfsan_label n_label);
|
||||
void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
|
||||
size_t n, dfsan_label s1_label,
|
||||
dfsan_label s2_label, dfsan_label n_label);
|
||||
void SANITIZER_CDECL dfsan_weak_hook_memcmp(void *caller_pc, const void *s1,
|
||||
const void *s2, size_t n,
|
||||
dfsan_label s1_label,
|
||||
dfsan_label s2_label,
|
||||
dfsan_label n_label);
|
||||
void SANITIZER_CDECL dfsan_weak_hook_strncmp(void *caller_pc, const char *s1,
|
||||
const char *s2, size_t n,
|
||||
dfsan_label s1_label,
|
||||
dfsan_label s2_label,
|
||||
dfsan_label n_label);
|
||||
|
||||
/// Prints the origin trace of the label at the address addr to stderr. It also
|
||||
/// prints description at the beginning of the trace. If origin tracking is not
|
||||
/// on, or the address is not labeled, it prints nothing.
|
||||
void dfsan_print_origin_trace(const void *addr, const char *description);
|
||||
void SANITIZER_CDECL dfsan_print_origin_trace(const void *addr,
|
||||
const char *description);
|
||||
/// As above, but use an origin id from dfsan_get_origin() instead of address.
|
||||
/// Does not include header line with taint label and address information.
|
||||
void dfsan_print_origin_id_trace(dfsan_origin origin);
|
||||
void SANITIZER_CDECL dfsan_print_origin_id_trace(dfsan_origin origin);
|
||||
|
||||
/// Prints the origin trace of the label at the address \p addr to a
|
||||
/// pre-allocated output buffer. If origin tracking is not on, or the address is
|
||||
|
@ -166,12 +175,15 @@ void dfsan_print_origin_id_trace(dfsan_origin origin);
|
|||
/// \returns The number of symbols that should have been written to \p out_buf
|
||||
/// (not including trailing null byte '\0'). Thus, the string is truncated iff
|
||||
/// return value is not less than \p out_buf_size.
|
||||
size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
|
||||
char *out_buf, size_t out_buf_size);
|
||||
size_t SANITIZER_CDECL dfsan_sprint_origin_trace(const void *addr,
|
||||
const char *description,
|
||||
char *out_buf,
|
||||
size_t out_buf_size);
|
||||
/// As above, but use an origin id from dfsan_get_origin() instead of address.
|
||||
/// Does not include header line with taint label and address information.
|
||||
size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
|
||||
size_t out_buf_size);
|
||||
size_t SANITIZER_CDECL dfsan_sprint_origin_id_trace(dfsan_origin origin,
|
||||
char *out_buf,
|
||||
size_t out_buf_size);
|
||||
|
||||
/// Prints the stack trace leading to this call to a pre-allocated output
|
||||
/// buffer.
|
||||
|
@ -184,19 +196,20 @@ size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
|
|||
/// \returns The number of symbols that should have been written to \p out_buf
|
||||
/// (not including trailing null byte '\0'). Thus, the string is truncated iff
|
||||
/// return value is not less than \p out_buf_size.
|
||||
size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
|
||||
size_t SANITIZER_CDECL dfsan_sprint_stack_trace(char *out_buf,
|
||||
size_t out_buf_size);
|
||||
|
||||
/// Retrieves the very first origin associated with the data at the given
|
||||
/// address.
|
||||
dfsan_origin dfsan_get_init_origin(const void *addr);
|
||||
dfsan_origin SANITIZER_CDECL dfsan_get_init_origin(const void *addr);
|
||||
|
||||
/// Returns the value of -dfsan-track-origins.
|
||||
/// * 0: do not track origins.
|
||||
/// * 1: track origins at memory store operations.
|
||||
/// * 2: track origins at memory load and store operations.
|
||||
int dfsan_get_track_origins(void);
|
||||
int SANITIZER_CDECL dfsan_get_track_origins(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
|
||||
template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
|
||||
dfsan_set_label(label, (void *)&data, sizeof(T));
|
||||
|
@ -204,4 +217,4 @@ template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
|
|||
|
||||
#endif
|
||||
|
||||
#endif // DFSAN_INTERFACE_H
|
||||
#endif // DFSAN_INTERFACE_H
|
||||
|
|
|
@ -18,82 +18,88 @@
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
// Libc hook for program startup in statically linked executables.
|
||||
// Initializes enough of the runtime to run instrumented code. This function
|
||||
// should only be called in statically linked executables because it modifies
|
||||
// the GOT, which won't work in regular binaries because RELRO will already
|
||||
// have been applied by the time the function is called. This also means that
|
||||
// the function should be called before libc applies RELRO.
|
||||
// Does not call libc unless there is an error.
|
||||
// Can be called multiple times.
|
||||
void __hwasan_init_static(void);
|
||||
// Libc hook for program startup in statically linked executables.
|
||||
// Initializes enough of the runtime to run instrumented code. This function
|
||||
// should only be called in statically linked executables because it modifies
|
||||
// the GOT, which won't work in regular binaries because RELRO will already
|
||||
// have been applied by the time the function is called. This also means that
|
||||
// the function should be called before libc applies RELRO.
|
||||
// Does not call libc unless there is an error.
|
||||
// Can be called multiple times.
|
||||
void SANITIZER_CDECL __hwasan_init_static(void);
|
||||
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing HWASan runtime options. See asan_flags.h for details.
|
||||
const char* __hwasan_default_options(void);
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing HWASan runtime options. See asan_flags.h for details.
|
||||
const char *SANITIZER_CDECL __hwasan_default_options(void);
|
||||
|
||||
void __hwasan_enable_allocator_tagging(void);
|
||||
void __hwasan_disable_allocator_tagging(void);
|
||||
void SANITIZER_CDECL __hwasan_enable_allocator_tagging(void);
|
||||
void SANITIZER_CDECL __hwasan_disable_allocator_tagging(void);
|
||||
|
||||
// Mark region of memory with the given tag. Both address and size need to be
|
||||
// 16-byte aligned.
|
||||
void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
|
||||
size_t size);
|
||||
// Mark region of memory with the given tag. Both address and size need to be
|
||||
// 16-byte aligned.
|
||||
void SANITIZER_CDECL __hwasan_tag_memory(const volatile void *p,
|
||||
unsigned char tag, size_t size);
|
||||
|
||||
/// Set pointer tag. Previous tag is lost.
|
||||
void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
|
||||
/// Set pointer tag. Previous tag is lost.
|
||||
void *SANITIZER_CDECL __hwasan_tag_pointer(const volatile void *p,
|
||||
unsigned char tag);
|
||||
|
||||
// Set memory tag from the current SP address to the given address to zero.
|
||||
// This is meant to annotate longjmp and other non-local jumps.
|
||||
// This function needs to know the (almost) exact destination frame address;
|
||||
// clearing shadow for the entire thread stack like __asan_handle_no_return
|
||||
// does would cause false reports.
|
||||
void __hwasan_handle_longjmp(const void *sp_dst);
|
||||
// Set memory tag from the current SP address to the given address to zero.
|
||||
// This is meant to annotate longjmp and other non-local jumps.
|
||||
// This function needs to know the (almost) exact destination frame address;
|
||||
// clearing shadow for the entire thread stack like __asan_handle_no_return
|
||||
// does would cause false reports.
|
||||
void SANITIZER_CDECL __hwasan_handle_longjmp(const void *sp_dst);
|
||||
|
||||
// Set memory tag for the part of the current thread stack below sp_dst to
|
||||
// zero. Call this in vfork() before returning in the parent process.
|
||||
void __hwasan_handle_vfork(const void *sp_dst);
|
||||
// Set memory tag for the part of the current thread stack below sp_dst to
|
||||
// zero. Call this in vfork() before returning in the parent process.
|
||||
void SANITIZER_CDECL __hwasan_handle_vfork(const void *sp_dst);
|
||||
|
||||
// Libc hook for thread creation. Should be called in the child thread before
|
||||
// any instrumented code.
|
||||
void __hwasan_thread_enter();
|
||||
// Libc hook for thread creation. Should be called in the child thread before
|
||||
// any instrumented code.
|
||||
void SANITIZER_CDECL __hwasan_thread_enter();
|
||||
|
||||
// Libc hook for thread destruction. No instrumented code should run after
|
||||
// this call.
|
||||
void __hwasan_thread_exit();
|
||||
// Libc hook for thread destruction. No instrumented code should run after
|
||||
// this call.
|
||||
void SANITIZER_CDECL __hwasan_thread_exit();
|
||||
|
||||
// Print shadow and origin for the memory range to stderr in a human-readable
|
||||
// format.
|
||||
void __hwasan_print_shadow(const volatile void *x, size_t size);
|
||||
// Print shadow and origin for the memory range to stderr in a human-readable
|
||||
// format.
|
||||
void SANITIZER_CDECL __hwasan_print_shadow(const volatile void *x, size_t size);
|
||||
|
||||
// Print one-line report about the memory usage of the current process.
|
||||
void __hwasan_print_memory_usage();
|
||||
// Print one-line report about the memory usage of the current process.
|
||||
void SANITIZER_CDECL __hwasan_print_memory_usage();
|
||||
|
||||
/* Returns the offset of the first byte in the memory range that can not be
|
||||
* accessed through the pointer in x, or -1 if the whole range is good. */
|
||||
intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
|
||||
/* Returns the offset of the first byte in the memory range that can not be
|
||||
* accessed through the pointer in x, or -1 if the whole range is good. */
|
||||
intptr_t SANITIZER_CDECL __hwasan_test_shadow(const volatile void *x,
|
||||
size_t size);
|
||||
|
||||
/* Sets the callback function to be called during HWASan error reporting. */
|
||||
void __hwasan_set_error_report_callback(void (*callback)(const char *));
|
||||
/* Sets the callback function to be called during HWASan error reporting. */
|
||||
void SANITIZER_CDECL
|
||||
__hwasan_set_error_report_callback(void (*callback)(const char *));
|
||||
|
||||
int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
|
||||
void * __sanitizer_memalign(size_t alignment, size_t size);
|
||||
void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
|
||||
void * __sanitizer___libc_memalign(size_t alignment, size_t size);
|
||||
void * __sanitizer_valloc(size_t size);
|
||||
void * __sanitizer_pvalloc(size_t size);
|
||||
void __sanitizer_free(void *ptr);
|
||||
void __sanitizer_cfree(void *ptr);
|
||||
size_t __sanitizer_malloc_usable_size(const void *ptr);
|
||||
struct mallinfo __sanitizer_mallinfo();
|
||||
int __sanitizer_mallopt(int cmd, int value);
|
||||
void __sanitizer_malloc_stats(void);
|
||||
void * __sanitizer_calloc(size_t nmemb, size_t size);
|
||||
void * __sanitizer_realloc(void *ptr, size_t size);
|
||||
void * __sanitizer_reallocarray(void *ptr, size_t nmemb, size_t size);
|
||||
void * __sanitizer_malloc(size_t size);
|
||||
int SANITIZER_CDECL __sanitizer_posix_memalign(void **memptr, size_t alignment,
|
||||
size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_memalign(size_t alignment, size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_aligned_alloc(size_t alignment, size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer___libc_memalign(size_t alignment,
|
||||
size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_valloc(size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_pvalloc(size_t size);
|
||||
void SANITIZER_CDECL __sanitizer_free(void *ptr);
|
||||
void SANITIZER_CDECL __sanitizer_cfree(void *ptr);
|
||||
size_t SANITIZER_CDECL __sanitizer_malloc_usable_size(const void *ptr);
|
||||
struct mallinfo SANITIZER_CDECL __sanitizer_mallinfo();
|
||||
int SANITIZER_CDECL __sanitizer_mallopt(int cmd, int value);
|
||||
void SANITIZER_CDECL __sanitizer_malloc_stats(void);
|
||||
void *SANITIZER_CDECL __sanitizer_calloc(size_t nmemb, size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_realloc(void *ptr, size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_reallocarray(void *ptr, size_t nmemb,
|
||||
size_t size);
|
||||
void *SANITIZER_CDECL __sanitizer_malloc(size_t size);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_HWASAN_INTERFACE_H
|
||||
#endif // SANITIZER_HWASAN_INTERFACE_H
|
||||
|
|
|
@ -18,72 +18,72 @@
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
// Allocations made between calls to __lsan_disable() and __lsan_enable() will
|
||||
// be treated as non-leaks. Disable/enable pairs may be nested.
|
||||
void __lsan_disable(void);
|
||||
void __lsan_enable(void);
|
||||
// Allocations made between calls to __lsan_disable() and __lsan_enable() will
|
||||
// be treated as non-leaks. Disable/enable pairs may be nested.
|
||||
void SANITIZER_CDECL __lsan_disable(void);
|
||||
void SANITIZER_CDECL __lsan_enable(void);
|
||||
|
||||
// The heap object into which p points will be treated as a non-leak.
|
||||
void __lsan_ignore_object(const void *p);
|
||||
// The heap object into which p points will be treated as a non-leak.
|
||||
void SANITIZER_CDECL __lsan_ignore_object(const void *p);
|
||||
|
||||
// Memory regions registered through this interface will be treated as sources
|
||||
// of live pointers during leak checking. Useful if you store pointers in
|
||||
// mapped memory.
|
||||
// Points of note:
|
||||
// - __lsan_unregister_root_region() must be called with the same pointer and
|
||||
// size that have earlier been passed to __lsan_register_root_region()
|
||||
// - LSan will skip any inaccessible memory when scanning a root region. E.g.,
|
||||
// if you map memory within a larger region that you have mprotect'ed, you can
|
||||
// register the entire large region.
|
||||
// - the implementation is not optimized for performance. This interface is
|
||||
// intended to be used for a small number of relatively static regions.
|
||||
void __lsan_register_root_region(const void *p, size_t size);
|
||||
void __lsan_unregister_root_region(const void *p, size_t size);
|
||||
// Memory regions registered through this interface will be treated as sources
|
||||
// of live pointers during leak checking. Useful if you store pointers in
|
||||
// mapped memory.
|
||||
// Points of note:
|
||||
// - __lsan_unregister_root_region() must be called with the same pointer and
|
||||
// size that have earlier been passed to __lsan_register_root_region()
|
||||
// - LSan will skip any inaccessible memory when scanning a root region. E.g.,
|
||||
// if you map memory within a larger region that you have mprotect'ed, you can
|
||||
// register the entire large region.
|
||||
// - the implementation is not optimized for performance. This interface is
|
||||
// intended to be used for a small number of relatively static regions.
|
||||
void SANITIZER_CDECL __lsan_register_root_region(const void *p, size_t size);
|
||||
void SANITIZER_CDECL __lsan_unregister_root_region(const void *p, size_t size);
|
||||
|
||||
// Check for leaks now. This function behaves identically to the default
|
||||
// end-of-process leak check. In particular, it will terminate the process if
|
||||
// leaks are found and the exitcode runtime flag is non-zero.
|
||||
// Subsequent calls to this function will have no effect and end-of-process
|
||||
// leak check will not run. Effectively, end-of-process leak check is moved to
|
||||
// the time of first invocation of this function.
|
||||
// By calling this function early during process shutdown, you can instruct
|
||||
// LSan to ignore shutdown-only leaks which happen later on.
|
||||
void __lsan_do_leak_check(void);
|
||||
// Check for leaks now. This function behaves identically to the default
|
||||
// end-of-process leak check. In particular, it will terminate the process if
|
||||
// leaks are found and the exitcode runtime flag is non-zero.
|
||||
// Subsequent calls to this function will have no effect and end-of-process
|
||||
// leak check will not run. Effectively, end-of-process leak check is moved to
|
||||
// the time of first invocation of this function.
|
||||
// By calling this function early during process shutdown, you can instruct
|
||||
// LSan to ignore shutdown-only leaks which happen later on.
|
||||
void SANITIZER_CDECL __lsan_do_leak_check(void);
|
||||
|
||||
// Check for leaks now. Returns zero if no leaks have been found or if leak
|
||||
// detection is disabled, non-zero otherwise.
|
||||
// This function may be called repeatedly, e.g. to periodically check a
|
||||
// long-running process. It prints a leak report if appropriate, but does not
|
||||
// terminate the process. It does not affect the behavior of
|
||||
// __lsan_do_leak_check() or the end-of-process leak check, and is not
|
||||
// affected by them.
|
||||
int __lsan_do_recoverable_leak_check(void);
|
||||
// Check for leaks now. Returns zero if no leaks have been found or if leak
|
||||
// detection is disabled, non-zero otherwise.
|
||||
// This function may be called repeatedly, e.g. to periodically check a
|
||||
// long-running process. It prints a leak report if appropriate, but does not
|
||||
// terminate the process. It does not affect the behavior of
|
||||
// __lsan_do_leak_check() or the end-of-process leak check, and is not
|
||||
// affected by them.
|
||||
int SANITIZER_CDECL __lsan_do_recoverable_leak_check(void);
|
||||
|
||||
// The user may optionally provide this function to disallow leak checking
|
||||
// for the program it is linked into (if the return value is non-zero). This
|
||||
// function must be defined as returning a constant value; any behavior beyond
|
||||
// that is unsupported.
|
||||
// To avoid dead stripping, you may need to define this function with
|
||||
// __attribute__((used))
|
||||
int __lsan_is_turned_off(void);
|
||||
// The user may optionally provide this function to disallow leak checking
|
||||
// for the program it is linked into (if the return value is non-zero). This
|
||||
// function must be defined as returning a constant value; any behavior beyond
|
||||
// that is unsupported.
|
||||
// To avoid dead stripping, you may need to define this function with
|
||||
// __attribute__((used))
|
||||
int SANITIZER_CDECL __lsan_is_turned_off(void);
|
||||
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing LSan runtime options. See lsan_flags.inc for details.
|
||||
const char *__lsan_default_options(void);
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing LSan runtime options. See lsan_flags.inc for details.
|
||||
const char *SANITIZER_CDECL __lsan_default_options(void);
|
||||
|
||||
// This function may be optionally provided by the user and should return
|
||||
// a string containing LSan suppressions.
|
||||
const char *__lsan_default_suppressions(void);
|
||||
// This function may be optionally provided by the user and should return
|
||||
// a string containing LSan suppressions.
|
||||
const char *SANITIZER_CDECL __lsan_default_suppressions(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
|
||||
namespace __lsan {
|
||||
class ScopedDisabler {
|
||||
public:
|
||||
public:
|
||||
ScopedDisabler() { __lsan_disable(); }
|
||||
~ScopedDisabler() { __lsan_enable(); }
|
||||
};
|
||||
} // namespace __lsan
|
||||
} // namespace __lsan
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_LSAN_INTERFACE_H
|
||||
#endif // SANITIZER_LSAN_INTERFACE_H
|
||||
|
|
|
@ -24,25 +24,26 @@ extern "C" {
|
|||
///
|
||||
/// \param addr Start of memory region.
|
||||
/// \param size Size of memory region.
|
||||
void __memprof_record_access_range(void const volatile *addr, size_t size);
|
||||
void SANITIZER_CDECL __memprof_record_access_range(void const volatile *addr,
|
||||
size_t size);
|
||||
|
||||
/// Records access to a memory address <c><i>addr</i></c>.
|
||||
///
|
||||
/// This memory must be previously allocated by your program.
|
||||
///
|
||||
/// \param addr Accessed memory address
|
||||
void __memprof_record_access(void const volatile *addr);
|
||||
void SANITIZER_CDECL __memprof_record_access(void const volatile *addr);
|
||||
|
||||
/// User-provided callback on MemProf errors.
|
||||
///
|
||||
/// You can provide a function that would be called immediately when MemProf
|
||||
/// detects an error. This is useful in cases when MemProf detects an error but
|
||||
/// your program crashes before the MemProf report is printed.
|
||||
void __memprof_on_error(void);
|
||||
void SANITIZER_CDECL __memprof_on_error(void);
|
||||
|
||||
/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
|
||||
/// debugger).
|
||||
void __memprof_print_accumulated_stats(void);
|
||||
void SANITIZER_CDECL __memprof_print_accumulated_stats(void);
|
||||
|
||||
/// User-provided default option settings.
|
||||
///
|
||||
|
@ -51,12 +52,12 @@ void __memprof_print_accumulated_stats(void);
|
|||
/// <c>verbosity=1:print_stats=1</c>).
|
||||
///
|
||||
/// \returns Default options string.
|
||||
const char *__memprof_default_options(void);
|
||||
const char *SANITIZER_CDECL __memprof_default_options(void);
|
||||
|
||||
/// Prints the memory profile to the current profile file.
|
||||
///
|
||||
/// \returns 0 on success.
|
||||
int __memprof_profile_dump(void);
|
||||
int SANITIZER_CDECL __memprof_profile_dump(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
|
|
|
@ -18,109 +18,118 @@
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/* Set raw origin for the memory range. */
|
||||
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
|
||||
/* Set raw origin for the memory range. */
|
||||
void SANITIZER_CDECL __msan_set_origin(const volatile void *a, size_t size,
|
||||
uint32_t origin);
|
||||
|
||||
/* Get raw origin for an address. */
|
||||
uint32_t __msan_get_origin(const volatile void *a);
|
||||
/* Get raw origin for an address. */
|
||||
uint32_t SANITIZER_CDECL __msan_get_origin(const volatile void *a);
|
||||
|
||||
/* Test that this_id is a descendant of prev_id (or they are simply equal).
|
||||
* "descendant" here means they are part of the same chain, created with
|
||||
* __msan_chain_origin. */
|
||||
int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
|
||||
/* Test that this_id is a descendant of prev_id (or they are simply equal).
|
||||
* "descendant" here means they are part of the same chain, created with
|
||||
* __msan_chain_origin. */
|
||||
int SANITIZER_CDECL __msan_origin_is_descendant_or_same(uint32_t this_id,
|
||||
uint32_t prev_id);
|
||||
|
||||
/* Returns non-zero if tracking origins. */
|
||||
int __msan_get_track_origins(void);
|
||||
/* Returns non-zero if tracking origins. */
|
||||
int SANITIZER_CDECL __msan_get_track_origins(void);
|
||||
|
||||
/* Returns the origin id of the latest UMR in the calling thread. */
|
||||
uint32_t __msan_get_umr_origin(void);
|
||||
/* Returns the origin id of the latest UMR in the calling thread. */
|
||||
uint32_t SANITIZER_CDECL __msan_get_umr_origin(void);
|
||||
|
||||
/* Make memory region fully initialized (without changing its contents). */
|
||||
void __msan_unpoison(const volatile void *a, size_t size);
|
||||
/* Make memory region fully initialized (without changing its contents). */
|
||||
void SANITIZER_CDECL __msan_unpoison(const volatile void *a, size_t size);
|
||||
|
||||
/* Make a null-terminated string fully initialized (without changing its
|
||||
contents). */
|
||||
void __msan_unpoison_string(const volatile char *a);
|
||||
/* Make a null-terminated string fully initialized (without changing its
|
||||
contents). */
|
||||
void SANITIZER_CDECL __msan_unpoison_string(const volatile char *a);
|
||||
|
||||
/* Make first n parameters of the next function call fully initialized. */
|
||||
void __msan_unpoison_param(size_t n);
|
||||
/* Make first n parameters of the next function call fully initialized. */
|
||||
void SANITIZER_CDECL __msan_unpoison_param(size_t n);
|
||||
|
||||
/* Make memory region fully uninitialized (without changing its contents).
|
||||
This is a legacy interface that does not update origin information. Use
|
||||
__msan_allocated_memory() instead. */
|
||||
void __msan_poison(const volatile void *a, size_t size);
|
||||
/* Make memory region fully uninitialized (without changing its contents).
|
||||
This is a legacy interface that does not update origin information. Use
|
||||
__msan_allocated_memory() instead. */
|
||||
void SANITIZER_CDECL __msan_poison(const volatile void *a, size_t size);
|
||||
|
||||
/* Make memory region partially uninitialized (without changing its contents).
|
||||
*/
|
||||
void __msan_partial_poison(const volatile void *data, void *shadow,
|
||||
size_t size);
|
||||
/* Make memory region partially uninitialized (without changing its contents).
|
||||
*/
|
||||
void SANITIZER_CDECL __msan_partial_poison(const volatile void *data,
|
||||
void *shadow, size_t size);
|
||||
|
||||
/* Returns the offset of the first (at least partially) poisoned byte in the
|
||||
memory range, or -1 if the whole range is good. */
|
||||
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
|
||||
/* Returns the offset of the first (at least partially) poisoned byte in the
|
||||
memory range, or -1 if the whole range is good. */
|
||||
intptr_t SANITIZER_CDECL __msan_test_shadow(const volatile void *x,
|
||||
size_t size);
|
||||
|
||||
/* Checks that memory range is fully initialized, and reports an error if it
|
||||
* is not. */
|
||||
void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
|
||||
/* Checks that memory range is fully initialized, and reports an error if it
|
||||
* is not. */
|
||||
void SANITIZER_CDECL __msan_check_mem_is_initialized(const volatile void *x,
|
||||
size_t size);
|
||||
|
||||
/* For testing:
|
||||
__msan_set_expect_umr(1);
|
||||
... some buggy code ...
|
||||
__msan_set_expect_umr(0);
|
||||
The last line will verify that a UMR happened. */
|
||||
void __msan_set_expect_umr(int expect_umr);
|
||||
/* For testing:
|
||||
__msan_set_expect_umr(1);
|
||||
... some buggy code ...
|
||||
__msan_set_expect_umr(0);
|
||||
The last line will verify that a UMR happened. */
|
||||
void SANITIZER_CDECL __msan_set_expect_umr(int expect_umr);
|
||||
|
||||
/* Change the value of keep_going flag. Non-zero value means don't terminate
|
||||
program execution when an error is detected. This will not affect error in
|
||||
modules that were compiled without the corresponding compiler flag. */
|
||||
void __msan_set_keep_going(int keep_going);
|
||||
/* Change the value of keep_going flag. Non-zero value means don't terminate
|
||||
program execution when an error is detected. This will not affect error in
|
||||
modules that were compiled without the corresponding compiler flag. */
|
||||
void SANITIZER_CDECL __msan_set_keep_going(int keep_going);
|
||||
|
||||
/* Print shadow and origin for the memory range to stderr in a human-readable
|
||||
format. */
|
||||
void __msan_print_shadow(const volatile void *x, size_t size);
|
||||
/* Print shadow and origin for the memory range to stderr in a human-readable
|
||||
format. */
|
||||
void SANITIZER_CDECL __msan_print_shadow(const volatile void *x, size_t size);
|
||||
|
||||
/* Print shadow for the memory range to stderr in a minimalistic
|
||||
human-readable format. */
|
||||
void __msan_dump_shadow(const volatile void *x, size_t size);
|
||||
/* Print shadow for the memory range to stderr in a minimalistic
|
||||
human-readable format. */
|
||||
void SANITIZER_CDECL __msan_dump_shadow(const volatile void *x, size_t size);
|
||||
|
||||
/* Returns true if running under a dynamic tool (DynamoRio-based). */
|
||||
int __msan_has_dynamic_component(void);
|
||||
/* Returns true if running under a dynamic tool (DynamoRio-based). */
|
||||
int SANITIZER_CDECL __msan_has_dynamic_component(void);
|
||||
|
||||
/* Tell MSan about newly allocated memory (ex.: custom allocator).
|
||||
Memory will be marked uninitialized, with origin at the call site. */
|
||||
void __msan_allocated_memory(const volatile void* data, size_t size);
|
||||
/* Tell MSan about newly allocated memory (ex.: custom allocator).
|
||||
Memory will be marked uninitialized, with origin at the call site. */
|
||||
void SANITIZER_CDECL __msan_allocated_memory(const volatile void *data,
|
||||
size_t size);
|
||||
|
||||
/* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
|
||||
void __sanitizer_dtor_callback(const volatile void* data, size_t size);
|
||||
void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
|
||||
void __sanitizer_dtor_callback_vptr(const volatile void *data);
|
||||
/* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
|
||||
void SANITIZER_CDECL __sanitizer_dtor_callback(const volatile void *data,
|
||||
size_t size);
|
||||
void SANITIZER_CDECL __sanitizer_dtor_callback_fields(const volatile void *data,
|
||||
size_t size);
|
||||
void SANITIZER_CDECL __sanitizer_dtor_callback_vptr(const volatile void *data);
|
||||
|
||||
/* This function may be optionally provided by user and should return
|
||||
a string containing Msan runtime options. See msan_flags.h for details. */
|
||||
const char* __msan_default_options(void);
|
||||
/* This function may be optionally provided by user and should return
|
||||
a string containing Msan runtime options. See msan_flags.h for details. */
|
||||
const char *SANITIZER_CDECL __msan_default_options(void);
|
||||
|
||||
/* Deprecated. Call __sanitizer_set_death_callback instead. */
|
||||
void __msan_set_death_callback(void (*callback)(void));
|
||||
/* Deprecated. Call __sanitizer_set_death_callback instead. */
|
||||
void SANITIZER_CDECL
|
||||
__msan_set_death_callback(void(SANITIZER_CDECL *callback)(void));
|
||||
|
||||
/* Update shadow for the application copy of size bytes from src to dst.
|
||||
Src and dst are application addresses. This function does not copy the
|
||||
actual application memory, it only updates shadow and origin for such
|
||||
copy. Source and destination regions can overlap. */
|
||||
void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
|
||||
size_t size);
|
||||
/* Update shadow for the application copy of size bytes from src to dst.
|
||||
Src and dst are application addresses. This function does not copy the
|
||||
actual application memory, it only updates shadow and origin for such
|
||||
copy. Source and destination regions can overlap. */
|
||||
void SANITIZER_CDECL __msan_copy_shadow(const volatile void *dst,
|
||||
const volatile void *src, size_t size);
|
||||
|
||||
/* Disables uninitialized memory checks in interceptors. */
|
||||
void __msan_scoped_disable_interceptor_checks(void);
|
||||
/* Disables uninitialized memory checks in interceptors. */
|
||||
void SANITIZER_CDECL __msan_scoped_disable_interceptor_checks(void);
|
||||
|
||||
/* Re-enables uninitialized memory checks in interceptors after a previous
|
||||
call to __msan_scoped_disable_interceptor_checks. */
|
||||
void __msan_scoped_enable_interceptor_checks(void);
|
||||
/* Re-enables uninitialized memory checks in interceptors after a previous
|
||||
call to __msan_scoped_disable_interceptor_checks. */
|
||||
void SANITIZER_CDECL __msan_scoped_enable_interceptor_checks(void);
|
||||
|
||||
void __msan_start_switch_fiber(const void *bottom, size_t size);
|
||||
void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
|
||||
void SANITIZER_CDECL __msan_start_switch_fiber(const void *bottom, size_t size);
|
||||
void SANITIZER_CDECL __msan_finish_switch_fiber(const void **bottom_old,
|
||||
size_t *size_old);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -17,22 +17,22 @@
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
// This function may be optionally provided by a user and should return
|
||||
// a string containing Scudo runtime options. See scudo_flags.h for details.
|
||||
const char* __scudo_default_options(void);
|
||||
// This function may be optionally provided by a user and should return
|
||||
// a string containing Scudo runtime options. See scudo_flags.h for details.
|
||||
const char *SANITIZER_CDECL __scudo_default_options(void);
|
||||
|
||||
// This function allows to set the RSS limit at runtime. This can be either
|
||||
// the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
|
||||
// can be removed by setting LimitMb to 0. This function's parameters should
|
||||
// be fully trusted to avoid security mishaps.
|
||||
void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
|
||||
// This function allows to set the RSS limit at runtime. This can be either
|
||||
// the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
|
||||
// can be removed by setting LimitMb to 0. This function's parameters should
|
||||
// be fully trusted to avoid security mishaps.
|
||||
void SANITIZER_CDECL __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
|
||||
|
||||
// This function outputs various allocator statistics for both the Primary
|
||||
// and Secondary allocators, including memory usage, number of allocations
|
||||
// and deallocations.
|
||||
void __scudo_print_stats(void);
|
||||
// This function outputs various allocator statistics for both the Primary
|
||||
// and Secondary allocators, including memory usage, number of allocations
|
||||
// and deallocations.
|
||||
void SANITIZER_CDECL __scudo_print_stats(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_SCUDO_INTERFACE_H_
|
||||
#endif // SANITIZER_SCUDO_INTERFACE_H_
|
||||
|
|
|
@ -21,8 +21,8 @@ extern "C" {
|
|||
|
||||
// __tsan_release establishes a happens-before relation with a preceding
|
||||
// __tsan_acquire on the same address.
|
||||
void __tsan_acquire(void *addr);
|
||||
void __tsan_release(void *addr);
|
||||
void SANITIZER_CDECL __tsan_acquire(void *addr);
|
||||
void SANITIZER_CDECL __tsan_release(void *addr);
|
||||
|
||||
// Annotations for custom mutexes.
|
||||
// The annotations allow to get better reports (with sets of locked mutexes),
|
||||
|
@ -52,16 +52,16 @@ static const unsigned __tsan_mutex_not_static = 1 << 8;
|
|||
// Mutex operation flags:
|
||||
|
||||
// Denotes read lock operation.
|
||||
static const unsigned __tsan_mutex_read_lock = 1 << 3;
|
||||
static const unsigned __tsan_mutex_read_lock = 1 << 3;
|
||||
// Denotes try lock operation.
|
||||
static const unsigned __tsan_mutex_try_lock = 1 << 4;
|
||||
static const unsigned __tsan_mutex_try_lock = 1 << 4;
|
||||
// Denotes that a try lock operation has failed to acquire the mutex.
|
||||
static const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
|
||||
static const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
|
||||
// Denotes that the lock operation acquires multiple recursion levels.
|
||||
// Number of levels is passed in recursion parameter.
|
||||
// This is useful for annotation of e.g. Java builtin monitors,
|
||||
// for which wait operation releases all recursive acquisitions of the mutex.
|
||||
static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
|
||||
static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
|
||||
// Denotes that the unlock operation releases all recursion levels.
|
||||
// Number of released levels is returned and later must be passed to
|
||||
// the corresponding __tsan_mutex_post_lock annotation.
|
||||
|
@ -75,20 +75,20 @@ static const unsigned __tsan_mutex_try_read_lock_failed =
|
|||
|
||||
// Annotate creation of a mutex.
|
||||
// Supported flags: mutex creation flags.
|
||||
void __tsan_mutex_create(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_create(void *addr, unsigned flags);
|
||||
|
||||
// Annotate destruction of a mutex.
|
||||
// Supported flags:
|
||||
// - __tsan_mutex_linker_init
|
||||
// - __tsan_mutex_not_static
|
||||
void __tsan_mutex_destroy(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_destroy(void *addr, unsigned flags);
|
||||
|
||||
// Annotate start of lock operation.
|
||||
// Supported flags:
|
||||
// - __tsan_mutex_read_lock
|
||||
// - __tsan_mutex_try_lock
|
||||
// - all mutex creation flags
|
||||
void __tsan_mutex_pre_lock(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_pre_lock(void *addr, unsigned flags);
|
||||
|
||||
// Annotate end of lock operation.
|
||||
// Supported flags:
|
||||
|
@ -97,23 +97,24 @@ void __tsan_mutex_pre_lock(void *addr, unsigned flags);
|
|||
// - __tsan_mutex_try_lock_failed
|
||||
// - __tsan_mutex_recursive_lock
|
||||
// - all mutex creation flags
|
||||
void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
|
||||
void SANITIZER_CDECL __tsan_mutex_post_lock(void *addr, unsigned flags,
|
||||
int recursion);
|
||||
|
||||
// Annotate start of unlock operation.
|
||||
// Supported flags:
|
||||
// - __tsan_mutex_read_lock
|
||||
// - __tsan_mutex_recursive_unlock
|
||||
int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
|
||||
int SANITIZER_CDECL __tsan_mutex_pre_unlock(void *addr, unsigned flags);
|
||||
|
||||
// Annotate end of unlock operation.
|
||||
// Supported flags:
|
||||
// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
|
||||
void __tsan_mutex_post_unlock(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_post_unlock(void *addr, unsigned flags);
|
||||
|
||||
// Annotate start/end of notify/signal/broadcast operation.
|
||||
// Supported flags: none.
|
||||
void __tsan_mutex_pre_signal(void *addr, unsigned flags);
|
||||
void __tsan_mutex_post_signal(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_pre_signal(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_post_signal(void *addr, unsigned flags);
|
||||
|
||||
// Annotate start/end of a region of code where lock/unlock/signal operation
|
||||
// diverts to do something else unrelated to the mutex. This can be used to
|
||||
|
@ -123,8 +124,12 @@ void __tsan_mutex_post_signal(void *addr, unsigned flags);
|
|||
// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
|
||||
// __tsan_mutex_pre/post_signal regions.
|
||||
// Supported flags: none.
|
||||
void __tsan_mutex_pre_divert(void *addr, unsigned flags);
|
||||
void __tsan_mutex_post_divert(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_pre_divert(void *addr, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_mutex_post_divert(void *addr, unsigned flags);
|
||||
|
||||
// Check that the current thread does not hold any mutexes,
|
||||
// report a bug report otherwise.
|
||||
void SANITIZER_CDECL __tsan_check_no_mutexes_held();
|
||||
|
||||
// External race detection API.
|
||||
// Can be used by non-instrumented libraries to detect when their objects are
|
||||
|
@ -136,11 +141,14 @@ void __tsan_mutex_post_divert(void *addr, unsigned flags);
|
|||
// - __tsan_external_register_tag registers a 'tag' with the specified name,
|
||||
// which is later used in read/write annotations to denote the object type
|
||||
// - __tsan_external_assign_tag can optionally mark a heap object with a tag
|
||||
void *__tsan_external_register_tag(const char *object_type);
|
||||
void __tsan_external_register_header(void *tag, const char *header);
|
||||
void __tsan_external_assign_tag(void *addr, void *tag);
|
||||
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
|
||||
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
|
||||
void *SANITIZER_CDECL __tsan_external_register_tag(const char *object_type);
|
||||
void SANITIZER_CDECL __tsan_external_register_header(void *tag,
|
||||
const char *header);
|
||||
void SANITIZER_CDECL __tsan_external_assign_tag(void *addr, void *tag);
|
||||
void SANITIZER_CDECL __tsan_external_read(void *addr, void *caller_pc,
|
||||
void *tag);
|
||||
void SANITIZER_CDECL __tsan_external_write(void *addr, void *caller_pc,
|
||||
void *tag);
|
||||
|
||||
// Fiber switching API.
|
||||
// - TSAN context for fiber can be created by __tsan_create_fiber
|
||||
|
@ -150,36 +158,159 @@ void __tsan_external_write(void *addr, void *caller_pc, void *tag);
|
|||
// - __tsan_switch_to_fiber should be called immediately before switch
|
||||
// to fiber, such as call of swapcontext.
|
||||
// - Fiber name can be set by __tsan_set_fiber_name.
|
||||
void *__tsan_get_current_fiber(void);
|
||||
void *__tsan_create_fiber(unsigned flags);
|
||||
void __tsan_destroy_fiber(void *fiber);
|
||||
void __tsan_switch_to_fiber(void *fiber, unsigned flags);
|
||||
void __tsan_set_fiber_name(void *fiber, const char *name);
|
||||
void *SANITIZER_CDECL __tsan_get_current_fiber(void);
|
||||
void *SANITIZER_CDECL __tsan_create_fiber(unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_destroy_fiber(void *fiber);
|
||||
void SANITIZER_CDECL __tsan_switch_to_fiber(void *fiber, unsigned flags);
|
||||
void SANITIZER_CDECL __tsan_set_fiber_name(void *fiber, const char *name);
|
||||
|
||||
// Flags for __tsan_switch_to_fiber:
|
||||
// Do not establish a happens-before relation between fibers
|
||||
static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
|
||||
|
||||
// User-provided callback invoked on TSan initialization.
|
||||
void __tsan_on_initialize();
|
||||
void SANITIZER_CDECL __tsan_on_initialize();
|
||||
|
||||
// User-provided callback invoked on TSan shutdown.
|
||||
// `failed` - Nonzero if TSan did detect issues, zero otherwise.
|
||||
// Return `0` if TSan should exit as if no issues were detected. Return nonzero
|
||||
// if TSan should exit as if issues were detected.
|
||||
int __tsan_on_finalize(int failed);
|
||||
int SANITIZER_CDECL __tsan_on_finalize(int failed);
|
||||
|
||||
// Release TSan internal memory in a best-effort manner.
|
||||
void __tsan_flush_memory();
|
||||
void SANITIZER_CDECL __tsan_flush_memory();
|
||||
|
||||
// User-provided default TSAN options.
|
||||
const char* __tsan_default_options(void);
|
||||
const char *SANITIZER_CDECL __tsan_default_options(void);
|
||||
|
||||
// User-provided default TSAN suppressions.
|
||||
const char* __tsan_default_suppressions(void);
|
||||
const char *SANITIZER_CDECL __tsan_default_suppressions(void);
|
||||
|
||||
/// Returns a report's description.
|
||||
///
|
||||
/// Returns a report's description (issue type), number of duplicate issues
|
||||
/// found, counts of array data (stack traces, memory operations, locations,
|
||||
/// mutexes, threads, unique thread IDs) and a stack trace of a <c>sleep()</c>
|
||||
/// call (if one was involved in the issue).
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param[out] description Report type description.
|
||||
/// \param[out] count Count of duplicate issues.
|
||||
/// \param[out] stack_count Count of stack traces.
|
||||
/// \param[out] mop_count Count of memory operations.
|
||||
/// \param[out] loc_count Count of locations.
|
||||
/// \param[out] mutex_count Count of mutexes.
|
||||
/// \param[out] thread_count Count of threads.
|
||||
/// \param[out] unique_tid_count Count of unique thread IDs.
|
||||
/// \param sleep_trace A buffer to store the stack trace of a <c>sleep()</c>
|
||||
/// call.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_data(
|
||||
void *report, const char **description, int *count, int *stack_count,
|
||||
int *mop_count, int *loc_count, int *mutex_count, int *thread_count,
|
||||
int *unique_tid_count, void **sleep_trace, unsigned long trace_size);
|
||||
|
||||
/// Returns information about stack traces included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's stacks.
|
||||
/// \param trace A buffer to store the stack trace.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_stack(void *report, unsigned long idx,
|
||||
void **trace,
|
||||
unsigned long trace_size);
|
||||
|
||||
/// Returns information about memory operations included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's memory operations.
|
||||
/// \param[out] tid Thread ID of the memory operation.
|
||||
/// \param[out] addr Address of the memory operation.
|
||||
/// \param[out] size Size of the memory operation.
|
||||
/// \param[out] write Write flag of the memory operation.
|
||||
/// \param[out] atomic Atomicity flag of the memory operation.
|
||||
/// \param trace A buffer to store the stack trace.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_mop(void *report, unsigned long idx,
|
||||
int *tid, void **addr, int *size,
|
||||
int *write, int *atomic, void **trace,
|
||||
unsigned long trace_size);
|
||||
|
||||
/// Returns information about locations included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's locations.
|
||||
/// \param[out] type Type of the location.
|
||||
/// \param[out] addr Address of the location.
|
||||
/// \param[out] start Start of the location.
|
||||
/// \param[out] size Size of the location.
|
||||
/// \param[out] tid Thread ID of the location.
|
||||
/// \param[out] fd File descriptor of the location.
|
||||
/// \param[out] suppressable Suppressable flag.
|
||||
/// \param trace A buffer to store the stack trace.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_loc(void *report, unsigned long idx,
|
||||
const char **type, void **addr,
|
||||
void **start, unsigned long *size,
|
||||
int *tid, int *fd, int *suppressable,
|
||||
void **trace,
|
||||
unsigned long trace_size);
|
||||
|
||||
/// Returns information about mutexes included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's mutexes.
|
||||
/// \param[out] mutex_id Id of the mutex.
|
||||
/// \param[out] addr Address of the mutex.
|
||||
/// \param[out] destroyed Destroyed mutex flag.
|
||||
/// \param trace A buffer to store the stack trace.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_mutex(void *report, unsigned long idx,
|
||||
uint64_t *mutex_id, void **addr,
|
||||
int *destroyed, void **trace,
|
||||
unsigned long trace_size);
|
||||
|
||||
/// Returns information about threads included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's threads.
|
||||
/// \param[out] tid Thread ID of the thread.
|
||||
/// \param[out] os_id Operating system's ID of the thread.
|
||||
/// \param[out] running Running flag of the thread.
|
||||
/// \param[out] name Name of the thread.
|
||||
/// \param[out] parent_tid ID of the parent thread.
|
||||
/// \param trace A buffer to store the stack trace.
|
||||
/// \param trace_size Size in bytes of the trace buffer.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_thread(void *report, unsigned long idx,
|
||||
int *tid, uint64_t *os_id,
|
||||
int *running, const char **name,
|
||||
int *parent_tid, void **trace,
|
||||
unsigned long trace_size);
|
||||
|
||||
/// Returns information about unique thread IDs included in the report.
|
||||
///
|
||||
/// \param report Opaque pointer to the current report.
|
||||
/// \param idx Index to the report's unique thread IDs.
|
||||
/// \param[out] tid Unique thread ID of the report.
|
||||
/// \returns Returns 1 if successful, 0 if not.
|
||||
int SANITIZER_CDECL __tsan_get_report_unique_tid(void *report,
|
||||
unsigned long idx, int *tid);
|
||||
|
||||
/// Returns the current report.
|
||||
///
|
||||
/// If TSan is currently reporting a detected issue on the current thread,
|
||||
/// returns an opaque pointer to the current report. Otherwise returns NULL.
|
||||
/// \returns An opaque pointer to the current report. Otherwise returns NULL.
|
||||
void *SANITIZER_CDECL __tsan_get_current_report();
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_TSAN_INTERFACE_H
|
||||
#endif // SANITIZER_TSAN_INTERFACE_H
|
||||
|
|
|
@ -13,6 +13,8 @@
|
|||
#ifndef TSAN_INTERFACE_ATOMIC_H
|
||||
#define TSAN_INTERFACE_ATOMIC_H
|
||||
|
||||
#include <sanitizer/common_interface_defs.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
@ -21,12 +23,12 @@ typedef char __tsan_atomic8;
|
|||
typedef short __tsan_atomic16;
|
||||
typedef int __tsan_atomic32;
|
||||
typedef long __tsan_atomic64;
|
||||
#if defined(__SIZEOF_INT128__) \
|
||||
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
|
||||
#if defined(__SIZEOF_INT128__) || \
|
||||
(__clang_major__ * 100 + __clang_minor__ >= 302)
|
||||
__extension__ typedef __int128 __tsan_atomic128;
|
||||
# define __TSAN_HAS_INT128 1
|
||||
#define __TSAN_HAS_INT128 1
|
||||
#else
|
||||
# define __TSAN_HAS_INT128 0
|
||||
#define __TSAN_HAS_INT128 0
|
||||
#endif
|
||||
|
||||
// Part of ABI, do not change.
|
||||
|
@ -40,182 +42,187 @@ typedef enum {
|
|||
__tsan_memory_order_seq_cst
|
||||
} __tsan_memory_order;
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
|
||||
__tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
|
||||
__tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
|
||||
__tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
|
||||
__tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL
|
||||
__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL
|
||||
__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL
|
||||
__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL
|
||||
__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
|
||||
__tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
|
||||
const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
|
||||
__tsan_memory_order mo);
|
||||
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
|
||||
__tsan_memory_order mo);
|
||||
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
|
||||
__tsan_memory_order mo);
|
||||
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
|
||||
__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v,
|
||||
__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v,
|
||||
__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v,
|
||||
__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v,
|
||||
__tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
|
||||
__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v,
|
||||
__tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 v, __tsan_memory_order mo);
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 v, __tsan_memory_order mo);
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
|
||||
#endif
|
||||
|
||||
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#endif
|
||||
|
||||
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
|
||||
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
|
||||
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
|
||||
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
|
||||
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
|
||||
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
|
||||
__tsan_memory_order fail_mo);
|
||||
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#endif
|
||||
|
||||
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
|
||||
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
|
||||
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
|
||||
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
|
||||
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
|
||||
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
|
||||
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
|
||||
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
|
||||
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#if __TSAN_HAS_INT128
|
||||
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
|
||||
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
|
||||
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
|
||||
__tsan_memory_order mo, __tsan_memory_order fail_mo);
|
||||
#endif
|
||||
|
||||
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
|
||||
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
|
||||
void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TSAN_INTERFACE_ATOMIC_H
|
||||
#endif // TSAN_INTERFACE_ATOMIC_H
|
||||
|
|
|
@ -23,10 +23,10 @@ extern "C" {
|
|||
/// <c>verbosity=1:halt_on_error=0</c>).
|
||||
///
|
||||
/// \returns Default options string.
|
||||
const char* __ubsan_default_options(void);
|
||||
const char *SANITIZER_CDECL __ubsan_default_options(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_UBSAN_INTERFACE_H
|
||||
#endif // SANITIZER_UBSAN_INTERFACE_H
|
||||
|
|
|
@ -14,9 +14,10 @@
|
|||
#ifndef INTERCEPTION_H
|
||||
#define INTERCEPTION_H
|
||||
|
||||
#include "sanitizer_common/sanitizer_asm.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
|
||||
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
|
||||
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
|
||||
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
|
||||
!SANITIZER_SOLARIS
|
||||
# error "Interception doesn't work on this operating system."
|
||||
|
@ -67,24 +68,50 @@ typedef __sanitizer::OFF64_T OFF64_T;
|
|||
// for more details). To intercept such functions you need to use the
|
||||
// INTERCEPTOR_WITH_SUFFIX(...) macro.
|
||||
|
||||
// How it works:
|
||||
// To replace system functions on Linux we just need to declare functions
|
||||
// with same names in our library and then obtain the real function pointers
|
||||
// using dlsym().
|
||||
// There is one complication. A user may also intercept some of the functions
|
||||
// we intercept. To resolve this we declare our interceptors with __interceptor_
|
||||
// prefix, and then make actual interceptors weak aliases to __interceptor_
|
||||
// functions.
|
||||
// How it works on Linux
|
||||
// ---------------------
|
||||
//
|
||||
// To replace system functions on Linux we just need to declare functions with
|
||||
// the same names in our library and then obtain the real function pointers
|
||||
// using dlsym().
|
||||
//
|
||||
// There is one complication: a user may also intercept some of the functions we
|
||||
// intercept. To allow for up to 3 interceptors (including ours) of a given
|
||||
// function "func", the interceptor implementation is in ___interceptor_func,
|
||||
// which is aliased by a weak function __interceptor_func, which in turn is
|
||||
// aliased (via a trampoline) by weak wrapper function "func".
|
||||
//
|
||||
// Most user interceptors should define a foreign interceptor as follows:
|
||||
//
|
||||
// - provide a non-weak function "func" that performs interception;
|
||||
// - if __interceptor_func exists, call it to perform the real functionality;
|
||||
// - if it does not exist, figure out the real function and call it instead.
|
||||
//
|
||||
// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
|
||||
// may be defined as follows (on supported architectures):
|
||||
//
|
||||
// - provide a non-weak function __interceptor_func that performs interception;
|
||||
// - if ___interceptor_func exists, call it to perform the real functionality;
|
||||
// - if it does not exist, figure out the real function and call it instead;
|
||||
// - provide a weak function "func" that is an alias to __interceptor_func.
|
||||
//
|
||||
// With this protocol, sanitizer interceptors, foreign user interceptors, and
|
||||
// foreign interceptors of other dynamic analysis runtimes, or any combination
|
||||
// thereof, may co-exist simultaneously.
|
||||
//
|
||||
// How it works on Mac OS
|
||||
// ----------------------
|
||||
//
|
||||
// This is not so on Mac OS, where the two-level namespace makes our replacement
|
||||
// functions invisible to other libraries. This may be overcomed using the
|
||||
// DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared libraries in
|
||||
// Chromium were noticed when doing so.
|
||||
//
|
||||
// This is not so on Mac OS, where the two-level namespace makes
|
||||
// our replacement functions invisible to other libraries. This may be overcomed
|
||||
// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
|
||||
// libraries in Chromium were noticed when doing so.
|
||||
// Instead we create a dylib containing a __DATA,__interpose section that
|
||||
// associates library functions with their wrappers. When this dylib is
|
||||
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
|
||||
// the calls to interposed functions done through stubs to the wrapper
|
||||
// functions.
|
||||
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
|
||||
// calls to interposed functions done through stubs to the wrapper functions.
|
||||
//
|
||||
// As it's decided at compile time which functions are to be intercepted on Mac,
|
||||
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
|
||||
|
||||
|
@ -100,53 +127,102 @@ struct interpose_substitution {
|
|||
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
|
||||
// the __DATA,__interpose section.
|
||||
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
|
||||
#define INTERPOSER(func_name) __attribute__((used)) \
|
||||
#define INTERPOSER(func_name) __attribute__((used)) \
|
||||
const interpose_substitution substitution_##func_name[] \
|
||||
__attribute__((section("__DATA, __interpose"))) = { \
|
||||
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
|
||||
reinterpret_cast<const uptr>(func_name) } \
|
||||
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
|
||||
reinterpret_cast<const uptr>(func_name) } \
|
||||
}
|
||||
|
||||
// For a function foo() and a wrapper function bar() create a global pair
|
||||
// of pointers { bar, foo } in the __DATA,__interpose section.
|
||||
// As a result all the calls to foo() will be routed to bar() at runtime.
|
||||
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
|
||||
const interpose_substitution substitution_##func_name[] \
|
||||
__attribute__((section("__DATA, __interpose"))) = { \
|
||||
{ reinterpret_cast<const uptr>(wrapper_name), \
|
||||
reinterpret_cast<const uptr>(func_name) } \
|
||||
const interpose_substitution substitution_##func_name[] \
|
||||
__attribute__((section("__DATA, __interpose"))) = { \
|
||||
{ reinterpret_cast<const uptr>(wrapper_name), \
|
||||
reinterpret_cast<const uptr>(func_name) } \
|
||||
}
|
||||
|
||||
# define WRAP(x) wrap_##x
|
||||
# define WRAPPER_NAME(x) "wrap_"#x
|
||||
# define TRAMPOLINE(x) WRAP(x)
|
||||
# define INTERCEPTOR_ATTRIBUTE
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...)
|
||||
|
||||
#elif SANITIZER_WINDOWS
|
||||
# define WRAP(x) __asan_wrap_##x
|
||||
# define WRAPPER_NAME(x) "__asan_wrap_"#x
|
||||
# define TRAMPOLINE(x) WRAP(x)
|
||||
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__);
|
||||
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
|
||||
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
|
||||
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
|
||||
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
# define WRAP(x) __interceptor_ ## x
|
||||
# define WRAPPER_NAME(x) "__interceptor_" #x
|
||||
#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
|
||||
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
|
||||
# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
|
||||
// Weak aliases of weak aliases do not work, therefore we need to set up a
|
||||
// trampoline function. The function "func" is a weak alias to the trampoline
|
||||
// (so that we may check if "func" was overridden), which calls the weak
|
||||
// function __interceptor_func, which in turn aliases the actual interceptor
|
||||
// implementation ___interceptor_func:
|
||||
//
|
||||
// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
|
||||
// |
|
||||
// +--------(tail call)-------+
|
||||
// |
|
||||
// v
|
||||
// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
|
||||
//
|
||||
// We use inline assembly to define most of this, because not all compilers
|
||||
// support functions with the "naked" attribute with every architecture.
|
||||
# define WRAP(x) ___interceptor_ ## x
|
||||
# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
|
||||
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
|
||||
// priority than weak ones so weak aliases won't work for indirect calls
|
||||
// in position-independent (-fPIC / -fPIE) mode.
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) \
|
||||
__attribute__((alias("__interceptor_" #func), visibility("default")));
|
||||
#elif !SANITIZER_FUCHSIA
|
||||
# define WRAP(x) __interceptor_ ## x
|
||||
# define WRAPPER_NAME(x) "__interceptor_" #x
|
||||
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) \
|
||||
__attribute__((weak, alias("__interceptor_" #func), visibility("default")));
|
||||
# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
|
||||
# else
|
||||
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
|
||||
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__); \
|
||||
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
|
||||
extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
|
||||
INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
|
||||
asm( \
|
||||
".text\n" \
|
||||
__ASM_WEAK_WRAPPER(func) \
|
||||
".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
|
||||
".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
|
||||
".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
|
||||
SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
|
||||
SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
|
||||
SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
|
||||
SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func)) "\n" \
|
||||
SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
|
||||
".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
|
||||
".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
|
||||
);
|
||||
# else // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
|
||||
// Some architectures cannot implement efficient interceptor trampolines with
|
||||
// just a plain jump due to complexities of resolving a preemptible symbol. In
|
||||
// those cases, revert to just this scheme:
|
||||
//
|
||||
// [wrapper "func": weak] --(alias)--> [WRAP(func)]
|
||||
//
|
||||
# define WRAP(x) __interceptor_ ## x
|
||||
# define TRAMPOLINE(x) WRAP(x)
|
||||
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
# define __ATTRIBUTE_WEAK_WRAPPER
|
||||
# else
|
||||
# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
|
||||
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) \
|
||||
INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
|
||||
# endif // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
|
||||
#endif
|
||||
|
||||
#if SANITIZER_FUCHSIA
|
||||
|
@ -162,10 +238,10 @@ const interpose_substitution substitution_##func_name[] \
|
|||
# define REAL(x) __interception::PTR_TO_REAL(x)
|
||||
# define FUNC_TYPE(x) x##_type
|
||||
|
||||
# define DECLARE_REAL(ret_type, func, ...) \
|
||||
# define DECLARE_REAL(ret_type, func, ...) \
|
||||
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
|
||||
namespace __interception { \
|
||||
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
|
||||
namespace __interception { \
|
||||
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
|
||||
}
|
||||
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
|
||||
#else // SANITIZER_APPLE
|
||||
|
@ -176,14 +252,16 @@ const interpose_substitution substitution_##func_name[] \
|
|||
#endif // SANITIZER_APPLE
|
||||
|
||||
#if !SANITIZER_FUCHSIA
|
||||
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
|
||||
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
|
||||
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
|
||||
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__);
|
||||
// Declare an interceptor and its wrapper defined in a different translation
|
||||
// unit (ex. asm).
|
||||
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
|
||||
extern "C" ret_type func(__VA_ARGS__);
|
||||
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
|
||||
extern "C" ret_type func(__VA_ARGS__);
|
||||
#else
|
||||
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
|
||||
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
|
||||
|
@ -215,12 +293,10 @@ const interpose_substitution substitution_##func_name[] \
|
|||
|
||||
#elif !SANITIZER_APPLE
|
||||
|
||||
#define INTERCEPTOR(ret_type, func, ...) \
|
||||
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
|
||||
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
|
||||
extern "C" \
|
||||
INTERCEPTOR_ATTRIBUTE \
|
||||
ret_type WRAP(func)(__VA_ARGS__)
|
||||
#define INTERCEPTOR(ret_type, func, ...) \
|
||||
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
|
||||
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
|
||||
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
|
||||
|
||||
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
|
||||
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
|
||||
|
@ -228,10 +304,10 @@ const interpose_substitution substitution_##func_name[] \
|
|||
|
||||
#else // SANITIZER_APPLE
|
||||
|
||||
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) suffix; \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
|
||||
INTERPOSER(func); \
|
||||
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) suffix; \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
|
||||
INTERPOSER(func); \
|
||||
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
|
||||
|
||||
#define INTERCEPTOR(ret_type, func, ...) \
|
||||
|
@ -246,14 +322,12 @@ const interpose_substitution substitution_##func_name[] \
|
|||
#endif
|
||||
|
||||
#if SANITIZER_WINDOWS
|
||||
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
|
||||
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
|
||||
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
|
||||
namespace __interception { \
|
||||
FUNC_TYPE(func) PTR_TO_REAL(func); \
|
||||
} \
|
||||
extern "C" \
|
||||
INTERCEPTOR_ATTRIBUTE \
|
||||
ret_type __stdcall WRAP(func)(__VA_ARGS__)
|
||||
namespace __interception { \
|
||||
FUNC_TYPE(func) PTR_TO_REAL(func); \
|
||||
} \
|
||||
extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
|
||||
|
|
|
@ -33,7 +33,7 @@ static int StrCmp(const char *s1, const char *s2) {
|
|||
}
|
||||
#endif
|
||||
|
||||
static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
|
||||
static void *GetFuncAddr(const char *name, uptr trampoline) {
|
||||
#if SANITIZER_NETBSD
|
||||
// FIXME: Find a better way to handle renames
|
||||
if (StrCmp(name, "sigaction"))
|
||||
|
@ -50,17 +50,17 @@ static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
|
|||
|
||||
// In case `name' is not loaded, dlsym ends up finding the actual wrapper.
|
||||
// We don't want to intercept the wrapper and have it point to itself.
|
||||
if ((uptr)addr == wrapper_addr)
|
||||
if ((uptr)addr == trampoline)
|
||||
addr = nullptr;
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
|
||||
uptr wrapper) {
|
||||
void *addr = GetFuncAddr(name, wrapper);
|
||||
uptr trampoline) {
|
||||
void *addr = GetFuncAddr(name, trampoline);
|
||||
*ptr_to_real = (uptr)addr;
|
||||
return addr && (func == wrapper);
|
||||
return addr && (func == trampoline);
|
||||
}
|
||||
|
||||
// dlvsym is a GNU extension supported by some other platforms.
|
||||
|
@ -70,12 +70,12 @@ static void *GetFuncAddr(const char *name, const char *ver) {
|
|||
}
|
||||
|
||||
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
|
||||
uptr func, uptr wrapper) {
|
||||
uptr func, uptr trampoline) {
|
||||
void *addr = GetFuncAddr(name, ver);
|
||||
*ptr_to_real = (uptr)addr;
|
||||
return addr && (func == wrapper);
|
||||
return addr && (func == trampoline);
|
||||
}
|
||||
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
# endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
|
||||
} // namespace __interception
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
SANITIZER_SOLARIS
|
||||
|
||||
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
|
||||
# error "interception_linux.h should be included from interception library only"
|
||||
# error interception_linux.h should be included from interception library only
|
||||
#endif
|
||||
|
||||
#ifndef INTERCEPTION_LINUX_H
|
||||
|
@ -23,26 +23,26 @@
|
|||
|
||||
namespace __interception {
|
||||
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
|
||||
uptr wrapper);
|
||||
uptr trampoline);
|
||||
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
|
||||
uptr func, uptr wrapper);
|
||||
uptr func, uptr trampoline);
|
||||
} // namespace __interception
|
||||
|
||||
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
|
||||
::__interception::InterceptFunction( \
|
||||
#func, \
|
||||
(::__interception::uptr *) & REAL(func), \
|
||||
(::__interception::uptr) & (func), \
|
||||
(::__interception::uptr) & WRAP(func))
|
||||
(::__interception::uptr *)&REAL(func), \
|
||||
(::__interception::uptr)&(func), \
|
||||
(::__interception::uptr)&TRAMPOLINE(func))
|
||||
|
||||
// dlvsym is a GNU extension supported by some other platforms.
|
||||
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
|
||||
::__interception::InterceptFunction( \
|
||||
#func, symver, \
|
||||
(::__interception::uptr *) & REAL(func), \
|
||||
(::__interception::uptr) & (func), \
|
||||
(::__interception::uptr) & WRAP(func))
|
||||
(::__interception::uptr *)&REAL(func), \
|
||||
(::__interception::uptr)&(func), \
|
||||
(::__interception::uptr)&TRAMPOLINE(func))
|
||||
#else
|
||||
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
|
||||
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
|
||||
//===-- interception_win.cpp ------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
|
@ -141,8 +141,29 @@ static const int kBranchLength =
|
|||
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
|
||||
static const int kDirectBranchLength = kBranchLength + kAddressLength;
|
||||
|
||||
# if defined(_MSC_VER)
|
||||
# define INTERCEPTION_FORMAT(f, a)
|
||||
# else
|
||||
# define INTERCEPTION_FORMAT(f, a) __attribute__((format(printf, f, a)))
|
||||
# endif
|
||||
|
||||
static void (*ErrorReportCallback)(const char *format, ...)
|
||||
INTERCEPTION_FORMAT(1, 2);
|
||||
|
||||
void SetErrorReportCallback(void (*callback)(const char *format, ...)) {
|
||||
ErrorReportCallback = callback;
|
||||
}
|
||||
|
||||
# define ReportError(...) \
|
||||
do { \
|
||||
if (ErrorReportCallback) \
|
||||
ErrorReportCallback(__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
static void InterceptionFailed() {
|
||||
// Do we have a good way to abort with an error message here?
|
||||
ReportError("interception_win: failed due to an unrecoverable error.\n");
|
||||
// This acts like an abort when no debugger is attached. According to an old
|
||||
// comment, calling abort() leads to an infinite recursion in CheckFailed.
|
||||
__debugbreak();
|
||||
}
|
||||
|
||||
|
@ -249,8 +270,13 @@ static void WritePadding(uptr from, uptr size) {
|
|||
}
|
||||
|
||||
static void WriteJumpInstruction(uptr from, uptr target) {
|
||||
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
|
||||
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target)) {
|
||||
ReportError(
|
||||
"interception_win: cannot write jmp further than 2GB away, from %p to "
|
||||
"%p.\n",
|
||||
(void *)from, (void *)target);
|
||||
InterceptionFailed();
|
||||
}
|
||||
ptrdiff_t offset = target - from - kJumpInstructionLength;
|
||||
*(u8*)from = 0xE9;
|
||||
*(u32*)(from + 1) = offset;
|
||||
|
@ -274,6 +300,10 @@ static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
|
|||
int offset = indirect_target - from - kIndirectJumpInstructionLength;
|
||||
if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
|
||||
indirect_target)) {
|
||||
ReportError(
|
||||
"interception_win: cannot write indirect jmp with target further than "
|
||||
"2GB away, from %p to %p.\n",
|
||||
(void *)from, (void *)indirect_target);
|
||||
InterceptionFailed();
|
||||
}
|
||||
*(u16*)from = 0x25FF;
|
||||
|
@ -427,6 +457,11 @@ static const u8 kPrologueWithShortJump2[] = {
|
|||
|
||||
// Returns 0 on error.
|
||||
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
||||
#if SANITIZER_ARM64
|
||||
// An ARM64 instruction is 4 bytes long.
|
||||
return 4;
|
||||
#endif
|
||||
|
||||
#if SANITIZER_WINDOWS64
|
||||
if (memcmp((u8*)address, kPrologueWithShortJump1,
|
||||
sizeof(kPrologueWithShortJump1)) == 0 ||
|
||||
|
@ -492,6 +527,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
|||
case 0xFF8B: // 8B FF : mov edi, edi
|
||||
case 0xEC8B: // 8B EC : mov ebp, esp
|
||||
case 0xc889: // 89 C8 : mov eax, ecx
|
||||
case 0xE589: // 89 E5 : mov ebp, esp
|
||||
case 0xC18B: // 8B C1 : mov eax, ecx
|
||||
case 0xC033: // 33 C0 : xor eax, eax
|
||||
case 0xC933: // 33 C9 : xor ecx, ecx
|
||||
|
@ -588,7 +624,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
|||
// mov rax, QWORD PTR [rip + XXXXXXXX]
|
||||
case 0x25ff48: // 48 ff 25 XX XX XX XX :
|
||||
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
|
||||
|
||||
case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
|
||||
// Instructions having offset relative to 'rip' need offset adjustment.
|
||||
if (rel_offset)
|
||||
*rel_offset = 3;
|
||||
|
@ -641,6 +677,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
|||
case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
|
||||
case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
|
||||
case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
|
||||
case 0x245C8B: // 8B 5C 24 XX : mov ebx, dword ptr [esp + XX]
|
||||
case 0x246C8B: // 8B 6C 24 XX : mov ebp, dword ptr [esp + XX]
|
||||
case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
|
||||
case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
|
||||
return 4;
|
||||
|
@ -652,12 +690,20 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
|||
}
|
||||
#endif
|
||||
|
||||
// Unknown instruction!
|
||||
// FIXME: Unknown instruction failures might happen when we add a new
|
||||
// interceptor or a new compiler version. In either case, they should result
|
||||
// in visible and readable error messages. However, merely calling abort()
|
||||
// leads to an infinite recursion in CheckFailed.
|
||||
InterceptionFailed();
|
||||
// Unknown instruction! This might happen when we add a new interceptor, use
|
||||
// a new compiler version, or if Windows changed how some functions are
|
||||
// compiled. In either case, we print the address and 8 bytes of instructions
|
||||
// to notify the user about the error and to help identify the unknown
|
||||
// instruction. Don't treat this as a fatal error, though we can break the
|
||||
// debugger if one has been attached.
|
||||
u8 *bytes = (u8 *)address;
|
||||
ReportError(
|
||||
"interception_win: unhandled instruction at %p: %02x %02x %02x %02x %02x "
|
||||
"%02x %02x %02x\n",
|
||||
(void *)address, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
|
||||
bytes[5], bytes[6], bytes[7]);
|
||||
if (::IsDebuggerPresent())
|
||||
__debugbreak();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -678,16 +724,24 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
|
|||
while (cursor != size) {
|
||||
size_t rel_offset = 0;
|
||||
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
|
||||
_memcpy((void*)(to + cursor), (void*)(from + cursor),
|
||||
if (!instruction_size)
|
||||
return false;
|
||||
_memcpy((void *)(to + cursor), (void *)(from + cursor),
|
||||
(size_t)instruction_size);
|
||||
if (rel_offset) {
|
||||
uptr delta = to - from;
|
||||
uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
|
||||
#if SANITIZER_WINDOWS64
|
||||
if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
|
||||
# if SANITIZER_WINDOWS64
|
||||
// we want to make sure that the new relative offset still fits in 32-bits
|
||||
// this will be untrue if relocated_offset \notin [-2**31, 2**31)
|
||||
s64 delta = to - from;
|
||||
s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
|
||||
if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll)
|
||||
return false;
|
||||
#endif
|
||||
*(u32*)(to + cursor + rel_offset) = relocated_offset;
|
||||
# else
|
||||
// on 32-bit, the relative offset will always be correct
|
||||
s32 delta = to - from;
|
||||
s32 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
|
||||
# endif
|
||||
*(s32 *)(to + cursor + rel_offset) = relocated_offset;
|
||||
}
|
||||
cursor += instruction_size;
|
||||
}
|
||||
|
@ -895,6 +949,10 @@ static void **InterestingDLLsAvailable() {
|
|||
"msvcr120.dll", // VS2013
|
||||
"vcruntime140.dll", // VS2015
|
||||
"ucrtbase.dll", // Universal CRT
|
||||
#if (defined(__MINGW32__) && defined(__i386__))
|
||||
"libc++.dll", // libc++
|
||||
"libunwind.dll", // libunwind
|
||||
#endif
|
||||
// NTDLL should go last as it exports some functions that we should
|
||||
// override in the CRT [presumably only used internally].
|
||||
"ntdll.dll", NULL};
|
||||
|
|
|
@ -41,6 +41,11 @@ bool OverrideImportedFunction(const char *module_to_patch,
|
|||
const char *function_name, uptr new_function,
|
||||
uptr *orig_old_func);
|
||||
|
||||
// Sets a callback to be used for reporting errors by interception_win. The
|
||||
// callback will be called with printf-like arguments. Intended to be used with
|
||||
// __sanitizer::Report. Pass nullptr to disable error reporting (default).
|
||||
void SetErrorReportCallback(void (*callback)(const char *format, ...));
|
||||
|
||||
#if !SANITIZER_WINDOWS64
|
||||
// Exposed for unittests
|
||||
bool OverrideFunctionWithDetour(
|
||||
|
|
|
@ -97,7 +97,7 @@ extern "C" void __lsan_init() {
|
|||
ReplaceSystemMalloc();
|
||||
InitTlsSize();
|
||||
InitializeInterceptors();
|
||||
InitializeThreadRegistry();
|
||||
InitializeThreads();
|
||||
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
|
||||
InitializeMainThread();
|
||||
InstallAtExitCheckLeaks();
|
||||
|
|
|
@ -49,8 +49,11 @@ void InitializeAllocator() {
|
|||
max_malloc_size = kMaxAllowedMallocSize;
|
||||
}
|
||||
|
||||
void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }
|
||||
|
||||
void AllocatorThreadFinish() {
|
||||
allocator.SwallowCache(GetAllocatorCache());
|
||||
allocator.DestroyCache(GetAllocatorCache());
|
||||
}
|
||||
|
||||
static ChunkMetadata *Metadata(const void *p) {
|
||||
|
@ -65,12 +68,14 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
|
|||
m->stack_trace_id = StackDepotPut(stack);
|
||||
m->requested_size = size;
|
||||
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
|
||||
RunMallocHooks(p, size);
|
||||
}
|
||||
|
||||
static void RegisterDeallocation(void *p) {
|
||||
if (!p) return;
|
||||
ChunkMetadata *m = Metadata(p);
|
||||
CHECK(m);
|
||||
RunFreeHooks(p);
|
||||
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
|
||||
}
|
||||
|
||||
|
@ -104,7 +109,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
|
|||
if (cleared && allocator.FromPrimary(p))
|
||||
memset(p, 0, size);
|
||||
RegisterAllocation(stack, p, size);
|
||||
RunMallocHooks(p, size);
|
||||
return p;
|
||||
}
|
||||
|
||||
|
@ -119,7 +123,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
|
|||
}
|
||||
|
||||
void Deallocate(void *p) {
|
||||
RunFreeHooks(p);
|
||||
RegisterDeallocation(p);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
}
|
||||
|
@ -169,6 +172,10 @@ uptr GetMallocUsableSize(const void *p) {
|
|||
return m->requested_size;
|
||||
}
|
||||
|
||||
uptr GetMallocUsableSizeFast(const void *p) {
|
||||
return Metadata(p)->requested_size;
|
||||
}
|
||||
|
||||
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||
const StackTrace &stack) {
|
||||
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
|
||||
|
@ -339,15 +346,6 @@ IgnoreObjectResult IgnoreObject(const void *p) {
|
|||
}
|
||||
}
|
||||
|
||||
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
|
||||
// This function can be used to treat memory reachable from `tctx` as live.
|
||||
// This is useful for threads that have been created but not yet started.
|
||||
|
||||
// This is currently a no-op because the LSan `pthread_create()` interceptor
|
||||
// blocks until the child thread starts which keeps the thread's `arg` pointer
|
||||
// live.
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
|
||||
using namespace __lsan;
|
||||
|
@ -368,7 +366,7 @@ uptr __sanitizer_get_heap_size() {
|
|||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
uptr __sanitizer_get_free_bytes() { return 0; }
|
||||
uptr __sanitizer_get_free_bytes() { return 1; }
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
uptr __sanitizer_get_unmapped_bytes() { return 0; }
|
||||
|
@ -377,7 +375,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
|
|||
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
|
||||
int __sanitizer_get_ownership(const void *p) {
|
||||
return GetMallocBegin(p) != nullptr;
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
const void * __sanitizer_get_allocated_begin(const void *p) {
|
||||
|
@ -389,4 +389,15 @@ uptr __sanitizer_get_allocated_size(const void *p) {
|
|||
return GetMallocUsableSize(p);
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
uptr __sanitizer_get_allocated_size_fast(const void *p) {
|
||||
DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
|
||||
uptr ret = GetMallocUsableSizeFast(p);
|
||||
DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
|
||||
return ret;
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
|
||||
|
||||
} // extern "C"
|
||||
|
|
|
@ -32,6 +32,7 @@ template<typename Callable>
|
|||
void ForEachChunk(const Callable &callback);
|
||||
|
||||
void GetAllocatorCacheRange(uptr *begin, uptr *end);
|
||||
void AllocatorThreadStart();
|
||||
void AllocatorThreadFinish();
|
||||
void InitializeAllocator();
|
||||
|
||||
|
@ -67,20 +68,42 @@ using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
|
|||
#else
|
||||
# if SANITIZER_FUCHSIA || defined(__powerpc64__)
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
# if SANITIZER_RISCV64
|
||||
// See the comments in compiler-rt/lib/asan/asan_allocator.h for why these
|
||||
// values were chosen.
|
||||
const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
|
||||
using LSanSizeClassMap = SizeClassMap</*kNumBits=*/2,
|
||||
/*kMinSizeLog=*/5,
|
||||
/*kMidSizeLog=*/8,
|
||||
/*kMaxSizeLog=*/18,
|
||||
/*kNumCachedHintT=*/8,
|
||||
/*kMaxBytesCachedLog=*/10>;
|
||||
static_assert(LSanSizeClassMap::kNumClassesRounded <= 32,
|
||||
"32 size classes is the optimal number to ensure tests run "
|
||||
"effieciently on Fuchsia.");
|
||||
# else
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
using LSanSizeClassMap = DefaultSizeClassMap;
|
||||
# endif
|
||||
# elif SANITIZER_RISCV64
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
|
||||
using LSanSizeClassMap = DefaultSizeClassMap;
|
||||
# elif SANITIZER_APPLE
|
||||
const uptr kAllocatorSpace = 0x600000000000ULL;
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
using LSanSizeClassMap = DefaultSizeClassMap;
|
||||
# else
|
||||
const uptr kAllocatorSpace = 0x500000000000ULL;
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
using LSanSizeClassMap = DefaultSizeClassMap;
|
||||
# endif
|
||||
template <typename AddressSpaceViewTy>
|
||||
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
||||
static const uptr kSpaceBeg = kAllocatorSpace;
|
||||
static const uptr kSpaceSize = kAllocatorSize;
|
||||
static const uptr kMetadataSize = sizeof(ChunkMetadata);
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
using SizeClassMap = LSanSizeClassMap;
|
||||
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
||||
static const uptr kFlags = 0;
|
||||
using AddressSpaceView = AddressSpaceViewTy;
|
||||
|
|
|
@ -34,15 +34,13 @@
|
|||
# else
|
||||
# define OBJC_DATA_MASK 0x00007ffffffffff8UL
|
||||
# endif
|
||||
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
|
||||
# define OBJC_FAST_IS_RW 0x8000000000000000UL
|
||||
# endif
|
||||
|
||||
namespace __lsan {
|
||||
|
||||
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
|
||||
// also to protect the global list of root regions.
|
||||
Mutex global_mutex;
|
||||
static Mutex global_mutex;
|
||||
|
||||
Flags lsan_flags;
|
||||
|
||||
|
@ -173,13 +171,11 @@ static uptr GetCallerPC(const StackTrace &stack) {
|
|||
}
|
||||
|
||||
# if SANITIZER_APPLE
|
||||
// Objective-C class data pointers are stored with flags in the low bits, so
|
||||
// they need to be transformed back into something that looks like a pointer.
|
||||
static inline void *MaybeTransformPointer(void *p) {
|
||||
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
|
||||
// for example) are tagged with additional bits we need to strip.
|
||||
static inline void *TransformPointer(void *p) {
|
||||
uptr ptr = reinterpret_cast<uptr>(p);
|
||||
if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
|
||||
ptr &= OBJC_DATA_MASK;
|
||||
return reinterpret_cast<void *>(ptr);
|
||||
return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
|
||||
}
|
||||
# endif
|
||||
|
||||
|
@ -241,12 +237,6 @@ static LeakSuppressionContext *GetSuppressionContext() {
|
|||
return suppression_ctx;
|
||||
}
|
||||
|
||||
static InternalMmapVectorNoCtor<RootRegion> root_regions;
|
||||
|
||||
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
|
||||
return &root_regions;
|
||||
}
|
||||
|
||||
void InitCommonLsan() {
|
||||
if (common_flags()->detect_leaks) {
|
||||
// Initialization which can fail or print warnings should only be done if
|
||||
|
@ -270,9 +260,14 @@ static inline bool MaybeUserPointer(uptr p) {
|
|||
if (p < kMinAddress)
|
||||
return false;
|
||||
# if defined(__x86_64__)
|
||||
// TODO: add logic similar to ARM when Intel LAM is available.
|
||||
// Accept only canonical form user-space addresses.
|
||||
return ((p >> 47) == 0);
|
||||
// TODO: support LAM48 and 5 level page tables.
|
||||
// LAM_U57 mask format
|
||||
// * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
|
||||
// * top-1 byte: 0xff because it should be 0
|
||||
// * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
|
||||
constexpr uptr kLAM_U57Mask = 0x81ff80;
|
||||
constexpr uptr kPointerMask = kLAM_U57Mask << 40;
|
||||
return ((p & kPointerMask) == 0);
|
||||
# elif defined(__mips64)
|
||||
return ((p >> 40) == 0);
|
||||
# elif defined(__aarch64__)
|
||||
|
@ -307,7 +302,7 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
|
|||
for (; pp + sizeof(void *) <= end; pp += alignment) {
|
||||
void *p = *reinterpret_cast<void **>(pp);
|
||||
# if SANITIZER_APPLE
|
||||
p = MaybeTransformPointer(p);
|
||||
p = TransformPointer(p);
|
||||
# endif
|
||||
if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
|
||||
continue;
|
||||
|
@ -527,38 +522,52 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
|||
|
||||
# endif // SANITIZER_FUCHSIA
|
||||
|
||||
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
|
||||
uptr region_begin, uptr region_end, bool is_readable) {
|
||||
uptr intersection_begin = Max(root_region.begin, region_begin);
|
||||
uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
|
||||
if (intersection_begin >= intersection_end)
|
||||
return;
|
||||
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
|
||||
(void *)root_region.begin,
|
||||
(void *)(root_region.begin + root_region.size),
|
||||
(void *)region_begin, (void *)region_end,
|
||||
is_readable ? "readable" : "unreadable");
|
||||
if (is_readable)
|
||||
ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
|
||||
kReachable);
|
||||
// A map that contains [region_begin, region_end) pairs.
|
||||
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
|
||||
|
||||
static RootRegions &GetRootRegionsLocked() {
|
||||
global_mutex.CheckLocked();
|
||||
static RootRegions *regions = nullptr;
|
||||
alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
|
||||
if (!regions)
|
||||
regions = new (placeholder) RootRegions();
|
||||
return *regions;
|
||||
}
|
||||
|
||||
static void ProcessRootRegion(Frontier *frontier,
|
||||
const RootRegion &root_region) {
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
|
||||
MemoryMappedSegment segment;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
ScanRootRegion(frontier, root_region, segment.start, segment.end,
|
||||
segment.IsReadable());
|
||||
bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
|
||||
|
||||
void ScanRootRegions(Frontier *frontier,
|
||||
const InternalMmapVectorNoCtor<Region> &mapped_regions) {
|
||||
if (!flags()->use_root_regions)
|
||||
return;
|
||||
|
||||
InternalMmapVector<Region> regions;
|
||||
GetRootRegionsLocked().forEach([&](const auto &kv) {
|
||||
regions.push_back({kv.first.first, kv.first.second});
|
||||
return true;
|
||||
});
|
||||
|
||||
InternalMmapVector<Region> intersection;
|
||||
Intersect(mapped_regions, regions, intersection);
|
||||
|
||||
for (const Region &r : intersection) {
|
||||
LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
|
||||
(void *)r.begin, (void *)r.end);
|
||||
ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
|
||||
}
|
||||
}
|
||||
|
||||
// Scans root regions for heap pointers.
|
||||
static void ProcessRootRegions(Frontier *frontier) {
|
||||
if (!flags()->use_root_regions)
|
||||
if (!flags()->use_root_regions || !HasRootRegions())
|
||||
return;
|
||||
for (uptr i = 0; i < root_regions.size(); i++)
|
||||
ProcessRootRegion(frontier, root_regions[i]);
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
|
||||
MemoryMappedSegment segment;
|
||||
InternalMmapVector<Region> mapped_regions;
|
||||
while (proc_maps.Next(&segment))
|
||||
if (segment.IsReadable())
|
||||
mapped_regions.push_back({segment.start, segment.end});
|
||||
ScanRootRegions(frontier, mapped_regions);
|
||||
}
|
||||
|
||||
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
|
||||
|
@ -941,8 +950,8 @@ void LeakReport::PrintSummary() {
|
|||
allocations += leaks_[i].hit_count;
|
||||
}
|
||||
InternalScopedString summary;
|
||||
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
|
||||
allocations);
|
||||
summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
|
||||
allocations);
|
||||
ReportErrorSummary(summary.data());
|
||||
}
|
||||
|
||||
|
@ -1013,36 +1022,37 @@ void __lsan_ignore_object(const void *p) {
|
|||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __lsan_register_root_region(const void *begin, uptr size) {
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
Lock l(&global_mutex);
|
||||
RootRegion region = {reinterpret_cast<uptr>(begin), size};
|
||||
root_regions.push_back(region);
|
||||
VReport(1, "Registered root region at %p of size %zu\n", begin, size);
|
||||
uptr b = reinterpret_cast<uptr>(begin);
|
||||
uptr e = b + size;
|
||||
CHECK_LT(b, e);
|
||||
|
||||
Lock l(&global_mutex);
|
||||
++GetRootRegionsLocked()[{b, e}];
|
||||
#endif // CAN_SANITIZE_LEAKS
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __lsan_unregister_root_region(const void *begin, uptr size) {
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
Lock l(&global_mutex);
|
||||
bool removed = false;
|
||||
for (uptr i = 0; i < root_regions.size(); i++) {
|
||||
RootRegion region = root_regions[i];
|
||||
if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
|
||||
removed = true;
|
||||
uptr last_index = root_regions.size() - 1;
|
||||
root_regions[i] = root_regions[last_index];
|
||||
root_regions.pop_back();
|
||||
VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
|
||||
break;
|
||||
uptr b = reinterpret_cast<uptr>(begin);
|
||||
uptr e = b + size;
|
||||
CHECK_LT(b, e);
|
||||
VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
|
||||
|
||||
{
|
||||
Lock l(&global_mutex);
|
||||
if (auto *f = GetRootRegionsLocked().find({b, e})) {
|
||||
if (--(f->second) == 0)
|
||||
GetRootRegionsLocked().erase(f);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!removed) {
|
||||
Report(
|
||||
"__lsan_unregister_root_region(): region at %p of size %zu has not "
|
||||
"been registered.\n",
|
||||
begin, size);
|
||||
Die();
|
||||
}
|
||||
Report(
|
||||
"__lsan_unregister_root_region(): region at %p of size %zu has not "
|
||||
"been registered.\n",
|
||||
begin, size);
|
||||
Die();
|
||||
#endif // CAN_SANITIZE_LEAKS
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#include "sanitizer_common/sanitizer_range.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
#include "sanitizer_common/sanitizer_stoptheworld.h"
|
||||
#include "sanitizer_common/sanitizer_symbolizer.h"
|
||||
|
@ -79,11 +80,6 @@ enum IgnoreObjectResult {
|
|||
kIgnoreObjectInvalid
|
||||
};
|
||||
|
||||
struct Range {
|
||||
uptr begin;
|
||||
uptr end;
|
||||
};
|
||||
|
||||
//// --------------------------------------------------------------------------
|
||||
//// Poisoning prototypes.
|
||||
//// --------------------------------------------------------------------------
|
||||
|
@ -96,8 +92,8 @@ bool WordIsPoisoned(uptr addr);
|
|||
//// --------------------------------------------------------------------------
|
||||
|
||||
// Wrappers for ThreadRegistry access.
|
||||
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
|
||||
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
|
||||
void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
|
||||
void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
|
||||
// If called from the main thread, updates the main thread's TID in the thread
|
||||
// registry. We need this to handle processes that fork() without a subsequent
|
||||
// exec(), which invalidates the recorded TID. To update it, we must call
|
||||
|
@ -160,13 +156,13 @@ IgnoreObjectResult IgnoreObject(const void *p);
|
|||
|
||||
struct ScopedStopTheWorldLock {
|
||||
ScopedStopTheWorldLock() {
|
||||
LockThreadRegistry();
|
||||
LockThreads();
|
||||
LockAllocator();
|
||||
}
|
||||
|
||||
~ScopedStopTheWorldLock() {
|
||||
UnlockAllocator();
|
||||
UnlockThreadRegistry();
|
||||
UnlockThreads();
|
||||
}
|
||||
|
||||
ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
|
||||
|
@ -239,11 +235,6 @@ void InitializePlatformSpecificModules();
|
|||
void ProcessGlobalRegions(Frontier *frontier);
|
||||
void ProcessPlatformSpecificAllocations(Frontier *frontier);
|
||||
|
||||
struct RootRegion {
|
||||
uptr begin;
|
||||
uptr size;
|
||||
};
|
||||
|
||||
// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
|
||||
// this Frontier vector before the StopTheWorldCallback actually runs.
|
||||
// This is used when the OS has a unified callback API for suspending
|
||||
|
@ -256,9 +247,11 @@ struct CheckForLeaksParam {
|
|||
bool success = false;
|
||||
};
|
||||
|
||||
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
|
||||
void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
|
||||
uptr region_begin, uptr region_end, bool is_readable);
|
||||
using Region = Range;
|
||||
|
||||
bool HasRootRegions();
|
||||
void ScanRootRegions(Frontier *frontier,
|
||||
const InternalMmapVectorNoCtor<Region> ®ion);
|
||||
// Run stoptheworld while holding any platform-specific locks, as well as the
|
||||
// allocator and thread registry locks.
|
||||
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
|
||||
|
|
|
@ -119,7 +119,8 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
|
|||
auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
|
||||
if (i < params->allocator_caches.size() &&
|
||||
params->allocator_caches[i] >= begin &&
|
||||
end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
|
||||
params->allocator_caches[i] <= end &&
|
||||
end - params->allocator_caches[i] >= sizeof(AllocatorCache)) {
|
||||
// Split the range in two and omit the allocator cache within.
|
||||
ScanRangeForPointers(begin, params->allocator_caches[i],
|
||||
¶ms->argument->frontier, "TLS", kReachable);
|
||||
|
|
|
@ -165,7 +165,8 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
|||
vm_address_t address = 0;
|
||||
kern_return_t err = KERN_SUCCESS;
|
||||
|
||||
InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
|
||||
InternalMmapVector<Region> mapped_regions;
|
||||
bool use_root_regions = flags()->use_root_regions && HasRootRegions();
|
||||
|
||||
RegionScanState scan_state;
|
||||
while (err == KERN_SUCCESS) {
|
||||
|
@ -203,8 +204,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
|||
|
||||
// Recursing over the full memory map is very slow, break out
|
||||
// early if we don't need the full iteration.
|
||||
if (scan_state.seen_regions == SeenRegion::All &&
|
||||
!(flags()->use_root_regions && root_regions->size() > 0)) {
|
||||
if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -215,15 +215,12 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
|||
//
|
||||
// TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
|
||||
// behavior as sanitizer_procmaps_linux and traverses all memory regions
|
||||
if (flags()->use_root_regions) {
|
||||
for (uptr i = 0; i < root_regions->size(); i++) {
|
||||
ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
|
||||
info.protection & kProtectionRead);
|
||||
}
|
||||
}
|
||||
if (use_root_regions && (info.protection & kProtectionRead))
|
||||
mapped_regions.push_back({address, end_address});
|
||||
|
||||
address = end_address;
|
||||
}
|
||||
ScanRootRegions(frontier, mapped_regions);
|
||||
}
|
||||
|
||||
// On darwin, we can intercept _exit gracefully, and return a failing exit code
|
||||
|
|
|
@ -197,7 +197,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
|
|||
#endif // SANITIZER_INTERCEPT_PVALLOC
|
||||
|
||||
#if SANITIZER_INTERCEPT_CFREE
|
||||
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
|
||||
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAP(free));
|
||||
#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
|
||||
#else
|
||||
#define LSAN_MAYBE_INTERCEPT_CFREE
|
||||
|
@ -415,16 +415,10 @@ INTERCEPTOR(char *, strerror, int errnum) {
|
|||
|
||||
#if SANITIZER_POSIX
|
||||
|
||||
struct ThreadParam {
|
||||
void *(*callback)(void *arg);
|
||||
void *param;
|
||||
atomic_uintptr_t tid;
|
||||
};
|
||||
|
||||
extern "C" void *__lsan_thread_start_func(void *arg) {
|
||||
ThreadParam *p = (ThreadParam*)arg;
|
||||
void* (*callback)(void *arg) = p->callback;
|
||||
void *param = p->param;
|
||||
template <bool Detached>
|
||||
static void *ThreadStartFunc(void *arg) {
|
||||
u32 parent_tid = (uptr)arg;
|
||||
uptr tid = ThreadCreate(parent_tid, Detached);
|
||||
// Wait until the last iteration to maximize the chance that we are the last
|
||||
// destructor to run.
|
||||
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
|
||||
|
@ -433,56 +427,104 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
|
|||
Report("LeakSanitizer: failed to set thread key.\n");
|
||||
Die();
|
||||
}
|
||||
#endif
|
||||
int tid = 0;
|
||||
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
|
||||
internal_sched_yield();
|
||||
# endif
|
||||
ThreadStart(tid, GetTid());
|
||||
atomic_store(&p->tid, 0, memory_order_release);
|
||||
return callback(param);
|
||||
auto self = GetThreadSelf();
|
||||
auto args = GetThreadArgRetval().GetArgs(self);
|
||||
void *retval = (*args.routine)(args.arg_retval);
|
||||
GetThreadArgRetval().Finish(self, retval);
|
||||
return retval;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_create, void *th, void *attr,
|
||||
void *(*callback)(void *), void *param) {
|
||||
ENSURE_LSAN_INITED;
|
||||
EnsureMainThreadIDIsCorrect();
|
||||
|
||||
bool detached = [attr]() {
|
||||
int d = 0;
|
||||
return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
|
||||
}();
|
||||
|
||||
__sanitizer_pthread_attr_t myattr;
|
||||
if (!attr) {
|
||||
pthread_attr_init(&myattr);
|
||||
attr = &myattr;
|
||||
}
|
||||
AdjustStackSize(attr);
|
||||
int detached = 0;
|
||||
pthread_attr_getdetachstate(attr, &detached);
|
||||
ThreadParam p;
|
||||
p.callback = callback;
|
||||
p.param = param;
|
||||
atomic_store(&p.tid, 0, memory_order_relaxed);
|
||||
int res;
|
||||
uptr this_tid = GetCurrentThreadId();
|
||||
int result;
|
||||
{
|
||||
// Ignore all allocations made by pthread_create: thread stack/TLS may be
|
||||
// stored by pthread for future reuse even after thread destruction, and
|
||||
// the linked list it's stored in doesn't even hold valid pointers to the
|
||||
// objects, the latter are calculated by obscure pointer arithmetic.
|
||||
ScopedInterceptorDisabler disabler;
|
||||
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
|
||||
}
|
||||
if (res == 0) {
|
||||
int tid = ThreadCreate(GetCurrentThreadId(), IsStateDetached(detached));
|
||||
CHECK_NE(tid, kMainTid);
|
||||
atomic_store(&p.tid, tid, memory_order_release);
|
||||
while (atomic_load(&p.tid, memory_order_acquire) != 0)
|
||||
internal_sched_yield();
|
||||
GetThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
|
||||
result = REAL(pthread_create)(
|
||||
th, attr, detached ? ThreadStartFunc<true> : ThreadStartFunc<false>,
|
||||
(void *)this_tid);
|
||||
return result ? 0 : *(uptr *)(th);
|
||||
});
|
||||
}
|
||||
if (attr == &myattr)
|
||||
pthread_attr_destroy(&myattr);
|
||||
return res;
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
|
||||
return REAL(pthread_join)(t, arg);
|
||||
INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
|
||||
int result;
|
||||
GetThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_join)(thread, retval);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, pthread_detach, void *thread) {
|
||||
int result;
|
||||
GetThreadArgRetval().Detach((uptr)thread, [&]() {
|
||||
result = REAL(pthread_detach)(thread);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, pthread_exit, void *retval) {
|
||||
GetThreadArgRetval().Finish(GetThreadSelf(), retval);
|
||||
REAL(pthread_exit)(retval);
|
||||
}
|
||||
|
||||
# if SANITIZER_INTERCEPT_TRYJOIN
|
||||
INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
|
||||
int result;
|
||||
GetThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_tryjoin_np)(thread, ret);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
# define LSAN_MAYBE_INTERCEPT_TRYJOIN INTERCEPT_FUNCTION(pthread_tryjoin_np)
|
||||
# else
|
||||
# define LSAN_MAYBE_INTERCEPT_TRYJOIN
|
||||
# endif // SANITIZER_INTERCEPT_TRYJOIN
|
||||
|
||||
# if SANITIZER_INTERCEPT_TIMEDJOIN
|
||||
INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
|
||||
const struct timespec *abstime) {
|
||||
int result;
|
||||
GetThreadArgRetval().Join((uptr)thread, [&]() {
|
||||
result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
|
||||
return !result;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN \
|
||||
INTERCEPT_FUNCTION(pthread_timedjoin_np)
|
||||
# else
|
||||
# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN
|
||||
# endif // SANITIZER_INTERCEPT_TIMEDJOIN
|
||||
|
||||
DEFINE_REAL_PTHREAD_FUNCTIONS
|
||||
|
||||
INTERCEPTOR(void, _exit, int status) {
|
||||
|
@ -491,6 +533,7 @@ INTERCEPTOR(void, _exit, int status) {
|
|||
}
|
||||
|
||||
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
|
||||
#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_LSAN_INITED
|
||||
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
|
||||
|
||||
#endif // SANITIZER_POSIX
|
||||
|
@ -518,6 +561,10 @@ void InitializeInterceptors() {
|
|||
LSAN_MAYBE_INTERCEPT_MALLOPT;
|
||||
INTERCEPT_FUNCTION(pthread_create);
|
||||
INTERCEPT_FUNCTION(pthread_join);
|
||||
INTERCEPT_FUNCTION(pthread_detach);
|
||||
INTERCEPT_FUNCTION(pthread_exit);
|
||||
LSAN_MAYBE_INTERCEPT_TIMEDJOIN;
|
||||
LSAN_MAYBE_INTERCEPT_TRYJOIN;
|
||||
INTERCEPT_FUNCTION(_exit);
|
||||
|
||||
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
|
||||
|
|
|
@ -80,7 +80,7 @@ extern "C" void lsan_dispatch_call_block_and_release(void *block) {
|
|||
VReport(2,
|
||||
"lsan_dispatch_call_block_and_release(): "
|
||||
"context: %p, pthread_self: %p\n",
|
||||
block, pthread_self());
|
||||
block, (void*)pthread_self());
|
||||
lsan_register_worker_thread(context->parent_tid);
|
||||
// Call the original dispatcher for the block.
|
||||
context->func(context->block);
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
namespace __lsan {
|
||||
|
||||
static ThreadRegistry *thread_registry;
|
||||
static ThreadArgRetval *thread_arg_retval;
|
||||
|
||||
static Mutex mu_for_thread_context;
|
||||
static LowLevelAllocator allocator_for_thread_context;
|
||||
|
@ -33,16 +34,26 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
|
|||
return new (allocator_for_thread_context) ThreadContext(tid);
|
||||
}
|
||||
|
||||
void InitializeThreadRegistry() {
|
||||
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
|
||||
void InitializeThreads() {
|
||||
static ALIGNED(alignof(
|
||||
ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
|
||||
thread_registry =
|
||||
new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
|
||||
|
||||
static ALIGNED(alignof(ThreadArgRetval)) char
|
||||
thread_arg_retval_placeholder[sizeof(ThreadArgRetval)];
|
||||
thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval();
|
||||
}
|
||||
|
||||
ThreadArgRetval &GetThreadArgRetval() { return *thread_arg_retval; }
|
||||
|
||||
ThreadContextLsanBase::ThreadContextLsanBase(int tid)
|
||||
: ThreadContextBase(tid) {}
|
||||
|
||||
void ThreadContextLsanBase::OnStarted(void *arg) { SetCurrentThread(this); }
|
||||
void ThreadContextLsanBase::OnStarted(void *arg) {
|
||||
SetCurrentThread(this);
|
||||
AllocatorThreadStart();
|
||||
}
|
||||
|
||||
void ThreadContextLsanBase::OnFinished() {
|
||||
AllocatorThreadFinish();
|
||||
|
@ -72,9 +83,15 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
|
|||
InternalMmapVector<Range> *ranges) {}
|
||||
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
|
||||
|
||||
void LockThreadRegistry() { thread_registry->Lock(); }
|
||||
void LockThreads() {
|
||||
thread_registry->Lock();
|
||||
thread_arg_retval->Lock();
|
||||
}
|
||||
|
||||
void UnlockThreadRegistry() { thread_registry->Unlock(); }
|
||||
void UnlockThreads() {
|
||||
thread_arg_retval->Unlock();
|
||||
thread_registry->Unlock();
|
||||
}
|
||||
|
||||
ThreadRegistry *GetLsanThreadRegistryLocked() {
|
||||
thread_registry->CheckLocked();
|
||||
|
@ -92,4 +109,8 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
|
|||
threads);
|
||||
}
|
||||
|
||||
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
|
||||
GetThreadArgRetval().GetAllPtrsLocked(ptrs);
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#ifndef LSAN_THREAD_H
|
||||
#define LSAN_THREAD_H
|
||||
|
||||
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
|
||||
#include "sanitizer_common/sanitizer_thread_registry.h"
|
||||
|
||||
namespace __lsan {
|
||||
|
@ -43,10 +44,11 @@ class ThreadContextLsanBase : public ThreadContextBase {
|
|||
// This subclass of ThreadContextLsanBase is declared in an OS-specific header.
|
||||
class ThreadContext;
|
||||
|
||||
void InitializeThreadRegistry();
|
||||
void InitializeThreads();
|
||||
void InitializeMainThread();
|
||||
|
||||
ThreadRegistry *GetLsanThreadRegistryLocked();
|
||||
ThreadArgRetval &GetThreadArgRetval();
|
||||
|
||||
u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
|
||||
void ThreadFinish();
|
||||
|
|
|
@ -31,6 +31,7 @@ sanitizer_common_files = \
|
|||
sanitizer_coverage_libcdep_new.cpp \
|
||||
sanitizer_deadlock_detector1.cpp \
|
||||
sanitizer_deadlock_detector2.cpp \
|
||||
sanitizer_dl.cpp \
|
||||
sanitizer_errno.cpp \
|
||||
sanitizer_file.cpp \
|
||||
sanitizer_flags.cpp \
|
||||
|
@ -57,6 +58,7 @@ sanitizer_common_files = \
|
|||
sanitizer_procmaps_linux.cpp \
|
||||
sanitizer_procmaps_mac.cpp \
|
||||
sanitizer_procmaps_solaris.cpp \
|
||||
sanitizer_range.cpp \
|
||||
sanitizer_solaris.cpp \
|
||||
sanitizer_stack_store.cpp \
|
||||
sanitizer_stackdepot.cpp \
|
||||
|
@ -75,6 +77,7 @@ sanitizer_common_files = \
|
|||
sanitizer_symbolizer_posix_libcdep.cpp \
|
||||
sanitizer_symbolizer_win.cpp \
|
||||
sanitizer_termination.cpp \
|
||||
sanitizer_thread_arg_retval.cpp \
|
||||
sanitizer_thread_registry.cpp \
|
||||
sanitizer_tls_get_addr.cpp \
|
||||
sanitizer_unwind_linux_libcdep.cpp \
|
||||
|
|
|
@ -125,9 +125,10 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
|
|||
sanitizer_common.lo sanitizer_common_libcdep.lo \
|
||||
sanitizer_coverage_libcdep_new.lo \
|
||||
sanitizer_deadlock_detector1.lo \
|
||||
sanitizer_deadlock_detector2.lo sanitizer_errno.lo \
|
||||
sanitizer_file.lo sanitizer_flags.lo sanitizer_flag_parser.lo \
|
||||
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
|
||||
sanitizer_deadlock_detector2.lo sanitizer_dl.lo \
|
||||
sanitizer_errno.lo sanitizer_file.lo sanitizer_flags.lo \
|
||||
sanitizer_flag_parser.lo sanitizer_libc.lo \
|
||||
sanitizer_libignore.lo sanitizer_linux.lo \
|
||||
sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
|
||||
sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_mutex.lo \
|
||||
sanitizer_netbsd.lo sanitizer_platform_limits_freebsd.lo \
|
||||
|
@ -138,20 +139,21 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
|
|||
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
|
||||
sanitizer_procmaps_bsd.lo sanitizer_procmaps_common.lo \
|
||||
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
|
||||
sanitizer_procmaps_solaris.lo sanitizer_solaris.lo \
|
||||
sanitizer_stack_store.lo sanitizer_stackdepot.lo \
|
||||
sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
|
||||
sanitizer_stacktrace_sparc.lo sanitizer_symbolizer_mac.lo \
|
||||
sanitizer_symbolizer_report.lo sanitizer_stacktrace_printer.lo \
|
||||
sanitizer_procmaps_solaris.lo sanitizer_range.lo \
|
||||
sanitizer_solaris.lo sanitizer_stack_store.lo \
|
||||
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
|
||||
sanitizer_stacktrace_libcdep.lo sanitizer_stacktrace_sparc.lo \
|
||||
sanitizer_symbolizer_mac.lo sanitizer_symbolizer_report.lo \
|
||||
sanitizer_stacktrace_printer.lo \
|
||||
sanitizer_stoptheworld_linux_libcdep.lo \
|
||||
sanitizer_stoptheworld_mac.lo sanitizer_suppressions.lo \
|
||||
sanitizer_symbolizer.lo sanitizer_symbolizer_libbacktrace.lo \
|
||||
sanitizer_symbolizer_libcdep.lo \
|
||||
sanitizer_symbolizer_posix_libcdep.lo \
|
||||
sanitizer_symbolizer_win.lo sanitizer_termination.lo \
|
||||
sanitizer_thread_registry.lo sanitizer_tls_get_addr.lo \
|
||||
sanitizer_unwind_linux_libcdep.lo sanitizer_unwind_win.lo \
|
||||
sanitizer_win.lo
|
||||
sanitizer_thread_arg_retval.lo sanitizer_thread_registry.lo \
|
||||
sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
|
||||
sanitizer_unwind_win.lo sanitizer_win.lo
|
||||
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
|
||||
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
|
||||
AM_V_lt = $(am__v_lt_@AM_V@)
|
||||
|
@ -390,6 +392,7 @@ sanitizer_common_files = \
|
|||
sanitizer_coverage_libcdep_new.cpp \
|
||||
sanitizer_deadlock_detector1.cpp \
|
||||
sanitizer_deadlock_detector2.cpp \
|
||||
sanitizer_dl.cpp \
|
||||
sanitizer_errno.cpp \
|
||||
sanitizer_file.cpp \
|
||||
sanitizer_flags.cpp \
|
||||
|
@ -416,6 +419,7 @@ sanitizer_common_files = \
|
|||
sanitizer_procmaps_linux.cpp \
|
||||
sanitizer_procmaps_mac.cpp \
|
||||
sanitizer_procmaps_solaris.cpp \
|
||||
sanitizer_range.cpp \
|
||||
sanitizer_solaris.cpp \
|
||||
sanitizer_stack_store.cpp \
|
||||
sanitizer_stackdepot.cpp \
|
||||
|
@ -434,6 +438,7 @@ sanitizer_common_files = \
|
|||
sanitizer_symbolizer_posix_libcdep.cpp \
|
||||
sanitizer_symbolizer_win.cpp \
|
||||
sanitizer_termination.cpp \
|
||||
sanitizer_thread_arg_retval.cpp \
|
||||
sanitizer_thread_registry.cpp \
|
||||
sanitizer_tls_get_addr.cpp \
|
||||
sanitizer_unwind_linux_libcdep.cpp \
|
||||
|
@ -546,6 +551,7 @@ distclean-compile:
|
|||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep_new.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_dl.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_errno.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_file.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@
|
||||
|
@ -572,6 +578,7 @@ distclean-compile:
|
|||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_solaris.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_range.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_solaris.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stack_store.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
|
||||
|
@ -590,6 +597,7 @@ distclean-compile:
|
|||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_report.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_termination.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_arg_retval.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
|
||||
|
|
|
@ -138,14 +138,20 @@ void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
|
|||
|
||||
// LowLevelAllocator
|
||||
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
|
||||
constexpr uptr kMinNumPagesRounded = 16;
|
||||
constexpr uptr kMinRoundedSize = 65536;
|
||||
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
|
||||
static LowLevelAllocateCallback low_level_alloc_callback;
|
||||
|
||||
static LowLevelAllocator Alloc;
|
||||
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }
|
||||
|
||||
void *LowLevelAllocator::Allocate(uptr size) {
|
||||
// Align allocation size.
|
||||
size = RoundUpTo(size, low_level_alloc_min_alignment);
|
||||
if (allocated_end_ - allocated_current_ < (sptr)size) {
|
||||
uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
|
||||
uptr size_to_allocate = RoundUpTo(
|
||||
size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
|
||||
allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
|
||||
allocated_end_ = allocated_current_ + size_to_allocate;
|
||||
if (low_level_alloc_callback) {
|
||||
|
|
|
@ -62,6 +62,13 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
|
|||
*rand_state = state;
|
||||
}
|
||||
|
||||
struct NoOpMapUnmapCallback {
|
||||
void OnMap(uptr p, uptr size) const {}
|
||||
void OnMapSecondary(uptr p, uptr size, uptr user_begin,
|
||||
uptr user_size) const {}
|
||||
void OnUnmap(uptr p, uptr size) const {}
|
||||
};
|
||||
|
||||
#include "sanitizer_allocator_size_class_map.h"
|
||||
#include "sanitizer_allocator_stats.h"
|
||||
#include "sanitizer_allocator_primary64.h"
|
||||
|
|
|
@ -29,9 +29,9 @@ class CombinedAllocator {
|
|||
LargeMmapAllocatorPtrArray,
|
||||
typename PrimaryAllocator::AddressSpaceView>;
|
||||
|
||||
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
|
||||
stats_.InitLinkerInitialized();
|
||||
primary_.Init(release_to_os_interval_ms);
|
||||
void InitLinkerInitialized(s32 release_to_os_interval_ms,
|
||||
uptr heap_start = 0) {
|
||||
primary_.Init(release_to_os_interval_ms, heap_start);
|
||||
secondary_.InitLinkerInitialized();
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,8 @@ SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
|
|||
const void *p);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE uptr
|
||||
__sanitizer_get_allocated_size(const void *p);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE uptr
|
||||
__sanitizer_get_allocated_size_fast(const void *p);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
|
||||
|
|
|
@ -353,7 +353,7 @@ class SizeClassAllocator32 {
|
|||
DCHECK_GT(max_count, 0);
|
||||
TransferBatch *b = nullptr;
|
||||
constexpr uptr kShuffleArraySize = 48;
|
||||
uptr shuffle_array[kShuffleArraySize];
|
||||
UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
|
||||
uptr count = 0;
|
||||
for (uptr i = region; i < region + n_chunks * size; i += size) {
|
||||
shuffle_array[count++] = i;
|
||||
|
|
|
@ -635,8 +635,9 @@ class SizeClassAllocator64 {
|
|||
return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
|
||||
}
|
||||
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
|
||||
// kRegionSize must be >= 2^32.
|
||||
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
|
||||
// kRegionSize should be able to satisfy the largest size class.
|
||||
static_assert(kRegionSize >= SizeClassMap::kMaxSize,
|
||||
"Region size exceed largest size");
|
||||
// kRegionSize must be <= 2^36, see CompactPtrT.
|
||||
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
|
||||
// Call mmap for user memory with at least this size.
|
||||
|
|
|
@ -82,7 +82,7 @@ class LargeMmapAllocator {
|
|||
InitLinkerInitialized();
|
||||
}
|
||||
|
||||
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
|
||||
void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
|
||||
CHECK(IsPowerOfTwo(alignment));
|
||||
uptr map_size = RoundUpMapSize(size);
|
||||
if (alignment > page_size_)
|
||||
|
@ -99,11 +99,11 @@ class LargeMmapAllocator {
|
|||
if (!map_beg)
|
||||
return nullptr;
|
||||
CHECK(IsAligned(map_beg, page_size_));
|
||||
MapUnmapCallback().OnMap(map_beg, map_size);
|
||||
uptr map_end = map_beg + map_size;
|
||||
uptr res = map_beg + page_size_;
|
||||
if (res & (alignment - 1)) // Align.
|
||||
res += alignment - (res & (alignment - 1));
|
||||
MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
|
||||
CHECK(IsAligned(res, alignment));
|
||||
CHECK(IsAligned(res, page_size_));
|
||||
CHECK_GE(res + size, map_beg);
|
||||
|
|
|
@ -25,19 +25,13 @@ typedef uptr AllocatorStatCounters[AllocatorStatCount];
|
|||
// Per-thread stats, live in per-thread cache.
|
||||
class AllocatorStats {
|
||||
public:
|
||||
void Init() {
|
||||
internal_memset(this, 0, sizeof(*this));
|
||||
}
|
||||
void InitLinkerInitialized() {}
|
||||
|
||||
void Init() { internal_memset(this, 0, sizeof(*this)); }
|
||||
void Add(AllocatorStat i, uptr v) {
|
||||
v += atomic_load(&stats_[i], memory_order_relaxed);
|
||||
atomic_store(&stats_[i], v, memory_order_relaxed);
|
||||
atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void Sub(AllocatorStat i, uptr v) {
|
||||
v = atomic_load(&stats_[i], memory_order_relaxed) - v;
|
||||
atomic_store(&stats_[i], v, memory_order_relaxed);
|
||||
atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void Set(AllocatorStat i, uptr v) {
|
||||
|
@ -58,17 +52,13 @@ class AllocatorStats {
|
|||
// Global stats, used for aggregation and querying.
|
||||
class AllocatorGlobalStats : public AllocatorStats {
|
||||
public:
|
||||
void InitLinkerInitialized() {
|
||||
next_ = this;
|
||||
prev_ = this;
|
||||
}
|
||||
void Init() {
|
||||
internal_memset(this, 0, sizeof(*this));
|
||||
InitLinkerInitialized();
|
||||
}
|
||||
|
||||
void Register(AllocatorStats *s) {
|
||||
SpinMutexLock l(&mu_);
|
||||
LazyInit();
|
||||
s->next_ = next_;
|
||||
s->prev_ = this;
|
||||
next_->prev_ = s;
|
||||
|
@ -87,7 +77,7 @@ class AllocatorGlobalStats : public AllocatorStats {
|
|||
internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
|
||||
SpinMutexLock l(&mu_);
|
||||
const AllocatorStats *stats = this;
|
||||
for (;;) {
|
||||
for (; stats;) {
|
||||
for (int i = 0; i < AllocatorStatCount; i++)
|
||||
s[i] += stats->Get(AllocatorStat(i));
|
||||
stats = stats->next_;
|
||||
|
@ -100,6 +90,13 @@ class AllocatorGlobalStats : public AllocatorStats {
|
|||
}
|
||||
|
||||
private:
|
||||
void LazyInit() {
|
||||
if (!next_) {
|
||||
next_ = this;
|
||||
prev_ = this;
|
||||
}
|
||||
}
|
||||
|
||||
mutable StaticSpinMutex mu_;
|
||||
};
|
||||
|
||||
|
|
123
libsanitizer/sanitizer_common/sanitizer_array_ref.h
Normal file
123
libsanitizer/sanitizer_common/sanitizer_array_ref.h
Normal file
|
@ -0,0 +1,123 @@
|
|||
//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SANITIZER_ARRAY_REF_H
|
||||
#define SANITIZER_ARRAY_REF_H
|
||||
|
||||
#include "sanitizer_internal_defs.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
/// ArrayRef - Represent a constant reference to an array (0 or more elements
|
||||
/// consecutively in memory), i.e. a start pointer and a length. It allows
|
||||
/// various APIs to take consecutive elements easily and conveniently.
|
||||
///
|
||||
/// This class does not own the underlying data, it is expected to be used in
|
||||
/// situations where the data resides in some other buffer, whose lifetime
|
||||
/// extends past that of the ArrayRef. For this reason, it is not in general
|
||||
/// safe to store an ArrayRef.
|
||||
///
|
||||
/// This is intended to be trivially copyable, so it should be passed by
|
||||
/// value.
|
||||
template <typename T>
|
||||
class ArrayRef {
|
||||
public:
|
||||
constexpr ArrayRef() {}
|
||||
constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
|
||||
DCHECK(empty() || begin);
|
||||
}
|
||||
constexpr ArrayRef(const T *data, uptr length)
|
||||
: ArrayRef(data, data + length) {}
|
||||
template <uptr N>
|
||||
constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
|
||||
template <typename C>
|
||||
constexpr ArrayRef(const C &src)
|
||||
: ArrayRef(src.data(), src.data() + src.size()) {}
|
||||
ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}
|
||||
|
||||
const T *data() const { return empty() ? nullptr : begin_; }
|
||||
|
||||
const T *begin() const { return begin_; }
|
||||
const T *end() const { return end_; }
|
||||
|
||||
bool empty() const { return begin_ == end_; }
|
||||
|
||||
uptr size() const { return end_ - begin_; }
|
||||
|
||||
/// equals - Check for element-wise equality.
|
||||
bool equals(ArrayRef rhs) const {
|
||||
if (size() != rhs.size())
|
||||
return false;
|
||||
auto r = rhs.begin();
|
||||
for (auto &l : *this) {
|
||||
if (!(l == *r))
|
||||
return false;
|
||||
++r;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/// slice(n, m) - Chop off the first N elements of the array, and keep M
|
||||
/// elements in the array.
|
||||
ArrayRef<T> slice(uptr N, uptr M) const {
|
||||
DCHECK_LE(N + M, size());
|
||||
return ArrayRef<T>(data() + N, M);
|
||||
}
|
||||
|
||||
/// slice(n) - Chop off the first N elements of the array.
|
||||
ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }
|
||||
|
||||
/// Drop the first \p N elements of the array.
|
||||
ArrayRef<T> drop_front(uptr N = 1) const {
|
||||
DCHECK_GE(size(), N);
|
||||
return slice(N, size() - N);
|
||||
}
|
||||
|
||||
/// Drop the last \p N elements of the array.
|
||||
ArrayRef<T> drop_back(uptr N = 1) const {
|
||||
DCHECK_GE(size(), N);
|
||||
return slice(0, size() - N);
|
||||
}
|
||||
|
||||
/// Return a copy of *this with only the first \p N elements.
|
||||
ArrayRef<T> take_front(uptr N = 1) const {
|
||||
if (N >= size())
|
||||
return *this;
|
||||
return drop_back(size() - N);
|
||||
}
|
||||
|
||||
/// Return a copy of *this with only the last \p N elements.
|
||||
ArrayRef<T> take_back(uptr N = 1) const {
|
||||
if (N >= size())
|
||||
return *this;
|
||||
return drop_front(size() - N);
|
||||
}
|
||||
|
||||
const T &operator[](uptr index) const {
|
||||
DCHECK_LT(index, size());
|
||||
return begin_[index];
|
||||
}
|
||||
|
||||
private:
|
||||
const T *begin_ = nullptr;
|
||||
const T *end_ = nullptr;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
|
||||
return lhs.equals(rhs);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_ARRAY_REF_H
|
|
@ -42,13 +42,57 @@
|
|||
# define CFI_RESTORE(reg)
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
|
||||
# define ASM_TAIL_CALL jmp
|
||||
#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
|
||||
defined(__powerpc__) || defined(__loongarch_lp64)
|
||||
# define ASM_TAIL_CALL b
|
||||
#elif defined(__s390__)
|
||||
# define ASM_TAIL_CALL jg
|
||||
#elif defined(__riscv)
|
||||
# define ASM_TAIL_CALL tail
|
||||
#endif
|
||||
|
||||
#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
|
||||
defined(__riscv)
|
||||
# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
|
||||
#else
|
||||
# define ASM_PREEMPTIBLE_SYM(sym) sym
|
||||
#endif
|
||||
|
||||
#if !defined(__APPLE__)
|
||||
# define ASM_HIDDEN(symbol) .hidden symbol
|
||||
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
|
||||
# define ASM_SIZE(symbol) .size symbol, .-symbol
|
||||
# define ASM_SYMBOL(symbol) symbol
|
||||
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
|
||||
# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
|
||||
# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
|
||||
defined(__sparc__)
|
||||
// For details, see interception.h
|
||||
# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
|
||||
# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
|
||||
.weak symbol; \
|
||||
.set symbol, ASM_WRAPPER_NAME(name)
|
||||
# define ASM_INTERCEPTOR_TRAMPOLINE(name)
|
||||
# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
|
||||
# else // Architecture supports interceptor trampoline
|
||||
// Keep trampoline implementation in sync with interception/interception.h
|
||||
# define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
|
||||
# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
|
||||
.weak symbol; \
|
||||
.set symbol, __interceptor_trampoline_##name
|
||||
# define ASM_INTERCEPTOR_TRAMPOLINE(name) \
|
||||
.weak __interceptor_##name; \
|
||||
.set __interceptor_##name, ASM_WRAPPER_NAME(name); \
|
||||
.globl __interceptor_trampoline_##name; \
|
||||
ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
|
||||
__interceptor_trampoline_##name: \
|
||||
CFI_STARTPROC; \
|
||||
ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
|
||||
CFI_ENDPROC; \
|
||||
ASM_SIZE(__interceptor_trampoline_##name)
|
||||
# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
|
||||
# endif // Architecture supports interceptor trampoline
|
||||
#else
|
||||
# define ASM_HIDDEN(symbol)
|
||||
# define ASM_TYPE_FUNCTION(symbol)
|
||||
|
|
|
@ -115,8 +115,9 @@ void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
|
|||
if (!common_flags()->print_summary)
|
||||
return;
|
||||
InternalScopedString buff;
|
||||
buff.append("SUMMARY: %s: %s",
|
||||
alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
|
||||
buff.AppendF("SUMMARY: %s: %s",
|
||||
alt_tool_name ? alt_tool_name : SanitizerToolName,
|
||||
error_message);
|
||||
__sanitizer_report_error_summary(buff.data());
|
||||
}
|
||||
|
||||
|
|
|
@ -117,6 +117,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
|||
// unaccessible memory.
|
||||
bool MprotectNoAccess(uptr addr, uptr size);
|
||||
bool MprotectReadOnly(uptr addr, uptr size);
|
||||
bool MprotectReadWrite(uptr addr, uptr size);
|
||||
|
||||
void MprotectMallocZones(void *addr, int prot);
|
||||
|
||||
|
@ -207,6 +208,11 @@ void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
|
|||
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
|
||||
// constructor, so all instances of LowLevelAllocator should be
|
||||
// linker initialized.
|
||||
//
|
||||
// NOTE: Users should instead use the singleton provided via
|
||||
// `GetGlobalLowLevelAllocator()` rather than create a new one. This way, the
|
||||
// number of mmap fragments can be reduced and use the same contiguous mmap
|
||||
// provided by this singleton.
|
||||
class LowLevelAllocator {
|
||||
public:
|
||||
// Requires an external lock.
|
||||
|
@ -223,6 +229,8 @@ typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
|
|||
// Passing NULL removes the callback.
|
||||
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
|
||||
|
||||
LowLevelAllocator &GetGlobalLowLevelAllocator();
|
||||
|
||||
// IO
|
||||
void CatastrophicErrorWrite(const char *buffer, uptr length);
|
||||
void RawWrite(const char *buffer);
|
||||
|
@ -519,8 +527,8 @@ class InternalMmapVectorNoCtor {
|
|||
return data_[i];
|
||||
}
|
||||
void push_back(const T &element) {
|
||||
CHECK_LE(size_, capacity());
|
||||
if (size_ == capacity()) {
|
||||
if (UNLIKELY(size_ >= capacity())) {
|
||||
CHECK_EQ(size_, capacity());
|
||||
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
|
||||
Realloc(new_capacity);
|
||||
}
|
||||
|
@ -580,7 +588,7 @@ class InternalMmapVectorNoCtor {
|
|||
}
|
||||
|
||||
private:
|
||||
void Realloc(uptr new_capacity) {
|
||||
NOINLINE void Realloc(uptr new_capacity) {
|
||||
CHECK_GT(new_capacity, 0);
|
||||
CHECK_LE(size_, new_capacity);
|
||||
uptr new_capacity_bytes =
|
||||
|
@ -635,7 +643,8 @@ class InternalScopedString {
|
|||
buffer_.resize(1);
|
||||
buffer_[0] = '\0';
|
||||
}
|
||||
void append(const char *format, ...) FORMAT(2, 3);
|
||||
void Append(const char *str);
|
||||
void AppendF(const char *format, ...) FORMAT(2, 3);
|
||||
const char *data() const { return buffer_.data(); }
|
||||
char *data() { return buffer_.data(); }
|
||||
|
||||
|
@ -796,7 +805,11 @@ inline const char *ModuleArchToString(ModuleArch arch) {
|
|||
return "";
|
||||
}
|
||||
|
||||
#if SANITIZER_APPLE
|
||||
const uptr kModuleUUIDSize = 16;
|
||||
#else
|
||||
const uptr kModuleUUIDSize = 32;
|
||||
#endif
|
||||
const uptr kMaxSegName = 16;
|
||||
|
||||
// Represents a binary loaded into virtual memory (e.g. this can be an
|
||||
|
@ -1079,20 +1092,6 @@ inline u32 GetNumberOfCPUsCached() {
|
|||
return NumberOfCPUsCached;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
class ArrayRef {
|
||||
public:
|
||||
ArrayRef() {}
|
||||
ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
|
||||
|
||||
T *begin() { return begin_; }
|
||||
T *end() { return end_; }
|
||||
|
||||
private:
|
||||
T *begin_ = nullptr;
|
||||
T *end_ = nullptr;
|
||||
};
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
inline void *operator new(__sanitizer::operator_new_size_type size,
|
||||
|
|
|
@ -26,25 +26,24 @@
|
|||
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
|
||||
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
|
||||
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
|
||||
// COMMON_INTERCEPTOR_MEMSET_IMPL
|
||||
// COMMON_INTERCEPTOR_MEMMOVE_IMPL
|
||||
// COMMON_INTERCEPTOR_MEMCPY_IMPL
|
||||
// COMMON_INTERCEPTOR_MMAP_IMPL
|
||||
// COMMON_INTERCEPTOR_MUNMAP_IMPL
|
||||
// COMMON_INTERCEPTOR_COPY_STRING
|
||||
// COMMON_INTERCEPTOR_STRNDUP_IMPL
|
||||
// COMMON_INTERCEPTOR_STRERROR
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_addrhashmap.h"
|
||||
#include "sanitizer_dl.h"
|
||||
#include "sanitizer_errno.h"
|
||||
#include "sanitizer_placement_new.h"
|
||||
#include "sanitizer_platform_interceptors.h"
|
||||
#include "sanitizer_symbolizer.h"
|
||||
#include "sanitizer_tls_get_addr.h"
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
#if SANITIZER_INTERCEPTOR_HOOKS
|
||||
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
|
||||
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
|
||||
|
@ -198,15 +197,6 @@ extern const short *_tolower_tab_;
|
|||
#define wait4 __wait4_time64
|
||||
#endif
|
||||
|
||||
// Platform-specific options.
|
||||
#if SANITIZER_APPLE
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
|
||||
#elif SANITIZER_WINDOWS64
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
|
||||
#else
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
|
||||
#endif // SANITIZER_APPLE
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
|
||||
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
|
||||
#endif
|
||||
|
@ -302,53 +292,17 @@ extern const short *_tolower_tab_;
|
|||
COMMON_INTERCEPT_FUNCTION(fn)
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
|
||||
return internal_memset(dst, v, size); \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
|
||||
if (common_flags()->intercept_intrin) \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
return REAL(memset)(dst, v, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
|
||||
return internal_memmove(dst, src, size); \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
|
||||
if (common_flags()->intercept_intrin) { \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
|
||||
} \
|
||||
return REAL(memmove)(dst, src, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
|
||||
return internal_memmove(dst, src, size); \
|
||||
} \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
|
||||
if (common_flags()->intercept_intrin) { \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
|
||||
} \
|
||||
return REAL(memcpy)(dst, src, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
|
||||
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
|
||||
off) \
|
||||
{ return REAL(mmap)(addr, sz, prot, flags, fd, off); }
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MUNMAP_IMPL
|
||||
#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
|
||||
{ return REAL(munmap)(addr, sz); }
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_COPY_STRING
|
||||
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
|
||||
#endif
|
||||
|
@ -492,11 +446,13 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
|
|||
#define INIT_TEXTDOMAIN
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRCMP
|
||||
#if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP
|
||||
static inline int CharCmpX(unsigned char c1, unsigned char c2) {
|
||||
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRCMP
|
||||
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
|
||||
const char *s1, const char *s2, int result)
|
||||
|
||||
|
@ -841,57 +797,6 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
|
|||
#define INIT_STRPBRK
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMSET
|
||||
INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
|
||||
}
|
||||
|
||||
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
|
||||
#else
|
||||
#define INIT_MEMSET
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMMOVE
|
||||
INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
|
||||
}
|
||||
|
||||
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
|
||||
#else
|
||||
#define INIT_MEMMOVE
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMCPY
|
||||
INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
|
||||
// On OS X, calling internal_memcpy here will cause memory corruptions,
|
||||
// because memcpy and memmove are actually aliases of the same
|
||||
// implementation. We need to use internal_memmove here.
|
||||
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
|
||||
// due to memcpy being an alias of memmove on OS X.
|
||||
void *ctx;
|
||||
#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
|
||||
#else
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
|
||||
#endif
|
||||
}
|
||||
|
||||
#define INIT_MEMCPY \
|
||||
do { \
|
||||
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
|
||||
COMMON_INTERCEPT_FUNCTION(memcpy); \
|
||||
} else { \
|
||||
ASSIGN_REAL(memcpy, memmove); \
|
||||
} \
|
||||
CHECK(REAL(memcpy)); \
|
||||
} while (false)
|
||||
|
||||
#else
|
||||
#define INIT_MEMCPY
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMCMP
|
||||
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
|
||||
const void *s1, const void *s2, uptr n,
|
||||
|
@ -1589,6 +1494,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
|
|||
|
||||
INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
|
||||
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap)
|
||||
VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format,
|
||||
va_list ap)
|
||||
VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap)
|
||||
VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap)
|
||||
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
|
||||
|
||||
INTERCEPTOR(int, scanf, const char *format, ...)
|
||||
|
@ -1609,6 +1524,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
|
|||
|
||||
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
|
||||
FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_scanf, const char *format, ...)
|
||||
FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...)
|
||||
FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format)
|
||||
|
||||
INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...)
|
||||
FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -1632,7 +1556,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
|
|||
COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf);
|
||||
#else
|
||||
#define INIT_ISOC99_SCANF
|
||||
#endif
|
||||
|
@ -3416,7 +3346,8 @@ INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
|
|||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
__sanitizer_dirent *res = REAL(readdir)(dirp);
|
||||
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
|
||||
if (res)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -3431,7 +3362,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
|
|||
if (!res) {
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
|
||||
if (*result)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -3452,7 +3383,8 @@ INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
|
|||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
|
||||
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
|
||||
if (res)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -3467,7 +3399,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
|
|||
if (!res) {
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
|
||||
if (*result)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -3635,30 +3567,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
|
|||
(real_endptr - nptr) + 1 : 0);
|
||||
}
|
||||
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRTOIMAX
|
||||
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
template <typename Fn>
|
||||
static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr,
|
||||
char **endptr, int base)
|
||||
-> decltype(real(nullptr, nullptr, 0)) {
|
||||
char *real_endptr;
|
||||
INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
|
||||
auto res = real(nptr, &real_endptr, base);
|
||||
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
|
||||
return res;
|
||||
}
|
||||
|
||||
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
|
||||
return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base);
|
||||
}
|
||||
INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
char *real_endptr;
|
||||
UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
|
||||
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
|
||||
return res;
|
||||
return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base);
|
||||
}
|
||||
|
||||
#define INIT_STRTOIMAX \
|
||||
|
@ -3668,6 +3596,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
|
|||
#define INIT_STRTOIMAX
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC
|
||||
INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base);
|
||||
return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base);
|
||||
}
|
||||
INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base);
|
||||
return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base);
|
||||
}
|
||||
|
||||
# define INIT_STRTOIMAX_C23 \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \
|
||||
COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax);
|
||||
#else
|
||||
# define INIT_STRTOIMAX_C23
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MBSTOWCS
|
||||
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
|
||||
void *ctx;
|
||||
|
@ -4039,7 +3986,7 @@ static THREADLOCAL scandir_compar_f scandir_compar;
|
|||
|
||||
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
|
||||
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
|
||||
return scandir_filter(dir);
|
||||
}
|
||||
|
||||
|
@ -4047,9 +3994,9 @@ static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
|
|||
const struct __sanitizer_dirent **b) {
|
||||
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
|
||||
return scandir_compar(a, b);
|
||||
}
|
||||
|
||||
|
@ -4073,7 +4020,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
|
|||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
|
||||
for (int i = 0; i < res; ++i)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
|
||||
(*namelist)[i]->d_reclen);
|
||||
__sanitizer_dirsiz((*namelist)[i]));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -4092,7 +4039,7 @@ static THREADLOCAL scandir64_compar_f scandir64_compar;
|
|||
|
||||
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
|
||||
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
|
||||
return scandir64_filter(dir);
|
||||
}
|
||||
|
||||
|
@ -4100,9 +4047,9 @@ static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
|
|||
const struct __sanitizer_dirent64 **b) {
|
||||
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
|
||||
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
|
||||
return scandir64_compar(a, b);
|
||||
}
|
||||
|
||||
|
@ -4127,7 +4074,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
|
|||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
|
||||
for (int i = 0; i < res; ++i)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
|
||||
(*namelist)[i]->d_reclen);
|
||||
__sanitizer_dirsiz((*namelist)[i]));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -4404,12 +4351,16 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
|
|||
INTERCEPTOR(int, backtrace, void **buffer, int size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
int res = REAL(backtrace)(buffer, size);
|
||||
if (res && buffer)
|
||||
// 'buffer' might be freed memory, hence it is unsafe to directly call
|
||||
// REAL(backtrace)(buffer, size). Instead, we use our own known-good
|
||||
// scratch buffer.
|
||||
void **scratch = (void**)InternalAlloc(sizeof(void*) * size);
|
||||
int res = REAL(backtrace)(scratch, size);
|
||||
if (res && buffer) {
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
|
||||
internal_memcpy(buffer, scratch, res * sizeof(*buffer));
|
||||
}
|
||||
InternalFree(scratch);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -4418,9 +4369,8 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
|
|||
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
|
||||
if (buffer && size)
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
// The COMMON_INTERCEPTOR_READ_RANGE above ensures that 'buffer' is
|
||||
// valid for reading.
|
||||
char **res = REAL(backtrace_symbols)(buffer, size);
|
||||
if (res && size) {
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
|
||||
|
@ -4453,7 +4403,7 @@ INTERCEPTOR(void, _exit, int status) {
|
|||
|
||||
#if SANITIZER_INTERCEPT___LIBC_MUTEX
|
||||
INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
|
||||
ALIAS(WRAPPER_NAME(pthread_setcancelstate));
|
||||
ALIAS(WRAP(pthread_setcancelstate));
|
||||
|
||||
#define INIT___LIBC_THR_SETCANCELSTATE \
|
||||
COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
|
||||
|
@ -5484,9 +5434,7 @@ INTERCEPTOR(void *, __tls_get_addr, void *arg) {
|
|||
// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
|
||||
// mostly the same semantics as __tls_get_addr, but its presence enables
|
||||
// some optimizations in linker (which are safe to ignore here).
|
||||
extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
|
||||
visibility("default")))
|
||||
void *__tls_get_addr_opt(void *arg);
|
||||
INTERCEPTOR(void *, __tls_get_addr_opt, void *arg) ALIAS(WRAP(__tls_get_addr));
|
||||
#endif
|
||||
#else // SANITIZER_S390
|
||||
// On s390, we have to intercept two functions here:
|
||||
|
@ -5520,21 +5468,20 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
|
|||
|
||||
#if SANITIZER_S390 && \
|
||||
(SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
|
||||
extern "C" uptr __tls_get_offset(void *arg);
|
||||
extern "C" uptr __interceptor___tls_get_offset(void *arg);
|
||||
// We need a hidden symbol aliasing the above, so that we can jump
|
||||
// directly to it from the assembly below.
|
||||
extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
|
||||
visibility("hidden")))
|
||||
uptr __tls_get_addr_hidden(void *arg);
|
||||
extern "C" __attribute__((visibility("hidden"))) uptr __tls_get_addr_hidden(
|
||||
void *arg) ALIAS(WRAP(__tls_get_addr_internal));
|
||||
extern "C" uptr __tls_get_offset(void *arg);
|
||||
extern "C" uptr TRAMPOLINE(__tls_get_offset)(void *arg);
|
||||
extern "C" uptr WRAP(__tls_get_offset)(void *arg);
|
||||
// Now carefully intercept __tls_get_offset.
|
||||
asm(
|
||||
".text\n"
|
||||
// The __intercept_ version has to exist, so that gen_dynamic_list.py
|
||||
// exports our symbol.
|
||||
".weak __tls_get_offset\n"
|
||||
".type __tls_get_offset, @function\n"
|
||||
"__tls_get_offset:\n"
|
||||
".set __tls_get_offset, __interceptor___tls_get_offset\n"
|
||||
".global __interceptor___tls_get_offset\n"
|
||||
".type __interceptor___tls_get_offset, @function\n"
|
||||
"__interceptor___tls_get_offset:\n"
|
||||
|
@ -5790,105 +5737,6 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
|
|||
#define INIT_CAPGET
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_AEABI_MEM
|
||||
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
// Note the argument order.
|
||||
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
#define INIT_AEABI_MEM \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
|
||||
#else
|
||||
#define INIT_AEABI_MEM
|
||||
#endif // SANITIZER_INTERCEPT_AEABI_MEM
|
||||
|
||||
#if SANITIZER_INTERCEPT___BZERO
|
||||
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
|
||||
#else
|
||||
#define INIT___BZERO
|
||||
#endif // SANITIZER_INTERCEPT___BZERO
|
||||
|
||||
#if SANITIZER_INTERCEPT_BZERO
|
||||
INTERCEPTOR(void *, bzero, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
|
||||
#else
|
||||
#define INIT_BZERO
|
||||
#endif // SANITIZER_INTERCEPT_BZERO
|
||||
|
||||
#if SANITIZER_INTERCEPT_FTIME
|
||||
INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
|
||||
void *ctx;
|
||||
|
@ -6460,7 +6308,36 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
|
|||
INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
|
||||
if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
|
||||
|
||||
if (filename) {
|
||||
COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
|
||||
|
||||
# if !SANITIZER_DYNAMIC
|
||||
// We care about a very specific use-case: dladdr on
|
||||
// statically-linked ASan may return <main program>
|
||||
// instead of the library.
|
||||
// We therefore only take effect if the sanitizer is statically
|
||||
// linked, and we don't bother canonicalizing paths because
|
||||
// dladdr should return the same address both times (we assume
|
||||
// the user did not canonicalize the result from dladdr).
|
||||
if (common_flags()->test_only_replace_dlopen_main_program) {
|
||||
VPrintf(1, "dlopen interceptor: filename: %s\n", filename);
|
||||
|
||||
const char *SelfFName = DladdrSelfFName();
|
||||
VPrintf(1, "dlopen interceptor: DladdrSelfFName: %p %s\n",
|
||||
(void *)SelfFName, SelfFName);
|
||||
|
||||
if (internal_strcmp(SelfFName, filename) == 0) {
|
||||
// It's possible they copied the string from dladdr, so
|
||||
// we do a string comparison rather than pointer comparison.
|
||||
VPrintf(1, "dlopen interceptor: replacing %s because it matches %s\n",
|
||||
filename, SelfFName);
|
||||
filename = (char *)0; // RTLD_DEFAULT
|
||||
}
|
||||
}
|
||||
# endif // !SANITIZER_DYNAMIC
|
||||
}
|
||||
|
||||
void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
|
||||
Symbolizer::GetOrInit()->InvalidateModuleList();
|
||||
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
|
||||
|
@ -7173,6 +7050,7 @@ INTERCEPTOR(int, mprobe, void *ptr) {
|
|||
}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_WCSLEN
|
||||
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
|
||||
|
@ -7191,6 +7069,9 @@ INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
|
|||
#define INIT_WCSLEN \
|
||||
COMMON_INTERCEPT_FUNCTION(wcslen); \
|
||||
COMMON_INTERCEPT_FUNCTION(wcsnlen);
|
||||
#else
|
||||
#define INIT_WCSLEN
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_WCSCAT
|
||||
INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
|
||||
|
@ -7599,6 +7480,14 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
|
|||
COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, munmap, void *addr, SIZE_T sz) {
|
||||
void *ctx;
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
|
||||
return (int)internal_munmap(addr, sz);
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, munmap, addr, sz);
|
||||
COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz);
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
|
||||
void *ctx;
|
||||
if (common_flags()->detect_write_exec)
|
||||
|
@ -7611,6 +7500,7 @@ INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
|
|||
}
|
||||
#define INIT_MMAP \
|
||||
COMMON_INTERCEPT_FUNCTION(mmap); \
|
||||
COMMON_INTERCEPT_FUNCTION(munmap); \
|
||||
COMMON_INTERCEPT_FUNCTION(mprotect);
|
||||
#else
|
||||
#define INIT_MMAP
|
||||
|
@ -10355,14 +10245,33 @@ INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
|
|||
#define INIT_ARGP_PARSE
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_CPUSET_GETAFFINITY
|
||||
INTERCEPTOR(int, cpuset_getaffinity, int level, int which, __int64_t id, SIZE_T cpusetsize, __sanitizer_cpuset_t *mask) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, cpuset_getaffinity, level, which, id, cpusetsize, mask);
|
||||
int res = REAL(cpuset_getaffinity)(level, which, id, cpusetsize, mask);
|
||||
if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
|
||||
return res;
|
||||
}
|
||||
#define INIT_CPUSET_GETAFFINITY COMMON_INTERCEPT_FUNCTION(cpuset_getaffinity);
|
||||
#else
|
||||
#define INIT_CPUSET_GETAFFINITY
|
||||
#endif
|
||||
|
||||
#include "sanitizer_common_interceptors_netbsd_compat.inc"
|
||||
|
||||
namespace __sanitizer {
|
||||
void InitializeMemintrinsicInterceptors();
|
||||
} // namespace __sanitizer
|
||||
|
||||
static void InitializeCommonInterceptors() {
|
||||
#if SI_POSIX
|
||||
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
|
||||
interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();
|
||||
#endif
|
||||
|
||||
__sanitizer::InitializeMemintrinsicInterceptors();
|
||||
|
||||
INIT_MMAP;
|
||||
INIT_MMAP64;
|
||||
INIT_TEXTDOMAIN;
|
||||
|
@ -10384,9 +10293,6 @@ static void InitializeCommonInterceptors() {
|
|||
INIT_STRPBRK;
|
||||
INIT_STRXFRM;
|
||||
INIT___STRXFRM_L;
|
||||
INIT_MEMSET;
|
||||
INIT_MEMMOVE;
|
||||
INIT_MEMCPY;
|
||||
INIT_MEMCHR;
|
||||
INIT_MEMCMP;
|
||||
INIT_BCMP;
|
||||
|
@ -10470,6 +10376,7 @@ static void InitializeCommonInterceptors() {
|
|||
INIT_GETCWD;
|
||||
INIT_GET_CURRENT_DIR_NAME;
|
||||
INIT_STRTOIMAX;
|
||||
INIT_STRTOIMAX_C23;
|
||||
INIT_MBSTOWCS;
|
||||
INIT_MBSNRTOWCS;
|
||||
INIT_WCSTOMBS;
|
||||
|
@ -10558,9 +10465,6 @@ static void InitializeCommonInterceptors() {
|
|||
INIT_GETIFADDRS;
|
||||
INIT_IF_INDEXTONAME;
|
||||
INIT_CAPGET;
|
||||
INIT_AEABI_MEM;
|
||||
INIT___BZERO;
|
||||
INIT_BZERO;
|
||||
INIT_FTIME;
|
||||
INIT_XDR;
|
||||
INIT_XDRREC_LINUX;
|
||||
|
@ -10673,6 +10577,7 @@ static void InitializeCommonInterceptors() {
|
|||
INIT___XUNAME;
|
||||
INIT_HEXDUMP;
|
||||
INIT_ARGP_PARSE;
|
||||
INIT_CPUSET_GETAFFINITY;
|
||||
|
||||
INIT___PRINTF_CHK;
|
||||
}
|
||||
|
|
|
@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
|
|||
size = 0;
|
||||
}
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
|
||||
// For %ms/%mc, write the allocated output buffer as well.
|
||||
// For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
|
||||
if (dir.allocate) {
|
||||
char *buf = *(char **)argp;
|
||||
if (buf)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
|
||||
if (char *buf = *(char **)argp) {
|
||||
if (dir.convSpecifier == 'c')
|
||||
size = 1;
|
||||
else if (dir.convSpecifier == 'C')
|
||||
size = sizeof(wchar_t);
|
||||
else if (dir.convSpecifier == 'S')
|
||||
size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
|
||||
else // 's' or '['
|
||||
size = internal_strlen(buf) + 1;
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -539,24 +547,25 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
|
|||
continue;
|
||||
} else if (size == FSS_STRLEN) {
|
||||
if (void *argp = va_arg(aq, void *)) {
|
||||
uptr len;
|
||||
if (dir.starredPrecision) {
|
||||
// FIXME: properly support starred precision for strings.
|
||||
size = 0;
|
||||
len = 0;
|
||||
} else if (dir.fieldPrecision > 0) {
|
||||
// Won't read more than "precision" symbols.
|
||||
size = internal_strnlen((const char *)argp, dir.fieldPrecision);
|
||||
if (size < dir.fieldPrecision) size++;
|
||||
len = internal_strnlen((const char *)argp, dir.fieldPrecision);
|
||||
if (len < (uptr)dir.fieldPrecision)
|
||||
len++;
|
||||
} else {
|
||||
// Whole string will be accessed.
|
||||
size = internal_strlen((const char *)argp) + 1;
|
||||
len = internal_strlen((const char *)argp) + 1;
|
||||
}
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, len);
|
||||
}
|
||||
} else if (size == FSS_WCSLEN) {
|
||||
if (void *argp = va_arg(aq, void *)) {
|
||||
// FIXME: Properly support wide-character strings (via wcsrtombs).
|
||||
size = 0;
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, 0);
|
||||
}
|
||||
} else {
|
||||
// Skip non-pointer args
|
||||
|
|
|
@ -0,0 +1,244 @@
|
|||
//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Memintrinsic function interceptors for tools like AddressSanitizer,
|
||||
// ThreadSanitizer, MemorySanitizer, etc.
|
||||
//
|
||||
// These interceptors are part of the common interceptors, but separated out so
|
||||
// that implementations may add them, if necessary, to a separate source file
|
||||
// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
|
||||
//
|
||||
// This file should be included into the tool's memintrinsic interceptor file,
|
||||
// which has to define its own macros:
|
||||
// COMMON_INTERCEPTOR_ENTER
|
||||
// COMMON_INTERCEPTOR_READ_RANGE
|
||||
// COMMON_INTERCEPTOR_WRITE_RANGE
|
||||
// COMMON_INTERCEPTOR_MEMSET_IMPL
|
||||
// COMMON_INTERCEPTOR_MEMMOVE_IMPL
|
||||
// COMMON_INTERCEPTOR_MEMCPY_IMPL
|
||||
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifdef SANITIZER_REDEFINE_BUILTINS_H
|
||||
#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
|
||||
#endif
|
||||
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_platform_interceptors.h"
|
||||
|
||||
// Platform-specific options.
|
||||
#if SANITIZER_APPLE
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
|
||||
#elif SANITIZER_WINDOWS64
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
|
||||
#else
|
||||
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
|
||||
#endif // SANITIZER_APPLE
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
|
||||
return internal_memset(dst, v, size); \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
|
||||
if (common_flags()->intercept_intrin) \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
return REAL(memset)(dst, v, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
|
||||
return internal_memmove(dst, src, size); \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
|
||||
if (common_flags()->intercept_intrin) { \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
|
||||
} \
|
||||
return REAL(memmove)(dst, src, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
|
||||
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
|
||||
{ \
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
|
||||
return internal_memmove(dst, src, size); \
|
||||
} \
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
|
||||
if (common_flags()->intercept_intrin) { \
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
|
||||
} \
|
||||
return REAL(memcpy)(dst, src, size); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMSET
|
||||
INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
|
||||
}
|
||||
|
||||
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
|
||||
#else
|
||||
#define INIT_MEMSET
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMMOVE
|
||||
INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
|
||||
}
|
||||
|
||||
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
|
||||
#else
|
||||
#define INIT_MEMMOVE
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMCPY
|
||||
INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
|
||||
// On OS X, calling internal_memcpy here will cause memory corruptions,
|
||||
// because memcpy and memmove are actually aliases of the same
|
||||
// implementation. We need to use internal_memmove here.
|
||||
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
|
||||
// due to memcpy being an alias of memmove on OS X.
|
||||
void *ctx;
|
||||
#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
|
||||
#else
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
|
||||
#endif
|
||||
}
|
||||
|
||||
#define INIT_MEMCPY \
|
||||
do { \
|
||||
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
|
||||
COMMON_INTERCEPT_FUNCTION(memcpy); \
|
||||
} else { \
|
||||
ASSIGN_REAL(memcpy, memmove); \
|
||||
} \
|
||||
CHECK(REAL(memcpy)); \
|
||||
} while (false)
|
||||
|
||||
#else
|
||||
#define INIT_MEMCPY
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_AEABI_MEM
|
||||
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
|
||||
}
|
||||
|
||||
// Note the argument order.
|
||||
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
|
||||
#define INIT_AEABI_MEM \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
|
||||
COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
|
||||
#else
|
||||
#define INIT_AEABI_MEM
|
||||
#endif // SANITIZER_INTERCEPT_AEABI_MEM
|
||||
|
||||
#if SANITIZER_INTERCEPT___BZERO
|
||||
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
|
||||
#else
|
||||
#define INIT___BZERO
|
||||
#endif // SANITIZER_INTERCEPT___BZERO
|
||||
|
||||
#if SANITIZER_INTERCEPT_BZERO
|
||||
INTERCEPTOR(void *, bzero, void *block, uptr size) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
|
||||
}
|
||||
#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
|
||||
#else
|
||||
#define INIT_BZERO
|
||||
#endif // SANITIZER_INTERCEPT_BZERO
|
||||
|
||||
namespace __sanitizer {
|
||||
// This does not need to be called if InitializeCommonInterceptors() is called.
|
||||
void InitializeMemintrinsicInterceptors() {
|
||||
INIT_MEMSET;
|
||||
INIT_MEMMOVE;
|
||||
INIT_MEMCPY;
|
||||
INIT_AEABI_MEM;
|
||||
INIT___BZERO;
|
||||
INIT_BZERO;
|
||||
}
|
||||
} // namespace __sanitizer
|
|
@ -40,8 +40,8 @@ ASM_WRAPPER_NAME(vfork):
|
|||
ret
|
||||
ASM_SIZE(vfork)
|
||||
|
||||
.weak vfork
|
||||
.set vfork, ASM_WRAPPER_NAME(vfork)
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(vfork)
|
||||
ASM_TRAMPOLINE_ALIAS(vfork, vfork)
|
||||
|
||||
GNU_PROPERTY_BTI_PAC
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ ASM_WRAPPER_NAME(vfork):
|
|||
|
||||
ASM_SIZE(vfork)
|
||||
|
||||
.weak vfork
|
||||
.set vfork, ASM_WRAPPER_NAME(vfork)
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(vfork)
|
||||
ASM_TRAMPOLINE_ALIAS(vfork, vfork)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -58,7 +58,7 @@ ASM_WRAPPER_NAME(vfork):
|
|||
ret
|
||||
ASM_SIZE(vfork)
|
||||
|
||||
.weak vfork
|
||||
.set vfork, ASM_WRAPPER_NAME(vfork)
|
||||
ASM_INTERCEPTOR_TRAMPOLINE(vfork)
|
||||
ASM_TRAMPOLINE_ALIAS(vfork, vfork)
|
||||
|
||||
#endif
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue