libsanitizer: merge from upstream (3185e47b5a8444e9fd).
Parent:  a8e1551bdb
Commit:  d53b3d94aa
132 changed files with 2770 additions and 1163 deletions
@@ -1,4 +1,4 @@
-ae59131d3ef311fb4b1e50627c6457be00e60dc9
+3185e47b5a8444e9fd70b746a7ad679dd131ffe4

 The first line of this file holds the git revision number of the
 last merge done from the master library sources.

@@ -1094,10 +1096,16 @@ uptr PointsIntoChunk(void *p) {
 }

 uptr GetUserBegin(uptr chunk) {
+  // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is
+  // not needed.
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
   return m ? m->Beg() : 0;
 }

+uptr GetUserAddr(uptr chunk) {
+  return chunk;
+}
+
 LsanMetadata::LsanMetadata(uptr chunk) {
   metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                     : nullptr;
@@ -1138,7 +1144,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
   __asan::get_allocator().ForEachChunk(callback, arg);
 }

-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+IgnoreObjectResult IgnoreObject(const void *p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
   if (!m ||
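The IgnoreObjectLocked-to-IgnoreObject rename tracks upstream's locking rework; the user-facing way to reach this path is the long-standing __lsan_ignore_object interface. A minimal sketch of that entry point (assumes a -fsanitize=address or -fsanitize=leak build; pointer and size are illustrative only):

    #include <sanitizer/lsan_interface.h>
    #include <cstdlib>

    int main() {
      void *deliberately_leaked = malloc(64);
      // Tells LSan to skip this allocation when reporting leaks at exit.
      __lsan_ignore_object(deliberately_leaked);
      return 0;
    }
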
@@ -1153,38 +1159,22 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   return kIgnoreObjectSuccess;
 }

-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
-  // Look for the arg pointer of threads that have been created or are running.
-  // This is necessary to prevent false positive leaks due to the AsanThread
-  // holding the only live reference to a heap object. This can happen because
-  // the `pthread_create()` interceptor doesn't wait for the child thread to
-  // start before returning and thus loosing the the only live reference to the
-  // heap object on the stack.
-
-  __asan::AsanThreadContext *atctx =
-      reinterpret_cast<__asan::AsanThreadContext *>(tctx);
-  __asan::AsanThread *asan_thread = atctx->thread;
-
-  // Note ThreadStatusRunning is required because there is a small window where
-  // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
-  // still isn't on the stack yet.
-  if (atctx->status != ThreadStatusCreated &&
-      atctx->status != ThreadStatusRunning)
-    return;
-
-  uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
-  if (!thread_arg)
-    return;
-
-  auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
-  ptrsVec->push_back(thread_arg);
-}
-
 }  // namespace __lsan

 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;

+static const void *AllocationBegin(const void *p) {
+  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
+  if (!m)
+    return nullptr;
+  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+    return nullptr;
+  if (m->UsedSize() == 0)
+    return nullptr;
+  return (const void *)(m->Beg());
+}
+
 // ASan allocator doesn't reserve extra bytes, so normally we would
 // just return "size". We don't want to expose our redzone sizes, etc here.
 uptr __sanitizer_get_estimated_allocated_size(uptr size) {
@@ -1208,6 +1198,10 @@ uptr __sanitizer_get_allocated_size(const void *p) {
   return allocated_size;
 }

+const void *__sanitizer_get_allocated_begin(const void *p) {
+  return AllocationBegin(p);
+}
+
 void __sanitizer_purge_allocator() {
   GET_STACK_TRACE_MALLOC;
   instance.Purge(&stack);
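__sanitizer_get_allocated_begin is a new introspection entry point: given an interior pointer into a live heap allocation it returns the allocation's base, or null for dead or unknown pointers. A hedged usage sketch — the extern declaration is spelled out manually in case the installed <sanitizer/allocator_interface.h> predates this API; build with -fsanitize=address:

    #include <cstdio>
    #include <cstdlib>

    extern "C" const void *__sanitizer_get_allocated_begin(const void *p);

    int main() {
      char *p = static_cast<char *>(malloc(32));
      // An interior pointer (p + 7) maps back to the allocation base p.
      printf("%p == %p\n", __sanitizer_get_allocated_begin(p + 7), (void *)p);
      free(p);
      return 0;
    }
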
@@ -143,11 +143,15 @@ typedef DefaultSizeClassMap SizeClassMap;
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
 typedef DefaultSizeClassMap SizeClassMap;
-#  else
+#  elif SANITIZER_APPLE
 const uptr kAllocatorSpace = 0x600000000000ULL;
 const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
 typedef DefaultSizeClassMap SizeClassMap;
-#  endif
+#  else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+#  endif
 template <typename AddressSpaceViewTy>
 struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
   static const uptr kSpaceBeg = kAllocatorSpace;
@@ -334,6 +334,26 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
   ReportErrorSummary(scariness.GetDescription(), stack);
 }

+void ErrorBadParamsToAnnotateDoubleEndedContiguousContainer::Print() {
+  Report(
+      "ERROR: AddressSanitizer: bad parameters to "
+      "__sanitizer_annotate_double_ended_contiguous_container:\n"
+      "      storage_beg : %p\n"
+      "      storage_end : %p\n"
+      "      old_container_beg : %p\n"
+      "      old_container_end : %p\n"
+      "      new_container_beg : %p\n"
+      "      new_container_end : %p\n",
+      (void *)storage_beg, (void *)storage_end, (void *)old_container_beg,
+      (void *)old_container_end, (void *)new_container_beg,
+      (void *)new_container_end);
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
+  if (!IsAligned(storage_beg, granularity))
+    Report("ERROR: storage_beg is not aligned by %zu\n", granularity);
+  stack->Print();
+  ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
 void ErrorODRViolation::Print() {
   Decorator d;
   Printf("%s", d.Error());
@@ -331,6 +331,28 @@ struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
   void Print();
 };

+struct ErrorBadParamsToAnnotateDoubleEndedContiguousContainer : ErrorBase {
+  const BufferedStackTrace *stack;
+  uptr storage_beg, storage_end, old_container_beg, old_container_end,
+      new_container_beg, new_container_end;
+
+  ErrorBadParamsToAnnotateDoubleEndedContiguousContainer() = default;  // (*)
+  ErrorBadParamsToAnnotateDoubleEndedContiguousContainer(
+      u32 tid, BufferedStackTrace *stack_, uptr storage_beg_, uptr storage_end_,
+      uptr old_container_beg_, uptr old_container_end_, uptr new_container_beg_,
+      uptr new_container_end_)
+      : ErrorBase(tid, 10,
+                  "bad-__sanitizer_annotate_double_ended_contiguous_container"),
+        stack(stack_),
+        storage_beg(storage_beg_),
+        storage_end(storage_end_),
+        old_container_beg(old_container_beg_),
+        old_container_end(old_container_end_),
+        new_container_beg(new_container_beg_),
+        new_container_end(new_container_end_) {}
+  void Print();
+};
+
 struct ErrorODRViolation : ErrorBase {
   __asan_global global1, global2;
   u32 stack_id1, stack_id2;
@@ -378,28 +400,29 @@ struct ErrorGeneric : ErrorBase {
 };

 // clang-format off
-#define ASAN_FOR_EACH_ERROR_KIND(macro) \
-  macro(DeadlySignal) \
-  macro(DoubleFree) \
-  macro(NewDeleteTypeMismatch) \
-  macro(FreeNotMalloced) \
-  macro(AllocTypeMismatch) \
-  macro(MallocUsableSizeNotOwned) \
-  macro(SanitizerGetAllocatedSizeNotOwned) \
-  macro(CallocOverflow) \
-  macro(ReallocArrayOverflow) \
-  macro(PvallocOverflow) \
-  macro(InvalidAllocationAlignment) \
-  macro(InvalidAlignedAllocAlignment) \
-  macro(InvalidPosixMemalignAlignment) \
-  macro(AllocationSizeTooBig) \
-  macro(RssLimitExceeded) \
-  macro(OutOfMemory) \
-  macro(StringFunctionMemoryRangesOverlap) \
-  macro(StringFunctionSizeOverflow) \
-  macro(BadParamsToAnnotateContiguousContainer) \
-  macro(ODRViolation) \
-  macro(InvalidPointerPair) \
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+  macro(DeadlySignal) \
+  macro(DoubleFree) \
+  macro(NewDeleteTypeMismatch) \
+  macro(FreeNotMalloced) \
+  macro(AllocTypeMismatch) \
+  macro(MallocUsableSizeNotOwned) \
+  macro(SanitizerGetAllocatedSizeNotOwned) \
+  macro(CallocOverflow) \
+  macro(ReallocArrayOverflow) \
+  macro(PvallocOverflow) \
+  macro(InvalidAllocationAlignment) \
+  macro(InvalidAlignedAllocAlignment) \
+  macro(InvalidPosixMemalignAlignment) \
+  macro(AllocationSizeTooBig) \
+  macro(RssLimitExceeded) \
+  macro(OutOfMemory) \
+  macro(StringFunctionMemoryRangesOverlap) \
+  macro(StringFunctionSizeOverflow) \
+  macro(BadParamsToAnnotateContiguousContainer) \
+  macro(BadParamsToAnnotateDoubleEndedContiguousContainer) \
+  macro(ODRViolation) \
+  macro(InvalidPointerPair) \
   macro(Generic)
 // clang-format on
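ASAN_FOR_EACH_ERROR_KIND is a classic X-macro: every use site supplies its own one-argument macro and gets one expansion per error kind, so adding BadParamsToAnnotateDoubleEndedContiguousContainer in exactly one place updates the ErrorKind enum and the ErrorDescription union together. A toy illustration of the pattern (the names below are hypothetical, not from this header):

    #define FOR_EACH_KIND(macro) \
      macro(DoubleFree)          \
      macro(OutOfMemory)

    // One use site stamps out an enum...
    #define DEFINE_ENUM_MEMBER(name) kKind##name,
    enum Kind { kKindInvalid = 0, FOR_EACH_KIND(DEFINE_ENUM_MEMBER) };

    // ...another stamps out matching structs, guaranteed to stay in sync.
    #define DEFINE_STRUCT(name) struct Error##name {};
    FOR_EACH_KIND(DEFINE_STRUCT)
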
@@ -158,6 +158,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }

+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -203,6 +220,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
@@ -257,12 +257,36 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
   PoisonShadow(bottom, ssize, 0);
 }

 INTERCEPTOR(int, getcontext, struct ucontext_t *ucp) {
   // API does not requires to have ucp clean, and sets only part of fields. We
   // use ucp->uc_stack to unpoison new stack. We prefer to have zeroes then
   // uninitialized bytes.
   ResetContextStack(ucp);
   return REAL(getcontext)(ucp);
 }

+INTERCEPTOR(void, makecontext, struct ucontext_t *ucp, void (*func)(), int argc,
+            ...) {
+  va_list ap;
+  uptr args[64];
+  // We don't know a better way to forward ... into REAL function. We can
+  // increase args size if neccecary.
+  CHECK_LE(argc, ARRAY_SIZE(args));
+  internal_memset(args, 0, sizeof(args));
+  va_start(ap, argc);
+  for (int i = 0; i < argc; ++i) args[i] = va_arg(ap, uptr);
+  va_end(ap);
+
+#  define ENUMERATE_ARRAY_4(start) \
+    args[start], args[start + 1], args[start + 2], args[start + 3]
+#  define ENUMERATE_ARRAY_16(start)                         \
+    ENUMERATE_ARRAY_4(start), ENUMERATE_ARRAY_4(start + 4), \
+        ENUMERATE_ARRAY_4(start + 8), ENUMERATE_ARRAY_4(start + 12)
+#  define ENUMERATE_ARRAY_64()                                             \
+    ENUMERATE_ARRAY_16(0), ENUMERATE_ARRAY_16(16), ENUMERATE_ARRAY_16(32), \
+        ENUMERATE_ARRAY_16(48)
+
+  REAL(makecontext)
+  ((struct ucontext_t *)ucp, func, argc, ENUMERATE_ARRAY_64());
+
+#  undef ENUMERATE_ARRAY_4
+#  undef ENUMERATE_ARRAY_16
+#  undef ENUMERATE_ARRAY_64
+
+  // Sign the stack so we can identify it for unpoisoning.
+  SignContextStack(ucp);
+}
+
 INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
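The makecontext interceptor matters for code like the classic coroutine idiom below, which older ASan versions only partially supported. A minimal, self-contained usage sketch of the intercepted POSIX calls:

    #include <ucontext.h>

    static ucontext_t main_ctx, co_ctx;
    static char co_stack[64 * 1024];

    static void coroutine() {
      // ... runs on co_stack; ASan can unpoison it via the signed uc_stack ...
      swapcontext(&co_ctx, &main_ctx);  // yield back to main
    }

    int main() {
      getcontext(&co_ctx);                 // intercepted: context stack reset
      co_ctx.uc_stack.ss_sp = co_stack;
      co_ctx.uc_stack.ss_size = sizeof(co_stack);
      co_ctx.uc_link = &main_ctx;
      makecontext(&co_ctx, coroutine, 0);  // intercepted: stack gets signed
      swapcontext(&main_ctx, &co_ctx);     // intercepted: shadow is cleared
      return 0;
    }
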
@@ -279,9 +303,6 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
   ReadContextStack(ucp, &stack, &ssize);
   ClearShadowMemoryForContextStack(stack, ssize);

-  // See getcontext interceptor.
-  ResetContextStack(oucp);
-
 #  if __has_attribute(__indirect_return__) && \
       (defined(__x86_64__) || defined(__i386__))
   int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *)
@@ -453,7 +474,9 @@ INTERCEPTOR(char*, strdup, const char *s) {
   }
   GET_STACK_TRACE_MALLOC;
   void *new_mem = asan_malloc(length + 1, &stack);
-  REAL(memcpy)(new_mem, s, length + 1);
+  if (new_mem) {
+    REAL(memcpy)(new_mem, s, length + 1);
+  }
   return reinterpret_cast<char*>(new_mem);
 }

@@ -469,7 +492,9 @@ INTERCEPTOR(char*, __strdup, const char *s) {
   }
   GET_STACK_TRACE_MALLOC;
   void *new_mem = asan_malloc(length + 1, &stack);
-  REAL(memcpy)(new_mem, s, length + 1);
+  if (new_mem) {
+    REAL(memcpy)(new_mem, s, length + 1);
+  }
   return reinterpret_cast<char*>(new_mem);
 }
 #endif // ASAN_INTERCEPT___STRDUP
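Both strdup interceptors now guard the copy on the allocation result, so an out-of-memory asan_malloc yields a null return instead of a crash inside memcpy. The same defensive shape in plain C++ (a sketch, not the interceptor itself):

    #include <cstdlib>
    #include <cstring>

    char *checked_strdup(const char *s) {
      size_t length = strlen(s);
      void *new_mem = malloc(length + 1);
      if (new_mem) {                        // mirrors the interceptor's new guard
        memcpy(new_mem, s, length + 1);
      }
      return static_cast<char *>(new_mem);  // null on allocation failure
    }
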
@@ -658,11 +683,11 @@ void InitializeAsanInterceptors() {
   // Intecept jump-related functions.
   ASAN_INTERCEPT_FUNC(longjmp);

-#if ASAN_INTERCEPT_SWAPCONTEXT
+  ASAN_INTERCEPT_FUNC(getcontext);
+#  if ASAN_INTERCEPT_SWAPCONTEXT
   ASAN_INTERCEPT_FUNC(swapcontext);
-#endif
-#if ASAN_INTERCEPT__LONGJMP
+  ASAN_INTERCEPT_FUNC(makecontext);
+#  endif
+#  if ASAN_INTERCEPT__LONGJMP
   ASAN_INTERCEPT_FUNC(_longjmp);
 #endif
 #if ASAN_INTERCEPT___LONGJMP_CHK
@@ -681,11 +706,11 @@ void InitializeAsanInterceptors() {
 #endif
   // Indirectly intercept std::rethrow_exception.
 #if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
-  INTERCEPT_FUNCTION(_Unwind_RaiseException);
+  ASAN_INTERCEPT_FUNC(_Unwind_RaiseException);
 #endif
   // Indirectly intercept std::rethrow_exception.
 #if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
-  INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+  ASAN_INTERCEPT_FUNC(_Unwind_SjLj_RaiseException);
 #endif

   // Intercept threading-related functions
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 #  define ASAN_INTERCEPT___CXA_THROW 1
-#  if !defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-      || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#    define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-#  else
-#    define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-#  endif
+#  define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 #  if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #    define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 #  else
@@ -105,8 +105,8 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle);

 void AsanOnDeadlySignal(int, void *siginfo, void *context);

+void SignContextStack(void *context);
 void ReadContextStack(void *context, uptr *stack, uptr *ssize);
 void ResetContextStack(void *context);
 void StopInitOrderChecking();

 // Wrapper for TLS/TSD.
@@ -15,55 +15,56 @@
 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
     SANITIZER_SOLARIS

-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_premap_shadow.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_freebsd.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+#  include <dlfcn.h>
+#  include <fcntl.h>
+#  include <limits.h>
+#  include <pthread.h>
+#  include <stdio.h>
+#  include <sys/mman.h>
+#  include <sys/resource.h>
+#  include <sys/syscall.h>
+#  include <sys/time.h>
+#  include <sys/types.h>
+#  include <unistd.h>
+#  include <unwind.h>

-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <dlfcn.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <unwind.h>
+#  include "asan_interceptors.h"
+#  include "asan_internal.h"
+#  include "asan_premap_shadow.h"
+#  include "asan_thread.h"
+#  include "sanitizer_common/sanitizer_flags.h"
+#  include "sanitizer_common/sanitizer_freebsd.h"
+#  include "sanitizer_common/sanitizer_hash.h"
+#  include "sanitizer_common/sanitizer_libc.h"
+#  include "sanitizer_common/sanitizer_procmaps.h"

-#if SANITIZER_FREEBSD
-#include <sys/link_elf.h>
-#endif
+#  if SANITIZER_FREEBSD
+#    include <sys/link_elf.h>
+#  endif

-#if SANITIZER_SOLARIS
-#include <link.h>
-#endif
+#  if SANITIZER_SOLARIS
+#    include <link.h>
+#  endif

-#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
-#include <ucontext.h>
-extern "C" void* _DYNAMIC;
-#elif SANITIZER_NETBSD
-#include <link_elf.h>
-#include <ucontext.h>
+#  if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
+#    include <ucontext.h>
+extern "C" void *_DYNAMIC;
+#  elif SANITIZER_NETBSD
+#    include <link_elf.h>
+#    include <ucontext.h>
 extern Elf_Dyn _DYNAMIC;
-#else
-#include <sys/ucontext.h>
-#include <link.h>
+#  else
+#    include <link.h>
+#    include <sys/ucontext.h>
 extern ElfW(Dyn) _DYNAMIC[];
-#endif
+#  endif

 // x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
 // 32-bit mode.
-#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
-    __FreeBSD_version <= 902001  // v9.2
-#define ucontext_t xucontext_t
-#endif
+#  if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+      __FreeBSD_version <= 902001  // v9.2
+#    define ucontext_t xucontext_t
+#  endif

 typedef enum {
   ASAN_RT_VERSION_UNDEFINED = 0,
@@ -74,21 +75,21 @@ typedef enum {
 // FIXME: perhaps also store abi version here?
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 asan_rt_version_t __asan_rt_version;
 }

 namespace __asan {

 void InitializePlatformInterceptors() {}
 void InitializePlatformExceptionHandlers() {}
-bool IsSystemHeapAddress (uptr addr) { return false; }
+bool IsSystemHeapAddress(uptr addr) { return false; }

 void *AsanDoesNotSupportStaticLinkage() {
   // This will fail to link with -static.
   return &_DYNAMIC;
 }

-#if ASAN_PREMAP_SHADOW
+#  if ASAN_PREMAP_SHADOW
 uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
   uptr granularity = GetMmapGranularity();
   uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
@@ -98,14 +99,14 @@ uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
   UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
   return shadow_start;
 }
-#endif
+#  endif

 uptr FindDynamicShadowStart() {
   uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
-#if ASAN_PREMAP_SHADOW
+#  if ASAN_PREMAP_SHADOW
   if (!PremapShadowFailed())
     return FindPremappedShadowStart(shadow_size_bytes);
-#endif
+#  endif

   return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE,
                           /*min_shadow_base_alignment*/ 0, kHighMemEnd);
@@ -121,11 +122,11 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
   ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
 }

-#if SANITIZER_ANDROID
+#  if SANITIZER_ANDROID
 // FIXME: should we do anything for Android?
 void AsanCheckDynamicRTPrereqs() {}
 void AsanCheckIncompatibleRT() {}
-#else
+#  else
 static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
                                 void *data) {
   VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", info->dlpi_name,
@@ -154,7 +155,7 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,

 static bool IsDynamicRTName(const char *libname) {
   return internal_strstr(libname, "libclang_rt.asan") ||
-    internal_strstr(libname, "libasan.so");
+         internal_strstr(libname, "libasan.so");
 }

 static void ReportIncompatibleRT() {
@@ -170,9 +171,10 @@ void AsanCheckDynamicRTPrereqs() {
   const char *first_dso_name = nullptr;
   dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
   if (first_dso_name && first_dso_name[0] && !IsDynamicRTName(first_dso_name)) {
-    Report("ASan runtime does not come first in initial library list; "
-           "you should either link runtime to your application or "
-           "manually preload it with LD_PRELOAD.\n");
+    Report(
+        "ASan runtime does not come first in initial library list; "
+        "you should either link runtime to your application or "
+        "manually preload it with LD_PRELOAD.\n");
     Die();
   }
 }
@@ -190,13 +192,14 @@ void AsanCheckIncompatibleRT() {
       // as early as possible, otherwise ASan interceptors could bind to
       // the functions in dynamic ASan runtime instead of the functions in
       // system libraries, causing crashes later in ASan initialization.
-      MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+      MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
       char filename[PATH_MAX];
       MemoryMappedSegment segment(filename, sizeof(filename));
       while (proc_maps.Next(&segment)) {
         if (IsDynamicRTName(segment.filename)) {
-          Report("Your application is linked against "
-                 "incompatible ASan runtimes.\n");
+          Report(
+              "Your application is linked against "
+              "incompatible ASan runtimes.\n");
           Die();
         }
       }
@@ -206,31 +209,36 @@ void AsanCheckIncompatibleRT() {
     }
   }
 }
-#endif  // SANITIZER_ANDROID
+#  endif  // SANITIZER_ANDROID
+
+#  if ASAN_INTERCEPT_SWAPCONTEXT
+constexpr u32 kAsanContextStackFlagsMagic = 0x51260eea;
+
+static int HashContextStack(const ucontext_t &ucp) {
+  MurMur2Hash64Builder hash(kAsanContextStackFlagsMagic);
+  hash.add(reinterpret_cast<uptr>(ucp.uc_stack.ss_sp));
+  hash.add(ucp.uc_stack.ss_size);
+  return static_cast<int>(hash.get());
+}
+
+void SignContextStack(void *context) {
+  ucontext_t *ucp = reinterpret_cast<ucontext_t *>(context);
+  ucp->uc_stack.ss_flags = HashContextStack(*ucp);
+}

-#if !SANITIZER_ANDROID
 void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
-  ucontext_t *ucp = (ucontext_t*)context;
-  *stack = (uptr)ucp->uc_stack.ss_sp;
-  *ssize = ucp->uc_stack.ss_size;
+  const ucontext_t *ucp = reinterpret_cast<const ucontext_t *>(context);
+  if (HashContextStack(*ucp) == ucp->uc_stack.ss_flags) {
+    *stack = reinterpret_cast<uptr>(ucp->uc_stack.ss_sp);
+    *ssize = ucp->uc_stack.ss_size;
+    return;
+  }
+  *stack = 0;
+  *ssize = 0;
 }

 void ResetContextStack(void *context) {
   ucontext_t *ucp = (ucontext_t *)context;
   ucp->uc_stack.ss_sp = nullptr;
   ucp->uc_stack.ss_size = 0;
 }
-#else
+#  else
 void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
   UNIMPLEMENTED();
 }

 void ResetContextStack(void *context) { UNIMPLEMENTED(); }
-#endif
+#  endif  // ASAN_INTERCEPT_SWAPCONTEXT

-void *AsanDlSymNext(const char *sym) {
-  return dlsym(RTLD_NEXT, sym);
-}
+void *AsanDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); }

 bool HandleDlopenInit() {
   // Not supported on this platform.
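The HashContextStack/SignContextStack pair implements a tag-and-verify idiom: the stack fields are stamped with a hash kept in uc_stack.ss_flags, and ReadContextStack only trusts a ucontext whose stamp still matches, treating everything else as unknown. The shape of that idiom, reduced to a sketch (toy hash, not the MurMur2 builder used above):

    #include <cstdint>

    struct StackDesc { void *sp; uint64_t size; int flags; };

    static int Stamp(const StackDesc &s) {
      // Toy mix; the real code uses MurMur2Hash64Builder seeded with a magic.
      return static_cast<int>(reinterpret_cast<uintptr_t>(s.sp) ^ s.size ^ 0x51260eea);
    }

    void Sign(StackDesc &s) { s.flags = Stamp(s); }

    bool ReadIfSigned(const StackDesc &s, void **sp, uint64_t *size) {
      if (Stamp(s) != s.flags) return false;  // not one of ours: report nothing
      *sp = s.sp;
      *size = s.size;
      return true;
    }
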
@@ -239,7 +247,7 @@ bool HandleDlopenInit() {
   return false;
 }

-} // namespace __asan
+}  // namespace __asan

 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
         // SANITIZER_SOLARIS
@@ -95,12 +95,6 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
   ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
 }

-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
-  UNIMPLEMENTED();
-}
-
-void ResetContextStack(void *context) { UNIMPLEMENTED(); }
-
 // Support for the following functions from libdispatch on Mac OS:
 //   dispatch_async_f()
 //   dispatch_async()
@@ -508,10 +508,10 @@ void ReplaceSystemMalloc() {
   TryToOverrideFunction("_expand_base", (uptr)_expand);

   if (flags()->windows_hook_rtl_allocators) {
-    INTERCEPT_FUNCTION(HeapSize);
-    INTERCEPT_FUNCTION(HeapFree);
-    INTERCEPT_FUNCTION(HeapReAlloc);
-    INTERCEPT_FUNCTION(HeapAlloc);
+    ASAN_INTERCEPT_FUNC(HeapSize);
+    ASAN_INTERCEPT_FUNC(HeapFree);
+    ASAN_INTERCEPT_FUNC(HeapReAlloc);
+    ASAN_INTERCEPT_FUNC(HeapAlloc);

     // Undocumented functions must be intercepted by name, not by symbol.
     __interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap),
@@ -190,7 +190,7 @@
 #  elif defined(__aarch64__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
 #  elif defined(__powerpc64__)
-#    define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
 #  elif defined(__s390x__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
 #  elif SANITIZER_FREEBSD
@@ -11,12 +11,11 @@
 // This file implements __sanitizer_print_memory_profile.
 //===----------------------------------------------------------------------===//

+#include "asan/asan_allocator.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
-#include "lsan/lsan_common.h"
-#include "asan/asan_allocator.h"

 #if CAN_SANITIZE_LEAKS
@@ -100,17 +99,16 @@ static void ChunkCallback(uptr chunk, void *arg) {
                       FindHeapChunkByAllocBeg(chunk));
 }

-static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
-                            void *argument) {
+static void MemoryProfileCB(uptr top_percent, uptr max_number_of_contexts) {
   HeapProfile hp;
+  __lsan::LockAllocator();
   __lsan::ForEachChunk(ChunkCallback, &hp);
-  uptr *Arg = reinterpret_cast<uptr*>(argument);
-  hp.Print(Arg[0], Arg[1]);
+  __lsan::UnlockAllocator();
+  hp.Print(top_percent, max_number_of_contexts);

   if (Verbosity())
     __asan_print_accumulated_stats();
 }

 }  // namespace __asan

 #endif  // CAN_SANITIZE_LEAKS
@@ -120,10 +118,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_print_memory_profile(uptr top_percent,
                                       uptr max_number_of_contexts) {
 #if CAN_SANITIZE_LEAKS
-  uptr Arg[2];
-  Arg[0] = top_percent;
-  Arg[1] = max_number_of_contexts;
-  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+  __asan::MemoryProfileCB(top_percent, max_number_of_contexts);
 #endif  // CAN_SANITIZE_LEAKS
 }
 }  // extern "C"
@@ -370,79 +370,77 @@ void __asan_unpoison_stack_memory(uptr addr, uptr size) {
   PoisonAlignedStackMemory(addr, size, false);
 }

+static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
+                                uptr &old_beg, uptr &old_end, uptr &new_beg,
+                                uptr &new_end) {
+  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
+    uptr end_down = RoundDownTo(storage_end, granularity);
+    // Ignore the last unaligned granule if the storage is followed by
+    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
+    // AddressIsPoisoned at all if container changes does not affect the last
+    // granule at all.
+    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
+         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
+        !AddressIsPoisoned(storage_end)) {
+      old_beg = Min(end_down, old_beg);
+      old_end = Min(end_down, old_end);
+      new_beg = Min(end_down, new_beg);
+      new_end = Min(end_down, new_end);
+    }
+  }
+
+  // Handle misaligned begin and cut it off.
+  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
+    uptr beg_up = RoundUpTo(storage_beg, granularity);
+    // The first unaligned granule needs special handling only if we had bytes
+    // there before and will have none after.
+    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
+        old_beg < beg_up) {
+      // Keep granule prefix outside of the storage unpoisoned.
+      uptr beg_down = RoundDownTo(storage_beg, granularity);
+      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
+      old_beg = Max(beg_up, old_beg);
+      old_end = Max(beg_up, old_end);
+      new_beg = Max(beg_up, new_beg);
+      new_end = Max(beg_up, new_end);
+    }
+  }
+}
+
 void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                                const void *end_p,
                                                const void *old_mid_p,
                                                const void *new_mid_p) {
-  if (!flags()->detect_container_overflow) return;
+  if (!flags()->detect_container_overflow)
+    return;
   VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
           new_mid_p);
-  uptr beg = reinterpret_cast<uptr>(beg_p);
-  uptr end = reinterpret_cast<uptr>(end_p);
-  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
-  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
+  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
+  uptr storage_end = reinterpret_cast<uptr>(end_p);
+  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
+  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
+  uptr old_beg = storage_beg;
+  uptr new_beg = storage_beg;
   uptr granularity = ASAN_SHADOW_GRANULARITY;
-  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end)) {
+  if (!(storage_beg <= old_end && storage_beg <= new_end &&
+        old_end <= storage_end && new_end <= storage_end)) {
     GET_STACK_TRACE_FATAL_HERE;
-    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
-                                                 &stack);
+    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
+                                                 old_end, new_end, &stack);
   }
-  CHECK_LE(end - beg,
-           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.
+  CHECK_LE(storage_end - storage_beg,
+           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

-  if (old_mid == new_mid)
+  if (old_end == new_end)
     return;  // Nothing to do here.

-  // Handle misaligned end and cut it off.
-  if (UNLIKELY(!AddrIsAlignedByGranularity(end))) {
-    uptr end_down = RoundDownTo(end, granularity);
-    // Either new or old mid must be in the granule to affect it.
-    if (new_mid > end_down) {
-      if (AddressIsPoisoned(end)) {
-        *(u8 *)MemToShadow(end_down) = static_cast<u8>(new_mid - end_down);
-      } else {
-        // Something after the container - don't touch.
-      }
-    } else if (old_mid > end_down) {
-      if (AddressIsPoisoned(end)) {
-        *(u8 *)MemToShadow(end_down) = kAsanContiguousContainerOOBMagic;
-      } else {
-        // Something after the container - don't touch.
-      }
-    }
+  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+                      new_end);

-    if (beg >= end_down)
-      return;  // Same granule.
-
-    old_mid = Min(end_down, old_mid);
-    new_mid = Min(end_down, new_mid);
-  }
-
-  // Handle misaligned begin and cut it off.
-  if (UNLIKELY(!AddrIsAlignedByGranularity(beg))) {
-    uptr beg_up = RoundUpTo(beg, granularity);
-    uptr beg_down = RoundDownTo(beg, granularity);
-    // As soon as we add first byte into container we will not be able to
-    // determine the state of the byte before the container. So we assume it's
-    // always unpoison.
-
-    // Either new or old mid must be in the granule to affect it.
-    if (new_mid < beg_up) {
-      *(u8 *)MemToShadow(beg_down) = static_cast<u8>(new_mid - beg_down);
-    } else if (old_mid < beg_up) {
-      *(u8 *)MemToShadow(beg_down) = 0;
-    }
-
-    old_mid = Max(beg_up, old_mid);
-    new_mid = Max(beg_up, new_mid);
-  }
-
-  if (old_mid == new_mid)
-    return;
-
-  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
-  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
-  uptr d1 = RoundDownTo(old_mid, granularity);
+  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
+  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
+  uptr d1 = RoundDownTo(old_end, granularity);
   // uptr d2 = RoundUpTo(old_mid, granularity);
   // Currently we should be in this state:
   // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
@@ -453,23 +451,148 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   // if (d1 != d2)
   //  CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
   if (a + granularity <= d1)
-    CHECK_EQ(*(u8*)MemToShadow(a), 0);
+    CHECK_EQ(*(u8 *)MemToShadow(a), 0);
   // if (d2 + granularity <= c && c <= end)
   //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
   //            kAsanContiguousContainerOOBMagic);

-  uptr b1 = RoundDownTo(new_mid, granularity);
-  uptr b2 = RoundUpTo(new_mid, granularity);
+  uptr b1 = RoundDownTo(new_end, granularity);
+  uptr b2 = RoundUpTo(new_end, granularity);
   // New state:
   // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
-  PoisonShadow(a, b1 - a, 0);
-  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
+  if (b1 > a)
+    PoisonShadow(a, b1 - a, 0);
+  else if (c > b2)
+    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
   if (b1 != b2) {
     CHECK_EQ(b2 - b1, granularity);
-    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
+    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
   }
 }

+// Annotates a double ended contiguous memory area like std::deque's chunk.
+// It allows detecting buggy accesses to allocated but not used begining
+// or end items of such a container.
+void __sanitizer_annotate_double_ended_contiguous_container(
+    const void *storage_beg_p, const void *storage_end_p,
+    const void *old_container_beg_p, const void *old_container_end_p,
+    const void *new_container_beg_p, const void *new_container_end_p) {
+  if (!flags()->detect_container_overflow)
+    return;
+
+  VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
+          storage_end_p, old_container_beg_p, old_container_end_p,
+          new_container_beg_p, new_container_end_p);
+
+  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
+  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
+  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
+  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
+  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
+  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);
+
+  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+
+  if (!(old_beg <= old_end && new_beg <= new_end) ||
+      !(storage_beg <= new_beg && new_end <= storage_end) ||
+      !(storage_beg <= old_beg && old_end <= storage_end)) {
+    GET_STACK_TRACE_FATAL_HERE;
+    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
+  }
+  CHECK_LE(storage_end - storage_beg,
+           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.
+
+  if ((old_beg == old_end && new_beg == new_end) ||
+      (old_beg == new_beg && old_end == new_end))
+    return;  // Nothing to do here.
+
+  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+                      new_end);
+
+  // Handle non-intersecting new/old containers separately have simpler
+  // intersecting case.
+  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
+      old_end <= new_beg) {
+    if (old_beg != old_end) {
+      // Poisoning the old container.
+      uptr a = RoundDownTo(old_beg, granularity);
+      uptr b = RoundUpTo(old_end, granularity);
+      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
+    }
+
+    if (new_beg != new_end) {
+      // Unpoisoning the new container.
+      uptr a = RoundDownTo(new_beg, granularity);
+      uptr b = RoundDownTo(new_end, granularity);
+      PoisonShadow(a, b - a, 0);
+      if (!AddrIsAlignedByGranularity(new_end))
+        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
+    }
+
+    return;
+  }
+
+  // Intersection of old and new containers is not empty.
+  CHECK_LT(new_beg, old_end);
+  CHECK_GT(new_end, old_beg);
+
+  if (new_beg < old_beg) {
+    // Round down because we can't poison prefixes.
+    uptr a = RoundDownTo(new_beg, granularity);
+    // Round down and ignore the [c, old_beg) as its state defined by unchanged
+    // [old_beg, old_end).
+    uptr c = RoundDownTo(old_beg, granularity);
+    PoisonShadow(a, c - a, 0);
+  } else if (new_beg > old_beg) {
+    // Round down and poison [a, old_beg) because it was unpoisoned only as a
+    // prefix.
+    uptr a = RoundDownTo(old_beg, granularity);
+    // Round down and ignore the [c, new_beg) as its state defined by unchanged
+    // [new_beg, old_end).
+    uptr c = RoundDownTo(new_beg, granularity);
+
+    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
+  }
+
+  if (new_end > old_end) {
+    // Round down to poison the prefix.
+    uptr a = RoundDownTo(old_end, granularity);
+    // Round down and handle remainder below.
+    uptr c = RoundDownTo(new_end, granularity);
+    PoisonShadow(a, c - a, 0);
+    if (!AddrIsAlignedByGranularity(new_end))
+      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
+  } else if (new_end < old_end) {
+    // Round up and handle remained below.
+    uptr a2 = RoundUpTo(new_end, granularity);
+    // Round up to poison entire granule as we had nothing in [old_end, c2).
+    uptr c2 = RoundUpTo(old_end, granularity);
+    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);
+
+    if (!AddrIsAlignedByGranularity(new_end)) {
+      uptr a = RoundDownTo(new_end, granularity);
+      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
+    }
+  }
+}
+
+static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
+  CHECK_LE(begin, end);
+  constexpr uptr kMaxRangeToCheck = 32;
+  if (end - begin > kMaxRangeToCheck * 2) {
+    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
+      return bad;
+    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
+      return bad;
+  }
+
+  for (uptr i = begin; i < end; ++i)
+    if (AddressIsPoisoned(i) != poisoned)
+      return reinterpret_cast<const void *>(i);
+  return nullptr;
+}
+
 const void *__sanitizer_contiguous_container_find_bad_address(
     const void *beg_p, const void *mid_p, const void *end_p) {
   if (!flags()->detect_container_overflow)
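For callers, the new entry point generalizes the single-ended container annotation: a deque-like container can report both its unused front and back capacity. A usage sketch (assumes -fsanitize=address and a <sanitizer/common_interface_defs.h> recent enough to declare the function):

    #include <sanitizer/common_interface_defs.h>
    #include <cstdlib>

    int main() {
      char *buf = static_cast<char *>(malloc(64));  // storage [buf, buf+64)
      // Shrink the "container" from the whole storage down to [buf+16, buf+48):
      // bytes 0..15 and 48..63 become poisoned front/back capacity.
      __sanitizer_annotate_double_ended_contiguous_container(
          buf, buf + 64,        // storage bounds
          buf, buf + 64,        // old container: everything
          buf + 16, buf + 48);  // new container: the middle
      buf[16] = 1;    // fine: inside the container
      // buf[0] = 1;  // would be reported: allocated-but-unused front capacity
      free(buf);      // the allocator unpoisons the region again
      return 0;
    }
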
@@ -477,35 +600,22 @@ const void *__sanitizer_contiguous_container_find_bad_address(
   uptr granularity = ASAN_SHADOW_GRANULARITY;
   uptr beg = reinterpret_cast<uptr>(beg_p);
   uptr end = reinterpret_cast<uptr>(end_p);
-  uptr mid = reinterpret_cast<uptr>(mid_p);
-  CHECK_LE(beg, mid);
-  CHECK_LE(mid, end);
   // If the byte after the storage is unpoisoned, everything in the granule
   // before must stay unpoisoned.
   uptr annotations_end =
       (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
           ? RoundDownTo(end, granularity)
           : end;
+  uptr mid = reinterpret_cast<uptr>(mid_p);
+  CHECK_LE(beg, mid);
+  CHECK_LE(mid, end);
-  // Check some bytes starting from beg, some bytes around mid, and some bytes
-  // ending with end.
-  uptr kMaxRangeToCheck = 32;
-  uptr r1_beg = beg;
-  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
-  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
-  uptr r2_end = Min(annotations_end, mid + kMaxRangeToCheck);
-  uptr r3_beg = Max(annotations_end - kMaxRangeToCheck, mid);
-  uptr r3_end = annotations_end;
-  for (uptr i = r1_beg; i < r1_end; i++)
-    if (AddressIsPoisoned(i))
-      return reinterpret_cast<const void *>(i);
-  for (uptr i = r2_beg; i < mid; i++)
-    if (AddressIsPoisoned(i))
-      return reinterpret_cast<const void *>(i);
-  for (uptr i = mid; i < r2_end; i++)
-    if (!AddressIsPoisoned(i))
-      return reinterpret_cast<const void *>(i);
-  for (uptr i = r3_beg; i < r3_end; i++)
-    if (!AddressIsPoisoned(i))
-      return reinterpret_cast<const void *>(i);
-  return nullptr;
+  beg = Min(beg, annotations_end);
+  mid = Min(mid, annotations_end);
+  if (auto *bad = FindBadAddress(beg, mid, false))
+    return bad;
+  if (auto *bad = FindBadAddress(mid, annotations_end, true))
+    return bad;
+  return FindBadAddress(annotations_end, end, false);
 }

 int __sanitizer_verify_contiguous_container(const void *beg_p,
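FindBadAddress centralizes the old ad-hoc range checks: for large ranges only the first and last kMaxRangeToCheck (32) bytes are inspected, keeping verification cheap regardless of container size. From user code the typical consumers are the verify/find-bad-address pair; a sketch assuming an ASan build:

    #include <sanitizer/common_interface_defs.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      char *v = static_cast<char *>(malloc(64));
      // Mark [v, v+32) as live container contents, [v+32, v+64) as capacity.
      __sanitizer_annotate_contiguous_container(v, v + 64, v + 64, v + 32);
      assert(__sanitizer_verify_contiguous_container(v, v + 32, v + 64));
      // Returns the first inconsistent byte, or null when the shadow matches.
      assert(__sanitizer_contiguous_container_find_bad_address(v, v + 32, v + 64)
             == nullptr);
      free(v);
      return 0;
    }
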
@@ -515,6 +625,48 @@ int __sanitizer_verify_contiguous_container(const void *beg_p,
                                                          end_p) == nullptr;
 }

+const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
+    const void *storage_beg_p, const void *container_beg_p,
+    const void *container_end_p, const void *storage_end_p) {
+  if (!flags()->detect_container_overflow)
+    return nullptr;
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
+  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
+  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
+  uptr beg = reinterpret_cast<uptr>(container_beg_p);
+  uptr end = reinterpret_cast<uptr>(container_end_p);
+
+  // The prefix of the firs granule of the container is unpoisoned.
+  if (beg != end)
+    beg = Max(storage_beg, RoundDownTo(beg, granularity));
+
+  // If the byte after the storage is unpoisoned, the prefix of the last granule
+  // is unpoisoned.
+  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
+                          !AddressIsPoisoned(storage_end))
+                             ? RoundDownTo(storage_end, granularity)
+                             : storage_end;
+  storage_beg = Min(storage_beg, annotations_end);
+  beg = Min(beg, annotations_end);
+  end = Min(end, annotations_end);
+
+  if (auto *bad = FindBadAddress(storage_beg, beg, true))
+    return bad;
+  if (auto *bad = FindBadAddress(beg, end, false))
+    return bad;
+  if (auto *bad = FindBadAddress(end, annotations_end, true))
+    return bad;
+  return FindBadAddress(annotations_end, storage_end, false);
+}
+
+int __sanitizer_verify_double_ended_contiguous_container(
+    const void *storage_beg_p, const void *container_beg_p,
+    const void *container_end_p, const void *storage_end_p) {
+  return __sanitizer_double_ended_contiguous_container_find_bad_address(
+             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
+         nullptr;
+}
+
 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
 void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
   AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
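Note the verifier's argument order: the two storage bounds sit on the outside and the container bounds in the middle; it returns nonzero when the shadow state is consistent. A sketch continuing the annotation example above (again assuming an ASan build):

    #include <sanitizer/common_interface_defs.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      char *buf = static_cast<char *>(malloc(64));
      __sanitizer_annotate_double_ended_contiguous_container(
          buf, buf + 64, buf, buf + 64, buf + 16, buf + 48);
      // Argument order: storage_beg, container_beg, container_end, storage_end.
      assert(__sanitizer_verify_double_ended_contiguous_container(
          buf, buf + 16, buf + 48, buf + 64));
      free(buf);
      return 0;
    }
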
@@ -354,6 +354,18 @@ void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
   in_report.ReportError(error);
 }

+void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+    uptr storage_beg, uptr storage_end, uptr old_container_beg,
+    uptr old_container_end, uptr new_container_beg, uptr new_container_end,
+    BufferedStackTrace *stack) {
+  ScopedInErrorReport in_report;
+  ErrorBadParamsToAnnotateDoubleEndedContiguousContainer error(
+      GetCurrentTidOrInvalid(), stack, storage_beg, storage_end,
+      old_container_beg, old_container_end, new_container_beg,
+      new_container_end);
+  in_report.ReportError(error);
+}
+
 void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                         const __asan_global *g2, u32 stack_id2) {
   ScopedInErrorReport in_report;
@@ -83,6 +83,10 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
 void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                   uptr old_mid, uptr new_mid,
                                                   BufferedStackTrace *stack);
+void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+    uptr storage_beg, uptr storage_end, uptr old_container_beg,
+    uptr old_container_end, uptr new_container_beg, uptr new_container_end,
+    BufferedStackTrace *stack);

 void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                         const __asan_global *g2, u32 stack_id2);
@@ -10,17 +10,18 @@
 //
 // Thread-related code.
 //===----------------------------------------------------------------------===//
+#include "asan_thread.h"
+
 #include "asan_allocator.h"
 #include "asan_interceptors.h"
+#include "asan_mapping.h"
 #include "asan_poisoning.h"
 #include "asan_stack.h"
-#include "asan_thread.h"
-#include "asan_mapping.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include "lsan/lsan_common.h"

 namespace __asan {

@@ -306,6 +307,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
   GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                        &tls_begin_, &tls_size);
   stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
+  stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
   tls_end_ = tls_begin_ + tls_size;
   dtls_ = DTLS_Get();

@@ -478,6 +480,17 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {

 // --- Implementation of LSan-specific functions --- {{{1
 namespace __lsan {
+void LockThreadRegistry() { __asan::asanThreadRegistry().Lock(); }
+
+void UnlockThreadRegistry() { __asan::asanThreadRegistry().Unlock(); }
+
+static ThreadRegistry *GetAsanThreadRegistryLocked() {
+  __asan::asanThreadRegistry().CheckLocked();
+  return &__asan::asanThreadRegistry();
+}
+
+void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }
+
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls) {
@@ -496,33 +509,76 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,

 void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
-                            void *arg) {
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+                                     InternalMmapVector<Range> *ranges) {
   __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
   if (!t)
     return;
   __asan::FakeStack *fake_stack = t->get_fake_stack();
   if (!fake_stack)
     return;
-  fake_stack->ForEachFakeFrame(callback, arg);
+
+  fake_stack->ForEachFakeFrame(
+      [](uptr begin, uptr end, void *arg) {
+        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
+            {begin, end});
+      },
+      ranges);
 }

-void LockThreadRegistry() {
-  __asan::asanThreadRegistry().Lock();
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *arg) {
+        GetThreadExtraStackRangesLocked(
+            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
+      },
+      ranges);
 }

-void UnlockThreadRegistry() {
-  __asan::asanThreadRegistry().Unlock();
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *ptrs) {
+        // Look for the arg pointer of threads that have been created or are
+        // running. This is necessary to prevent false positive leaks due to the
+        // AsanThread holding the only live reference to a heap object. This
+        // can happen because the `pthread_create()` interceptor doesn't wait
+        // for the child thread to start before returning and thus loosing the
+        // the only live reference to the heap object on the stack.
+
+        __asan::AsanThreadContext *atctx =
+            static_cast<__asan::AsanThreadContext *>(tctx);
+
+        // Note ThreadStatusRunning is required because there is a small window
+        // where the thread status switches to `ThreadStatusRunning` but the
+        // `arg` pointer still isn't on the stack yet.
+        if (atctx->status != ThreadStatusCreated &&
+            atctx->status != ThreadStatusRunning)
+          return;
+
+        uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
+        if (!thread_arg)
+          return;
+
+        auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
+        ptrsVec->push_back(thread_arg);
+      },
+      ptrs);
 }

-ThreadRegistry *GetThreadRegistryLocked() {
-  __asan::asanThreadRegistry().CheckLocked();
-  return &__asan::asanThreadRegistry();
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *threads) {
+        if (tctx->status == ThreadStatusRunning)
+          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+              tctx->os_id);
+      },
+      threads);
 }

-void EnsureMainThreadIDIsCorrect() {
-  __asan::EnsureMainThreadIDIsCorrect();
+void FinishThreadLocked(u32 tid) {
+  GetAsanThreadRegistryLocked()->FinishThread(tid);
 }

 }  // namespace __lsan

 // ---------------------- Interface ---------------- {{{1
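All of the new __lsan hooks funnel through RunCallbackForEachThreadLocked with the same idiom: a captureless lambda decays to a plain function pointer, and the output vector travels through the void* side channel. The idiom in isolation (names below are illustrative, not the real registry API):

    #include <vector>

    using ThreadCallback = void (*)(int tid, void *arg);

    static void ForEachThread(ThreadCallback cb, void *arg) {
      for (int tid : {101, 102, 103})  // stand-in for the real registry walk
        cb(tid, arg);
    }

    int main() {
      std::vector<int> running;
      // Captureless lambda -> function pointer; results come back via `arg`.
      ForEachThread(
          [](int tid, void *arg) {
            static_cast<std::vector<int> *>(arg)->push_back(tid);
          },
          &running);
      return 0;
    }
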
@@ -263,12 +263,6 @@ void AsanCheckDynamicRTPrereqs() {}

 void AsanCheckIncompatibleRT() {}

-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
-  UNIMPLEMENTED();
-}
-
-void ResetContextStack(void *context) { UNIMPLEMENTED(); }
-
 void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }

 bool PlatformUnpoisonStacks() { return false; }
@@ -267,7 +267,7 @@
 #define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
   .globl SYMBOL_NAME(name) SEPARATOR \
   SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
-  DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+  DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
   .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR

 #if defined(__ARM_EABI__)
@@ -86,6 +86,9 @@ static void InitializeFlags() {
     cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
     // Sigtrap is used in error reporting.
     cf.handle_sigtrap = kHandleSignalExclusive;
+    // For now only tested on Linux. Other plantforms can be turned on as they
+    // become ready.
+    cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;

 #if SANITIZER_ANDROID
     // Let platform handle other signals. It is better at reporting them then we
@@ -106,6 +109,15 @@ static void InitializeFlags() {
   RegisterHwasanFlags(&parser, f);
   RegisterCommonFlags(&parser);

+#if CAN_SANITIZE_LEAKS
+  __lsan::Flags *lf = __lsan::flags();
+  lf->SetDefaults();
+
+  FlagParser lsan_parser;
+  __lsan::RegisterLsanFlags(&lsan_parser, lf);
+  RegisterCommonFlags(&lsan_parser);
+#endif
+
 #if HWASAN_CONTAINS_UBSAN
   __ubsan::Flags *uf = __ubsan::flags();
   uf->SetDefaults();
@@ -118,12 +130,18 @@ static void InitializeFlags() {
   // Override from user-specified string.
   if (__hwasan_default_options)
     parser.ParseString(__hwasan_default_options());
+#if CAN_SANITIZE_LEAKS
+  lsan_parser.ParseString(__lsan_default_options());
+#endif
 #if HWASAN_CONTAINS_UBSAN
   const char *ubsan_default_options = __ubsan_default_options();
   ubsan_parser.ParseString(ubsan_default_options);
 #endif

   parser.ParseStringFromEnv("HWASAN_OPTIONS");
+#if CAN_SANITIZE_LEAKS
+  lsan_parser.ParseStringFromEnv("LSAN_OPTIONS");
+#endif
 #if HWASAN_CONTAINS_UBSAN
   ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
 #endif
@@ -133,6 +151,12 @@ static void InitializeFlags() {
   if (Verbosity()) ReportUnrecognizedFlags();

   if (common_flags()->help) parser.PrintFlagDescriptions();
+  // Flag validation:
+  if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+    Report("%s: detect_leaks is not supported on this platform.\n",
+           SanitizerToolName);
+    Die();
+  }
 }

 static void CheckUnwind() {
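Taken together, these hunks wire LSan into HWASan: leak flags are registered, parsed from LSAN_OPTIONS, and validated. On a target where this is expected to work (e.g. aarch64 Linux) leak checking can then be exercised roughly like this (a sketch; flag spellings are the standard common-flag names):

    // leak.cpp - build: clang++ -fsanitize=hwaddress leak.cpp  (aarch64 Linux)
    // run:   HWASAN_OPTIONS=detect_leaks=1 ./a.out
    #include <cstdlib>

    int main() {
      malloc(128);  // never freed; reported by the leak checker at exit
      return 0;
    }
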
@@ -368,10 +392,20 @@ __attribute__((constructor(0))) void __hwasan_init() {
   HwasanAllocatorInit();
   HwasanInstallAtForkHandler();

+  if (CAN_SANITIZE_LEAKS) {
+    __lsan::InitCommonLsan();
+    InstallAtExitCheckLeaks();
+  }
+
 #if HWASAN_CONTAINS_UBSAN
   __ubsan::InitAsPlugin();
 #endif

+  if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+    __lsan::ScopedInterceptorDisabler disabler;
+    Symbolizer::LateInitialize();
+  }
+
   VPrintf(1, "HWAddressSanitizer init done\n");

   hwasan_init_is_running = 0;
@@ -519,7 +553,7 @@ void __hwasan_store16_noabort(uptr p) {
 }

 void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
-  TagMemoryAligned(p, sz, tag);
+  TagMemoryAligned(UntagAddr(p), sz, tag);
 }

 uptr __hwasan_tag_pointer(uptr p, u8 tag) {
@@ -144,6 +144,8 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context);

 void HwasanInstallAtForkHandler();

+void InstallAtExitCheckLeaks();
+
 void UpdateMemoryUsage();

 void AppendToErrorMessageBuffer(const char *buffer);
@@ -16,14 +16,25 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mallinfo.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if !SANITIZER_FUCHSIA

using namespace __hwasan;

struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
  static bool UseImpl() { return !hwasan_inited; }
  static void OnAllocate(const void *ptr, uptr size) {
# if CAN_SANITIZE_LEAKS
    // Suppress leaks from dlerror(). Previously, a dlsym hack on a global
    // array was used by the leak sanitizer as a root region.
    __lsan_register_root_region(ptr, size);
# endif
  }
  static void OnFree(const void *ptr, uptr size) {
# if CAN_SANITIZE_LEAKS
    __lsan_unregister_root_region(ptr, size);
# endif
  }
};
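The root-region mechanism used in OnAllocate/OnFree above is the public LSan interface from <sanitizer/lsan_interface.h>. A minimal standalone sketch of the same pattern, assuming a leak-checking build; the buffer and sizes are illustrative only:

#include <sanitizer/lsan_interface.h>
#include <cstdlib>

int main() {
  // An allocation whose only reference lives outside of LSan's view,
  // e.g. handed to a C library that stores it in untracked memory.
  void *buf = malloc(64);

  // Treat [buf, buf + 64) as a root region: anything reachable from it
  // (including buf itself) is not reported as leaked.
  __lsan_register_root_region(buf, 64);

  // ... use buf ...

  // Dropping the registration makes the region leak-checkable again.
  __lsan_unregister_root_region(buf, 64);
  free(buf);
  return 0;
}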

extern "C" {

@@ -143,12 +154,19 @@ void *__sanitizer_malloc(uptr size) {

}  // extern "C"

-#if HWASAN_WITH_INTERCEPTORS
+#if HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA
+// Fuchsia does not use the WRAP/wrapper machinery used by the interceptor
+// infrastructure.
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+    extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+        ARGS) ALIAS("__sanitizer_" #FN)
+#else
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
    extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
        ALIAS("__sanitizer_" #FN); \
    extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
        ARGS) ALIAS("__sanitizer_" #FN)
+#endif

INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
                  SIZE_T size);

@@ -171,5 +189,3 @@ INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
# endif
#endif  // #if HWASAN_WITH_INTERCEPTORS

#endif  // SANITIZER_FUCHSIA
@@ -21,6 +21,7 @@
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

@@ -32,40 +33,39 @@ static atomic_uint8_t hwasan_allocator_tagging_enabled;
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

-enum RightAlignMode {
-  kRightAlignNever,
-  kRightAlignSometimes,
-  kRightAlignAlways
+enum {
+  // Either just allocated by the underlying allocator, but the chunk is not
+  // yet ready, or almost returned to the underlying allocator and the chunk
+  // is already meaningless.
+  CHUNK_INVALID = 0,
+  // The chunk is allocated and not yet freed.
+  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
-  return metadata_ && metadata_->alloc_context_id &&
-         metadata_->get_requested_size();
-}
-
-// Aligns the 'addr' right to the granule boundary.
-static uptr AlignRight(uptr addr, uptr requested_size) {
-  uptr tail_size = requested_size % kShadowAlignment;
-  if (!tail_size) return addr;
-  return addr + kShadowAlignment - tail_size;
+  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
-  if (metadata_ && metadata_->right_aligned)
-    return AlignRight(block_, metadata_->get_requested_size());
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
-  return metadata_->get_requested_size();
+  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
-  return metadata_->alloc_context_id;
+  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
@@ -76,10 +76,58 @@ bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}
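SetAllocated() packs the allocating thread id into the high 32 bits of alloc_context_id and the stack-depot id into the low 32 bits; GetAllocStackId() and GetAllocThreadId() simply split the word back apart. A standalone sketch of the packing arithmetic (helper names hypothetical):

#include <cstdint>
#include <cassert>

// Pack a 32-bit thread id and a 32-bit stack-depot id into one 64-bit word,
// mirroring Metadata::SetAllocated() above.
uint64_t PackContext(uint32_t thread_id, uint32_t stack_id) {
  return (static_cast<uint64_t>(thread_id) << 32) | stack_id;
}

// The accessors recover each half, as GetAllocStackId()/GetAllocThreadId() do.
uint32_t StackId(uint64_t context) { return static_cast<uint32_t>(context); }
uint32_t ThreadId(uint64_t context) { return static_cast<uint32_t>(context >> 32); }

int main() {
  uint64_t ctx = PackContext(/*thread_id=*/7, /*stack_id=*/0xDEADBEEF);
  assert(StackId(ctx) == 0xDEADBEEF);
  assert(ThreadId(ctx) == 7);
  return 0;
}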

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);

@@ -105,6 +153,12 @@ void HwasanAllocatorInit() {
                   GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }
@@ -124,13 +178,16 @@ static uptr TaggedSize(uptr size) {

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
-  if (orig_size > kMaxAllowedMallocSize) {
+  // Keep this consistent with LSAN and ASAN behavior.
+  if (UNLIKELY(orig_size == 0))
+    orig_size = 1;
+  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
-    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
+    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())

@@ -155,11 +212,6 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
-  Metadata *meta =
-      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
-  meta->set_requested_size(orig_size);
-  meta->alloc_context_id = StackDepotPut(*stack);
-  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {

@@ -199,6 +251,13 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
    }
  }

+  Metadata *meta =
+      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+#if CAN_SANITIZE_LEAKS
+  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
+                                                  : __lsan::kDirectlyLeaked);
+#endif
+  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, size);
  return user_ptr;
}
@@ -244,9 +303,10 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
-  uptr orig_size = meta->get_requested_size();
+  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
-  u32 alloc_context_id = meta->alloc_context_id;
+  u32 alloc_context_id = meta->GetAllocStackId();
+  u32 alloc_thread_id = meta->GetAllocThreadId();

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);

@@ -265,8 +325,8 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
                          orig_size, tail_magic);
  }

-  meta->set_requested_size(0);
-  meta->alloc_context_id = 0;
+  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
+  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();

@@ -298,8 +358,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
-      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
-                free_context_id, static_cast<u32>(orig_size)});
+      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
+                alloc_context_id, free_context_id,
+                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;

@@ -322,7 +383,7 @@ static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
-        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
+        Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
@@ -348,19 +409,30 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}
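AllocationBegin() returns the block start retagged with the querying pointer's tag, so the result stays dereferenceable under the caller's view of the object. A minimal sketch of the top-byte tag arithmetic, assuming AArch64 TBI-style tagging with the tag in bits [63:56] (constants and helper names illustrative):

#include <cstdint>
#include <cassert>

constexpr unsigned kTagShift = 56;  // top byte of a 64-bit pointer
constexpr uint64_t kTagMask = 0xFFULL << kTagShift;

uint8_t GetTag(uint64_t p) { return static_cast<uint8_t>(p >> kTagShift); }

uint64_t AddTag(uint64_t p, uint8_t tag) {
  return (p & ~kTagMask) | (static_cast<uint64_t>(tag) << kTagShift);
}

int main() {
  uint64_t untagged = 0x00007f0012345678ULL;
  uint64_t tagged = AddTag(untagged, 0xAB);
  assert(GetTag(tagged) == 0xAB);
  assert(AddTag(tagged, 0) == untagged);  // stripping the tag recovers the address
  return 0;
}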

static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
-  if (b->right_aligned) {
-    if (beg != reinterpret_cast<void *>(RoundDownTo(
-                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
-      return 0;
-  } else {
-    if (beg != untagged_ptr) return 0;
-  }
-  return b->get_requested_size();
+  if (beg != untagged_ptr) return 0;
+  return b->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
@@ -451,6 +523,122 @@ void hwasan_free(void *ptr, StackTrace *stack) {

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

uptr PointsIntoChunk(void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

uptr GetUserAddr(uptr chunk) {
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  if (!__hwasan::InTaggableRegion(chunk))
    return chunk;
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
@@ -481,4 +669,8 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
@@ -17,6 +17,7 @@
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"

@@ -31,18 +32,25 @@
namespace __hwasan {

struct Metadata {
+ private:
+  atomic_uint64_t alloc_context_id;
  u32 requested_size_low;
-  u32 requested_size_high : 31;
-  u32 right_aligned : 1;
-  u32 alloc_context_id;
-  u64 get_requested_size() {
-    return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
-  }
-  void set_requested_size(u64 size) {
-    requested_size_low = size & ((1ul << 32) - 1);
-    requested_size_high = size >> 32;
-  }
+  u16 requested_size_high;
+  atomic_uint8_t chunk_state;
+  u8 lsan_tag;
+
+ public:
+  inline void SetAllocated(u32 stack, u64 size);
+  inline void SetUnallocated();
+
+  inline bool IsAllocated() const;
+  inline u64 GetRequestedSize() const;
+  inline u32 GetAllocStackId() const;
+  inline u32 GetAllocThreadId() const;
+  inline void SetLsanTag(__lsan::ChunkTag tag);
+  inline __lsan::ChunkTag GetLsanTag() const;
};
+static_assert(sizeof(Metadata) == 16);
struct HwasanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }

@@ -61,15 +69,21 @@ struct AP64 {

#if defined(HWASAN_ALIASING_MODE)
  static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+  typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_LINUX && !SANITIZER_ANDROID
+  static const uptr kSpaceSize = 0x40000000000ULL;  // 4T.
+  typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
#else
-  static const uptr kSpaceSize = 0x2000000000ULL;
-#endif
-  static const uptr kMetadataSize = sizeof(Metadata);
+  static const uptr kSpaceSize = 0x2000000000ULL;  // 128G.
+  typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
+#endif
+
+  static const uptr kMetadataSize = sizeof(Metadata);
  using AddressSpaceView = LocalAddressSpaceView;
  typedef HwasanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
@@ -87,8 +101,12 @@ class HwasanChunkView {
  uptr UsedSize() const;    // Size requested by the user
  uptr ActualSize() const;  // Size allocated by the allocator.
  u32 GetAllocStackId() const;
  u32 GetAllocThreadId() const;
  bool FromSmallHeap() const;
  bool AddrIsInside(uptr addr) const;

 private:
  friend class __lsan::LsanMetadata;
  uptr block_;
  Metadata *const metadata_;
};
@@ -97,13 +115,12 @@ HwasanChunkView FindHeapChunkByAddress(uptr address);

// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
-// TODO: this is currently 24 bytes (20 bytes + alignment).
-// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
  uptr tagged_addr;
+  u32 alloc_thread_id;
  u32 alloc_context_id;
  u32 free_context_id;
  u32 requested_size;
};

typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
@@ -15,17 +15,49 @@

#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_registers.h"
#include "sanitizer_common/sanitizer_common.h"

namespace __hwasan {
-template <unsigned X>

enum class ErrorAction { Abort, Recover };
enum class AccessType { Load, Store };

// Used when the access size is known.
constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
                                   unsigned LogSize) {
  return 0x20 * (EA == ErrorAction::Recover) +
         0x10 * (AT == AccessType::Store) + LogSize;
}

// Used when the access size varies at runtime.
constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT) {
  return SigTrapEncoding(EA, AT, 0xf);
}
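The encoding packs the error action, the access type, and log2 of the access size into one small trap immediate that the handler decodes later. A few spot checks of the scheme, mirroring the constexpr above (standalone, C++17):

#include <cstdint>

enum class ErrorAction { Abort, Recover };
enum class AccessType { Load, Store };

constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
                                   unsigned LogSize) {
  return 0x20 * (EA == ErrorAction::Recover) +
         0x10 * (AT == AccessType::Store) + LogSize;
}

// An aborting 1-byte load (log size 0) encodes to 0.
static_assert(SigTrapEncoding(ErrorAction::Abort, AccessType::Load, 0) == 0x00);
// A recovering 8-byte store (log size 3) sets both flag bits: 0x20 + 0x10 + 3.
static_assert(SigTrapEncoding(ErrorAction::Recover, AccessType::Store, 3) == 0x33);
// Variable-size accesses use the reserved log-size 0xf.
static_assert(SigTrapEncoding(ErrorAction::Abort, AccessType::Store, 0xf) == 0x1f);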

template <ErrorAction EA, AccessType AT, size_t LogSize>
__attribute__((always_inline)) static void SigTrap(uptr p) {
-#if defined(__aarch64__)
+  // Other platforms like Linux can use signals for intercepting an exception
+  // and dispatching to HandleTagMismatch. The Fuchsia implementation doesn't
+  // use signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+  auto regs = GetRegisters();
+  size_t size = 2 << LogSize;
+  AccessInfo access_info = {
+      .addr = p,
+      .size = size,
+      .is_store = AT == AccessType::Store,
+      .is_load = AT == AccessType::Load,
+      .recover = EA == ErrorAction::Recover,
+  };
+  HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+                    (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
  (void)p;
  // 0x900 is added so as not to interfere with the kernel's use of lower
  // values of the brk immediate.
  register uptr x0 asm("x0") = p;
-  asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
+  asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + SigTrapEncoding(EA, AT, LogSize)));
#elif defined(__x86_64__)
  // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
  // total. The pointer is passed via rdi.

@@ -34,7 +66,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
  // different nop command, the three bytes one).
  asm volatile(
      "int3\n"
-      "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+      "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT, LogSize)),
      "D"(p));
#elif SANITIZER_RISCV64
  // Put pointer into x10

@@ -44,7 +76,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
  asm volatile(
      "ebreak\n"
      "addiw x0, x0, %1\n" ::"r"(x10),
-      "I"(0x40 + X));
+      "I"(0x40 + SigTrapEncoding(EA, AT, LogSize)));
#else
  // FIXME: not always sigill.
  __builtin_trap();
@@ -53,17 +85,31 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
}

// Version with an access size that is not a power of 2.
-template <unsigned X>
+template <ErrorAction EA, AccessType AT>
__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
-#if defined(__aarch64__)
+  // Other platforms like Linux can use signals for intercepting an exception
+  // and dispatching to HandleTagMismatch. The Fuchsia implementation doesn't
+  // use signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+  auto regs = GetRegisters();
+  AccessInfo access_info = {
+      .addr = p,
+      .size = size,
+      .is_store = AT == AccessType::Store,
+      .is_load = AT == AccessType::Load,
+      .recover = EA == ErrorAction::Recover,
+  };
+  HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+                    (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
  register uptr x0 asm("x0") = p;
  register uptr x1 asm("x1") = size;
-  asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
+  asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + SigTrapEncoding(EA, AT)));
#elif defined(__x86_64__)
  // Size is stored in rsi.
  asm volatile(
      "int3\n"
-      "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+      "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT)),
      "D"(p), "S"(size));
#elif SANITIZER_RISCV64
  // Put access size into x11

@@ -72,7 +118,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
  asm volatile(
      "ebreak\n"
      "addiw x0, x0, %2\n" ::"r"(x10),
-      "r"(x11), "I"(0x40 + X));
+      "r"(x11), "I"(0x40 + SigTrapEncoding(EA, AT)));
#else
  __builtin_trap();
#endif
@@ -94,9 +140,6 @@ __attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
  return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
}

-enum class ErrorAction { Abort, Recover };
-enum class AccessType { Load, Store };

template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
  if (!InTaggableRegion(p))

@@ -104,8 +147,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
  uptr ptr_raw = p & ~kAddressTagMask;
  tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
  if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
-    SigTrap<0x20 * (EA == ErrorAction::Recover) +
-            0x10 * (AT == AccessType::Store) + LogSize>(p);
+    SigTrap<EA, AT, LogSize>(p);
    if (EA == ErrorAction::Abort)
      __builtin_unreachable();
  }

@@ -122,8 +164,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
  tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
  for (tag_t *t = shadow_first; t < shadow_last; ++t)
    if (UNLIKELY(ptr_tag != *t)) {
-      SigTrap<0x20 * (EA == ErrorAction::Recover) +
-              0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+      SigTrap<EA, AT>(p, sz);
      if (EA == ErrorAction::Abort)
        __builtin_unreachable();
    }

@@ -132,8 +173,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
  if (UNLIKELY(tail_sz != 0 &&
               !PossiblyShortTagMatches(
                   *shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
-    SigTrap<0x20 * (EA == ErrorAction::Recover) +
-            0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+    SigTrap<EA, AT>(p, sz);
    if (EA == ErrorAction::Abort)
      __builtin_unreachable();
  }
@@ -23,6 +23,9 @@ HWASAN_FLAG(bool, tag_in_free, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
HWASAN_FLAG(
    bool, print_live_threads_info, true,
    "If set, prints the remaining threads in the report as extra information.")

// Test-only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
@@ -185,6 +185,8 @@ void InstallAtExitHandler() {}

void HwasanInstallAtForkHandler() {}

void InstallAtExitCheckLeaks() {}

void InitializeOsSupport() {
#ifdef __aarch64__
  uint32_t features = 0;

@@ -202,6 +204,12 @@ void InitializeOsSupport() {

}  // namespace __hwasan

namespace __lsan {

bool UseExitcodeOnLeak() { return __hwasan::flags()->halt_on_error; }

}  // namespace __lsan

extern "C" {

void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
@@ -39,11 +39,19 @@ static void *HwasanThreadStartFunc(void *arg) {

INTERCEPTOR(int, pthread_create, void *th, void *attr,
            void *(*callback)(void *), void *param) {
-  ScopedTaggingDisabler disabler;
+  EnsureMainThreadIDIsCorrect();
+  ScopedTaggingDisabler tagging_disabler;
  ThreadStartArg *A = reinterpret_cast<ThreadStartArg *>(MmapOrDie(
      GetPageSizeCached(), "pthread_create"));
  *A = {callback, param};
-  int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
+  int res;
+  {
+    // ASan uses the same approach to suppress leaks from pthread_create.
+# if CAN_SANITIZE_LEAKS
+    __lsan::ScopedInterceptorDisabler lsan_disabler;
+# endif
+    res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
+  }
  return res;
}
@@ -220,6 +228,10 @@ INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
namespace __hwasan {

int OnExit() {
  if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
      __lsan::HasReportedLeaks()) {
    return common_flags()->exitcode;
  }
  // FIXME: ask frontend whether we need to return failure.
  return 0;
}
@@ -541,6 +541,17 @@ void HwasanInstallAtForkHandler() {
  pthread_atfork(before, after, after);
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
@@ -92,6 +92,14 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
    void *ptr, std::nothrow_t const &) {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
    void *ptr, size_t) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
    void *ptr, size_t) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}

#endif  // OPERATOR_NEW_BODY

@@ -134,5 +142,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
    void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
    void *ptr, size_t, std::align_val_t) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
    void *ptr, size_t, std::align_val_t) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
    void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
    void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
  OPERATOR_DELETE_BODY;
}

#endif  // OPERATOR_NEW_ALIGN_BODY
@@ -26,3 +26,11 @@ uptr TagMemory(uptr p, uptr size, tag_t tag) {
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  // FIXME: implement actual tag checking.
  return false;
}
}  // namespace __lsan
libsanitizer/hwasan/hwasan_registers.h (new file, 56 lines)

@@ -0,0 +1,56 @@
//===-- hwasan_registers.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the register state retrieved by hwasan for error reporting.
//
//===----------------------------------------------------------------------===//

#ifndef HWASAN_REGISTERS_H
#define HWASAN_REGISTERS_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_platform.h"

#if defined(__aarch64__)

# define CAN_GET_REGISTERS 1

struct Registers {
  uptr x[32];
};

__attribute__((always_inline, unused)) static Registers GetRegisters() {
  Registers regs;
  __asm__ volatile(
      "stp x0, x1, [%1, #(8 * 0)]\n"
      "stp x2, x3, [%1, #(8 * 2)]\n"
      "stp x4, x5, [%1, #(8 * 4)]\n"
      "stp x6, x7, [%1, #(8 * 6)]\n"
      "stp x8, x9, [%1, #(8 * 8)]\n"
      "stp x10, x11, [%1, #(8 * 10)]\n"
      "stp x12, x13, [%1, #(8 * 12)]\n"
      "stp x14, x15, [%1, #(8 * 14)]\n"
      "stp x16, x17, [%1, #(8 * 16)]\n"
      "stp x18, x19, [%1, #(8 * 18)]\n"
      "stp x20, x21, [%1, #(8 * 20)]\n"
      "stp x22, x23, [%1, #(8 * 22)]\n"
      "stp x24, x25, [%1, #(8 * 24)]\n"
      "stp x26, x27, [%1, #(8 * 26)]\n"
      "stp x28, x29, [%1, #(8 * 28)]\n"
      : "=m"(regs)
      : "r"(regs.x));
  regs.x[30] = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
  regs.x[31] = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
  return regs;
}

#else
# define CAN_GET_REGISTERS 0
#endif

#endif  // HWASAN_REGISTERS_H
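Note that the stp sequence stops at x29; the x30 (link register) and x31 slots are filled from compiler builtins instead. A hedged sketch of a consumer of this header (the print helper is hypothetical, and it must run in the frame of interest since GetRegisters() is always_inline):

// Sketch only: assumes an AArch64 build where hwasan_registers.h defines
// CAN_GET_REGISTERS and GetRegisters() as shown above.
#include "hwasan_registers.h"

#if CAN_GET_REGISTERS
static void ReportCurrentRegisters() {
  // The snapshot reflects this function's registers at the call site.
  Registers regs = GetRegisters();
  for (int i = 0; i < 32; ++i)
    __sanitizer::Printf("x%d = 0x%zx\n", i, regs.x[i]);
}
#endif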

@@ -102,6 +102,15 @@ static StackTrace GetStackTraceFromId(u32 id) {
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
@@ -322,7 +331,7 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
-    Printf("allocated here:\n");
+    Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;

@@ -464,12 +473,12 @@ void PrintAddressDescription(
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
-      Printf("freed by thread T%zd here:\n", t->unique_id());
+      Printf("freed by thread T%u here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();

      Printf("%s", d.Allocation());
-      Printf("previously allocated here:\n", t);
+      Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();

@@ -492,7 +501,8 @@ void PrintAddressDescription(
  }

  // Print the remaining threads as extra information, one line per thread.
-  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+  if (flags()->print_live_threads_info)
+    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.

@@ -600,6 +610,7 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  if (tag_ptr)
    PrintTagsAroundAddr(tag_ptr);

+  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

@@ -673,6 +684,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

+  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

@@ -742,6 +754,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
  if (registers_frame)
    ReportRegisters(registers_frame, pc);

+  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
@@ -36,7 +36,7 @@ ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
  CFI_STARTPROC
  addi x11, x0, 0
-  j __interceptor_sigsetjmp
+  tail __interceptor_sigsetjmp
  CFI_ENDPROC
ASM_SIZE(__interceptor_setjmp)
@@ -5,6 +5,7 @@
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"

@@ -43,6 +44,8 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+  if (!IsMainThread())
+    os_id_ = GetTid();

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

@@ -54,6 +57,7 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
    InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
+  dtls_ = DTLS_Get();
}

void Thread::InitStackRingBuffer(uptr stack_buffer_start,

@@ -147,4 +151,58 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
  return tag;
}

void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}

void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }

void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }

void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}

}  // namespace __lsan
@@ -46,6 +46,7 @@ class Thread {
  uptr stack_size() { return stack_top() - stack_bottom(); }
  uptr tls_begin() { return tls_begin_; }
  uptr tls_end() { return tls_end_; }
+  DTLS *dtls() { return dtls_; }
  bool IsMainThread() { return unique_id_ == 0; }

  bool AddrIsInStack(uptr addr) {

@@ -61,13 +62,16 @@ class Thread {
  void DisableTagging() { tagging_disabled_++; }
  void EnableTagging() { tagging_disabled_--; }

-  u64 unique_id() const { return unique_id_; }
+  u32 unique_id() const { return unique_id_; }
  void Announce() {
    if (announced_) return;
    announced_ = true;
    Print("Thread: ");
  }

+  tid_t os_id() const { return os_id_; }
+  void set_os_id(tid_t os_id) { os_id_ = os_id; }

  uptr &vfork_spill() { return vfork_spill_; }

 private:

@@ -81,6 +85,7 @@ class Thread {
  uptr stack_bottom_;
  uptr tls_begin_;
  uptr tls_end_;
+  DTLS *dtls_;

  u32 random_state_;
  u32 random_buffer_;

@@ -89,7 +94,9 @@ class Thread {
  HeapAllocationsRingBuffer *heap_allocations_;
  StackAllocationsRingBuffer *stack_allocations_;

-  u64 unique_id_;  // counting from zero.
+  u32 unique_id_;  // counting from zero.
+
+  tid_t os_id_;

  u32 tagging_disabled_;  // if non-zero, malloc uses zero tag in this thread.

@@ -103,6 +110,9 @@ class Thread {
Thread *GetCurrentThread();
uptr *GetCurrentThreadLongPtr();

// Used to handle fork().
void EnsureMainThreadIDIsCorrect();

struct ScopedTaggingDisabler {
  ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
  ~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
@@ -71,7 +71,7 @@ struct ThreadStats {
  uptr total_stack_size;
};

-class HwasanThreadList {
+class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {

@@ -85,7 +85,8 @@ class HwasanThreadList {
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }

-  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
+  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
+      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);

@@ -114,7 +115,8 @@ class HwasanThreadList {
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

-  void RemoveThreadFromLiveList(Thread *t) {
+  void RemoveThreadFromLiveList(Thread *t)
+      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {

@@ -127,7 +129,7 @@ class HwasanThreadList {
    CHECK(0 && "thread not found in live list");
  }

-  void ReleaseThread(Thread *t) {
+  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    t->Destroy();
    DontNeedThread(t);

@@ -149,30 +151,47 @@ class HwasanThreadList {
  }

  template <class CB>
-  void VisitAllLiveThreads(CB cb) {
+  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

-  void AddThreadStats(Thread *t) {
+  template <class CB>
+  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(stats_mutex_) {
+    CheckLocked();
+    for (Thread *t : live_list_)
+      if (cb(t))
+        return t;
+    return nullptr;
+  }
+
+  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

-  void RemoveThreadStats(Thread *t) {
+  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

-  ThreadStats GetThreadStats() {
+  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

+  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+    live_list_mutex_.CheckLocked();
+  }
+  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+    live_list_mutex_.Unlock();
+  }

 private:
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);

@@ -191,12 +210,14 @@ class HwasanThreadList {
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
-  InternalMmapVector<Thread *> free_list_;
+  InternalMmapVector<Thread *> free_list_
+      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
-  InternalMmapVector<Thread *> live_list_;
+  InternalMmapVector<Thread *> live_list_
+      SANITIZER_GUARDED_BY(live_list_mutex_);

-  ThreadStats stats_;
  SpinMutex stats_mutex_;
+  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};
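The SANITIZER_MUTEX, SANITIZER_EXCLUDES, SANITIZER_GUARDED_BY, SANITIZER_ACQUIRE/SANITIZER_RELEASE, and SANITIZER_CHECK_LOCKED macros used above wrap Clang's -Wthread-safety attributes. A minimal standalone illustration of the same idea with the raw attributes (the Mutex wrapper is a simplified stand-in for SpinMutex; compile with clang++ -Wthread-safety):

#include <mutex>

// Simplified capability wrapper; the sanitizer runtime uses SpinMutex instead.
class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

class Counter {
 public:
  // locks_excluded: the caller must not already hold the mutex.
  void Increment() __attribute__((locks_excluded(mu_))) {
    mu_.Lock();
    value_++;  // OK: the analysis sees the mutex is held here.
    mu_.Unlock();
  }

 private:
  Mutex mu_;
  int value_ __attribute__((guarded_by(mu_))) = 0;  // like SANITIZER_GUARDED_BY
};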

void InitThreadList(uptr storage, uptr size);
@@ -26,6 +26,10 @@ extern "C" {
     is not yet freed. */
  int __sanitizer_get_ownership(const volatile void *p);

  /* If a pointer lies within an allocation, it will return the start address
     of the allocation. Otherwise, it returns nullptr. */
  const void *__sanitizer_get_allocated_begin(const void *p);

  /* Returns the number of bytes reserved for the pointer p.
     Requires (get_ownership(p) == true) or (p == 0). */
  size_t __sanitizer_get_allocated_size(const volatile void *p);
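Together with __sanitizer_get_ownership(), the __sanitizer_get_allocated_begin() entry point added by this merge lets a program map an interior pointer back to its allocation. A usage sketch, assuming a sanitizer-enabled build (the sizes are illustrative):

#include <sanitizer/allocator_interface.h>
#include <cassert>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(malloc(100));

  // An interior pointer maps back to the allocation's begin and full size.
  assert(__sanitizer_get_ownership(p + 50));
  assert(__sanitizer_get_allocated_begin(p + 50) == p);
  assert(__sanitizer_get_allocated_size(p) == 100);

  free(p);
  return 0;
}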
@@ -159,6 +159,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg,
                                               const void *old_mid,
                                               const void *new_mid);

/// Similar to <c>__sanitizer_annotate_contiguous_container</c>.
///
/// Annotates the current state of a contiguous container memory,
/// such as <c>std::deque</c>'s single chunk, when the boundaries are moved.
///
/// A contiguous chunk is a chunk that keeps all of its elements
/// in a contiguous region of memory. The container owns the region of memory
/// <c>[storage_beg, storage_end)</c>; the memory <c>[container_beg,
/// container_end)</c> is used to store the current elements, and the memory
/// <c>[storage_beg, container_beg), [container_end, storage_end)</c> is
/// reserved for future elements (<c>storage_beg <= container_beg <=
/// container_end <= storage_end</c>). For example, in <c>std::deque</c>:
/// - the chunk holding the first element of the deque has container_beg equal
///   to the address of the first element;
/// - in every subsequent chunk with elements, <c>container_beg ==
///   storage_beg</c> holds.
///
/// Argument requirements:
/// When unpoisoning the memory of an empty container (before the first
/// element is added):
/// - old_container_beg_p == old_container_end_p
/// When poisoning after the last element was removed:
/// - new_container_beg_p == new_container_end_p
/// \param storage_beg Beginning of memory region.
/// \param storage_end End of memory region.
/// \param old_container_beg Old beginning of used region.
/// \param old_container_end Old end of used region.
/// \param new_container_beg New beginning of used region.
/// \param new_container_end New end of used region.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg, const void *storage_end,
    const void *old_container_beg, const void *old_container_end,
    const void *new_container_beg, const void *new_container_end);
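A usage sketch for a single deque-like chunk, assuming a sanitizer-enabled build (the chunk size and boundary moves are illustrative only):

#include <sanitizer/common_interface_defs.h>

// Illustrative only: a 64-byte chunk whose live elements occupy [16, 48),
// shrinking from the front by 8 bytes, as a deque pop_front might.
void ShrinkFront(char *storage) {
  char *storage_end = storage + 64;
  __sanitizer_annotate_double_ended_contiguous_container(
      storage, storage_end,
      /*old_container_beg=*/storage + 16, /*old_container_end=*/storage + 48,
      /*new_container_beg=*/storage + 24, /*new_container_end=*/storage + 48);
}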

/// Returns true if the contiguous container <c>[beg, end)</c> is properly
/// poisoned.
///

@@ -178,6 +212,31 @@ void __sanitizer_annotate_contiguous_container(const void *beg,
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
                                            const void *end);

/// Returns true if the double-ended contiguous
/// container <c>[storage_beg, storage_end)</c> is properly poisoned.
///
/// Proper poisoning could occur, for example, with
/// <c>__sanitizer_annotate_double_ended_contiguous_container</c>; that is, if
/// <c>[storage_beg, container_beg)</c> is not addressable, <c>[container_beg,
/// container_end)</c> is addressable and <c>[container_end, end)</c> is
/// unaddressable. Full verification requires O(<c>storage_end -
/// storage_beg</c>) time; this function tries to avoid such complexity by
/// touching only parts of the container around <c><i>storage_beg</i></c>,
/// <c><i>container_beg</i></c>, <c><i>container_end</i></c>, and
/// <c><i>storage_end</i></c>.
///
/// \param storage_beg Beginning of memory region.
/// \param container_beg Beginning of used region.
/// \param container_end End of used region.
/// \param storage_end End of memory region.
///
/// \returns True if the double-ended contiguous container <c>[storage_beg,
/// container_beg, container_end, end)</c> is properly poisoned - only
/// [container_beg; container_end) is addressable.
int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg, const void *container_beg,
    const void *container_end, const void *storage_end);

/// Similar to <c>__sanitizer_verify_contiguous_container()</c> but also
/// returns the address of the first improperly poisoned byte.
///
@@ -192,6 +251,20 @@ const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
                                                              const void *mid,
                                                              const void *end);

/// Similar to <c>__sanitizer_verify_double_ended_contiguous_container()</c>
/// but also returns the address of the first improperly poisoned byte.
///
/// Returns NULL if the area is poisoned properly.
///
/// \param storage_beg Beginning of memory region.
/// \param container_beg Beginning of used region.
/// \param container_end End of used region.
/// \param storage_end End of memory region.
///
/// \returns The bad address or NULL.
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg, const void *container_beg,
    const void *container_end, const void *storage_end);

/// Prints the stack trace leading to this call (useful for calling from the
/// debugger).
void __sanitizer_print_stack_trace(void);
@@ -31,6 +31,14 @@ typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
                                             dfsan_origin origin);

/// Signature of the callback argument to dfsan_set_reaches_function_callback().
/// The description is intended to hold the name of the variable.
typedef void (*dfsan_reaches_function_callback_t)(dfsan_label label,
                                                  dfsan_origin origin,
                                                  const char *file,
                                                  unsigned int line,
                                                  const char *function);

/// Computes the union of \c l1 and \c l2, resulting in a union label.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);

@@ -91,6 +99,18 @@ void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
/// This function returns all label bits seen in signal handler conditions.
dfsan_label dfsan_get_labels_in_signal_conditional();

/// Sets a callback to be invoked when tainted data reaches a function.
/// This could occur at function entry, or at a load instruction.
/// These callbacks will only be added if -dfsan-reaches-function-callbacks=1.
void dfsan_set_reaches_function_callback(
    dfsan_reaches_function_callback_t callback);

/// Making callbacks that handle signals well is tricky, so when
/// -dfsan-reaches-function-callbacks=true, functions reached in signal
/// handlers will add the labels they see into a global (bitwise-ORed
/// together). This function returns all label bits seen during signal
/// handlers.
dfsan_label dfsan_get_labels_in_signal_reaches_function();
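A hedged sketch of registering the callback declared above (the handler body is illustrative; assumes a build with -fsanitize=dataflow and the -mllvm -dfsan-reaches-function-callbacks=1 instrumentation flag):

#include <sanitizer/dfsan_interface.h>
#include <cstdio>

// Matches dfsan_reaches_function_callback_t; logs where tainted data lands.
static void OnTaintReachesFunction(dfsan_label label, dfsan_origin origin,
                                   const char *file, unsigned int line,
                                   const char *function) {
  fprintf(stderr, "tainted label %u reached %s (%s:%u)\n", (unsigned)label,
          function, file, line);
}

int main() {
  dfsan_set_reaches_function_callback(OnTaintReachesFunction);
  // ... run the tainted computation ...
  return 0;
}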

/// Interceptor hooks.
/// Whenever one of dfsan's custom functions is called, the corresponding
/// hook, if non-zero, is called. The hooks should be defined by the user.

@@ -1,4 +1,4 @@
-//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
+//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -172,6 +172,12 @@ int __tsan_on_finalize(int failed);
// Release TSan internal memory in a best-effort manner.
void __tsan_flush_memory();

// User-provided default TSAN options.
const char *__tsan_default_options(void);

// User-provided default TSAN suppressions.
const char *__tsan_default_suppressions(void);

#ifdef __cplusplus
}  // extern "C"
#endif
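Both hooks are meant to be defined by the instrumented program itself; TSan consults them at startup, before reading the TSAN_OPTIONS environment variable. A typical definition (the option and suppression strings are illustrative):

// Compiled into a program built with -fsanitize=thread.
extern "C" const char *__tsan_default_options(void) {
  return "halt_on_error=1:second_deadlock_stack=1";
}

extern "C" const char *__tsan_default_suppressions(void) {
  return "race:third_party/legacy_queue.cc\n";  // path is illustrative
}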

@@ -24,9 +24,9 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));

-#if !SANITIZER_APPLE
+# if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
-#endif
+# endif

// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
@@ -738,7 +738,7 @@ bool OverrideFunctionWithRedirectJump(
    return false;

  if (orig_old_func) {
-    uptr relative_offset = *(u32 *)(old_func + 1);
+    sptr relative_offset = *(s32 *)(old_func + 1);
    uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
    *orig_old_func = absolute_target;
  }
@@ -36,7 +36,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
    uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
  using namespace __lsan;
  uptr stack_top = 0, stack_bottom = 0;
-  if (ThreadContext *t = CurrentThreadContext()) {
+  if (ThreadContextLsanBase *t = GetCurrentThread()) {
    stack_top = t->stack_end();
    stack_bottom = t->stack_begin();
  }

@@ -145,6 +145,22 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *end = *begin + sizeof(AllocatorCache);
}

static const void *GetMallocBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  ChunkMetadata *m = Metadata(beg);
  if (!m)
    return nullptr;
  if (!m->allocated)
    return nullptr;
  if (m->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

uptr GetMallocUsableSize(const void *p) {
  if (!p)
    return 0;

@@ -275,6 +291,10 @@ uptr GetUserBegin(uptr chunk) {
  return chunk;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);

@@ -304,7 +324,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
IgnoreObjectResult IgnoreObject(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);

@@ -319,7 +339,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  }
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  // This function can be used to treat memory reachable from `tctx` as live.
  // This is useful for threads that have been created but not yet started.

@@ -359,6 +379,11 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_get_allocated_begin(const void *p) {
  return GetMallocBegin(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
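
A usage sketch for the new interface (assumes a build with a sanitizer that exports it, e.g. standalone LSan; note it accepts interior pointers, not just the block start):

    #include <sanitizer/allocator_interface.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(64));
      // Recovers the block start from a pointer into the middle of it.
      const void *begin = __sanitizer_get_allocated_begin(p + 10);
      assert(begin == p);
      assert(__sanitizer_get_allocated_size(begin) >= 64);
      free(p);
    }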

@@ -68,13 +68,13 @@ using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#  if SANITIZER_FUCHSIA || defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
#elif defined(__s390x__)
const uptr kAllocatorSpace = 0x40000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
#  else
#  elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
#  endif
#  else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
#  endif
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;

@@ -270,13 +270,20 @@ static inline bool MaybeUserPointer(uptr p) {
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: add logic similar to ARM when Intel LAM is available.
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p >> 48) == 0);
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA for now.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
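
The effect of the new mask, as a standalone sketch of just the check (not the runtime code): bits [55:48] must be zero for a 48-bit VMA, while the TBI tag bits [63:56] now pass through, so heap objects referenced only via tagged pointers stop being misreported as leaks:

    #include <cstdint>

    bool MaybeUserPointerAArch64(uint64_t p) {
      constexpr uint64_t kPointerMask = 255ULL << 48;  // bits [55:48]
      return (p & kPointerMask) == 0;  // tag in bits [63:56] is ignored
    }
    // E.g. 0xb400000012345678 (tag 0xb4) now passes, whereas the old
    // `(p >> 48) == 0` check rejected it.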

@@ -350,9 +357,12 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

#  if SANITIZER_FUCHSIA

@@ -371,8 +381,7 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      GetAdditionalThreadContextPtrs, &ptrs);
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);

@@ -395,6 +404,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);

@@ -455,7 +465,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {

@@ -669,18 +681,6 @@ void LeakSuppressionContext::PrintMatchedSuppressions() {
  Printf("%s\n\n", line);
}

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
  }
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are

@@ -697,8 +697,16 @@ static void ReportUnsuspendedThreads(

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

#  endif  // !SANITIZER_FUCHSIA

@@ -741,8 +749,11 @@ static bool PrintResults(LeakReport &report) {
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
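
The new branch makes the opt-out visible under verbosity=1 instead of a silent skip. A sketch of the user-side weak hook it keys off:

    #include <sanitizer/lsan_interface.h>

    // Consulted at the end-of-process check; returning non-zero disables
    // leak detection for this run.
    extern "C" int __lsan_is_turned_off(void) { return 1; }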

@@ -852,7 +863,7 @@ void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }

@@ -937,7 +948,7 @@ void LeakReport::PrintSummary() {

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = false;
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {

@@ -986,7 +997,7 @@ void __lsan_ignore_object(const void *p) {
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
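
Usage sketch for the public entry point that now calls the renamed helper:

    #include <sanitizer/lsan_interface.h>
    #include <cstdlib>

    int main() {
      void *deliberately_leaked = malloc(128);
      // Marks the chunk kIgnored, so the exit-time check skips it.
      __lsan_ignore_object(deliberately_leaked);
      return 0;
    }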

@@ -21,6 +21,7 @@
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

// LeakSanitizer relies on some of Glibc's internals (e.g. TLS machinery) on
// Linux. Also, LSan doesn't like 32-bit architectures

@@ -42,6 +43,8 @@
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA

@@ -57,6 +60,9 @@ class ThreadContextBase;
struct DTLS;
}

// This section defines function and class prototypes which must be implemented
// by the parent tool linking in LSan. There are implementations provided by the
// LSan library which will be linked in when LSan is used as a standalone tool.
namespace __lsan {

// Chunk tags.

@@ -67,6 +73,106 @@ enum ChunkTag {
  kIgnored = 3
};

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

struct Range {
  uptr begin;
  uptr end;
};

//// --------------------------------------------------------------------------
//// Poisoning prototypes.
//// --------------------------------------------------------------------------

// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);

//// --------------------------------------------------------------------------
//// Thread prototypes.
//// --------------------------------------------------------------------------

// Wrappers for ThreadRegistry access.
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);

//// --------------------------------------------------------------------------
//// Allocator prototypes.
//// --------------------------------------------------------------------------

// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();

// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Returns user-visible address for chunk. If memory tagging is used this
// function will return the tagged address.
uptr GetUserAddr(uptr chunk);

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;

 private:
  void *metadata_;
};

// Iterate over all existing chunks. Allocator must be locked.
void ForEachChunk(ForEachChunkCallback callback, void *arg);

// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObject(const void *p);

// The rest of the LSan interface, which is implemented by the library.

struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
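
A sketch of the intended call pattern for the RAII lock (hypothetical caller; the real one is the leak-check driver):

    void CheckOnce() {  // hypothetical
      __lsan::ScopedStopTheWorldLock lock;  // LockThreadRegistry(), LockAllocator()
      // ... suspend threads, scan roots, classify chunks ...
    }  // unlocks in reverse order: allocator first, then thread registry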

@@ -153,8 +259,6 @@ struct CheckForLeaksParam {
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,

@@ -164,12 +268,8 @@ void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier);

// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();

@@ -221,57 +321,6 @@ inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
#endif
}

// The following must be implemented in the parent tool.

void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};

ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

@@ -281,20 +330,6 @@ bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;
 private:
  void *metadata_;
};

}  // namespace __lsan

extern "C" {

@@ -12,6 +12,7 @@
//===---------------------------------------------------------------------===//

#include "lsan_common.h"
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_platform.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA

@@ -143,17 +144,13 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,

        // We don't use the thread registry at all for enumerating the threads
        // and their stacks, registers, and TLS regions. So use it separately
        // just for the allocator cache, and to call ForEachExtraStackRange,
        // just for the allocator cache, and to call ScanExtraStackRanges,
        // which ASan needs.
        if (flags()->use_stacks) {
          GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
              [](ThreadContextBase *tctx, void *arg) {
                ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
                                       arg);
              },
              &params->argument->frontier);
          InternalMmapVector<Range> ranges;
          GetThreadExtraStackRangesLocked(&ranges);
          ScanExtraStackRanges(ranges, &params->argument->frontier);
        }

        params->callback(SuspendedThreadsListFuchsia(), params->argument);
      },
      &params);

@@ -25,6 +25,8 @@
#  include "sanitizer_common/sanitizer_allocator_internal.h"
namespace __lsan {

class ThreadContextLsanBase;

enum class SeenRegion {
  None = 0,
  AllocOnce = 1 << 0,

@@ -50,18 +52,18 @@ struct RegionScanState {

typedef struct {
  int disable_counter;
  u32 current_thread_id;
  ThreadContextLsanBase *current_thread;
  AllocatorCache cache;
} thread_local_data_t;

static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

// The main thread destructor requires the current thread id,
// so we can't destroy it until it's been used and reset to invalid tid
// The main thread destructor requires the current thread,
// so we can't destroy it until it's been used and reset.
void restore_tid_data(void *ptr) {
  thread_local_data_t *data = (thread_local_data_t *)ptr;
  if (data->current_thread_id != kInvalidTid)
  if (data->current_thread)
    pthread_setspecific(key, data);
}

@@ -76,7 +78,7 @@ static thread_local_data_t *get_tls_val(bool alloc) {
  if (ptr == NULL && alloc) {
    ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
    ptr->disable_counter = 0;
    ptr->current_thread_id = kInvalidTid;
    ptr->current_thread = nullptr;
    ptr->cache = AllocatorCache();
    pthread_setspecific(key, ptr);
  }

@@ -99,12 +101,14 @@ void EnableInThisThread() {
  --*disable_counter;
}

u32 GetCurrentThread() {
ThreadContextLsanBase *GetCurrentThread() {
  thread_local_data_t *data = get_tls_val(false);
  return data ? data->current_thread_id : kInvalidTid;
  return data ? data->current_thread : nullptr;
}

void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
void SetCurrentThread(ThreadContextLsanBase *tctx) {
  get_tls_val(true)->current_thread = tctx;
}

AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }

@@ -46,6 +46,7 @@ struct OnStartedArgs {
};

void ThreadContext::OnStarted(void *arg) {
  ThreadContextLsanBase::OnStarted(arg);
  auto args = reinterpret_cast<const OnStartedArgs *>(arg);
  cache_begin_ = args->cache_begin;
  cache_end_ = args->cache_end;

@@ -68,7 +69,7 @@ void InitializeMainThread() {
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
  GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        auto ctx = static_cast<ThreadContext *>(tctx);
        static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());

@@ -98,7 +99,7 @@ void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
  OnCreatedArgs args;
  args.stack_begin = reinterpret_cast<uptr>(stack_base);
  args.stack_end = args.stack_begin + stack_size;
  u32 parent_tid = GetCurrentThread();
  u32 parent_tid = GetCurrentThreadId();
  u32 tid = ThreadCreate(parent_tid, detached, &args);
  return reinterpret_cast<void *>(static_cast<uptr>(tid));
}

@@ -110,7 +111,7 @@ void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
  // On success, there is nothing to do here.
  if (error != thrd_success) {
    // Clean up the thread registry for the thread creation that didn't happen.
    GetThreadRegistryLocked()->FinishThread(tid);
    GetLsanThreadRegistryLocked()->FinishThread(tid);
  }
}

@@ -468,7 +468,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
    res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
  }
  if (res == 0) {
    int tid = ThreadCreate(GetCurrentThread(), IsStateDetached(detached));
    int tid = ThreadCreate(GetCurrentThreadId(), IsStateDetached(detached));
    CHECK_NE(tid, kMainTid);
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)

@@ -14,13 +14,14 @@

#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA

#include "lsan_allocator.h"
#  include "lsan_allocator.h"
#  include "lsan_thread.h"

namespace __lsan {

static THREADLOCAL u32 current_thread_tid = kInvalidTid;
u32 GetCurrentThread() { return current_thread_tid; }
void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
static THREADLOCAL ThreadContextLsanBase *current_thread = nullptr;
ThreadContextLsanBase *GetCurrentThread() { return current_thread; }
void SetCurrentThread(ThreadContextLsanBase *tctx) { current_thread = tctx; }

static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }

@@ -67,10 +67,9 @@ typedef struct {

ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
  if (GetCurrentThread() == kInvalidTid) {
  if (GetCurrentThreadId() == kInvalidTid) {
    u32 tid = ThreadCreate(parent_tid, true);
    ThreadStart(tid, GetTid());
    SetCurrentThread(tid);
  }
}

@@ -101,7 +100,7 @@ extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
      (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
  lsan_ctxt->block = ctxt;
  lsan_ctxt->func = func;
  lsan_ctxt->parent_tid = GetCurrentThread();
  lsan_ctxt->parent_tid = GetCurrentThreadId();
  return lsan_ctxt;
}

@@ -146,13 +145,13 @@ void dispatch_source_set_event_handler(dispatch_source_t ds,
                                       void (^work)(void));
}

#define GET_LSAN_BLOCK(work)                 \
  void (^lsan_block)(void);                  \
  int parent_tid = GetCurrentThread();       \
  lsan_block = ^(void) {                     \
    lsan_register_worker_thread(parent_tid); \
    work();                                  \
  }
#  define GET_LSAN_BLOCK(work)                 \
    void (^lsan_block)(void);                  \
    int parent_tid = GetCurrentThreadId();     \
    lsan_block = ^(void) {                     \
      lsan_register_worker_thread(parent_tid); \
      work();                                  \
    }

INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
  GET_LSAN_BLOCK(work);

@@ -16,6 +16,7 @@
#if SANITIZER_POSIX
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

@@ -34,6 +35,7 @@ struct OnStartedArgs {
};

void ThreadContext::OnStarted(void *arg) {
  ThreadContextLsanBase::OnStarted(arg);
  auto args = reinterpret_cast<const OnStartedArgs *>(arg);
  stack_begin_ = args->stack_begin;
  stack_end_ = args->stack_end;

@@ -61,7 +63,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  ThreadContext *context = static_cast<ThreadContext *>(
      GetThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
      GetLsanThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
  if (!context)
    return false;
  *stack_begin = context->stack_begin();

@@ -87,7 +89,7 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
}

void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
  HandleDeadlySignal(siginfo, context, GetCurrentThreadId(), &OnStackUnwind,
                     nullptr);
}

@@ -25,9 +25,12 @@ namespace __lsan {

static ThreadRegistry *thread_registry;

static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *CreateThreadContext(u32 tid) {
  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
  return new (mem) ThreadContext(tid);
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) ThreadContext(tid);
}

void InitializeThreadRegistry() {

@@ -39,9 +42,12 @@ void InitializeThreadRegistry() {
ThreadContextLsanBase::ThreadContextLsanBase(int tid)
    : ThreadContextBase(tid) {}

void ThreadContextLsanBase::OnStarted(void *arg) { SetCurrentThread(this); }

void ThreadContextLsanBase::OnFinished() {
  AllocatorThreadFinish();
  DTLS_Destroy();
  SetCurrentThread(nullptr);
}

u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {

@@ -51,40 +57,39 @@ u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
                                        ThreadType thread_type, void *arg) {
  thread_registry->StartThread(tid, os_id, thread_type, arg);
  SetCurrentThread(tid);
}

void ThreadFinish() {
  thread_registry->FinishThread(GetCurrentThread());
  SetCurrentThread(kInvalidTid);
}

ThreadContext *CurrentThreadContext() {
  if (!thread_registry)
    return nullptr;
  if (GetCurrentThread() == kInvalidTid)
    return nullptr;
  // No lock needed when getting current thread.
  return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}
void ThreadFinish() { thread_registry->FinishThread(GetCurrentThreadId()); }

void EnsureMainThreadIDIsCorrect() {
  if (GetCurrentThread() == kMainTid)
    CurrentThreadContext()->os_id = GetTid();
  if (GetCurrentThreadId() == kMainTid)
    GetCurrentThread()->os_id = GetTid();
}

///// Interface to the common LSan module. /////

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {}
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void LockThreadRegistry() { thread_registry->Lock(); }

void UnlockThreadRegistry() { thread_registry->Unlock(); }

ThreadRegistry *GetThreadRegistryLocked() {
ThreadRegistry *GetLsanThreadRegistryLocked() {
  thread_registry->CheckLocked();
  return thread_registry;
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning) {
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
        }
      },
      threads);
}

}  // namespace __lsan

@@ -21,6 +21,7 @@ namespace __lsan {
class ThreadContextLsanBase : public ThreadContextBase {
 public:
  explicit ThreadContextLsanBase(int tid);
  void OnStarted(void *arg) override;
  void OnFinished() override;
  uptr stack_begin() { return stack_begin_; }
  uptr stack_end() { return stack_end_; }

@@ -45,12 +46,17 @@ class ThreadContext;
void InitializeThreadRegistry();
void InitializeMainThread();

ThreadRegistry *GetLsanThreadRegistryLocked();

u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
void ThreadFinish();

u32 GetCurrentThread();
void SetCurrentThread(u32 tid);
ThreadContext *CurrentThreadContext();
ThreadContextLsanBase *GetCurrentThread();
inline u32 GetCurrentThreadId() {
  ThreadContextLsanBase *ctx = GetCurrentThread();
  return ctx ? ctx->tid : kInvalidTid;
}
void SetCurrentThread(ThreadContextLsanBase *tctx);
void EnsureMainThreadIDIsCorrect();

}  // namespace __lsan
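
A sketch of how call sites choose between the two accessors after this split (runtime-internal code, not user API; hypothetical helper):

    namespace __lsan {
    void Example() {
      u32 tid = GetCurrentThreadId();  // kInvalidTid before ThreadStart()
      if (ThreadContextLsanBase *t = GetCurrentThread()) {
        // Stack bounds now come straight off the context object.
        uptr lo = t->stack_begin(), hi = t->stack_end();
        (void)lo; (void)hi;
      }
      (void)tid;
    }
    }  // namespace __lsan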

@@ -146,12 +146,10 @@ void *LowLevelAllocator::Allocate(uptr size) {
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);

@@ -134,7 +134,7 @@ class CombinedAllocator {

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
  void *GetBlockBeginFastLocked(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);

@@ -21,6 +21,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
    const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();

@@ -51,7 +51,6 @@ void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_INTERNAL_H

@@ -215,7 +215,7 @@ class LargeMmapAllocator {

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;

@@ -61,6 +61,26 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
  UNREACHABLE("unable to mmap");
}

void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
                                        bool raw_report) {
  static int recursion_count;
  if (raw_report || recursion_count) {
    // If a raw report is requested or we went into recursion, just die. The
    // Report() and CHECK calls below may call munmap recursively and fail.
    RawWrite("ERROR: Failed to munmap\n");
    Die();
  }
  recursion_count++;
  Report(
      "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
      "code: %d)\n",
      SanitizerToolName, size, size, addr, err);
#if !SANITIZER_GO
  DumpProcessMap();
#endif
  UNREACHABLE("unable to munmap");
}

typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);

@@ -211,6 +211,7 @@ class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;

@@ -315,6 +316,8 @@ CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

@@ -23,10 +23,6 @@
//   COMMON_INTERCEPTOR_SET_THREAD_NAME
//   COMMON_INTERCEPTOR_DLOPEN
//   COMMON_INTERCEPTOR_ON_EXIT
//   COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
//   COMMON_INTERCEPTOR_MUTEX_POST_LOCK
//   COMMON_INTERCEPTOR_MUTEX_UNLOCK
//   COMMON_INTERCEPTOR_MUTEX_REPAIR
//   COMMON_INTERCEPTOR_SET_PTHREAD_NAME
//   COMMON_INTERCEPTOR_HANDLE_RECVMSG
//   COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED

@@ -223,26 +219,6 @@ extern const short *_tolower_tab_;
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR
#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif

@@ -1374,7 +1350,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
    char *name = (char *)arg5;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
  }
  int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
  if (option == PR_SET_NAME) {
    char buff[16];
    internal_strncpy(buff, (char *)arg2, 15);

@@ -4475,90 +4451,13 @@ INTERCEPTOR(void, _exit, int status) {
#define INIT__EXIT
#endif

#if SANITIZER_INTERCEPT_PTHREAD_MUTEX
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
  int res = REAL(pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
  if (res == 0 || res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
  if (res == errno_EINVAL)
    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}

INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
  COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
  int res = REAL(pthread_mutex_unlock)(m);
  if (res == errno_EINVAL)
    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}

#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
#define INIT_PTHREAD_MUTEX_UNLOCK \
  COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
#else
#define INIT_PTHREAD_MUTEX_LOCK
#define INIT_PTHREAD_MUTEX_UNLOCK
#endif

#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);
  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
  int res = REAL(__pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
  if (res == 0 || res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
  if (res == errno_EINVAL)
    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}

INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);
  COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
  int res = REAL(__pthread_mutex_unlock)(m);
  if (res == errno_EINVAL)
    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}

#define INIT___PTHREAD_MUTEX_LOCK \
  COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)
#define INIT___PTHREAD_MUTEX_UNLOCK \
  COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)
#else
#define INIT___PTHREAD_MUTEX_LOCK
#define INIT___PTHREAD_MUTEX_UNLOCK
#endif

#if SANITIZER_INTERCEPT___LIBC_MUTEX
INTERCEPTOR(int, __libc_mutex_lock, void *m)
ALIAS(WRAPPER_NAME(pthread_mutex_lock));

INTERCEPTOR(int, __libc_mutex_unlock, void *m)
ALIAS(WRAPPER_NAME(pthread_mutex_unlock));

INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
ALIAS(WRAPPER_NAME(pthread_setcancelstate));

#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)
#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)
#define INIT___LIBC_THR_SETCANCELSTATE \
  COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
#else
#define INIT___LIBC_MUTEX_LOCK
#define INIT___LIBC_MUTEX_UNLOCK
#define INIT___LIBC_THR_SETCANCELSTATE
#endif

@@ -5864,8 +5763,10 @@ INTERCEPTOR(int, capget, void *hdrp, void *datap) {
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(capget)(hdrp, datap);
  if (res == 0 && datap)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
  if (res == 0 && datap) {
    unsigned datasz = __user_cap_data_struct_sz(hdrp);
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, datasz);
  }
  // We can also return -1 and write to hdrp->version if the version passed in
  // hdrp->version is unsupported. But that's not a trivial condition to check,
  // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.

@@ -5876,8 +5777,10 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
  COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
  if (hdrp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
  if (datap)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
  if (datap) {
    unsigned datasz = __user_cap_data_struct_sz(hdrp);
    COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, datasz);
  }
  return REAL(capset)(hdrp, datap);
}
#define INIT_CAPGET \
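
Background for the size change (a sketch using the kernel's own constants, not the runtime's helper): the number of __user_cap_data_struct elements behind capget/capset depends on the version field in the header, so a fixed size over-reads for v1 headers:

    #include <linux/capability.h>

    unsigned CapDataBytes(const cap_user_header_t hdr) {
      unsigned n = (hdr->version == _LINUX_CAPABILITY_VERSION_1)
                       ? _LINUX_CAPABILITY_U32S_1   // 1 element
                       : _LINUX_CAPABILITY_U32S_3;  // 2 elements (v2/v3)
      return n * sizeof(struct __user_cap_data_struct);
    }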

@@ -10458,6 +10361,39 @@ INTERCEPTOR(int, __xuname, int size, void *utsname) {
#define INIT___XUNAME
#endif

#if SANITIZER_INTERCEPT_HEXDUMP
INTERCEPTOR(void, hexdump, const void *ptr, int length, const char *header, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, hexdump, ptr, length, header, flags);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, length);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, header, internal_strlen(header) + 1);
  REAL(hexdump)(ptr, length, header, flags);
}

#define INIT_HEXDUMP COMMON_INTERCEPT_FUNCTION(hexdump);
#else
#define INIT_HEXDUMP
#endif

#if SANITIZER_INTERCEPT_ARGP_PARSE
INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
            unsigned flags, int *arg_index, void *input) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, argp_parse, argp, argc, argv, flags, arg_index,
                           input);
  for (int i = 0; i < argc; i++)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, argv[i], internal_strlen(argv[i]) + 1);
  int res = REAL(argp_parse)(argp, argc, argv, flags, arg_index, input);
  if (!res && arg_index)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg_index, sizeof(int));
  return res;
}

#define INIT_ARGP_PARSE COMMON_INTERCEPT_FUNCTION(argp_parse);
#else
#define INIT_ARGP_PARSE
#endif

#include "sanitizer_common_interceptors_netbsd_compat.inc"

static void InitializeCommonInterceptors() {

@@ -10604,12 +10540,6 @@ static void InitializeCommonInterceptors() {
  INIT_PTHREAD_SIGMASK;
  INIT_BACKTRACE;
  INIT__EXIT;
  INIT_PTHREAD_MUTEX_LOCK;
  INIT_PTHREAD_MUTEX_UNLOCK;
  INIT___PTHREAD_MUTEX_LOCK;
  INIT___PTHREAD_MUTEX_UNLOCK;
  INIT___LIBC_MUTEX_LOCK;
  INIT___LIBC_MUTEX_UNLOCK;
  INIT___LIBC_THR_SETCANCELSTATE;
  INIT_GETMNTENT;
  INIT_GETMNTENT_R;

@@ -10782,6 +10712,8 @@ static void InitializeCommonInterceptors() {
  INIT_PROCCTL
  INIT_UNAME;
  INIT___XUNAME;
  INIT_HEXDUMP;
  INIT_ARGP_PARSE;

  INIT___PRINTF_CHK;
}

@@ -9,12 +9,16 @@
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_annotate_double_ended_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(
    __sanitizer_double_ended_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_verify_double_ended_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)

@@ -28,6 +32,7 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)

@@ -417,14 +417,14 @@ PRE_SYSCALL(capget)(void *header, void *dataptr) {
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
  if (res >= 0)
    if (dataptr)
      POST_WRITE(dataptr, __user_cap_data_struct_sz);
      POST_WRITE(dataptr, __user_cap_data_struct_sz(header));
}

PRE_SYSCALL(capset)(void *header, const void *data) {
  if (header)
    PRE_READ(header, __user_cap_header_struct_sz);
  if (data)
    PRE_READ(data, __user_cap_data_struct_sz);
    PRE_READ(data, __user_cap_data_struct_sz(header));
}

POST_SYSCALL(capset)(long res, void *header, const void *data) {}

@@ -910,6 +910,17 @@ POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
  }
}

PRE_SYSCALL(fstatfs)(long fd, void *buf) {}

POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
  if (res >= 0) {
    if (buf)
      POST_WRITE(buf, struct_statfs_sz);
  }
}
#  endif  // !SANITIZER_ANDROID

#  if SANITIZER_GLIBC
PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
  if (path)
    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);

@@ -922,15 +933,6 @@ POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
  }
}

PRE_SYSCALL(fstatfs)(long fd, void *buf) {}

POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
  if (res >= 0) {
    if (buf)
      POST_WRITE(buf, struct_statfs_sz);
  }
}

PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}

POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {

@@ -939,7 +941,7 @@ POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
    POST_WRITE(buf, struct_statfs64_sz);
  }
}
#  endif  // !SANITIZER_ANDROID
#  endif  // SANITIZER_GLIBC

PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
  if (filename)

@@ -998,7 +1000,7 @@ POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
  }
}

#  if !SANITIZER_ANDROID
#  if SANITIZER_GLIBC
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}

POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {

@@ -1007,7 +1009,7 @@ POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
    POST_WRITE(ubuf, struct_ustat_sz);
  }
}
#  endif  // !SANITIZER_ANDROID
#  endif  // SANITIZER_GLIBC

PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
  if (filename)

@@ -2106,6 +2108,7 @@ PRE_SYSCALL(epoll_wait)
POST_SYSCALL(epoll_wait)
(long res, long epfd, void *events, long maxevents, long timeout) {
  if (res >= 0) {
    COMMON_SYSCALL_FD_ACQUIRE(epfd);
    if (events)
      POST_WRITE(events, res * struct_epoll_event_sz);
  }

@@ -2122,6 +2125,7 @@ POST_SYSCALL(epoll_pwait)
(long res, long epfd, void *events, long maxevents, long timeout,
 const void *sigmask, long sigsetsize) {
  if (res >= 0) {
    COMMON_SYSCALL_FD_ACQUIRE(epfd);
    if (events)
      POST_WRITE(events, res * struct_epoll_event_sz);
  }

@@ -2142,6 +2146,7 @@ POST_SYSCALL(epoll_pwait2)
 const sanitizer_kernel_timespec *timeout, const void *sigmask,
 long sigsetsize) {
  if (res >= 0) {
    COMMON_SYSCALL_FD_ACQUIRE(epfd);
    if (events)
      POST_WRITE(events, res * struct_epoll_event_sz);
  }

@@ -2228,7 +2233,7 @@ POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
  }
}

#  if !SANITIZER_ANDROID
#  if SANITIZER_GLIBC
PRE_SYSCALL(prlimit64)
(long pid, long resource, const void *new_rlim, void *old_rlim) {
  if (new_rlim)

@@ -2512,7 +2517,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#  if !SANITIZER_ANDROID &&                                                   \
      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
       SANITIZER_RISCV64)
       defined(__loongarch__) || SANITIZER_RISCV64)
  if (data) {
    if (request == ptrace_setregs) {
      PRE_READ((void *)data, struct_user_regs_struct_sz);

@@ -2534,7 +2539,7 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#  if !SANITIZER_ANDROID &&                                                   \
      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
       SANITIZER_RISCV64)
       defined(__loongarch__) || SANITIZER_RISCV64)
  if (res >= 0 && data) {
    // Note that this is different from the interceptor in
    // sanitizer_common_interceptors.inc.

@@ -282,7 +282,14 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr* beg,
// Weak definition for code instrumented with -fsanitize-coverage=stack-depth
// and later linked with code containing a strong definition.
// E.g., -fsanitize=fuzzer-no-link
// FIXME: Update Apple deployment target so that thread_local is always
// supported, and remove the #if.
// FIXME: Figure out how this should work on Windows, exported thread_local
// symbols are not supported:
// "data with thread storage duration may not have dll interface"
#if !SANITIZER_APPLE && !SANITIZER_WINDOWS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
thread_local uptr __sancov_lowest_stack;
#endif

#endif  // !SANITIZER_FUCHSIA

@@ -25,6 +25,7 @@ namespace __sanitizer {
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ENAMETOOLONG 36
#define errno_ENOSYS 38

// These might not be present, or their values may differ, on some platforms.
extern const int errno_EOWNERDEAD;

@@ -15,6 +15,7 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

@@ -13,9 +13,9 @@
#include "sanitizer_flag_parser.h"

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

@@ -13,9 +13,9 @@
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_common.h"

namespace __sanitizer {

@@ -1267,8 +1267,6 @@ static void ioctl_table_fill() {
  _(TIOCGFLAGS, WRITE, sizeof(int));
  _(TIOCSFLAGS, READ, sizeof(int));
  _(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
  _(TIOCRCVFRAME, READ, sizeof(uptr));
  _(TIOCXMTFRAME, READ, sizeof(uptr));
  _(TIOCPTMGET, WRITE, struct_ptmget_sz);
  _(TIOCGRANTPT, NONE, 0);
  _(TIOCPTSNAME, WRITE, struct_ptmget_sz);

@@ -66,18 +66,30 @@ void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
                                               const void *old_mid,
                                               const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg, const void *storage_end,
    const void *old_container_beg, const void *old_container_end,
    const void *new_container_beg, const void *new_container_end);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
                                            const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg, const void *container_beg,
    const void *container_end, const void *storage_end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
                                                              const void *mid,
                                                              const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg, const void *container_beg,
    const void *container_end, const void *storage_end);

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
                                             __sanitizer::uptr module_path_len,
                                             void **pc_offset);

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void

@@ -37,15 +37,6 @@
#  define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif

// TLS is handled differently on different platforms
#if SANITIZER_LINUX || SANITIZER_NETBSD || \
    SANITIZER_FREEBSD
#  define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
    __attribute__((tls_model("initial-exec"))) thread_local
#else
#  define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
#endif

//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
@@ -440,7 +440,7 @@ uptr internal_fstat(fd_t fd, void *buf) {
return res;
# elif SANITIZER_LINUX && defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), fd, 0, AT_EMPTY_PATH,
int res = internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH,
STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;

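The 0 -> "" change above matters because statx(2) historically rejects a NULL
pathname: with AT_EMPTY_PATH the path must be an empty string, and the call
then operates on the file referred to by fd. A stand-alone equivalent using
the glibc wrapper (assuming glibc >= 2.28):

  #define _GNU_SOURCE
  #include <fcntl.h>     // AT_EMPTY_PATH
  #include <sys/stat.h>  // statx(), struct statx

  int fstat_via_statx(int fd, struct statx *stx) {
    // Same pattern as the hunk above: empty path + AT_EMPTY_PATH == fstat.
    return statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, stx);
  }
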
@@ -1502,6 +1502,47 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "x30", "memory");
return res;
}
#elif SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
return -EINVAL;

CHECK_EQ(0, (uptr)child_stack % 16);

register int res __asm__("$a0");
register int __flags __asm__("$a0") = flags;
register void *__stack __asm__("$a1") = child_stack;
register int *__ptid __asm__("$a2") = parent_tidptr;
register int *__ctid __asm__("$a3") = child_tidptr;
register void *__tls __asm__("$a4") = newtls;
register int (*__fn)(void *) __asm__("$a5") = fn;
register void *__arg __asm__("$a6") = arg;
register int nr_clone __asm__("$a7") = __NR_clone;

__asm__ __volatile__(
"syscall 0\n"

// if ($a0 != 0)
// return $a0;
"bnez $a0, 1f\n"

// In the child, now. Call "fn(arg)".
"move $a0, $a6\n"
"jirl $ra, $a5, 0\n"

// Call _exit($a0).
"addi.d $a7, $zero, %9\n"
"syscall 0\n"

"1:\n"

: "=r"(res)
: "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls),
"r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
: "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8");
return res;
}
#elif defined(__powerpc64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {

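A hedged sketch of how the runtime uses this primitive (modelled on the
stop-the-world tracer; the names and exact flags here are illustrative, not
the verbatim call sites):

  // Inside the sanitizer runtime (sanitizer_common internals assumed).
  static int TracerPayload(void *arg);  // hypothetical child function

  static uptr SpawnTracer(void *arg) {
    const uptr kStackSize = 64 * 1024;
    uptr stack = (uptr)MmapOrDie(kStackSize, "tracer stack");
    // The stack grows down; pass its top, 16-byte aligned as the
    // CHECK_EQ in the LoongArch stub above requires.
    uptr stack_top = RoundDownTo(stack + kStackSize, 16);
    return internal_clone(TracerPayload, (void *)stack_top,
                          CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
                          arg, /*parent_tidptr=*/nullptr,
                          /*newtls=*/nullptr, /*child_tidptr=*/nullptr);
  }
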
@@ -77,9 +77,9 @@ uptr internal_arch_prctl(int option, uptr arg2);
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64
# if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif

@@ -152,6 +152,9 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
"rdhwr %0,$29\n" \
".set pop\n" : "=r"(__v)); \
__v; })
#elif defined (__riscv)
# define __get_tls() \
({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
#elif defined(__i386__)
# define __get_tls() \
({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })

@@ -205,7 +205,8 @@ void InitTlsSize() {
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;

#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);

@@ -265,6 +266,8 @@ static uptr ThreadDescriptorSizeFallback() {
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
#elif SANITIZER_LOONGARCH64
val = 1856; // from glibc 2.36
#elif SANITIZER_RISCV64
int major;
int minor;

@@ -304,7 +307,8 @@ uptr ThreadDescriptorSize() {
return val;
}

#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {

@@ -314,6 +318,8 @@ static uptr TlsPreTcbSize() {
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
#elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#elif SANITIZER_LOONGARCH64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =

@@ -500,6 +506,15 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__loongarch__)
# ifdef __clang__
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# else
asm("or %0,$tp,$zero" : "=r"(*addr));
*addr -= ThreadDescriptorSize();
# endif
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;

@@ -568,6 +583,7 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
#else
#error "Unknown OS"
#endif
}

@@ -822,13 +838,9 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
#else
return 1;
#endif
#endif
}

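The surviving branch above in stand-alone form; CPU_COUNT is a glibc macro
from <sched.h>, which is why the removed fallback existed for libcs without
it:

  #define _GNU_SOURCE
  #include <sched.h>

  static unsigned NumberOfCPUs(void) {
    cpu_set_t cpus;
    if (sched_getaffinity(0, sizeof(cpus), &cpus) != 0)
      return 1;  // mirrors the removed "return 1" fallback
    return (unsigned)CPU_COUNT(&cpus);
  }
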
@@ -12,89 +12,81 @@

#include "sanitizer_platform.h"
#if SANITIZER_APPLE
#include "sanitizer_mac.h"
#include "interception/interception.h"
# include "interception/interception.h"
# include "sanitizer_mac.h"

// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
#ifndef _DARWIN_USE_64_BIT_INODE
#define _DARWIN_USE_64_BIT_INODE 1
#endif
#include <stdio.h>
# ifndef _DARWIN_USE_64_BIT_INODE
# define _DARWIN_USE_64_BIT_INODE 1
# endif
# include <stdio.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_ptrauth.h"
# include "sanitizer_common.h"
# include "sanitizer_file.h"
# include "sanitizer_flags.h"
# include "sanitizer_interface_internal.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_libc.h"
# include "sanitizer_platform_limits_posix.h"
# include "sanitizer_procmaps.h"
# include "sanitizer_ptrauth.h"

#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetEnviron
#else
# if !SANITIZER_IOS
# include <crt_externs.h> // for _NSGetEnviron
# else
extern char **environ;
#endif
# endif

#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
#define SANITIZER_OS_TRACE 0
#endif
# if defined(__has_include) && __has_include(<os/trace.h>)
# define SANITIZER_OS_TRACE 1
# include <os/trace.h>
# else
# define SANITIZER_OS_TRACE 0
# endif

// import new crash reporting api
#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
#define HAVE_CRASHREPORTERCLIENT_H 1
#include <CrashReporterClient.h>
#else
#define HAVE_CRASHREPORTERCLIENT_H 0
#endif
# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
# define HAVE_CRASHREPORTERCLIENT_H 1
# include <CrashReporterClient.h>
# else
# define HAVE_CRASHREPORTERCLIENT_H 0
# endif

#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
# if !SANITIZER_IOS
# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
# else
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetArgv(void);
}
#endif
# endif

#include <asl.h>
#include <dlfcn.h> // for dladdr()
#include <errno.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
# include <os/log.h>
#else
/* Without support for __builtin_os_log_format, fall back to the older
method. */
# define OS_LOG_DEFAULT 0
# define os_log_error(A,B,C) \
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
#endif
#include <pthread.h>
#include <pthread/introspection.h>
#include <sched.h>
#include <signal.h>
#include <spawn.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <util.h>
# include <asl.h>
# include <dlfcn.h> // for dladdr()
# include <errno.h>
# include <fcntl.h>
# include <libkern/OSAtomic.h>
# include <mach-o/dyld.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
# include <mach/vm_statistics.h>
# include <malloc/malloc.h>
# include <os/log.h>
# include <pthread.h>
# include <pthread/introspection.h>
# include <sched.h>
# include <signal.h>
# include <spawn.h>
# include <stdlib.h>
# include <sys/ioctl.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/sysctl.h>
# include <sys/types.h>
# include <sys/wait.h>
# include <unistd.h>
# include <util.h>

// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {

@@ -997,7 +989,7 @@ static void VerifyInterceptorsWorking() {
// "wrap_puts" within our own dylib.
Dl_info info_puts, info_runtime;
RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
RAW_CHECK(dladdr((void *)__sanitizer_report_error_summary, &info_runtime));
RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
Report(
"ERROR: Interceptors are not working. This may be because %s is "

@@ -1047,7 +1039,7 @@ static void StripEnv() {
return;

Dl_info info;
RAW_CHECK(dladdr((void *)__sanitizer_report_error_summary, &info));
RAW_CHECK(dladdr((void *)&StripEnv, &info));
const char *dylib_name = StripModuleName(info.dli_fname);
bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
if (!lib_is_in_env)

@@ -14,26 +14,6 @@

#include "sanitizer_common.h"
#include "sanitizer_platform.h"

/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
TARGET_OS_MAC (we have no support for iOS in any form for these versions,
so there's no ambiguity). */
#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
# define TARGET_OS_OSX 1
#endif

/* Other TARGET_OS_xxx are not present on earlier versions, define them to
0 (we have no support for them; they are not valid targets anyway). */
#ifndef TARGET_OS_IOS
#define TARGET_OS_IOS 0
#endif
#ifndef TARGET_OS_TV
#define TARGET_OS_TV 0
#endif
#ifndef TARGET_OS_WATCH
#define TARGET_OS_WATCH 0
#endif

#if SANITIZER_APPLE
#include "sanitizer_posix.h"

libsanitizer/sanitizer_common/sanitizer_mallinfo.h (new file, 38 lines)
@ -0,0 +1,38 @@
|
|||
//===-- sanitizer_mallinfo.h ----------------------------------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of Sanitizer common code.
|
||||
//
|
||||
// Definition for mallinfo on different platforms.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SANITIZER_MALLINFO_H
|
||||
#define SANITIZER_MALLINFO_H
|
||||
|
||||
#include "sanitizer_internal_defs.h"
|
||||
#include "sanitizer_platform.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
|
||||
struct __sanitizer_struct_mallinfo {
|
||||
uptr v[10];
|
||||
};
|
||||
|
||||
#elif SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_FUCHSIA
|
||||
|
||||
struct __sanitizer_struct_mallinfo {
|
||||
int v[10];
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_MALLINFO_H
|
|
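The struct is deliberately opaque (ten words, no field names). A sketch of
the way an allocator interceptor is expected to use it -- ASan's mallinfo
interceptor, for example, returns a zero-filled record rather than exposing
heap internals (simplified, not the verbatim source):

  INTERCEPTOR(__sanitizer_struct_mallinfo, mallinfo, void) {
    __sanitizer_struct_mallinfo mi;
    internal_memset(&mi, 0, sizeof(mi));
    return mi;
  }
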
@@ -279,15 +279,14 @@
#endif

// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
// But in some cases SizeClassAllocator64 does not work well and we need to
// fallback to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
# define SANITIZER_CAN_USE_ALLOCATOR64 1
# elif defined(__mips64) || defined(__arm__) || defined(__i386__) || \
SANITIZER_RISCV64 || defined(__hexagon__)
# if SANITIZER_RISCV64 || SANITIZER_IOS
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# elif defined(__mips64) || defined(__hexagon__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)

@@ -269,11 +269,11 @@
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#define SANITIZER_INTERCEPT_READDIR64 SI_GLIBC || SI_SOLARIS32
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0

@@ -308,7 +308,7 @@
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#define SANITIZER_INTERCEPT_SCANDIR64 SI_GLIBC || SI_SOLARIS32
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS

@@ -330,10 +330,10 @@
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_STATFS64 SI_LINUX_NOT_ANDROID && SANITIZER_HAS_STATFS64
#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC
#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \

@@ -396,8 +396,6 @@
#define SANITIZER_INTERCEPT__EXIT \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)

#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)

@@ -471,9 +469,9 @@
#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT \
((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT64 SI_GLIBC

#define SANITIZER_INTERCEPT_UTMP \
(SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)

@@ -484,7 +482,7 @@
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)

#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC

@@ -593,6 +591,8 @@
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
#define SANITIZER_INTERCEPT_HEXDUMP SI_FREEBSD
#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC

// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have

@@ -26,10 +26,7 @@

// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
// Also, for some platforms (e.g. mips) there are additional members in the
// <sys/stat.h> struct stat:s.
// fine with newer headers, too.
#include <linux/posix_types.h>
# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
# include <sys/stat.h>

@@ -2342,8 +2342,6 @@ unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;

@@ -2195,8 +2195,6 @@ extern unsigned IOCTL_TIOCDRAIN;
extern unsigned IOCTL_TIOCGFLAGS;
extern unsigned IOCTL_TIOCSFLAGS;
extern unsigned IOCTL_TIOCDCDTIMESTAMP;
extern unsigned IOCTL_TIOCRCVFRAME;
extern unsigned IOCTL_TIOCXMTFRAME;
extern unsigned IOCTL_TIOCPTMGET;
extern unsigned IOCTL_TIOCGRANTPT;
extern unsigned IOCTL_TIOCPTSNAME;

@@ -18,6 +18,7 @@
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#undef _FILE_OFFSET_BITS
#undef _TIME_BITS
#endif

// Must go after undef _FILE_OFFSET_BITS.

@@ -94,7 +95,7 @@
# include <utime.h>
# include <sys/ptrace.h>
# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
defined(__hexagon__) || SANITIZER_RISCV64
defined(__hexagon__) || defined(__loongarch__) || SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;

@@ -247,7 +248,23 @@ namespace __sanitizer {
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
unsigned __user_cap_data_struct_sz(void *hdrp) {
int u32s = 0;
if (hdrp) {
switch (((struct __user_cap_header_struct *)hdrp)->version) {
case _LINUX_CAPABILITY_VERSION_1:
u32s = _LINUX_CAPABILITY_U32S_1;
break;
case _LINUX_CAPABILITY_VERSION_2:
u32s = _LINUX_CAPABILITY_U32S_2;
break;
case _LINUX_CAPABILITY_VERSION_3:
u32s = _LINUX_CAPABILITY_U32S_3;
break;
}
}
return sizeof(struct __user_cap_data_struct) * u32s;
}
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);

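The size is now a function because capget(2) fills one __user_cap_data_struct
per 32-bit capability word, and the word count depends on the version the
header announces (one for V1, two for V2 and V3). A stand-alone restatement
of the computation:

  #include <linux/capability.h>
  #include <stddef.h>

  size_t capget_write_size(const struct __user_cap_header_struct *hdr) {
    switch (hdr->version) {
      case _LINUX_CAPABILITY_VERSION_1:
        return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_1;
      case _LINUX_CAPABILITY_VERSION_2:
        return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_2;
      case _LINUX_CAPABILITY_VERSION_3:
        return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_3;
    }
    return 0;  // unknown version: nothing is guaranteed to be written
  }
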
@@ -260,7 +277,7 @@ namespace __sanitizer {
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
#endif // SANITIZER_LINUX

#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_GLIBC
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \

@@ -281,7 +298,7 @@ namespace __sanitizer {
unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // SANITIZER_GLIBC

#if SANITIZER_INTERCEPT_CRYPT_R
unsigned struct_crypt_data_sz = sizeof(struct crypt_data);

@@ -352,7 +369,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);

@@ -362,21 +379,24 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
#elif defined(__loongarch__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state);
#elif defined(__s390__)
unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
SANITIZER_RISCV64
defined(__loongarch__) || SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
// || __s390__
// || __s390__ || __loongarch__
#ifdef __arm__
unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
#else

@@ -1089,7 +1109,7 @@ CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
CHECK_SIZE_AND_OFFSET(dirent, d_reclen);

#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
CHECK_SIZE_AND_OFFSET(dirent64, d_off);

@@ -1122,6 +1142,15 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif

#if SANITIZER_HAS_SIGINFO
COMPILER_CHECK(alignof(siginfo_t) == alignof(__sanitizer_siginfo));
using __sanitizer_siginfo_t = __sanitizer_siginfo;
CHECK_TYPE_SIZE(siginfo_t);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_signo);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_errno);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_code);
#endif

#if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name);

@@ -18,6 +18,7 @@

#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#include "sanitizer_mallinfo.h"

#if SANITIZER_APPLE
#include <sys/cdefs.h>

@@ -28,8 +29,7 @@
#define SANITIZER_HAS_STAT64 0
#define SANITIZER_HAS_STATFS64 0
#endif
#else
// Must be SANITIZER_LINUX then
#elif SANITIZER_GLIBC || SANITIZER_ANDROID
#define SANITIZER_HAS_STAT64 1
#define SANITIZER_HAS_STATFS64 1
#endif

@@ -136,7 +136,7 @@ struct __sanitizer_perf_event_attr {
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned __user_cap_header_struct_sz;
extern unsigned __user_cap_data_struct_sz;
extern unsigned __user_cap_data_struct_sz(void *hdrp);
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;

@@ -205,17 +205,7 @@ struct __sanitizer_sem_t {
};
#endif // SANITIZER_LINUX

#if SANITIZER_ANDROID
struct __sanitizer_struct_mallinfo {
uptr v[10];
};
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_struct_mallinfo {
int v[10];
};

extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;

@@ -517,7 +507,7 @@ struct __sanitizer_dirent {
};
# endif

# if SANITIZER_LINUX && !SANITIZER_ANDROID
# if SANITIZER_GLIBC
struct __sanitizer_dirent64 {
unsigned long long d_ino;
unsigned long long d_off;

@@ -587,11 +577,31 @@ struct __sanitizer_sigset_t {
};
#endif

struct __sanitizer_siginfo {
// The size is determined by looking at sizeof of real siginfo_t on linux.
u64 opaque[128 / sizeof(u64)];
struct __sanitizer_siginfo_pad {
// Require uptr, because siginfo_t is always pointer-size aligned on Linux.
uptr pad[128 / sizeof(uptr)];
};

#if SANITIZER_LINUX
# define SANITIZER_HAS_SIGINFO 1
union __sanitizer_siginfo {
struct {
int si_signo;
# if SANITIZER_MIPS
int si_code;
int si_errno;
# else
int si_errno;
int si_code;
# endif
};
__sanitizer_siginfo_pad pad;
};
#else
# define SANITIZER_HAS_SIGINFO 0
typedef __sanitizer_siginfo_pad __sanitizer_siginfo;
#endif

using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
__sanitizer_siginfo *siginfo,

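How the union is meant to be consumed: on Linux (SANITIZER_HAS_SIGINFO == 1)
the anonymous leading struct aliases the pad, so a handler can read the three
common fields without the full platform siginfo_t -- and the MIPS branch above
keeps si_code/si_errno in the order that ABI uses. A minimal sketch:

  static void Handler(int sig, __sanitizer_siginfo *info, void *uctx) {
    int signo = info->si_signo;
    int code = info->si_code;  // resolves correctly on MIPS too
    (void)signo; (void)code; (void)uctx;
  }
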
@@ -843,7 +853,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;

@@ -57,11 +57,9 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
if (UNLIKELY(internal_iserror(res))) {
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
int reserrno;
if (UNLIKELY(internal_iserror(res, &reserrno)))
ReportMunmapFailureAndDie(addr, size, reserrno);
DecreaseTotalMmap(size);
}

@@ -87,19 +85,26 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
// mmap maps entire pages and rounds up map_size needs to be a an integral
// number of pages.
// We need to be aware of this size for calculating end and for unmapping
// fragments before and after the alignment region.
map_size = RoundUpTo(map_size, GetPageSizeCached());
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
if (UNLIKELY(!map_res))
return nullptr;
uptr map_end = map_res + map_size;
uptr res = map_res;
if (!IsAligned(res, alignment)) {
res = (map_res + alignment - 1) & ~(alignment - 1);
UnmapOrDie((void*)map_res, res - map_res);
}
uptr map_end = map_res + map_size;
uptr end = res + size;
end = RoundUpTo(end, GetPageSizeCached());
if (end != map_end)
if (end != map_end) {
CHECK_LT(end, map_end);
UnmapOrDie((void*)end, map_end - end);
}
return (void*)res;
}

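A worked example of the trimming above (hypothetical numbers, 4 KiB pages):

  // size = 8 KiB, alignment = 16 KiB  =>  map_size = 24 KiB
  // mmap returns map_res = 0x23000,  so map_end = 0x29000
  // res = map_res rounded up to 16 KiB -> 0x24000; unmap [0x23000, 0x24000)
  // end = res + size, page-rounded     -> 0x26000; unmap [0x26000, 0x29000)
  // leaving exactly [0x24000, 0x26000): 8 KiB, 16 KiB-aligned.
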
@@ -65,6 +65,8 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};

struct ImageHeader;

class MemoryMappingLayoutBase {
public:
virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }

@@ -75,10 +77,22 @@ class MemoryMappingLayoutBase {
~MemoryMappingLayoutBase() {}
};

class MemoryMappingLayout final : public MemoryMappingLayoutBase {
class MemoryMappingLayout : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);

// This destructor cannot be virtual, as it would cause an operator new() linking
// failures in hwasan test cases. However non-virtual destructors emit warnings
// in macOS build, hence disabling those
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
#endif
~MemoryMappingLayout();
#ifdef __clang__
#pragma clang diagnostic pop
#endif

virtual bool Next(MemoryMappedSegment *segment) override;
virtual bool Error() const override;
virtual void Reset() override;

@@ -90,10 +104,14 @@ class MemoryMappingLayout final : public MemoryMappingLayoutBase {
// Adds all mapped objects into a vector.
void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);

protected:
#if SANITIZER_APPLE
virtual const ImageHeader *CurrentImageHeader();
#endif
MemoryMappingLayoutData data_;

private:
void LoadFromCache();

MemoryMappingLayoutData data_;
};

// Returns code range for the specified module.

@@ -39,6 +39,22 @@

namespace __sanitizer {

#if SANITIZER_FREEBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
const int Mib[] = {
CTL_KERN,
KERN_PROC,
KERN_PROC_PID,
getpid()
};

struct kinfo_proc InfoProc;
uptr Len = sizeof(InfoProc);
CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
}
#endif

void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
const int Mib[] = {
#if SANITIZER_FREEBSD

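The FreeBSD branch above, restated as an ordinary user-space query (same MIB,
public sysctl(3) instead of the runtime's internal_sysctl):

  #include <sys/param.h>
  #include <sys/sysctl.h>
  #include <sys/user.h>   // struct kinfo_proc
  #include <unistd.h>

  static size_t CurrentRssBytes(void) {
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)getpid()};
    struct kinfo_proc kp;
    size_t len = sizeof(kp);
    if (sysctl(mib, 4, &kp, &len, NULL, 0) != 0)
      return 0;
    return (size_t)kp.ki_rssize * (size_t)getpagesize();
  }
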
@@ -145,6 +145,7 @@ void MemoryMappingLayout::DumpListOfModules(
}
}

#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS || SANITIZER_NETBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
char *smaps = nullptr;
uptr smaps_cap = 0;

@@ -184,6 +185,7 @@ void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
while (*pos++ != '\n') {}
}
}
#endif

} // namespace __sanitizer

@@ -146,13 +146,8 @@ static bool IsDyldHdr(const mach_header *hdr) {
// until we hit a Mach header matching dyld instead. These recurse
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations. These assumptions don't
// hold on macOS 13+ anymore (dyld itself has moved into the shared cache).

// FIXME: Unfortunately, the upstream revised version to deal with macOS 13+
// is incompatible with GCC and also uses APIs not available on earlier
// systems which we support; backed out for now.

// so it will be hit after only a few iterations. These assumptions don't hold
// on macOS 13+ anymore (dyld itself has moved into the shared cache).
static mach_header *GetDyldImageHeaderViaVMRegion() {
vm_address_t address = 0;

@@ -176,17 +171,64 @@
}
}

extern "C" {
struct dyld_shared_cache_dylib_text_info {
uint64_t version; // current version 2
// following fields all exist in version 1
uint64_t loadAddressUnslid;
uint64_t textSegmentSize;
uuid_t dylibUuid;
const char *path; // pointer invalid at end of iterations
// following fields all exist in version 2
uint64_t textSegmentOffset; // offset from start of cache
};
typedef struct dyld_shared_cache_dylib_text_info
dyld_shared_cache_dylib_text_info;

extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
extern const void *_dyld_get_shared_cache_range(size_t *length);
extern int dyld_shared_cache_iterate_text(
const uuid_t cacheUuid,
void (^callback)(const dyld_shared_cache_dylib_text_info *info));
} // extern "C"

static mach_header *GetDyldImageHeaderViaSharedCache() {
uuid_t uuid;
bool hasCache = _dyld_get_shared_cache_uuid(uuid);
if (!hasCache)
return nullptr;

size_t cacheLength;
__block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
CHECK(cacheStart && cacheLength);

__block mach_header *dyldHdr = nullptr;
int res = dyld_shared_cache_iterate_text(
uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
CHECK_GE(info->version, 2);
mach_header *hdr =
(mach_header *)(cacheStart + info->textSegmentOffset);
if (IsDyldHdr(hdr))
dyldHdr = hdr;
});
CHECK_EQ(res, 0);

return dyldHdr;
}

const mach_header *get_dyld_hdr() {
if (!dyld_hdr) {
// On macOS 13+, dyld itself has moved into the shared cache. Looking it up
// via vm_region_recurse_64() causes spins/hangs/crashes.
// FIXME: find a way to do this compatible with GCC.
if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
dyld_hdr = GetDyldImageHeaderViaSharedCache();
if (!dyld_hdr) {
VReport(1,
"looking up the dyld image header in the shared cache on "
"macOS 13+ is not yet supported. Falling back to "
"Failed to lookup the dyld image header in the shared cache on "
"macOS 13+ (or no shared cache in use). Falling back to "
"lookup via vm_region_recurse_64().\n");
dyld_hdr = GetDyldImageHeaderViaVMRegion();
}
} else {
dyld_hdr = GetDyldImageHeaderViaVMRegion();
}

@@ -208,7 +250,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data,
MemoryMappingLayoutData *layout_data) {
const char *lc = layout_data->current_load_cmd_addr;

layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;

@@ -316,11 +360,16 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
return false;
}

const ImageHeader *MemoryMappingLayout::CurrentImageHeader() {
const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
: _dyld_get_image_header(data_.current_image);
return (const ImageHeader *)hdr;
}

bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
: _dyld_get_image_header(data_.current_image);
const mach_header *hdr = (const mach_header *)CurrentImageHeader();
if (!hdr) continue;
if (data_.current_load_cmd_count < 0) {
// Set up for this image;

@@ -350,7 +399,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
(const load_command *)data_.current_load_cmd_addr);
}

for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
while (data_.current_load_cmd_count > 0) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64

@@ -371,6 +420,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
data_.current_load_cmd_count = -1; // This will trigger loading next image
}
return false;
}

@@ -87,8 +87,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
// layouts. Assume GCC.
return bp_prev - 1;
// layouts. Assume LLVM.
return bp_prev;
#else
return (uhwptr*)bp;
#endif

@@ -111,21 +111,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
// PowerPC ABIs specify that the return address is saved on the
// *caller's* stack frame. Thus we must dereference the back chain
// to find the caller frame before extracting it.
// PowerPC ABIs specify that the return address is saved at offset
// 16 of the *caller's* stack frame. Thus we must dereference the
// back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
// For most ABIs the offset where the return address is saved is two
// register sizes. The exception is the SVR4 ABI, which uses an
// offset of only one register size.
#ifdef _CALL_SYSV
uhwptr pc1 = caller_frame[1];
#else
uhwptr pc1 = caller_frame[2];
#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__loongarch__) || defined(__riscv)

@@ -91,10 +91,10 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#elif SANITIZER_RISCV64
// RV-64 has variable instruciton length...
// RV-64 has variable instruction length...
// C extentions gives us 2-byte instructoins
// RV-64 has 4-byte instructions
// + RISCV architecture allows instructions up to 8 bytes
// + RISC-V architecture allows instructions up to 8 bytes
// It seems difficult to figure out the exact instruction length -
// pc - 2 seems like a safe option for the purposes of stack tracing
return pc - 2;

@@ -16,7 +16,7 @@
#if SANITIZER_LINUX && \
(defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64)
defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)

#include "sanitizer_stoptheworld.h"

@@ -31,7 +31,8 @@
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \
!SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif

@@ -514,6 +515,12 @@ typedef struct user_pt_regs regs_struct;
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__loongarch__)
typedef struct user_pt_regs regs_struct;
#define REG_SP regs[3]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.

@@ -621,3 +628,4 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
// || defined(__s390__) || defined(__i386__) || defined(__arm__)
// || SANITIZER_LOONGARCH64

@@ -86,6 +86,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
}

Parse(file_contents);
UnmapOrDie(file_contents, contents_size);
}

bool SuppressionContext::Match(const char *str, const char *type,

@@ -13,8 +13,8 @@
#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
#define SANITIZER_SYMBOLIZER_INTERNAL_H

#include "sanitizer_symbolizer.h"
#include "sanitizer_file.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_vector.h"

namespace __sanitizer {

Some files were not shown because too many files have changed in this diff.