libsanitizer: Merge with upstream

Merged revision: 7704fedfff6ef5676adb6415f3be0ac927d1a746

parent 8bf5b49ebd
commit 90e46074e6

133 changed files with 2285 additions and 2709 deletions
@@ -1,4 +1,4 @@
-f58e0513dd95944b81ce7a6e7b49ba656de7d75f
+7704fedfff6ef5676adb6415f3be0ac927d1a746
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
@@ -38,7 +38,6 @@ asan_files = \
 	asan_posix.cpp \
 	asan_premap_shadow.cpp \
 	asan_report.cpp \
-	asan_rtems.cpp \
 	asan_rtl.cpp \
 	asan_shadow_setup.cpp \
 	asan_stack.cpp \
@@ -156,9 +156,9 @@ am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
 	asan_interceptors_memintrinsics.lo asan_linux.lo asan_mac.lo \
 	asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
 	asan_memory_profile.lo asan_new_delete.lo asan_poisoning.lo \
-	asan_posix.lo asan_premap_shadow.lo asan_report.lo \
-	asan_rtems.lo asan_rtl.lo asan_shadow_setup.lo asan_stack.lo \
-	asan_stats.lo asan_suppressions.lo asan_thread.lo asan_win.lo \
+	asan_posix.lo asan_premap_shadow.lo asan_report.lo asan_rtl.lo \
+	asan_shadow_setup.lo asan_stack.lo asan_stats.lo \
+	asan_suppressions.lo asan_thread.lo asan_win.lo \
 	asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo \
 	asan_interceptors_vfork.lo
 am_libasan_la_OBJECTS = $(am__objects_1)
@@ -446,7 +446,6 @@ asan_files = \
 	asan_posix.cpp \
 	asan_premap_shadow.cpp \
 	asan_report.cpp \
-	asan_rtems.cpp \
 	asan_rtl.cpp \
 	asan_shadow_setup.cpp \
 	asan_stack.cpp \
@@ -604,7 +603,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_posix.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_premap_shadow.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_report.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtems.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_shadow_setup.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@@ -852,12 +852,12 @@ struct Allocator {
     quarantine.PrintStats();
   }

-  void ForceLock() {
+  void ForceLock() ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }

-  void ForceUnlock() {
+  void ForceUnlock() RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
@@ -1081,11 +1081,9 @@ uptr asan_mz_size(const void *ptr) {
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }

-void asan_mz_force_lock() {
-  instance.ForceLock();
-}
+void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }

-void asan_mz_force_unlock() {
+void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
   instance.ForceUnlock();
 }
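
Note: the ACQUIRE/RELEASE and NO_THREAD_SAFETY_ANALYSIS macros used in the two
hunks above map onto Clang's thread-safety attributes. A minimal sketch of what
they expand to, with simplified stand-ins for the runtime's own types (compile
with clang++ -Wthread-safety to see the analysis at work; the macro names in
the diff are the runtime's, the code below is illustrative):

    #include <mutex>

    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) { m_.lock(); }
      void Unlock() __attribute__((release_capability())) { m_.unlock(); }

     private:
      std::mutex m_;
    };

    Mutex fallback_mutex;

    // Annotated like the new ForceLock/ForceUnlock: the analysis now knows the
    // mutex is held on return from ForceLock and released by ForceUnlock.
    void ForceLock() __attribute__((acquire_capability(fallback_mutex))) {
      fallback_mutex.Lock();
    }
    void ForceUnlock() __attribute__((release_capability(fallback_mutex))) {
      fallback_mutex.Unlock();
    }

NO_THREAD_SAFETY_ANALYSIS, by contrast, opts a function out of the checker
entirely, which is why asan_mz_force_lock/unlock use it: they take and release
the lock across separate calls, a pattern the analysis cannot follow.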
@@ -533,7 +533,6 @@ static void PrintLegend(InternalScopedString *str) {
   PrintShadowByte(str, "  ASan internal:           ", kAsanInternalHeapMagic);
   PrintShadowByte(str, "  Left alloca redzone:     ", kAsanAllocaLeftMagic);
   PrintShadowByte(str, "  Right alloca redzone:    ", kAsanAllocaRightMagic);
-  PrintShadowByte(str, "  Shadow gap:              ", kAsanShadowGap);
 }

 static void PrintShadowBytes(InternalScopedString *str, const char *before,
@@ -187,7 +187,7 @@ void SetTLSFakeStack(FakeStack *fs) { }
 static FakeStack *GetFakeStack() {
   AsanThread *t = GetCurrentThread();
   if (!t) return nullptr;
-  return t->fake_stack();
+  return t->get_or_create_fake_stack();
 }

 static FakeStack *GetFakeStackFast() {
@@ -198,7 +198,13 @@ static FakeStack *GetFakeStackFast() {
   return GetFakeStack();
 }

-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
+static FakeStack *GetFakeStackFastAlways() {
+  if (FakeStack *fs = GetTLSFakeStack())
+    return fs;
+  return GetFakeStack();
+}
+
+static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
   FakeStack *fs = GetFakeStackFast();
   if (!fs) return 0;
   uptr local_stack;
@@ -210,7 +216,21 @@ ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
   return ptr;
 }

-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
+static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
+  FakeStack *fs = GetFakeStackFastAlways();
+  if (!fs)
+    return 0;
+  uptr local_stack;
+  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
+  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+  if (!ff)
+    return 0;  // Out of fake stack.
+  uptr ptr = reinterpret_cast<uptr>(ff);
+  SetShadow(ptr, size, class_id, 0);
+  return ptr;
+}
+
+static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
   FakeStack::Deallocate(ptr, class_id);
   SetShadow(ptr, size, class_id, kMagic8);
 }
@@ -224,6 +244,10 @@ using namespace __asan;
       __asan_stack_malloc_##class_id(uptr size) { \
     return OnMalloc(class_id, size); \
   } \
+  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
+      __asan_stack_malloc_always_##class_id(uptr size) { \
+    return OnMallocAlways(class_id, size); \
+  } \
   extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
       uptr ptr, uptr size) { \
     OnFree(ptr, class_id, size); \
@@ -240,7 +264,11 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)

 extern "C" {
+// TODO: remove this method and fix tests that use it by setting
+// -asan-use-after-return=never, after modal UAR flag lands
+// (https://github.com/google/sanitizers/issues/1394)
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
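
Note: the new __asan_stack_malloc_always_<N> entry points back the
run-time-selectable use-after-return mode. The plain fast path returns null
(so instrumented code falls back to the real stack) when the thread has no
fake stack yet; the "always" path creates one on demand. A standalone sketch
of that dispatch difference, using simplified stand-ins rather than the real
runtime types:

    #include <cstdint>
    #include <memory>

    struct FakeStack {                        // stand-in for __asan::FakeStack
      uintptr_t Allocate(uintptr_t size) { (void)size; return 0x1000; }
    };

    static thread_local std::unique_ptr<FakeStack> tls_fake_stack;

    static FakeStack *GetFakeStackFastSketch() {  // may return nullptr
      return tls_fake_stack.get();
    }

    static FakeStack *GetFakeStackFastAlwaysSketch() {  // creates on first use
      if (!tls_fake_stack)
        tls_fake_stack = std::make_unique<FakeStack>();
      return tls_fake_stack.get();
    }

    int main() {
      // Fast path before any fake stack exists: falls back (nullptr).
      FakeStack *fast = GetFakeStackFastSketch();
      // "Always" path: guarantees a fake stack, as OnMallocAlways requires.
      FakeStack *always = GetFakeStackFastAlwaysSketch();
      return (fast == nullptr && always != nullptr) ? 0 : 1;
    }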
@@ -155,10 +155,6 @@ void InitializeFlags() {
   CHECK_LE(f->max_redzone, 2048);
   CHECK(IsPowerOfTwo(f->redzone));
   CHECK(IsPowerOfTwo(f->max_redzone));
-  if (SANITIZER_RTEMS) {
-    CHECK(!f->unmap_shadow_on_exit);
-    CHECK(!f->protect_shadow_gap);
-  }

   // quarantine_size is deprecated but we still honor it.
   // quarantine_size can not be used together with quarantine_size_mb.
@@ -87,8 +87,7 @@ ASAN_FLAG(bool, check_malloc_usable_size, true,
           "295.*.")
 ASAN_FLAG(bool, unmap_shadow_on_exit, false,
           "If set, explicitly unmaps the (huge) shadow at exit.")
-ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS,
-          "If set, mprotect the shadow gap")
+ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
 ASAN_FLAG(bool, print_stats, false,
           "Print various statistics after printing an error message or if "
           "atexit=1.")
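
Note: protect_shadow_gap is an ordinary runtime flag, usually set via
ASAN_OPTIONS=protect_shadow_gap=0 at startup. One documented alternative is the
__asan_default_options hook, which ASan calls during initialization (sketch;
link this into a -fsanitize=address binary):

    extern "C" const char *__asan_default_options() {
      return "protect_shadow_gap=0";
    }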
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }

+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
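
Note: to make the intent of the reinstated poisoning-based check concrete,
here is a toy model with simplified stand-ins for ASan's globals list and
shadow poisoning (illustrative only, not the runtime's actual data
structures): registering a global whose start address was already registered,
with a different size, signals a duplicate definition across modules.

    #include <cstdio>
    #include <map>

    struct Global { unsigned long beg; unsigned long size; const char *name; };

    static std::map<unsigned long, Global> registered;  // beg -> first seen

    static void RegisterGlobalSketch(const Global &g) {
      auto [it, fresh] = registered.emplace(g.beg, g);
      if (!fresh && it->second.size != g.size)
        std::printf("ODR violation: %s redefined with size %lu (was %lu)\n",
                    g.name, g.size, it->second.size);
    }

    int main() {
      RegisterGlobalSketch({0x1000, 16, "g_table"});
      RegisterGlobalSketch({0x1000, 32, "g_table"});  // flagged
    }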
@@ -23,10 +23,10 @@
 #include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_libc.h"

-// There is no general interception at all on Fuchsia and RTEMS.
+// There is no general interception at all on Fuchsia.
 // Only the functions in asan_interceptors_memintrinsics.cpp are
 // really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA

 #  if SANITIZER_POSIX
 #    include "sanitizer_common/sanitizer_posix.h"
@@ -34,10 +34,10 @@ void InitializePlatformInterceptors();

 }  // namespace __asan

-// There is no general interception at all on Fuchsia and RTEMS.
+// There is no general interception at all on Fuchsia.
 // Only the functions in asan_interceptors_memintrinsics.h are
 // really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA

 // Use macro to describe if specific function should be
 // intercepted on a given platform.
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
 #  define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#  define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -30,9 +30,9 @@ void *__asan_memmove(void *to, const void *from, uptr size) {
   ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
 }

-#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+#if SANITIZER_FUCHSIA

-// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but
+// Fuchsia doesn't use sanitizer_common_interceptors.inc, but
 // the only things there it wants are these three. Just define them
 // as aliases here rather than repeating the contents.

|
||||||
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
|
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
|
||||||
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
|
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
|
||||||
|
|
||||||
#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS
|
#endif // SANITIZER_FUCHSIA
|
||||||
|
|
|
@ -134,6 +134,17 @@ INTERFACE_FUNCTION(__asan_stack_malloc_7)
|
||||||
INTERFACE_FUNCTION(__asan_stack_malloc_8)
|
INTERFACE_FUNCTION(__asan_stack_malloc_8)
|
||||||
INTERFACE_FUNCTION(__asan_stack_malloc_9)
|
INTERFACE_FUNCTION(__asan_stack_malloc_9)
|
||||||
INTERFACE_FUNCTION(__asan_stack_malloc_10)
|
INTERFACE_FUNCTION(__asan_stack_malloc_10)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_0)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_1)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_2)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_3)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_4)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_5)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_6)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_7)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_8)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_9)
|
||||||
|
INTERFACE_FUNCTION(__asan_stack_malloc_always_10)
|
||||||
INTERFACE_FUNCTION(__asan_store1)
|
INTERFACE_FUNCTION(__asan_store1)
|
||||||
INTERFACE_FUNCTION(__asan_store2)
|
INTERFACE_FUNCTION(__asan_store2)
|
||||||
INTERFACE_FUNCTION(__asan_store4)
|
INTERFACE_FUNCTION(__asan_store4)
|
||||||
|
|
|
@ -35,7 +35,7 @@
|
||||||
// If set, values like allocator chunk size, as well as defaults for some flags
|
// If set, values like allocator chunk size, as well as defaults for some flags
|
||||||
// will be changed towards less memory overhead.
|
// will be changed towards less memory overhead.
|
||||||
#ifndef ASAN_LOW_MEMORY
|
#ifndef ASAN_LOW_MEMORY
|
||||||
# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS
|
# if SANITIZER_IOS || SANITIZER_ANDROID
|
||||||
# define ASAN_LOW_MEMORY 1
|
# define ASAN_LOW_MEMORY 1
|
||||||
# else
|
# else
|
||||||
# define ASAN_LOW_MEMORY 0
|
# define ASAN_LOW_MEMORY 0
|
||||||
|
@@ -77,7 +77,7 @@ void InitializeShadowMemory();
 // asan_malloc_linux.cpp / asan_malloc_mac.cpp
 void ReplaceSystemMalloc();

-// asan_linux.cpp / asan_mac.cpp / asan_rtems.cpp / asan_win.cpp
+// asan_linux.cpp / asan_mac.cpp / asan_win.cpp
 uptr FindDynamicShadowStart();
 void *AsanDoesNotSupportStaticLinkage();
 void AsanCheckDynamicRTPrereqs();
@@ -159,9 +159,6 @@ const int kAsanArrayCookieMagic = 0xac;
 const int kAsanIntraObjectRedzone = 0xbb;
 const int kAsanAllocaLeftMagic = 0xca;
 const int kAsanAllocaRightMagic = 0xcb;
-// Used to populate the shadow gap for systems without memory
-// protection there (i.e. Myriad).
-const int kAsanShadowGap = 0xcc;

 static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
 static const uptr kRetiredStackFrameMagic = 0x45E0360E;
@@ -15,23 +15,22 @@

 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
-    SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
+    SANITIZER_NETBSD || SANITIZER_SOLARIS

-#include "sanitizer_common/sanitizer_allocator_checks.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_tls_get_addr.h"
 #  include "asan_allocator.h"
 #  include "asan_interceptors.h"
 #  include "asan_internal.h"
-#include "asan_malloc_local.h"
 #  include "asan_stack.h"
+#  include "sanitizer_common/sanitizer_allocator_checks.h"
+#  include "sanitizer_common/sanitizer_errno.h"
+#  include "sanitizer_common/sanitizer_tls_get_addr.h"

 // ---------------------- Replacement functions ---------------- {{{1
 using namespace __asan;

 static uptr allocated_for_dlsym;
 static uptr last_dlsym_alloc_size_in_words;
-static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
+static const uptr kDlsymAllocPoolSize = 1024;
 static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];

 static inline bool IsInDlsymAllocPool(const void *ptr) {
@@ -82,27 +81,12 @@ static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
   return 0;
 }

-#if SANITIZER_RTEMS
-void* MemalignFromLocalPool(uptr alignment, uptr size) {
-  void *ptr = nullptr;
-  alignment = Max(alignment, kWordSize);
-  PosixMemalignFromLocalPool(&ptr, alignment, size);
-  return ptr;
-}
-
-bool IsFromLocalPool(const void *ptr) {
-  return IsInDlsymAllocPool(ptr);
-}
-#endif
-
 static inline bool MaybeInDlsym() {
   // Fuchsia doesn't use dlsym-based interceptors.
   return !SANITIZER_FUCHSIA && asan_init_is_running;
 }

-static inline bool UseLocalPool() {
-  return EarlyMalloc() || MaybeInDlsym();
-}
+static inline bool UseLocalPool() { return MaybeInDlsym(); }

 static void *ReallocFromLocalPool(void *ptr, uptr size) {
   const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
@@ -1,52 +0,0 @@
-//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Provide interfaces to check for and handle local pool memory allocation.
-//===----------------------------------------------------------------------===//
-
-#ifndef ASAN_MALLOC_LOCAL_H
-#define ASAN_MALLOC_LOCAL_H
-
-#include "sanitizer_common/sanitizer_platform.h"
-#include "asan_internal.h"
-
-static inline bool EarlyMalloc() {
-  return SANITIZER_RTEMS &&
-         (!__asan::asan_inited || __asan::asan_init_is_running);
-}
-
-#if SANITIZER_RTEMS
-
-bool IsFromLocalPool(const void *ptr);
-void *MemalignFromLocalPool(uptr alignment, uptr size);
-
-// On RTEMS, we use the local pool to handle memory allocation when the ASan
-// run-time is not up. This macro is expanded in the context of the operator new
-// implementation.
-#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow) \
-  do { \
-    if (UNLIKELY(EarlyMalloc())) { \
-      void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size); \
-      if (!nothrow) \
-        CHECK(res); \
-      return res; \
-    } \
-  } while (0)
-
-#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
-
-#else  // SANITIZER_RTEMS
-
-#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow)
-#define IS_FROM_LOCAL_POOL(ptr) 0
-
-#endif  // SANITIZER_RTEMS
-
-#endif  // ASAN_MALLOC_LOCAL_H
@@ -150,17 +150,11 @@
 // || `[0x36000000, 0x39ffffff]` || ShadowGap ||
 // || `[0x30000000, 0x35ffffff]` || LowShadow ||
 // || `[0x00000000, 0x2fffffff]` || LowMem ||
-//
-// Shadow mapping on Myriad2 (for shadow scale 5):
-// || `[0x9ff80000, 0x9fffffff]` || ShadowGap ||
-// || `[0x9f000000, 0x9ff7ffff]` || LowShadow ||
-// || `[0x80000000, 0x9effffff]` || LowMem ||
-// || `[0x00000000, 0x7fffffff]` || Ignored ||

 #if defined(ASAN_SHADOW_SCALE)
 static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
 #else
-static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3;
+static const u64 kDefaultShadowScale = 3;
 #endif
 static const u64 kDefaultShadowSentinel = ~(uptr)0;
 static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
@@ -171,7 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
 static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
 static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
 static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
@@ -180,15 +174,6 @@ static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
 static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
 static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000

-static const u64 kMyriadMemoryOffset32 = 0x80000000ULL;
-static const u64 kMyriadMemorySize32 = 0x20000000ULL;
-static const u64 kMyriadMemoryEnd32 =
-    kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1;
-static const u64 kMyriadShadowOffset32 =
-    (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
-     (kMyriadMemorySize32 >> kDefaultShadowScale));
-static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
-
 #define SHADOW_SCALE kDefaultShadowScale

 #if SANITIZER_FUCHSIA
@@ -206,8 +191,6 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
 #  define SHADOW_OFFSET kWindowsShadowOffset32
 # elif SANITIZER_IOS
 #  define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-# elif SANITIZER_MYRIAD2
-#  define SHADOW_OFFSET kMyriadShadowOffset32
 # else
 #  define SHADOW_OFFSET kDefaultShadowOffset32
 # endif
@@ -278,9 +261,7 @@ extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;  // Initialized in __asan_init.

 }  // namespace __asan

-#if SANITIZER_MYRIAD2
-#include "asan_mapping_myriad.h"
-#elif defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
 # include "asan_mapping_sparc64.h"
 #else
 #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
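
Note: for the default scale of 3, MEM_TO_SHADOW above is simply
shadow = (addr >> 3) + offset. A small worked example, using the well-known
default x86-64 Linux offset (the address value is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kShadowScale = 3;               // 8 app bytes per shadow byte
      const uint64_t kShadowOffset = 0x7fff8000ULL;  // default x86-64 Linux offset
      uint64_t addr = 0x602000000010ULL;             // hypothetical heap address
      uint64_t shadow = (addr >> kShadowScale) + kShadowOffset;
      std::printf("MEM_TO_SHADOW(0x%llx) = 0x%llx\n",
                  (unsigned long long)addr, (unsigned long long)shadow);
      return 0;
    }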
@@ -363,7 +344,7 @@ static inline bool AddrIsInShadowGap(uptr a) {

 }  // namespace __asan

-#endif  // SANITIZER_MYRIAD2
+#endif

 namespace __asan {

@@ -393,8 +374,6 @@ static inline bool AddrIsAlignedByGranularity(uptr a) {

 static inline bool AddressIsPoisoned(uptr a) {
   PROFILE_ASAN_MAPPING();
-  if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
-    return false;
   const uptr kAccessSize = 1;
   u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
   s8 shadow_value = *shadow_address;
@@ -1,85 +0,0 @@
-//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Myriad-specific definitions for ASan memory mapping.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_MAPPING_MYRIAD_H
-#define ASAN_MAPPING_MYRIAD_H
-
-#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32)
-#define MEM_TO_SHADOW(mem) \
-  (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET))
-
-#define kLowMemBeg kMyriadMemoryOffset32
-#define kLowMemEnd (SHADOW_OFFSET - 1)
-
-#define kLowShadowBeg SHADOW_OFFSET
-#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
-
-#define kHighMemBeg 0
-
-#define kHighShadowBeg 0
-#define kHighShadowEnd 0
-
-#define kMidShadowBeg 0
-#define kMidShadowEnd 0
-
-#define kShadowGapBeg (kLowShadowEnd + 1)
-#define kShadowGapEnd kMyriadMemoryEnd32
-
-#define kShadowGap2Beg 0
-#define kShadowGap2End 0
-
-#define kShadowGap3Beg 0
-#define kShadowGap3End 0
-
-namespace __asan {
-
-static inline bool AddrIsInLowMem(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  a = RAW_ADDR(a);
-  return a >= kLowMemBeg && a <= kLowMemEnd;
-}
-
-static inline bool AddrIsInLowShadow(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  a = RAW_ADDR(a);
-  return a >= kLowShadowBeg && a <= kLowShadowEnd;
-}
-
-static inline bool AddrIsInMidMem(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  return false;
-}
-
-static inline bool AddrIsInMidShadow(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  return false;
-}
-
-static inline bool AddrIsInHighMem(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  return false;
-}
-
-static inline bool AddrIsInHighShadow(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  return false;
-}
-
-static inline bool AddrIsInShadowGap(uptr a) {
-  PROFILE_ASAN_MAPPING();
-  a = RAW_ADDR(a);
-  return a >= kShadowGapBeg && a <= kShadowGapEnd;
-}
-
-}  // namespace __asan
-
-#endif  // ASAN_MAPPING_MYRIAD_H
@@ -11,16 +11,14 @@
 // Interceptors for operators new and delete.
 //===----------------------------------------------------------------------===//

+#include <stddef.h>
+
 #include "asan_allocator.h"
 #include "asan_internal.h"
-#include "asan_malloc_local.h"
 #include "asan_report.h"
 #include "asan_stack.h"

 #include "interception/interception.h"

-#include <stddef.h>
-
 // C++ operators can't have dllexport attributes on Windows. We export them
 // anyway by passing extra -export flags to the linker, which is exactly that
 // dllexport would normally do. We need to export them in order to make the
@@ -72,14 +70,12 @@ enum class align_val_t: size_t {};
 // For local pool allocation, align to SHADOW_GRANULARITY to match asan
 // allocator behavior.
 #define OPERATOR_NEW_BODY(type, nothrow) \
-  MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
   GET_STACK_TRACE_MALLOC; \
   void *res = asan_memalign(0, size, &stack, type); \
   if (!nothrow && UNLIKELY(!res)) \
     ReportOutOfMemory(size, &stack); \
   return res;
 #define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
-  MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
   GET_STACK_TRACE_MALLOC; \
   void *res = asan_memalign((uptr)align, size, &stack, type); \
   if (!nothrow && UNLIKELY(!res)) \
@@ -135,22 +131,18 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
 #endif  // !SANITIZER_MAC

 #define OPERATOR_DELETE_BODY(type) \
-  if (IS_FROM_LOCAL_POOL(ptr)) return; \
   GET_STACK_TRACE_FREE; \
   asan_delete(ptr, 0, 0, &stack, type);

 #define OPERATOR_DELETE_BODY_SIZE(type) \
-  if (IS_FROM_LOCAL_POOL(ptr)) return; \
   GET_STACK_TRACE_FREE; \
   asan_delete(ptr, size, 0, &stack, type);

 #define OPERATOR_DELETE_BODY_ALIGN(type) \
-  if (IS_FROM_LOCAL_POOL(ptr)) return; \
   GET_STACK_TRACE_FREE; \
   asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);

 #define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
-  if (IS_FROM_LOCAL_POOL(ptr)) return; \
   GET_STACK_TRACE_FREE; \
   asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);

@@ -173,17 +173,13 @@ int __asan_address_is_poisoned(void const volatile *addr) {
 }

 uptr __asan_region_is_poisoned(uptr beg, uptr size) {
-  if (!size) return 0;
+  if (!size)
+    return 0;
   uptr end = beg + size;
-  if (SANITIZER_MYRIAD2) {
-    // On Myriad, address not in DRAM range need to be treated as
-    // unpoisoned.
-    if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
-    if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
-  } else {
-    if (!AddrIsInMem(beg)) return beg;
-    if (!AddrIsInMem(end)) return end;
-  }
+  if (!AddrIsInMem(beg))
+    return beg;
+  if (!AddrIsInMem(end))
+    return end;
   CHECK_LT(beg, end);
   uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
   uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
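
Note: __asan_region_is_poisoned also backs the public sanitizer interface. A
usage sketch against <sanitizer/asan_interface.h> (build with
-fsanitize=address; poisoning granularity is 8 bytes, so the exact reported
address can round):

    #include <sanitizer/asan_interface.h>
    #include <cstdio>

    int main() {
      char buf[64];
      __asan_poison_memory_region(buf + 8, 16);
      // Returns the address of the first poisoned byte in the range, or null.
      void *first = __asan_region_is_poisoned(buf, sizeof(buf));
      std::printf("first poisoned byte: %p (buf+8 = %p)\n", first,
                  (void *)(buf + 8));
      __asan_unpoison_memory_region(buf, sizeof(buf));
      return 0;
    }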
@@ -192,8 +188,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
   // First check the first and the last application bytes,
   // then check the SHADOW_GRANULARITY-aligned region by calling
   // mem_is_zero on the corresponding shadow.
-  if (!__asan::AddressIsPoisoned(beg) &&
-      !__asan::AddressIsPoisoned(end - 1) &&
+  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
       (shadow_end <= shadow_beg ||
        __sanitizer::mem_is_zero((const char *)shadow_beg,
                                 shadow_end - shadow_beg)))
@@ -51,9 +51,6 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
   // probably provide higher-level interface for these operations.
   // For now, just memset on Windows.
   if (value || SANITIZER_WINDOWS == 1 ||
-      // RTEMS doesn't have have pages, let alone a fast way to zero
-      // them, so default to memset.
-      SANITIZER_RTEMS == 1 ||
       shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
     REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
   } else {
@@ -1,266 +0,0 @@
-//===-- asan_rtems.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// RTEMS-specific details.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_rtems.h"
-#if SANITIZER_RTEMS
-
-#include "asan_internal.h"
-#include "asan_interceptors.h"
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-
-#include <pthread.h>
-#include <stdlib.h>
-
-namespace __asan {
-
-static void ResetShadowMemory() {
-  uptr shadow_start = SHADOW_OFFSET;
-  uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
-  uptr gap_start = MEM_TO_SHADOW(shadow_start);
-  uptr gap_end = MEM_TO_SHADOW(shadow_end);
-
-  REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
-  REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
-}
-
-void InitializeShadowMemory() {
-  kHighMemEnd = 0;
-  kMidMemBeg = 0;
-  kMidMemEnd = 0;
-
-  ResetShadowMemory();
-}
-
-void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
-  UNIMPLEMENTED();
-}
-
-void FlushUnneededASanShadowMemory(uptr p, uptr size) {
-  // Since asan's mapping is compacting, the shadow chunk may be
-  // not page-aligned, so we only flush the page-aligned portion.
-  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
-}
-
-void AsanCheckDynamicRTPrereqs() {}
-void AsanCheckIncompatibleRT() {}
-void InitializeAsanInterceptors() {}
-void InitializePlatformInterceptors() {}
-void InitializePlatformExceptionHandlers() {}
-
-// RTEMS only support static linking; it sufficies to return with no
-// error.
-void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
-
-void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
-  UNIMPLEMENTED();
-}
-
-bool PlatformUnpoisonStacks() { return false; }
-
-void EarlyInit() {
-  // Provide early initialization of shadow memory so that
-  // instrumented code running before full initialzation will not
-  // report spurious errors.
-  ResetShadowMemory();
-}
-
-// We can use a plain thread_local variable for TSD.
-static thread_local void *per_thread;
-
-void *AsanTSDGet() { return per_thread; }
-
-void AsanTSDSet(void *tsd) { per_thread = tsd; }
-
-// There's no initialization needed, and the passed-in destructor
-// will never be called. Instead, our own thread destruction hook
-// (below) will call AsanThread::TSDDtor directly.
-void AsanTSDInit(void (*destructor)(void *tsd)) {
-  DCHECK(destructor == &PlatformTSDDtor);
-}
-
-void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
-
-//
-// Thread registration. We provide an API similar to the Fushia port.
-//
-
-struct AsanThread::InitOptions {
-  uptr stack_bottom, stack_size, tls_bottom, tls_size;
-};
-
-// Shared setup between thread creation and startup for the initial thread.
-static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
-                                    uptr user_id, bool detached,
-                                    uptr stack_bottom, uptr stack_size,
-                                    uptr tls_bottom, uptr tls_size) {
-  // In lieu of AsanThread::Create.
-  AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
-  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
-  asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
-
-  // On other systems, AsanThread::Init() is called from the new
-  // thread itself. But on RTEMS we already know the stack address
-  // range beforehand, so we can do most of the setup right now.
-  const AsanThread::InitOptions options = {stack_bottom, stack_size,
-                                           tls_bottom, tls_size};
-  thread->Init(&options);
-  return thread;
-}
-
-// This gets the same arguments passed to Init by CreateAsanThread, above.
-// We're in the creator thread before the new thread is actually started, but
-// its stack and tls address range are already known.
-void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
-  DCHECK_NE(GetCurrentThread(), this);
-  DCHECK_NE(GetCurrentThread(), nullptr);
-  CHECK_NE(options->stack_bottom, 0);
-  CHECK_NE(options->stack_size, 0);
-  stack_bottom_ = options->stack_bottom;
-  stack_top_ = options->stack_bottom + options->stack_size;
-  tls_begin_ = options->tls_bottom;
-  tls_end_ = options->tls_bottom + options->tls_size;
-}
-
-// Called by __asan::AsanInitInternal (asan_rtl.c). Unlike other ports, the
-// main thread on RTEMS does not require special treatment; its AsanThread is
-// already created by the provided hooks. This function simply looks up and
-// returns the created thread.
-AsanThread *CreateMainThread() {
-  return GetThreadContextByTidLocked(0)->thread;
-}
-
-// This is called before each thread creation is attempted. So, in
-// its first call, the calling thread is the initial and sole thread.
-static void *BeforeThreadCreateHook(uptr user_id, bool detached,
-                                    uptr stack_bottom, uptr stack_size,
-                                    uptr tls_bottom, uptr tls_size) {
-  EnsureMainThreadIDIsCorrect();
-  // Strict init-order checking is thread-hostile.
-  if (flags()->strict_init_order) StopInitOrderChecking();
-
-  GET_STACK_TRACE_THREAD;
-  u32 parent_tid = GetCurrentTidOrInvalid();
-
-  return CreateAsanThread(&stack, parent_tid, user_id, detached,
-                          stack_bottom, stack_size, tls_bottom, tls_size);
-}
-
-// This is called after creating a new thread (in the creating thread),
-// with the pointer returned by BeforeThreadCreateHook (above).
-static void ThreadCreateHook(void *hook, bool aborted) {
-  AsanThread *thread = static_cast<AsanThread *>(hook);
-  if (!aborted) {
-    // The thread was created successfully.
-    // ThreadStartHook is already running in the new thread.
-  } else {
-    // The thread wasn't created after all.
-    // Clean up everything we set up in BeforeThreadCreateHook.
-    asanThreadRegistry().FinishThread(thread->tid());
-    UnmapOrDie(thread, sizeof(AsanThread));
-  }
-}
-
-// This is called (1) in the newly-created thread before it runs anything else,
-// with the pointer returned by BeforeThreadCreateHook (above). (2) before a
-// thread restart.
-static void ThreadStartHook(void *hook, uptr os_id) {
-  if (!hook)
-    return;
-
-  AsanThread *thread = static_cast<AsanThread *>(hook);
-  SetCurrentThread(thread);
-
-  ThreadStatus status =
-      asanThreadRegistry().GetThreadLocked(thread->tid())->status;
-  DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
-  // Determine whether we are starting or restarting the thread.
-  if (status == ThreadStatusCreated) {
-    // In lieu of AsanThread::ThreadStart.
-    asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular,
-                                     nullptr);
-  } else {
-    // In a thread restart, a thread may resume execution at an
-    // arbitrary function entry point, with its stack and TLS state
-    // reset. We unpoison the stack in that case.
-    PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
-  }
-}
-
-// Each thread runs this just before it exits,
-// with the pointer returned by BeforeThreadCreateHook (above).
-// All per-thread destructors have already been called.
-static void ThreadExitHook(void *hook, uptr os_id) {
-  AsanThread *thread = static_cast<AsanThread *>(hook);
-  if (thread)
-    AsanThread::TSDDtor(thread->context());
-}
-
-static void HandleExit() {
-  // Disable ASan by setting it to uninitialized. Also reset the
-  // shadow memory to avoid reporting errors after the run-time has
-  // been desroyed.
-  if (asan_inited) {
-    asan_inited = false;
-    ResetShadowMemory();
-  }
-}
-
-bool HandleDlopenInit() {
-  // Not supported on this platform.
-  static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
-                "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
-  return false;
-}
-}  // namespace __asan
-
-// These are declared (in extern "C") by <some_path/sanitizer.h>.
-// The system runtime will call our definitions directly.
-
-extern "C" {
-void __sanitizer_early_init() {
-  __asan::EarlyInit();
-}
-
-void *__sanitizer_before_thread_create_hook(uptr thread, bool detached,
-                                            const char *name,
-                                            void *stack_base, size_t stack_size,
-                                            void *tls_base, size_t tls_size) {
-  return __asan::BeforeThreadCreateHook(
-      thread, detached,
-      reinterpret_cast<uptr>(stack_base), stack_size,
-      reinterpret_cast<uptr>(tls_base), tls_size);
-}
-
-void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) {
-  __asan::ThreadCreateHook(handle, status != 0);
-}
-
-void __sanitizer_thread_start_hook(void *handle, uptr self) {
-  __asan::ThreadStartHook(handle, self);
-}
-
-void __sanitizer_thread_exit_hook(void *handle, uptr self) {
-  __asan::ThreadExitHook(handle, self);
-}
-
-void __sanitizer_exit() {
-  __asan::HandleExit();
-}
-}  // "C"
-
-#endif  // SANITIZER_RTEMS
@@ -13,6 +13,7 @@

 #include "asan_activation.h"
 #include "asan_allocator.h"
+#include "asan_fake_stack.h"
 #include "asan_interceptors.h"
 #include "asan_interface_internal.h"
 #include "asan_internal.h"
@@ -23,11 +24,11 @@
 #include "asan_stats.h"
 #include "asan_suppressions.h"
 #include "asan_thread.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
-#include "lsan/lsan_common.h"
 #include "ubsan/ubsan_init.h"
 #include "ubsan/ubsan_platform.h"

|
@ -137,8 +138,6 @@ ASAN_REPORT_ERROR_N(load, false)
|
||||||
ASAN_REPORT_ERROR_N(store, true)
|
ASAN_REPORT_ERROR_N(store, true)
|
||||||
|
|
||||||
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
|
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
|
||||||
if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
|
|
||||||
return; \
|
|
||||||
uptr sp = MEM_TO_SHADOW(addr); \
|
uptr sp = MEM_TO_SHADOW(addr); \
|
||||||
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
|
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
|
||||||
: *reinterpret_cast<u16 *>(sp); \
|
: *reinterpret_cast<u16 *>(sp); \
|
||||||
|
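
Note: the callback body above performs the classic shadow test. For an access
of size <= 8, a shadow byte of 0 means the whole 8-byte granule is
addressable, and a small positive value k means only the first k bytes are. A
standalone sketch of that predicate (simplified; the real macro also handles
16-byte accesses via a u16 load, and negative shadow values mark fully
poisoned granules):

    #include <cstdint>

    // True if an access of 'size' bytes at 'addr' touches poisoned memory,
    // given the shadow byte covering its 8-byte granule (scale 3 assumed).
    static bool AccessIsPoisoned(int8_t shadow_byte, uint64_t addr,
                                 uint64_t size) {
      if (shadow_byte == 0)
        return false;                               // whole granule addressable
      int8_t last = (int8_t)((addr & 7) + size - 1);  // last byte touched
      return last >= shadow_byte;                   // negative: always poisoned
    }

    int main() {
      // 4-byte access at granule start, 4 addressable bytes: not poisoned.
      return AccessIsPoisoned(/*shadow_byte=*/4, /*addr=*/0x1000, /*size=*/4);
    }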
@@ -150,8 +149,7 @@ ASAN_REPORT_ERROR_N(store, true)
       *__asan_test_only_reported_buggy_pointer = addr; \
     } else { \
       GET_CALLER_PC_BP_SP; \
-      ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \
-                         fatal); \
+      ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal); \
     } \
   } \
 }
@@ -305,7 +303,6 @@ static void asan_atexit() {
 }

 static void InitializeHighMemEnd() {
-#if !SANITIZER_MYRIAD2
 #if !ASAN_FIXED_MAPPING
   kHighMemEnd = GetMaxUserVirtualAddress();
   // Increase kHighMemEnd to make sure it's properly
@@ -313,7 +310,6 @@ static void InitializeHighMemEnd() {
   kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
 #endif  // !ASAN_FIXED_MAPPING
   CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
-#endif  // !SANITIZER_MYRIAD2
 }

 void PrintAddressSpaceLayout() {
@@ -569,9 +565,6 @@ static void UnpoisonDefaultStack() {
     const uptr page_size = GetPageSizeCached();
     top = curr_thread->stack_top();
     bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
-  } else if (SANITIZER_RTEMS) {
-    // Give up On RTEMS.
-    return;
   } else {
     CHECK(!SANITIZER_FUCHSIA);
     // If we haven't seen this thread, try asking the OS for stack bounds.
@@ -586,8 +579,12 @@ static void UnpoisonDefaultStack() {

 static void UnpoisonFakeStack() {
   AsanThread *curr_thread = GetCurrentThread();
-  if (curr_thread && curr_thread->has_fake_stack())
-    curr_thread->fake_stack()->HandleNoReturn();
+  if (!curr_thread)
+    return;
+  FakeStack *stack = curr_thread->get_fake_stack();
+  if (!stack)
+    return;
+  stack->HandleNoReturn();
 }

 }  // namespace __asan
libsanitizer/asan/asan_shadow_setup.cpp

@ -13,9 +13,8 @@

 #include "sanitizer_common/sanitizer_platform.h"

-// asan_fuchsia.cpp and asan_rtems.cpp have their own
-// InitializeShadowMemory implementation.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+// asan_fuchsia.cpp has its own InitializeShadowMemory implementation.
+#if !SANITIZER_FUCHSIA

 #  include "asan_internal.h"
 #  include "asan_mapping.h"
@ -123,4 +122,4 @@ void InitializeShadowMemory() {

 }  // namespace __asan

-#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif  // !SANITIZER_FUCHSIA

libsanitizer/asan/asan_stack.cpp

@ -74,7 +74,8 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
   if (SANITIZER_MIPS && t &&
       !IsValidFrame(bp, t->stack_top(), t->stack_bottom()))
     return;
-  Unwind(max_depth, pc, bp, context, 0, 0, false);
+  Unwind(max_depth, pc, bp, context, t ? t->stack_top() : 0,
+         t ? t->stack_bottom() : 0, false);
 }

 // ------------------ Interface -------------- {{{1
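The change above threads the thread's stack bounds into the frame-pointer unwinder, so a bogus frame chain stops at the stack edge instead of dereferencing junk. A minimal sketch of such a bounds-checked walk, assuming the common `{saved fp, return address}` frame record layout (names here are illustrative, not the runtime's):

    #include <cstddef>
    #include <cstdint>

    struct FrameRecord {
      uintptr_t next_fp;   // saved frame pointer of the caller
      uintptr_t ret_addr;  // return address into the caller
    };

    // Walks at most max_depth frames starting at fp, storing return addresses
    // into out. Frames outside [stack_bottom, stack_top) or not moving
    // strictly toward the stack top are treated as corrupt and stop the walk.
    size_t UnwindWithBounds(uintptr_t fp, uintptr_t stack_bottom,
                            uintptr_t stack_top, uintptr_t *out,
                            size_t max_depth) {
      size_t n = 0;
      while (n < max_depth && fp >= stack_bottom &&
             fp + sizeof(FrameRecord) <= stack_top &&
             fp % alignof(FrameRecord) == 0) {
        const FrameRecord *frame = reinterpret_cast<const FrameRecord *>(fp);
        if (!frame->ret_addr)
          break;
        out[n++] = frame->ret_addr;
        if (frame->next_fp <= fp)  // frames must grow toward the stack top
          break;
        fp = frame->next_fp;
      }
      return n;
    }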
libsanitizer/asan/asan_thread.cpp

@ -60,8 +60,8 @@ ThreadRegistry &asanThreadRegistry() {
     // in TSD and can't reliably tell when no more TSD destructors will
     // be called. It would be wrong to reuse AsanThreadContext for another
     // thread before all TSD destructors will be called for it.
-    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
-        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+    asan_thread_registry =
+        new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
     initialized = true;
   }
   return *asan_thread_registry;
@ -257,10 +257,9 @@ void AsanThread::Init(const InitOptions *options) {
                      &local);
 }

-// Fuchsia and RTEMS don't use ThreadStart.
-// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
-// SetThreadStackAndTls.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+// Fuchsia doesn't use ThreadStart.
+// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
+#if !SANITIZER_FUCHSIA

 thread_return_t AsanThread::ThreadStart(tid_t os_id) {
   Init();
@ -317,7 +316,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
   }
 }

-#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif  // !SANITIZER_FUCHSIA

 void AsanThread::ClearShadowForThreadStackAndTLS() {
   if (stack_top_ != stack_bottom_)
@ -339,8 +338,8 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
   uptr bottom = 0;
   if (AddrIsInStack(addr)) {
     bottom = stack_bottom();
-  } else if (has_fake_stack()) {
-    bottom = fake_stack()->AddrIsInFakeStack(addr);
+  } else if (FakeStack *fake_stack = get_fake_stack()) {
+    bottom = fake_stack->AddrIsInFakeStack(addr);
     CHECK(bottom);
     access->offset = addr - bottom;
     access->frame_pc = ((uptr*)bottom)[2];
@ -380,8 +379,8 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
   uptr bottom = 0;
   if (AddrIsInStack(addr)) {
     bottom = stack_bottom();
-  } else if (has_fake_stack()) {
-    bottom = fake_stack()->AddrIsInFakeStack(addr);
+  } else if (FakeStack *fake_stack = get_fake_stack()) {
+    bottom = fake_stack->AddrIsInFakeStack(addr);
     if (bottom == 0) {
       return 0;
     }
@ -411,17 +410,17 @@ static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                        void *addr) {
   AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
   AsanThread *t = tctx->thread;
-  if (!t) return false;
-  if (t->AddrIsInStack((uptr)addr)) return true;
-  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
-    return true;
-  return false;
+  if (!t)
+    return false;
+  if (t->AddrIsInStack((uptr)addr))
+    return true;
+  FakeStack *fake_stack = t->get_fake_stack();
+  if (!fake_stack)
+    return false;
+  return fake_stack->AddrIsInFakeStack((uptr)addr);
 }

 AsanThread *GetCurrentThread() {
-  if (SANITIZER_RTEMS && !asan_inited)
-    return nullptr;
-
   AsanThreadContext *context =
       reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
   if (!context) {
@ -503,8 +502,12 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg) {
   __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
-  if (t && t->has_fake_stack())
-    t->fake_stack()->ForEachFakeFrame(callback, arg);
+  if (!t)
+    return;
+  __asan::FakeStack *fake_stack = t->get_fake_stack();
+  if (!fake_stack)
+    return;
+  fake_stack->ForEachFakeFrame(callback, arg);
 }

 void LockThreadRegistry() {
libsanitizer/asan/asan_thread.h

@ -28,8 +28,6 @@ struct DTLS;

 namespace __asan {

-const u32 kMaxNumberOfThreads = (1 << 22);  // 4M
-
 class AsanThread;

 // These objects are created for every thread and are never deleted,
|
||||||
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
|
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
|
||||||
uptr *size_old);
|
uptr *size_old);
|
||||||
|
|
||||||
bool has_fake_stack() {
|
FakeStack *get_fake_stack() {
|
||||||
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
|
|
||||||
(reinterpret_cast<uptr>(fake_stack_) > 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
FakeStack *fake_stack() {
|
|
||||||
if (!__asan_option_detect_stack_use_after_return)
|
|
||||||
return nullptr;
|
|
||||||
if (atomic_load(&stack_switching_, memory_order_relaxed))
|
if (atomic_load(&stack_switching_, memory_order_relaxed))
|
||||||
return nullptr;
|
return nullptr;
|
||||||
if (!has_fake_stack())
|
if (reinterpret_cast<uptr>(fake_stack_) <= 1)
|
||||||
|
return nullptr;
|
||||||
|
return fake_stack_;
|
||||||
|
}
|
||||||
|
|
||||||
|
FakeStack *get_or_create_fake_stack() {
|
||||||
|
if (atomic_load(&stack_switching_, memory_order_relaxed))
|
||||||
|
return nullptr;
|
||||||
|
if (reinterpret_cast<uptr>(fake_stack_) <= 1)
|
||||||
return AsyncSignalSafeLazyInitFakeStack();
|
return AsyncSignalSafeLazyInitFakeStack();
|
||||||
return fake_stack_;
|
return fake_stack_;
|
||||||
}
|
}
|
||||||
|
|
|
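The old `has_fake_stack()`/`fake_stack()` pair split one decision across two calls: the fake stack could change between the check and the use, and `fake_stack()` could lazily create a stack in contexts that only wanted to inspect one. The replacement accessors fold check and fetch into a single call that returns the pointer (or `nullptr`), and the creating variant is now explicit. A condensed sketch of the pattern, using standard atomics in place of the runtime's:

    #include <atomic>
    #include <cstdint>

    struct FakeStack { /* elided */ };

    class ThreadLike {
      std::atomic<bool> stack_switching_{false};
      FakeStack *fake_stack_ = nullptr;  // values 0/1 mean "no stack yet"

     public:
      // Check and fetch in one call, so the caller holds one stable pointer.
      FakeStack *get_fake_stack() {
        if (stack_switching_.load(std::memory_order_relaxed))
          return nullptr;
        if (reinterpret_cast<uintptr_t>(fake_stack_) <= 1)
          return nullptr;
        return fake_stack_;
      }
    };

    void Use(ThreadLike &t) {
      // Racy two-step: if (t.has_fake_stack()) t.fake_stack()->...;
      // Single-step instead: the test and the use see the same pointer.
      if (FakeStack *fs = t.get_fake_stack()) {
        (void)fs;  // operate on fs here
      }
    }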
libsanitizer/hwasan/Makefile.am

@ -13,11 +13,13 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
 toolexeclib_LTLIBRARIES = libhwasan.la

 hwasan_files = \
+        hwasan_allocation_functions.cpp \
         hwasan_allocator.cpp \
         hwasan.cpp \
         hwasan_dynamic_shadow.cpp \
         hwasan_exceptions.cpp \
         hwasan_flags.inc \
+        hwasan_fuchsia.cpp \
         hwasan_globals.cpp \
         hwasan_interceptors.cpp \
         hwasan_interceptors_vfork.S \
libsanitizer/hwasan/Makefile.in

@ -146,8 +146,9 @@ am__DEPENDENCIES_1 =
 libhwasan_la_DEPENDENCIES = \
         $(top_builddir)/sanitizer_common/libsanitizer_common.la \
         $(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1)
-am__objects_1 = hwasan_allocator.lo hwasan.lo hwasan_dynamic_shadow.lo \
-        hwasan_exceptions.lo hwasan_globals.lo hwasan_interceptors.lo \
+am__objects_1 = hwasan_allocation_functions.lo hwasan_allocator.lo \
+        hwasan.lo hwasan_dynamic_shadow.lo hwasan_exceptions.lo \
+        hwasan_fuchsia.lo hwasan_globals.lo hwasan_interceptors.lo \
         hwasan_interceptors_vfork.lo hwasan_linux.lo \
         hwasan_memintrinsics.lo hwasan_new_delete.lo \
         hwasan_poisoning.lo hwasan_report.lo hwasan_setjmp.lo \
@ -411,11 +412,13 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
 ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
 toolexeclib_LTLIBRARIES = libhwasan.la
 hwasan_files = \
+        hwasan_allocation_functions.cpp \
         hwasan_allocator.cpp \
         hwasan.cpp \
         hwasan_dynamic_shadow.cpp \
         hwasan_exceptions.cpp \
         hwasan_flags.inc \
+        hwasan_fuchsia.cpp \
         hwasan_globals.cpp \
         hwasan_interceptors.cpp \
         hwasan_interceptors_vfork.S \
@ -554,9 +557,11 @@ distclean-compile:
         -rm -f *.tab.c

 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_allocation_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_allocator.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_dynamic_shadow.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_exceptions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_fuchsia.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_globals.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_interceptors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_interceptors_vfork.Plo@am__quote@
libsanitizer/hwasan/hwasan.cpp

@ -50,6 +50,11 @@ bool hwasan_init_is_running;

 int hwasan_report_count = 0;

+uptr kLowShadowStart;
+uptr kLowShadowEnd;
+uptr kHighShadowStart;
+uptr kHighShadowEnd;
+
 void Flags::SetDefaults() {
 #define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
 #include "hwasan_flags.inc"
@ -177,6 +182,65 @@ void UpdateMemoryUsage() {
 void UpdateMemoryUsage() {}
 #endif

+void HwasanAtExit() {
+  if (common_flags()->print_module_map)
+    DumpProcessMap();
+  if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
+    ReportStats();
+  if (hwasan_report_count > 0) {
+    // ReportAtExitStatistics();
+    if (common_flags()->exitcode)
+      internal__exit(common_flags()->exitcode);
+  }
+}
+
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+                       uptr *registers_frame) {
+  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+  BufferedStackTrace *stack = stack_buffer.data();
+  stack->Reset();
+  stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
+
+  // The second stack frame contains the failure __hwasan_check function, as
+  // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
+  // we wish to ignore. This (currently) only occurs on AArch64, as x64
+  // implementations use SIGTRAP to implement the failure, and thus do not go
+  // through the stack saver.
+  if (registers_frame && stack->trace && stack->size > 0) {
+    stack->trace++;
+    stack->size--;
+  }
+
+  bool fatal = flags()->halt_on_error || !ai.recover;
+  ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
+                    registers_frame);
+}
+
+void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
+                       size_t outsize) {
+  __hwasan::AccessInfo ai;
+  ai.is_store = access_info & 0x10;
+  ai.is_load = !ai.is_store;
+  ai.recover = access_info & 0x20;
+  ai.addr = addr;
+  if ((access_info & 0xf) == 0xf)
+    ai.size = outsize;
+  else
+    ai.size = 1 << (access_info & 0xf);
+
+  HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
+                    (uptr)__builtin_frame_address(0), nullptr, registers_frame);
+  __builtin_unreachable();
+}
+
+Thread *GetCurrentThread() {
+  uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
+  if (UNLIKELY(*ThreadLongPtr == 0))
+    return nullptr;
+  auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
+  return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
+}
+
 } // namespace __hwasan

 using namespace __hwasan;
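`HwasanTagMismatch` above decodes the `access_info` word produced by the compiler-inserted check: bit 4 distinguishes stores from loads, bit 5 says whether the check may recover, and the low nibble encodes log2 of the access size, with 0xf meaning the size arrives separately in `outsize`. A worked decoding of one value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Example: 0x23 = 0b100011 -> load (bit 4 clear), recoverable (bit 5
      // set), size = 1 << 3 = 8 bytes.
      const uint64_t access_info = 0x23;
      const uint64_t outsize = 64;  // only used when the low nibble is 0xf

      bool is_store = access_info & 0x10;
      bool recover = access_info & 0x20;
      uint64_t size = ((access_info & 0xf) == 0xf)
                          ? outsize
                          : uint64_t{1} << (access_info & 0xf);

      std::printf("%s, %s, size=%llu\n", is_store ? "store" : "load",
                  recover ? "recoverable" : "fatal",
                  static_cast<unsigned long long>(size));
      return 0;
    }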
@ -216,7 +280,7 @@ static void InitLoadedGlobals() {
 static void InitInstrumentation() {
   if (hwasan_instrumentation_inited) return;

-  InitPrctl();
+  InitializeOsSupport();

   if (!InitShadow()) {
     Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
@ -225,7 +289,6 @@ static void InitInstrumentation() {
   }

   InitThreads();
-  hwasanThreadList().CreateCurrentThread();

   hwasan_instrumentation_inited = 1;
 }
@ -495,7 +558,7 @@ void __hwasan_print_memory_usage() {
   Printf("%s\n", s.data());
 }

-static const u8 kFallbackTag = 0xBB;
+static const u8 kFallbackTag = 0xBB & kTagMask;

 u8 __hwasan_generate_tag() {
   Thread *t = GetCurrentThread();
@ -516,4 +579,12 @@ void __sanitizer_print_stack_trace() {
   GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
   stack.Print();
 }
+
+// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
+// rest of the mismatch handling code (C++).
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+                            size_t outsize) {
+  __hwasan::HwasanTagMismatch(addr, access_info, registers_frame, outsize);
+}
+
 } // extern "C"
libsanitizer/hwasan/hwasan.h

@ -36,7 +36,10 @@

 typedef u8 tag_t;

-#if defined(__x86_64__)
+#if defined(HWASAN_ALIASING_MODE)
+#  if !defined(__x86_64__)
+#    error Aliasing mode is only supported on x86_64
+#  endif
 // Tags are done in middle bits using userspace aliasing.
 constexpr unsigned kAddressTagShift = 39;
 constexpr unsigned kTagBits = 3;
@ -49,12 +52,16 @@ constexpr unsigned kTagBits = 3;
 // simpler/faster shadow calculation.
 constexpr unsigned kTaggableRegionCheckShift =
     __sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
+#elif defined(__x86_64__)
+// Tags are done in upper bits using Intel LAM.
+constexpr unsigned kAddressTagShift = 57;
+constexpr unsigned kTagBits = 6;
 #else
 // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
 // translation and can be used to store a tag.
 constexpr unsigned kAddressTagShift = 56;
 constexpr unsigned kTagBits = 8;
-#endif  // defined(__x86_64__)
+#endif  // defined(HWASAN_ALIASING_MODE)

 // Mask for extracting tag bits from the lower 8 bits.
 constexpr uptr kTagMask = (1UL << kTagBits) - 1;
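Whichever of the three layouts is selected (aliasing, Intel LAM, or AArch64 TBI), the tag always occupies `kTagBits` bits starting at bit `kAddressTagShift`, so insertion and extraction are the same shift-and-mask everywhere; only the constants change. A sketch using the TBI constants, which also shows why the `kFallbackTag = 0xBB & kTagMask` change a few hunks up matters once `kTagBits < 8`:

    #include <cassert>
    #include <cstdint>

    constexpr unsigned kAddressTagShift = 56;              // AArch64 TBI layout
    constexpr unsigned kTagBits = 8;                       // 3 in aliasing mode
    constexpr uint64_t kTagMask = (1ULL << kTagBits) - 1;

    constexpr uint64_t AddTagToPointer(uint64_t p, uint8_t tag) {
      return (p & ~(kTagMask << kAddressTagShift)) |
             ((uint64_t(tag) & kTagMask) << kAddressTagShift);
    }
    constexpr uint8_t GetTagFromPointer(uint64_t p) {
      return uint8_t((p >> kAddressTagShift) & kTagMask);
    }

    int main() {
      uint64_t p = AddTagToPointer(0x00007fffdead0000, 0xBB);
      // Round trips because the tag is masked on insertion; with only 3 tag
      // bits an unmasked 0xBB could never come back out, which is why the
      // runtime defines its fallback tag as 0xBB & kTagMask.
      assert(GetTagFromPointer(p) == (0xBB & kTagMask));
      return 0;
    }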
@ -95,7 +102,7 @@ extern bool hwasan_init_is_running;
 extern int hwasan_report_count;

 bool InitShadow();
-void InitPrctl();
+void InitializeOsSupport();
 void InitThreads();
 void InitializeInterceptors();

@ -129,6 +136,7 @@ void InstallAtExitHandler();

 void HwasanTSDInit();
 void HwasanTSDThreadInit();
+void HwasanAtExit();

 void HwasanOnDeadlySignal(int signo, void *info, void *context);

@ -138,6 +146,26 @@ void AppendToErrorMessageBuffer(const char *buffer);

 void AndroidTestTlsSlot();

+// This is a compiler-generated struct that can be shared between hwasan
+// implementations.
+struct AccessInfo {
+  uptr addr;
+  uptr size;
+  bool is_store;
+  bool is_load;
+  bool recover;
+};
+
+// Given access info and frame information, unwind the stack and report the tag
+// mismatch.
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+                       uptr *registers_frame = nullptr);
+
+// This dispatches to HandleTagMismatch but sets up the AccessInfo, program
+// counter, and frame pointer.
+void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
+                       size_t outsize);
+
 } // namespace __hwasan

 #define HWASAN_MALLOC_HOOK(ptr, size) \
@ -175,4 +203,12 @@ typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
 typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
 #endif  // HWASAN_WITH_INTERCEPTORS && __aarch64__

+#define ENSURE_HWASAN_INITED()      \
+  do {                              \
+    CHECK(!hwasan_init_is_running); \
+    if (!hwasan_inited) {           \
+      __hwasan_init();              \
+    }                               \
+  } while (0)
+
 #endif  // HWASAN_H
libsanitizer/hwasan/hwasan_allocation_functions.cpp (new file, 172 lines)

@ -0,0 +1,172 @@
+//===-- hwasan_allocation_functions.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Definitions for __sanitizer allocation functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+using namespace __hwasan;
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+  uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+  return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+  uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+  void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+  allocated_for_dlsym += size_in_words;
+  CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+  return mem;
+}
+
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  CHECK_NE(memptr, 0);
+  int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
+  return res;
+}
+
+void *__sanitizer_memalign(uptr alignment, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  return hwasan_memalign(alignment, size, &stack);
+}
+
+void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  return hwasan_aligned_alloc(alignment, size, &stack);
+}
+
+void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  void *ptr = hwasan_memalign(alignment, size, &stack);
+  if (ptr)
+    DTLS_on_libc_memalign(ptr, size);
+  return ptr;
+}
+
+void *__sanitizer_valloc(uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  return hwasan_valloc(size, &stack);
+}
+
+void *__sanitizer_pvalloc(uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  return hwasan_pvalloc(size, &stack);
+}
+
+void __sanitizer_free(void *ptr) {
+  GET_MALLOC_STACK_TRACE;
+  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+    return;
+  hwasan_free(ptr, &stack);
+}
+
+void __sanitizer_cfree(void *ptr) {
+  GET_MALLOC_STACK_TRACE;
+  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+    return;
+  hwasan_free(ptr, &stack);
+}
+
+uptr __sanitizer_malloc_usable_size(const void *ptr) {
+  return __sanitizer_get_allocated_size(ptr);
+}
+
+struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
+  __sanitizer_struct_mallinfo sret;
+  internal_memset(&sret, 0, sizeof(sret));
+  return sret;
+}
+
+int __sanitizer_mallopt(int cmd, int value) { return 0; }
+
+void __sanitizer_malloc_stats(void) {
+  // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+void *__sanitizer_calloc(uptr nmemb, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  if (UNLIKELY(!hwasan_inited))
+    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+    return AllocateFromLocalPool(nmemb * size);
+  return hwasan_calloc(nmemb, size, &stack);
+}
+
+void *__sanitizer_realloc(void *ptr, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+    void *new_ptr;
+    if (UNLIKELY(!hwasan_inited)) {
+      new_ptr = AllocateFromLocalPool(copy_size);
+    } else {
+      copy_size = size;
+      new_ptr = hwasan_malloc(copy_size, &stack);
+    }
+    internal_memcpy(new_ptr, ptr, copy_size);
+    return new_ptr;
+  }
+  return hwasan_realloc(ptr, size, &stack);
+}
+
+void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  return hwasan_reallocarray(ptr, nmemb, size, &stack);
+}
+
+void *__sanitizer_malloc(uptr size) {
+  GET_MALLOC_STACK_TRACE;
+  if (UNLIKELY(!hwasan_init_is_running))
+    ENSURE_HWASAN_INITED();
+  if (UNLIKELY(!hwasan_inited))
+    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+    return AllocateFromLocalPool(size);
+  return hwasan_malloc(size, &stack);
+}
+
+#if HWASAN_WITH_INTERCEPTORS
+#  define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+    extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
+        ALIAS("__sanitizer_" #FN); \
+    extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+        ARGS) ALIAS("__sanitizer_" #FN)
+
+INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
+                  SIZE_T size);
+INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, free, void *ptr);
+INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
+INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
+
+#  if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, cfree, void *ptr);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
+INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
+INTERCEPTOR_ALIAS(void, malloc_stats, void);
+#  endif
+#endif  // #if HWASAN_WITH_INTERCEPTORS
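The dlsym pool at the top of this new file exists because of a bootstrap cycle: while interceptors are being resolved, `dlsym` itself calls `malloc`/`calloc`, before the sanitizer allocator is usable. Serving those early requests from a static bump-pointer arena, and recognizing such pointers in `free`, breaks the cycle; pool memory is simply never reclaimed. A standalone sketch of the same idea:

    #include <cstddef>
    #include <cstdint>

    // Tiny static arena used only before the real allocator is up.
    static const std::size_t kPoolWords = 1024;
    static std::uintptr_t pool[kPoolWords];
    static std::size_t used_words = 0;

    static bool IsInPool(const void *p) {
      std::uintptr_t off = (std::uintptr_t)p - (std::uintptr_t)pool;
      return off < sizeof(pool);
    }

    static void *PoolAlloc(std::size_t bytes) {
      std::size_t words =
          (bytes + sizeof(std::uintptr_t) - 1) / sizeof(std::uintptr_t);
      if (used_words + words > kPoolWords)
        return nullptr;  // bootstrap allocations are expected to be tiny
      void *mem = &pool[used_words];
      used_words += words;
      return mem;
    }

    // free() recognizes pool pointers and leaks them: the pool lives for the
    // whole process and carries no allocator metadata.
    static void PoolAwareFree(void *p) {
      if (!p || IsInPool(p))
        return;
      // ...forward to the real deallocator here...
    }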
libsanitizer/hwasan/hwasan_allocator.cpp

@ -80,12 +80,29 @@ void GetAllocatorStats(AllocatorStatCounters s) {
   allocator.GetStats(s);
 }

+uptr GetAliasRegionStart() {
+#if defined(HWASAN_ALIASING_MODE)
+  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
+  uptr AliasRegionStart =
+      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
+
+  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
+           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+  CHECK_EQ(
+      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
+      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+  return AliasRegionStart;
+#else
+  return 0;
+#endif
+}
+
 void HwasanAllocatorInit() {
   atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                        !flags()->disable_allocator_tagging);
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
-                 kAliasRegionStart);
+                 GetAliasRegionStart());
   for (uptr i = 0; i < sizeof(tail_magic); i++)
     tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
 }
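`GetAliasRegionStart` places the alias region half a taggable window above the shadow base, then asserts that both the first and the last byte of the region still share the shadow base's upper bits, i.e. the whole region stays inside one `1 << kTaggableRegionCheckShift` window. The arithmetic can be checked in isolation with the aliasing-mode constants (`kTaggableRegionCheckShift = max(39 + 3 + 1, 44) = 44`) and an assumed, suitably aligned example base; the runtime CHECKs verify exactly this alignment property at startup:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr unsigned kTaggableRegionCheckShift = 44;
      constexpr uint64_t kAliasRegionOffset =
          1ULL << (kTaggableRegionCheckShift - 1);

      const uint64_t shadow_base = 0x500000000000;  // example value only
      const uint64_t alias_start = shadow_base + kAliasRegionOffset;

      // First byte of the region is in the same 2^44 window as the base...
      assert(alias_start >> kTaggableRegionCheckShift ==
             shadow_base >> kTaggableRegionCheckShift);
      // ...and so is the last byte, since the region spans kAliasRegionOffset.
      assert((alias_start + kAliasRegionOffset - 1) >>
                 kTaggableRegionCheckShift ==
             shadow_base >> kTaggableRegionCheckShift);
      return 0;
    }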
@ -196,6 +213,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
           : tagged_ptr;
   void *aligned_ptr = reinterpret_cast<void *>(
       RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
   uptr orig_size = meta->get_requested_size();
@ -229,7 +247,20 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
       flags()->tag_in_free && malloc_bisect(stack, 0) &&
       atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
     // Always store full 8-bit tags on free to maximize UAF detection.
-    tag_t tag = t ? t->GenerateRandomTag(/*num_bits=*/8) : kFallbackFreeTag;
+    tag_t tag;
+    if (t) {
+      // Make sure we are not using a short granule tag as a poison tag. This
+      // would make us attempt to read the memory on a UaF.
+      // The tag can be zero if tagging is disabled on this thread.
+      do {
+        tag = t->GenerateRandomTag(/*num_bits=*/8);
+      } while (
+          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
+    } else {
+      static_assert(kFallbackFreeTag >= kShadowAlignment,
+                    "fallback tag must not be a short granule tag.");
+      tag = kFallbackFreeTag;
+    }
     TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                      tag);
   }
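The retag loop introduced here encodes two constraints on the tag written over freed memory. In HWASan shadow, values 1..15 double as short-granule sizes, so a free tag below `kShadowAlignment` could make a later use-after-free read part of the granule instead of trapping; and reusing the pointer's own tag would mask the bug entirely (zero remains permitted, meaning tagging is off for the thread). A compact restatement of the selection rule, with a standard RNG standing in for the runtime's:

    #include <cstdint>
    #include <random>

    constexpr unsigned kShadowAlignment = 16;  // granule size; 1..15 = short

    // Picks a full 8-bit tag for freed memory that is neither a short-granule
    // value nor equal to the old pointer tag (zero is allowed: tagging off).
    uint8_t PickFreeTag(std::mt19937 &rng, uint8_t pointer_tag) {
      std::uniform_int_distribution<int> dist(0, 255);
      uint8_t tag;
      do {
        tag = static_cast<uint8_t>(dist(rng));
      } while ((tag < kShadowAlignment || tag == pointer_tag) && tag != 0);
      return tag;
    }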
libsanitizer/hwasan/hwasan_allocator.h

@ -15,6 +15,7 @@

 #include "hwasan.h"
 #include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
 #include "hwasan_poisoning.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_checks.h"
@ -58,7 +59,7 @@ static const uptr kMaxAllowedMallocSize = 1UL << 40;  // 1T
 struct AP64 {
   static const uptr kSpaceBeg = ~0ULL;

-#if defined(__x86_64__)
+#if defined(HWASAN_ALIASING_MODE)
   static const uptr kSpaceSize = 1ULL << kAddressTagShift;
 #else
   static const uptr kSpaceSize = 0x2000000000ULL;
@ -110,11 +111,11 @@ typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
 void GetAllocatorStats(AllocatorStatCounters s);

 inline bool InTaggableRegion(uptr addr) {
-#if defined(__x86_64__)
+#if defined(HWASAN_ALIASING_MODE)
   // Aliases are mapped next to shadow so that the upper bits match the shadow
   // base.
   return (addr >> kTaggableRegionCheckShift) ==
-         (__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+         (GetShadowOffset() >> kTaggableRegionCheckShift);
 #endif
   return true;
 }
libsanitizer/hwasan/hwasan_dynamic_shadow.cpp

@ -119,7 +119,7 @@ namespace __hwasan {
 void InitShadowGOT() {}

 uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
-#if defined(__x86_64__)
+#  if defined(HWASAN_ALIASING_MODE)
   constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
   constexpr uptr kNumAliases = 1ULL << kTagBits;
   return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
libsanitizer/hwasan/hwasan_fuchsia.cpp (new file, 192 lines)

@ -0,0 +1,192 @@
+//===-- hwasan_fuchsia.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Fuchsia-specific
+/// code.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+
+// This TLS variable contains the location of the stack ring buffer and can be
+// used to always find the hwasan thread object associated with the current
+// running thread.
+[[gnu::tls_model("initial-exec")]]
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+
+namespace __hwasan {
+
+bool InitShadow() {
+  __sanitizer::InitShadowBounds();
+  CHECK_NE(__sanitizer::ShadowBounds.shadow_limit, 0);
+
+  return true;
+}
+
+bool MemIsApp(uptr p) {
+  CHECK(GetTagFromPointer(p) == 0);
+  return __sanitizer::ShadowBounds.shadow_limit <= p &&
+         p <= (__sanitizer::ShadowBounds.memory_limit - 1);
+}
+
+// These are known parameters passed to the hwasan runtime on thread creation.
+struct Thread::InitState {
+  uptr stack_bottom, stack_top;
+};
+
+static void FinishThreadInitialization(Thread *thread);
+
+void InitThreads() {
+  // This is the minimal alignment needed for the storage where hwasan threads
+  // and their stack ring buffers are placed. This alignment is necessary so the
+  // stack ring buffer can perform a simple calculation to get the next element
+  // in the RB. The instructions for this calculation are emitted by the
+  // compiler. (Full explanation in hwasan_thread_list.h.)
+  uptr alloc_size = UINT64_C(1) << kShadowBaseAlignment;
+  uptr thread_start = reinterpret_cast<uptr>(
+      MmapAlignedOrDieOnFatalError(alloc_size, alloc_size, __func__));
+
+  InitThreadList(thread_start, alloc_size);
+
+  // Create the hwasan thread object for the current (main) thread. Stack info
+  // for this thread is known from information passed via
+  // __sanitizer_startup_hook.
+  const Thread::InitState state = {
+      .stack_bottom = __sanitizer::MainThreadStackBase,
+      .stack_top =
+          __sanitizer::MainThreadStackBase + __sanitizer::MainThreadStackSize,
+  };
+  FinishThreadInitialization(hwasanThreadList().CreateCurrentThread(&state));
+}
+
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+
+// This is called from the parent thread before the new thread is created. Here
+// we can propagate known info like the stack bounds to Thread::Init before
+// jumping into the thread. We cannot initialize the stack ring buffer yet since
+// we have not entered the new thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+                                    const char *name, uptr stack_bottom,
+                                    uptr stack_size) {
+  const Thread::InitState state = {
+      .stack_bottom = stack_bottom,
+      .stack_top = stack_bottom + stack_size,
+  };
+  return hwasanThreadList().CreateCurrentThread(&state);
+}
+
+// This sets the stack top and bottom according to the InitState passed to
+// CreateCurrentThread above.
+void Thread::InitStackAndTls(const InitState *state) {
+  CHECK_NE(state->stack_bottom, 0);
+  CHECK_NE(state->stack_top, 0);
+  stack_bottom_ = state->stack_bottom;
+  stack_top_ = state->stack_top;
+  tls_end_ = tls_begin_ = 0;
+}
+
+// This is called after creating a new thread with the pointer returned by
+// BeforeThreadCreateHook. We are still in the creating thread and should check
+// if it was actually created correctly.
+static void ThreadCreateHook(void *hook, bool aborted) {
+  Thread *thread = static_cast<Thread *>(hook);
+  if (!aborted) {
+    // The thread was created successfully.
+    // ThreadStartHook can already be running in the new thread.
+  } else {
+    // The thread wasn't created after all.
+    // Clean up everything we set up in BeforeThreadCreateHook.
+    atomic_signal_fence(memory_order_seq_cst);
+    hwasanThreadList().ReleaseThread(thread);
+  }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above). Here we can
+// setup the stack ring buffer.
+static void ThreadStartHook(void *hook, thrd_t self) {
+  Thread *thread = static_cast<Thread *>(hook);
+  FinishThreadInitialization(thread);
+  thread->InitRandomState();
+}
+
+// This is the function that sets up the stack ring buffer and enables us to use
+// GetCurrentThread. This function should only be called while IN the thread
+// that we want to create the hwasan thread object for so __hwasan_tls can be
+// properly referenced.
+static void FinishThreadInitialization(Thread *thread) {
+  CHECK_NE(thread, nullptr);
+
+  // The ring buffer is located immediately before the thread object.
+  uptr stack_buffer_size = hwasanThreadList().GetRingBufferSize();
+  uptr stack_buffer_start = reinterpret_cast<uptr>(thread) - stack_buffer_size;
+  thread->InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+}
+
+static void ThreadExitHook(void *hook, thrd_t self) {
+  Thread *thread = static_cast<Thread *>(hook);
+  atomic_signal_fence(memory_order_seq_cst);
+  hwasanThreadList().ReleaseThread(thread);
+}
+
+// Not implemented because Fuchsia does not use signal handlers.
+void HwasanOnDeadlySignal(int signo, void *info, void *context) {}
+
+// Not implemented because Fuchsia does not use interceptors.
+void InitializeInterceptors() {}
+
+// Not implemented because this is only relevant for Android.
+void AndroidTestTlsSlot() {}
+
+// TSD was normally used on linux as a means of calling the hwasan thread exit
+// handler passed to pthread_key_create. This is not needed on Fuchsia because
+// we will be using __sanitizer_thread_exit_hook.
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+
+// On linux, this just would call `atexit(HwasanAtExit)`. The functions in
+// HwasanAtExit are unimplemented for Fuchsia and effectively no-ops, so this
+// function is unneeded.
+void InstallAtExitHandler() {}
+
+} // namespace __hwasan
+
+extern "C" {
+
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+                                            const char *name, void *stack_base,
+                                            size_t stack_size) {
+  return __hwasan::BeforeThreadCreateHook(
+      reinterpret_cast<uptr>(thread), detached, name,
+      reinterpret_cast<uptr>(stack_base), stack_size);
+}
+
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+  __hwasan::ThreadCreateHook(hook, error != thrd_success);
+}
+
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+  __hwasan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
+}
+
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
+  __hwasan::ThreadExitHook(hook, self);
+}
+
+} // extern "C"
+
+#endif  // SANITIZER_FUCHSIA
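The alignment comment in `InitThreads` above is what makes the compiler-emitted ring-buffer update cheap: a 2^N-sized buffer at a 2^N-aligned base can be advanced and wrapped with mask arithmetic alone, because the base is recoverable from any slot pointer. The runtime's actual encoding packs more state into the TLS word, so the following is only a sketch of the underlying trick, with an assumed buffer size:

    #include <cstdint>

    // Assume a 2^12-byte ring buffer placed at a 2^12-aligned base address.
    constexpr uintptr_t kBufSizeLog = 12;
    constexpr uintptr_t kBufMask = (uintptr_t{1} << kBufSizeLog) - 1;

    // Branch-free advance: the high bits (the aligned base) are preserved and
    // the low bits wrap modulo the buffer size, so the last slot's successor
    // is the base again.
    inline uint64_t *NextSlot(uint64_t *pos) {
      uintptr_t p = reinterpret_cast<uintptr_t>(pos);
      uintptr_t next = (p & ~kBufMask) | ((p + sizeof(uint64_t)) & kBufMask);
      return reinterpret_cast<uint64_t *>(next);
    }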
libsanitizer/hwasan/hwasan_interceptors.cpp

@ -16,192 +16,14 @@

 #include "interception/interception.h"
 #include "hwasan.h"
-#include "hwasan_allocator.h"
-#include "hwasan_mapping.h"
 #include "hwasan_thread.h"
-#include "hwasan_poisoning.h"
-#include "hwasan_report.h"
-#include "sanitizer_common/sanitizer_platform_limits_posix.h"
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_linux.h"
-#include "sanitizer_common/sanitizer_tls_get_addr.h"

-#include <stdarg.h>
-// ACHTUNG! No other system header includes in this file.
-// Ideally, we should get rid of stdarg.h as well.
+#if !SANITIZER_FUCHSIA

 using namespace __hwasan;

-using __sanitizer::memory_order;
-using __sanitizer::atomic_load;
-using __sanitizer::atomic_store;
-using __sanitizer::atomic_uintptr_t;
-
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
-  uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-  return off < sizeof(alloc_memory_for_dlsym);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
-  uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
-  void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
-  allocated_for_dlsym += size_in_words;
-  CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
-  return mem;
-}
-
-#define ENSURE_HWASAN_INITED() do { \
-  CHECK(!hwasan_init_is_running); \
-  if (!hwasan_inited) { \
-    __hwasan_init(); \
-  } \
-} while (0)
-
-
-int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  CHECK_NE(memptr, 0);
-  int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
-  return res;
-}
-
-void * __sanitizer_memalign(uptr alignment, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  return hwasan_memalign(alignment, size, &stack);
-}
-
-void * __sanitizer_aligned_alloc(uptr alignment, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  return hwasan_aligned_alloc(alignment, size, &stack);
-}
-
-void * __sanitizer___libc_memalign(uptr alignment, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  void *ptr = hwasan_memalign(alignment, size, &stack);
-  if (ptr)
-    DTLS_on_libc_memalign(ptr, size);
-  return ptr;
-}
-
-void * __sanitizer_valloc(uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  return hwasan_valloc(size, &stack);
-}
-
-void * __sanitizer_pvalloc(uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  return hwasan_pvalloc(size, &stack);
-}
-
-void __sanitizer_free(void *ptr) {
-  GET_MALLOC_STACK_TRACE;
-  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
-  hwasan_free(ptr, &stack);
-}
-
-void __sanitizer_cfree(void *ptr) {
-  GET_MALLOC_STACK_TRACE;
-  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
-  hwasan_free(ptr, &stack);
-}
-
-uptr __sanitizer_malloc_usable_size(const void *ptr) {
-  return __sanitizer_get_allocated_size(ptr);
-}
-
-struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
-  __sanitizer_struct_mallinfo sret;
-  internal_memset(&sret, 0, sizeof(sret));
-  return sret;
-}
-
-int __sanitizer_mallopt(int cmd, int value) {
-  return 0;
-}
-
-void __sanitizer_malloc_stats(void) {
-  // FIXME: implement, but don't call REAL(malloc_stats)!
-}
-
-void * __sanitizer_calloc(uptr nmemb, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  if (UNLIKELY(!hwasan_inited))
-    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(nmemb * size);
-  return hwasan_calloc(nmemb, size, &stack);
-}
-
-void * __sanitizer_realloc(void *ptr, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
-    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
-    void *new_ptr;
-    if (UNLIKELY(!hwasan_inited)) {
-      new_ptr = AllocateFromLocalPool(copy_size);
-    } else {
-      copy_size = size;
-      new_ptr = hwasan_malloc(copy_size, &stack);
-    }
-    internal_memcpy(new_ptr, ptr, copy_size);
-    return new_ptr;
-  }
-  return hwasan_realloc(ptr, size, &stack);
-}
-
-void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  return hwasan_reallocarray(ptr, nmemb, size, &stack);
-}
-
-void * __sanitizer_malloc(uptr size) {
-  GET_MALLOC_STACK_TRACE;
-  if (UNLIKELY(!hwasan_init_is_running))
-    ENSURE_HWASAN_INITED();
-  if (UNLIKELY(!hwasan_inited))
-    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(size);
-  return hwasan_malloc(size, &stack);
-}
-
 #if HWASAN_WITH_INTERCEPTORS
-#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
-  extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
-    ALIAS("__sanitizer_" #FN); \
-  extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
-    ARGS) ALIAS("__sanitizer_" #FN)
-
-INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
-                  SIZE_T size);
-INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
-INTERCEPTOR_ALIAS(void, free, void *ptr);
-INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
-INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
-
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
-INTERCEPTOR_ALIAS(void, cfree, void *ptr);
-INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
-INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
-INTERCEPTOR_ALIAS(void, malloc_stats, void);
-#endif
-
 struct ThreadStartArg {
   thread_callback_t callback;
@ -346,3 +168,5 @@ void InitializeInterceptors() {
   inited = 1;
 }
 } // namespace __hwasan
+
+#endif  // #if !SANITIZER_FUCHSIA
libsanitizer/hwasan/hwasan_linux.cpp

@ -69,15 +69,9 @@ static void ProtectGap(uptr addr, uptr size) {

 uptr kLowMemStart;
 uptr kLowMemEnd;
-uptr kLowShadowEnd;
-uptr kLowShadowStart;
-uptr kHighShadowStart;
-uptr kHighShadowEnd;
 uptr kHighMemStart;
 uptr kHighMemEnd;

-uptr kAliasRegionStart;  // Always 0 on non-x86.
-
 static void PrintRange(uptr start, uptr end, const char *name) {
   Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
 }
@ -116,7 +110,7 @@ static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
       FindDynamicShadowStart(shadow_size_bytes);
 }

-void InitPrctl() {
+void InitializeOsSupport() {
 #define PR_SET_TAGGED_ADDR_CTRL 55
 #define PR_GET_TAGGED_ADDR_CTRL 56
 #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
@ -125,7 +119,7 @@ void InitPrctl() {
   if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
                        &local_errno) &&
       local_errno == EINVAL) {
-#if SANITIZER_ANDROID || defined(__x86_64__)
+#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
     // Some older Android kernels have the tagged pointer ABI on
     // unconditionally, and hence don't have the tagged-addr prctl while still
     // allow the ABI.
@ -145,14 +139,31 @@ void InitPrctl() {
|
||||||
// Turn on the tagged address ABI.
|
// Turn on the tagged address ABI.
|
||||||
if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
|
if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
|
||||||
PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
|
PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
|
||||||
!internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &&
|
!internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
|
||||||
flags()->fail_without_syscall_abi) {
|
# if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
|
||||||
|
// Try the new prctl API for Intel LAM. The API is based on a currently
|
||||||
|
// unsubmitted patch to the Linux kernel (as of May 2021) and is thus
|
||||||
|
// subject to change. Patch is here:
|
||||||
|
// https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
|
||||||
|
int tag_bits = kTagBits;
|
||||||
|
int tag_shift = kAddressTagShift;
|
||||||
|
if (!internal_iserror(
|
||||||
|
internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
|
||||||
|
reinterpret_cast<unsigned long>(&tag_bits),
|
||||||
|
reinterpret_cast<unsigned long>(&tag_shift), 0))) {
|
||||||
|
CHECK_EQ(tag_bits, kTagBits);
|
||||||
|
CHECK_EQ(tag_shift, kAddressTagShift);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
# endif // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
|
||||||
|
if (flags()->fail_without_syscall_abi) {
|
||||||
Printf(
|
Printf(
|
||||||
"FATAL: HWAddressSanitizer failed to enable tagged address syscall "
|
"FATAL: HWAddressSanitizer failed to enable tagged address syscall "
|
||||||
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
|
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
|
||||||
"configuration.\n");
|
"configuration.\n");
|
||||||
Die();
|
Die();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
#undef PR_SET_TAGGED_ADDR_CTRL
|
#undef PR_SET_TAGGED_ADDR_CTRL
|
||||||
#undef PR_GET_TAGGED_ADDR_CTRL
|
#undef PR_GET_TAGGED_ADDR_CTRL
|
||||||
#undef PR_TAGGED_ADDR_ENABLE
|
#undef PR_TAGGED_ADDR_ENABLE
|
||||||
|
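The renamed InitializeOsSupport first probes PR_GET_TAGGED_ADDR_CTRL, then enables the ABI, and only on x86_64 without aliasing mode retries with the (then-unsubmitted) Intel LAM argument form. A minimal standalone sketch of the basic negotiation, assuming a Linux kernel that implements the tagged-address-ABI prctls; the constants are spelled out in case older headers lack them:

// sketch only: the runtime above uses internal_prctl and its own flags.
#include <sys/prctl.h>
#include <cstdio>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#  define PR_SET_TAGGED_ADDR_CTRL 55
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#  define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif

int main() {
  // Ask the kernel to ignore the top byte of pointers in syscall arguments.
  if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) != 0) {
    perror("PR_SET_TAGGED_ADDR_CTRL");
    return 1;
  }
  // Read the setting back, as the runtime does, to confirm it stuck.
  int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  printf("tagged address ABI %s\n",
         (ctrl & PR_TAGGED_ADDR_ENABLE) ? "enabled" : "disabled");
  return 0;
}
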
@@ -181,18 +192,6 @@ bool InitShadow() {
   // High memory starts where allocated shadow allows.
   kHighMemStart = ShadowToMem(kHighShadowStart);
 
-#if defined(__x86_64__)
-  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
-  kAliasRegionStart =
-      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
-
-  CHECK_EQ(kAliasRegionStart >> kTaggableRegionCheckShift,
-           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
-  CHECK_EQ(
-      (kAliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
-      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
-#endif
-
   // Check the sanity of the defined memory ranges (there might be gaps).
   CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
   CHECK_GT(kHighMemStart, kHighShadowEnd);

@@ -233,25 +232,16 @@ void InitThreads() {
   ProtectGap(thread_space_end,
              __hwasan_shadow_memory_dynamic_address - thread_space_end);
   InitThreadList(thread_space_start, thread_space_end - thread_space_start);
+  hwasanThreadList().CreateCurrentThread();
 }
 
 bool MemIsApp(uptr p) {
-#if !defined(__x86_64__)  // Memory outside the alias range has non-zero tags.
+// Memory outside the alias range has non-zero tags.
+#  if !defined(HWASAN_ALIASING_MODE)
   CHECK(GetTagFromPointer(p) == 0);
 #  endif
-  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
-}
-
-static void HwasanAtExit(void) {
-  if (common_flags()->print_module_map)
-    DumpProcessMap();
-  if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
-    ReportStats();
-  if (hwasan_report_count > 0) {
-    // ReportAtExitStatistics();
-    if (common_flags()->exitcode)
-      internal__exit(common_flags()->exitcode);
-  }
+  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
 }
 
 void InstallAtExitHandler() {

@@ -330,22 +320,6 @@ void AndroidTestTlsSlot() {
 void AndroidTestTlsSlot() {}
 #endif
 
-Thread *GetCurrentThread() {
-  uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
-  if (UNLIKELY(*ThreadLongPtr == 0))
-    return nullptr;
-  auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
-  return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
-}
-
-struct AccessInfo {
-  uptr addr;
-  uptr size;
-  bool is_store;
-  bool is_load;
-  bool recover;
-};
-
 static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
   // Access type is passed in a platform dependent way (see below) and encoded
   // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is

@@ -396,28 +370,6 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
   return AccessInfo{addr, size, is_store, !is_store, recover};
 }
 
-static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
-                              ucontext_t *uc, uptr *registers_frame = nullptr) {
-  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
-  BufferedStackTrace *stack = stack_buffer.data();
-  stack->Reset();
-  stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
-
-  // The second stack frame contains the failure __hwasan_check function, as
-  // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
-  // we wish to ignore. This (currently) only occurs on AArch64, as x64
-  // implementations use SIGTRAP to implement the failure, and thus do not go
-  // through the stack saver.
-  if (registers_frame && stack->trace && stack->size > 0) {
-    stack->trace++;
-    stack->size--;
-  }
-
-  bool fatal = flags()->halt_on_error || !ai.recover;
-  ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
-                    registers_frame);
-}
-
 static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
   AccessInfo ai = GetAccessInfo(info, uc);
   if (!ai.is_store && !ai.is_load)

@@ -450,27 +402,39 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context) {
   HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
 }
 
+void Thread::InitStackAndTls(const InitState *) {
+  uptr tls_size;
+  uptr stack_size;
+  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
+                       &tls_size);
+  stack_top_ = stack_bottom_ + stack_size;
+  tls_end_ = tls_begin_ + tls_size;
+}
+
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+  CHECK(IsAligned(p, kShadowAlignment));
+  CHECK(IsAligned(size, kShadowAlignment));
+  uptr shadow_start = MemToShadow(p);
+  uptr shadow_size = MemToShadowSize(size);
+
+  uptr page_size = GetPageSizeCached();
+  uptr page_start = RoundUpTo(shadow_start, page_size);
+  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
+  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
+  if (SANITIZER_LINUX &&
+      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
+    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
+    internal_memset((void *)page_end, tag,
+                    shadow_start + shadow_size - page_end);
+    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
+    // Linux.
+    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
+  } else {
+    internal_memset((void *)shadow_start, tag, shadow_size);
+  }
+  return AddTagToPointer(p, tag);
+}
+
 } // namespace __hwasan
 
-// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
-// rest of the mismatch handling code (C++).
-void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
-                            size_t outsize) {
-  __hwasan::AccessInfo ai;
-  ai.is_store = access_info & 0x10;
-  ai.is_load = !ai.is_store;
-  ai.recover = access_info & 0x20;
-  ai.addr = addr;
-  if ((access_info & 0xf) == 0xf)
-    ai.size = outsize;
-  else
-    ai.size = 1 << (access_info & 0xf);
-
-  __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
-                              (uptr)__builtin_frame_address(0), nullptr,
-                              registers_frame);
-  __builtin_unreachable();
-}
-
 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

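The deleted __hwasan_tag_mismatch4 above (it moves elsewhere in this merge) spells out the access_info encoding that GetAccessInfo also documents: the low nibble is log2 of the access size (0xf meaning the size is passed separately), bit 4 marks a store, and bit 5 a recoverable check. A small decoding sketch with an assumed example value:

// sketch only: 0x12 is an illustrative access_info, not a value from a run.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t access_info = 0x12;  // example: a store of 1 << 2 = 4 bytes
  bool is_store = access_info & 0x10;
  bool recover = access_info & 0x20;
  // For the 0xf case the runtime takes the size from a separate parameter.
  uint64_t size = (access_info & 0xf) == 0xf ? 0
                                             : 1ULL << (access_info & 0xf);
  printf("%s, %llu bytes, %srecoverable\n", is_store ? "store" : "load",
         (unsigned long long)size, recover ? "" : "not ");
  return 0;
}
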
@@ -48,14 +48,14 @@ extern uptr kHighShadowEnd;
 extern uptr kHighMemStart;
 extern uptr kHighMemEnd;
 
-extern uptr kAliasRegionStart;
+inline uptr GetShadowOffset() {
+  return SANITIZER_FUCHSIA ? 0 : __hwasan_shadow_memory_dynamic_address;
+}
 inline uptr MemToShadow(uptr untagged_addr) {
-  return (untagged_addr >> kShadowScale) +
-         __hwasan_shadow_memory_dynamic_address;
+  return (untagged_addr >> kShadowScale) + GetShadowOffset();
 }
 inline uptr ShadowToMem(uptr shadow_addr) {
-  return (shadow_addr - __hwasan_shadow_memory_dynamic_address) << kShadowScale;
+  return (shadow_addr - GetShadowOffset()) << kShadowScale;
 }
 inline uptr MemToShadowSize(uptr size) {
   return size >> kShadowScale;

@@ -63,6 +63,13 @@ inline uptr MemToShadowSize(uptr size) {
 
 bool MemIsApp(uptr p);
 
+inline bool MemIsShadow(uptr p) {
+  return (kLowShadowStart <= p && p <= kLowShadowEnd) ||
+         (kHighShadowStart <= p && p <= kHighShadowEnd);
+}
+
+uptr GetAliasRegionStart();
+
 } // namespace __hwasan
 
 #endif  // HWASAN_MAPPING_H

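With GetShadowOffset factored out, MemToShadow and ShadowToMem are inverses only up to granule alignment. A hypothetical round trip, assuming kShadowScale == 4 (one shadow byte per 16-byte granule) and an illustrative offset; both values are stand-ins, not what the runtime actually picks:

// sketch only: the real scale and offset come from the hwasan runtime.
#include <cstdint>
#include <cassert>

int main() {
  const uint64_t kShadowScale = 4;                // 16-byte granules
  const uint64_t shadow_offset = 0x100000000ULL;  // illustrative only
  uint64_t untagged_addr = 0x7f0000001230ULL;

  uint64_t shadow = (untagged_addr >> kShadowScale) + shadow_offset;
  uint64_t back = (shadow - shadow_offset) << kShadowScale;

  // The low bits inside a granule are lost: the round trip returns the
  // granule-aligned address, so 0x...1230 survives but 0x...1234 would
  // come back as 0x...1230.
  assert(back == (untagged_addr & ~((1ULL << kShadowScale) - 1)));
  return 0;
}
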
@@ -56,7 +56,6 @@ using namespace __hwasan;
 // Fake std::nothrow_t to avoid including <new>.
 namespace std {
 struct nothrow_t {};
-enum class align_val_t : size_t {};
 }  // namespace std
 

@@ -73,6 +72,32 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
 void *operator new[](size_t size, std::nothrow_t const&) {
   OPERATOR_NEW_BODY(true /*nothrow*/);
 }
 
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(void *ptr)
+    NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+    void *ptr) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+    void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+    void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+
+#endif  // OPERATOR_NEW_BODY
+
+#ifdef OPERATOR_NEW_ALIGN_BODY
+
+namespace std {
+enum class align_val_t : size_t {};
+}  // namespace std
+
 INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
     size_t size, std::align_val_t align) {
   OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);

@@ -90,16 +115,6 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
   OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
 }
 
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&) {
-  OPERATOR_DELETE_BODY;
-}
 INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
     void *ptr, std::align_val_t align) NOEXCEPT {
   OPERATOR_DELETE_BODY;

@@ -117,4 +132,4 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
   OPERATOR_DELETE_BODY;
 }
 
-#endif  // OPERATOR_NEW_BODY
+#endif  // OPERATOR_NEW_ALIGN_BODY

@@ -19,30 +19,6 @@
 
 namespace __hwasan {
 
-uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
-  CHECK(IsAligned(p, kShadowAlignment));
-  CHECK(IsAligned(size, kShadowAlignment));
-  uptr shadow_start = MemToShadow(p);
-  uptr shadow_size = MemToShadowSize(size);
-
-  uptr page_size = GetPageSizeCached();
-  uptr page_start = RoundUpTo(shadow_start, page_size);
-  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
-  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
-  if (SANITIZER_LINUX &&
-      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
-    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
-    internal_memset((void *)page_end, tag,
-                    shadow_start + shadow_size - page_end);
-    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
-    // Linux.
-    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
-  } else {
-    internal_memset((void *)shadow_start, tag, shadow_size);
-  }
-  return AddTagToPointer(p, tag);
-}
-
 uptr TagMemory(uptr p, uptr size, tag_t tag) {
   uptr start = RoundDownTo(p, kShadowAlignment);
   uptr end = RoundUpTo(p + size, kShadowAlignment);

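TagMemory above differs from the relocated TagMemoryAligned only in that it first widens an arbitrary [p, p+size) range to whole shadow granules. A self-contained illustration of that rounding, assuming 16-byte granules (the real kShadowAlignment comes from the runtime headers):

// sketch only: RoundDownTo/RoundUpTo are reimplemented so it stands alone.
#include <cstdint>
#include <cstdio>

static uint64_t RoundDownTo(uint64_t x, uint64_t b) { return x & ~(b - 1); }
static uint64_t RoundUpTo(uint64_t x, uint64_t b) {
  return RoundDownTo(x + b - 1, b);
}

int main() {
  const uint64_t kShadowAlignment = 16;
  uint64_t p = 0x1009, size = 20;  // an unaligned 20-byte range
  uint64_t start = RoundDownTo(p, kShadowAlignment);     // 0x1000
  uint64_t end = RoundUpTo(p + size, kShadowAlignment);  // 0x1020
  // Two whole granules get the tag, covering more than the requested range.
  printf("tagging [0x%llx, 0x%llx)\n",
         (unsigned long long)start, (unsigned long long)end);
  return 0;
}
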
@@ -236,12 +236,12 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
     frame_desc.append(" record_addr:0x%zx record:0x%zx",
                       reinterpret_cast<uptr>(record_addr), record);
     if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
-      RenderFrame(&frame_desc, " %F %L\n", 0, frame->info.address, &frame->info,
+      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                   common_flags()->symbolize_vs_style,
                   common_flags()->strip_path_prefix);
       frame->ClearAll();
     }
-    Printf("%s", frame_desc.data());
+    Printf("%s\n", frame_desc.data());
     frame_desc.clear();
   }
 }

@@ -296,6 +296,75 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
   return 0;
 }
 
+static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
+                                      tag_t *left, tag_t *right) {
+  Decorator d;
+  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
+  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+  if (chunk.IsAllocated()) {
+    uptr offset;
+    const char *whence;
+    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
+      offset = untagged_addr - chunk.Beg();
+      whence = "inside";
+    } else if (candidate == left) {
+      offset = untagged_addr - chunk.End();
+      whence = "to the right of";
+    } else {
+      offset = chunk.Beg() - untagged_addr;
+      whence = "to the left of";
+    }
+    Printf("%s", d.Error());
+    Printf("\nCause: heap-buffer-overflow\n");
+    Printf("%s", d.Default());
+    Printf("%s", d.Location());
+    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
+           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
+           chunk.End());
+    Printf("%s", d.Allocation());
+    Printf("allocated here:\n");
+    Printf("%s", d.Default());
+    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+    return;
+  }
+  // Check whether the address points into a loaded library. If so, this is
+  // most likely a global variable.
+  const char *module_name;
+  uptr module_address;
+  Symbolizer *sym = Symbolizer::GetOrInit();
+  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
+    Printf("%s", d.Error());
+    Printf("\nCause: global-overflow\n");
+    Printf("%s", d.Default());
+    DataInfo info;
+    Printf("%s", d.Location());
+    if (sym->SymbolizeData(mem, &info) && info.start) {
+      Printf(
+          "%p is located %zd bytes to the %s of %zd-byte global variable "
+          "%s [%p,%p) in %s\n",
+          untagged_addr,
+          candidate == left ? untagged_addr - (info.start + info.size)
+                            : info.start - untagged_addr,
+          candidate == left ? "right" : "left", info.size, info.name,
+          info.start, info.start + info.size, module_name);
+    } else {
+      uptr size = GetGlobalSizeFromDescriptor(mem);
+      if (size == 0)
+        // We couldn't find the size of the global from the descriptors.
+        Printf("%p is located to the %s of a global variable in (%s+0x%x)\n",
+               untagged_addr, candidate == left ? "right" : "left", module_name,
+               module_address);
+      else
+        Printf(
+            "%p is located to the %s of a %zd-byte global variable in "
+            "(%s+0x%x)\n",
+            untagged_addr, candidate == left ? "right" : "left", size,
+            module_name, module_address);
+    }
+    Printf("%s", d.Default());
+  }
+}
+
 void PrintAddressDescription(
     uptr tagged_addr, uptr access_size,
     StackAllocationsRingBuffer *current_stack_allocations) {

@@ -317,78 +386,59 @@ void PrintAddressDescription(
            d.Default());
   }
 
+  tag_t addr_tag = GetTagFromPointer(tagged_addr);
+
+  bool on_stack = false;
+  // Check stack first. If the address is on the stack of a live thread, we
+  // know it cannot be a heap / global overflow.
+  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+    if (t->AddrIsInStack(untagged_addr)) {
+      on_stack = true;
+      // TODO(fmayer): figure out how to distinguish use-after-return and
+      // stack-buffer-overflow.
+      Printf("%s", d.Error());
+      Printf("\nCause: stack tag-mismatch\n");
+      Printf("%s", d.Location());
+      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+             t->unique_id());
+      Printf("%s", d.Default());
+      t->Announce();
+
+      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
+                     ? current_stack_allocations
+                     : t->stack_allocations();
+      PrintStackAllocations(sa, addr_tag, untagged_addr);
+      num_descriptions_printed++;
+    }
+  });
+
   // Check if this looks like a heap buffer overflow by scanning
   // the shadow left and right and looking for the first adjacent
   // object with a different memory tag. If that tag matches addr_tag,
   // check the allocator if it has a live chunk there.
-  tag_t addr_tag = GetTagFromPointer(tagged_addr);
   tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
   tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
-  for (int i = 0; i < 1000; i++) {
-    if (TagsEqual(addr_tag, left)) {
+  uptr candidate_distance = 0;
+  for (; candidate_distance < 1000; candidate_distance++) {
+    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
+        TagsEqual(addr_tag, left)) {
       candidate = left;
       break;
     }
     --left;
-    if (TagsEqual(addr_tag, right)) {
+    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+        TagsEqual(addr_tag, right)) {
       candidate = right;
       break;
     }
     ++right;
   }
 
-  if (candidate) {
-    uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
-    HwasanChunkView chunk = FindHeapChunkByAddress(mem);
-    if (chunk.IsAllocated()) {
-      Printf("%s", d.Location());
-      Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
-             untagged_addr,
-             candidate == left ? untagged_addr - chunk.End()
-                               : chunk.Beg() - untagged_addr,
-             candidate == left ? "right" : "left", chunk.UsedSize(),
-             chunk.Beg(), chunk.End());
-      Printf("%s", d.Allocation());
-      Printf("allocated here:\n");
-      Printf("%s", d.Default());
-      GetStackTraceFromId(chunk.GetAllocStackId()).Print();
-      num_descriptions_printed++;
-    } else {
-      // Check whether the address points into a loaded library. If so, this is
-      // most likely a global variable.
-      const char *module_name;
-      uptr module_address;
-      Symbolizer *sym = Symbolizer::GetOrInit();
-      if (sym->GetModuleNameAndOffsetForPC(mem, &module_name,
-                                           &module_address)) {
-        DataInfo info;
-        if (sym->SymbolizeData(mem, &info) && info.start) {
-          Printf(
-              "%p is located %zd bytes to the %s of %zd-byte global variable "
-              "%s [%p,%p) in %s\n",
-              untagged_addr,
-              candidate == left ? untagged_addr - (info.start + info.size)
-                                : info.start - untagged_addr,
-              candidate == left ? "right" : "left", info.size, info.name,
-              info.start, info.start + info.size, module_name);
-        } else {
-          uptr size = GetGlobalSizeFromDescriptor(mem);
-          if (size == 0)
-            // We couldn't find the size of the global from the descriptors.
-            Printf(
-                "%p is located to the %s of a global variable in (%s+0x%x)\n",
-                untagged_addr, candidate == left ? "right" : "left",
-                module_name, module_address);
-          else
-            Printf(
-                "%p is located to the %s of a %zd-byte global variable in "
-                "(%s+0x%x)\n",
-                untagged_addr, candidate == left ? "right" : "left", size,
-                module_name, module_address);
-        }
-        num_descriptions_printed++;
-      }
-    }
-  }
+  constexpr auto kCloseCandidateDistance = 1;
+
+  if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
+    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+    num_descriptions_printed++;
+  }
 
   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {

@@ -398,6 +448,8 @@ void PrintAddressDescription(
     if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                            &ring_index, &num_matching_addrs,
                            &num_matching_addrs_4b)) {
+      Printf("%s", d.Error());
+      Printf("\nCause: use-after-free\n");
       Printf("%s", d.Location());
       Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
              untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),

@@ -424,22 +476,12 @@ void PrintAddressDescription(
       t->Announce();
       num_descriptions_printed++;
     }
+  });
 
-    // Very basic check for stack memory.
-    if (t->AddrIsInStack(untagged_addr)) {
-      Printf("%s", d.Location());
-      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
-             t->unique_id());
-      Printf("%s", d.Default());
-      t->Announce();
-
-      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
-                     ? current_stack_allocations
-                     : t->stack_allocations();
-      PrintStackAllocations(sa, addr_tag, untagged_addr);
+  if (candidate && num_descriptions_printed == 0) {
+    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
     num_descriptions_printed++;
   }
-  });
 
   // Print the remaining threads, as an extra information, 1 line per thread.
   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

@@ -447,6 +489,12 @@ void PrintAddressDescription(
   if (!num_descriptions_printed)
     // We exhausted our possibilities. Bail out.
     Printf("HWAddressSanitizer can not describe address in more detail.\n");
+  if (num_descriptions_printed > 1) {
+    Printf(
+        "There are %d potential causes, printed above in order "
+        "of likeliness.\n",
+        num_descriptions_printed);
+  }
 }
 
 void ReportStats() {}

@@ -538,6 +586,12 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
   Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
          bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
   Printf("\n%s", d.Default());
+  Printf(
+      "Stack of invalid access unknown. Issue detected at deallocation "
+      "time.\n");
+  Printf("%s", d.Allocation());
+  Printf("deallocated here:\n");
+  Printf("%s", d.Default());
   stack->Print();
   HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
   if (chunk.Beg()) {

@@ -657,8 +711,10 @@ void ReportRegisters(uptr *frame, uptr pc) {
          frame[20], frame[21], frame[22], frame[23]);
   Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
          frame[24], frame[25], frame[26], frame[27]);
-  Printf("    x28 %016llx  x29 %016llx  x30 %016llx\n",
-         frame[28], frame[29], frame[30]);
+  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
+  // passes it to this function.
+  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
+         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
 }
 
 } // namespace __hwasan

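The rewritten candidate scan walks the shadow one granule outward in each direction, now guarding every probe with MemIsShadow. A toy model of the same loop over a plain byte array, assuming single-byte tags and ignoring the short-granule handling TagsEqual performs:

// sketch only: bounds checks stand in for MemIsShadow.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t shadow[16] = {0};
  shadow[3] = 0xab;  // a granule whose tag matches addr_tag
  uint8_t addr_tag = 0xab;
  int origin = 9;    // granule of the faulting address
  int left = origin, right = origin, candidate = -1;
  for (int dist = 0; dist < 8; dist++) {
    if (left >= 0 && shadow[left] == addr_tag) { candidate = left; break; }
    --left;
    if (right < 16 && shadow[right] == addr_tag) { candidate = right; break; }
    ++right;
  }
  printf("candidate granule: %d\n", candidate);  // prints 3
  return 0;
}
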
@@ -34,7 +34,8 @@ void Thread::InitRandomState() {
   stack_allocations_->push(0);
 }
 
-void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
+                  const InitState *state) {
   CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
   CHECK_EQ(0, stack_top_);
   CHECK_EQ(0, stack_bottom_);

@@ -44,6 +45,17 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
   if (auto sz = flags()->heap_history_size)
     heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
 
+  InitStackAndTls(state);
+#if !SANITIZER_FUCHSIA
+  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
+  // be initialized before we enter the thread itself, so we will instead call
+  // this later.
+  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+#endif
+}
+
+void Thread::InitStackRingBuffer(uptr stack_buffer_start,
+                                 uptr stack_buffer_size) {
   HwasanTSDThreadInit();  // Only needed with interceptors.
   uptr *ThreadLong = GetCurrentThreadLongPtr();
   // The following implicitly sets (this) as the current thread.

@@ -55,13 +67,6 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
   // ScopedTaggingDisable needs GetCurrentThread to be set up.
   ScopedTaggingDisabler disabler;
 
-  uptr tls_size;
-  uptr stack_size;
-  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
-                       &tls_size);
-  stack_top_ = stack_bottom_ + stack_size;
-  tls_end_ = tls_begin_ + tls_size;
-
   if (stack_bottom_) {
     int local;
     CHECK(AddrIsInStack((uptr)&local));

@@ -23,8 +23,17 @@ typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
 
 class Thread {
  public:
-  void Init(uptr stack_buffer_start, uptr stack_buffer_size);  // Must be called from the thread itself.
+  // These are optional parameters that can be passed to Init.
+  struct InitState;
+
+  void Init(uptr stack_buffer_start, uptr stack_buffer_size,
+            const InitState *state = nullptr);
   void InitRandomState();
+  void InitStackAndTls(const InitState *state = nullptr);
+
+  // Must be called from the thread itself.
+  void InitStackRingBuffer(uptr stack_buffer_start, uptr stack_buffer_size);
 
   void Destroy();
 
   uptr stack_top() { return stack_top_; }

@@ -12,4 +12,4 @@ void InitThreadList(uptr storage, uptr size) {
   new (thread_list_placeholder) HwasanThreadList(storage, size);
 }
 
-} // namespace
+} // namespace __hwasan

@@ -85,7 +85,7 @@ class HwasanThreadList {
         RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
   }
 
-  Thread *CreateCurrentThread() {
+  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
     Thread *t = nullptr;
     {
       SpinMutexLock l(&free_list_mutex_);

@@ -104,7 +104,7 @@ class HwasanThreadList {
       SpinMutexLock l(&live_list_mutex_);
       live_list_.push_back(t);
     }
-    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
+    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }

@@ -171,6 +171,8 @@ class HwasanThreadList {
     return stats_;
   }
 
+  uptr GetRingBufferSize() const { return ring_buffer_size_; }
+
  private:
   Thread *AllocThread() {
     SpinMutexLock l(&free_space_mutex_);

@@ -200,4 +202,4 @@ class HwasanThreadList {
 void InitThreadList(uptr storage, uptr size);
 HwasanThreadList &hwasanThreadList();
 
-} // namespace
+} // namespace __hwasan

@@ -21,34 +21,15 @@
 extern "C" {
 #endif
 
-typedef uint16_t dfsan_label;
+typedef uint8_t dfsan_label;
 typedef uint32_t dfsan_origin;
 
-/// Stores information associated with a specific label identifier.  A label
-/// may be a base label created using dfsan_create_label, with associated
-/// text description and user data, or an automatically created union label,
-/// which represents the union of two label identifiers (which may themselves
-/// be base or union labels).
-struct dfsan_label_info {
-  // Fields for union labels, set to 0 for base labels.
-  dfsan_label l1;
-  dfsan_label l2;
-
-  // Fields for base labels.
-  const char *desc;
-  void *userdata;
-};
-
 /// Signature of the callback argument to dfsan_set_write_callback().
 typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
 
-/// Computes the union of \c l1 and \c l2, possibly creating a union label in
-/// the process.
+/// Computes the union of \c l1 and \c l2, resulting in a union label.
 dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
 
-/// Creates and returns a base label with the given description and user data.
-dfsan_label dfsan_create_label(const char *desc, void *userdata);
-
 /// Sets the label for each address in [addr,addr+size) to \c label.
 void dfsan_set_label(dfsan_label label, void *addr, size_t size);
 

@@ -73,19 +54,9 @@ dfsan_origin dfsan_get_origin(long data);
 /// Retrieves the label associated with the data at the given address.
 dfsan_label dfsan_read_label(const void *addr, size_t size);
 
-/// Retrieves a pointer to the dfsan_label_info struct for the given label.
-const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
-
 /// Returns whether the given label label contains the label elem.
 int dfsan_has_label(dfsan_label label, dfsan_label elem);
 
-/// If the given label label contains a label with the description desc, returns
-/// that label, else returns 0.
-dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
-
-/// Returns the number of labels allocated.
-size_t dfsan_get_label_count(void);
-
 /// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
 /// with the application memory. Use this call to start over the taint tracking
 /// within the same process.

@@ -99,12 +70,6 @@ void dfsan_flush(void);
 /// callback executes. Pass in NULL to remove any callback.
 void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
 
-/// Writes the labels currently used by the program to the given file
-/// descriptor. The lines of the output have the following format:
-///
-/// <label> <parent label 1> <parent label 2> <label description if any>
-void dfsan_dump_labels(int fd);
-
 /// Interceptor hooks.
 /// Whenever a dfsan's custom function is called the corresponding
 /// hook is called it non-zero. The hooks should be defined by the user.

@@ -123,9 +88,65 @@ void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
 /// on, or the address is not labeled, it prints nothing.
 void dfsan_print_origin_trace(const void *addr, const char *description);
 
+/// Prints the origin trace of the label at the address \p addr to a
+/// pre-allocated output buffer. If origin tracking is not on, or the address is
+/// not labeled, it prints nothing.
+///
+/// Typical usage:
+/// \code
+///   char kDescription[] = "...";
+///   char buf[1024];
+///   dfsan_sprint_origin_trace(&tainted_var, kDescription, buf, sizeof(buf));
+/// \endcode
+///
+/// Typical usage that handles truncation:
+/// \code
+///   char buf[1024];
+///   int len = dfsan_sprint_origin_trace(&var, nullptr, buf, sizeof(buf));
+///
+///   if (len < sizeof(buf)) {
+///     ProcessOriginTrace(buf);
+///   } else {
+///     char *tmpbuf = new char[len + 1];
+///     dfsan_sprint_origin_trace(&var, nullptr, tmpbuf, len + 1);
+///     ProcessOriginTrace(tmpbuf);
+///     delete[] tmpbuf;
+///   }
+/// \endcode
+///
+/// \param addr The tainted memory address whose origin we are printing.
+/// \param description A description printed at the beginning of the trace.
+/// \param [out] out_buf The output buffer to write the results to.
+/// \param out_buf_size The size of \p out_buf.
+///
+/// \returns The number of symbols that should have been written to \p out_buf
+/// (not including trailing null byte '\0'). Thus, the string is truncated iff
+/// return value is not less than \p out_buf_size.
+size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
+                                 char *out_buf, size_t out_buf_size);
+
+/// Prints the stack trace leading to this call to a pre-allocated output
+/// buffer.
+///
+/// For usage examples, see dfsan_sprint_origin_trace.
+///
+/// \param [out] out_buf The output buffer to write the results to.
+/// \param out_buf_size The size of \p out_buf.
+///
+/// \returns The number of symbols that should have been written to \p out_buf
+/// (not including trailing null byte '\0'). Thus, the string is truncated iff
+/// return value is not less than \p out_buf_size.
+size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
+
 /// Retrieves the very first origin associated with the data at the given
 /// address.
 dfsan_origin dfsan_get_init_origin(const void *addr);
 
+/// Returns the value of -dfsan-track-origins.
+/// * 0: do not track origins.
+/// * 1: track origins at memory store operations.
+/// * 2: track origins at memory load and store operations.
+int dfsan_get_track_origins(void);
 #ifdef __cplusplus
 }  // extern "C"

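With dfsan_label narrowed to uint8_t and dfsan_create_label/dfsan_get_label_info removed, base labels are simply distinct bits and a union is effectively a bitwise OR. A hedged sketch of the trimmed interface, assuming a program built with -fsanitize=dataflow:

// sketch only: label values 1 and 2 are arbitrary disjoint bits.
#include <sanitizer/dfsan_interface.h>
#include <assert.h>

int main() {
  int x = 1, y = 2;
  dfsan_label x_label = 1;  // pick disjoint bits as base labels
  dfsan_label y_label = 2;
  dfsan_set_label(x_label, &x, sizeof(x));
  dfsan_set_label(y_label, &y, sizeof(y));

  int z = x + y;  // taint propagates through the computation
  dfsan_label z_label = dfsan_read_label(&z, sizeof(z));
  assert(dfsan_has_label(z_label, x_label));
  assert(dfsan_has_label(z_label, y_label));
  return 0;
}
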
@@ -17,8 +17,8 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 
 #if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
-    !SANITIZER_NETBSD && !SANITIZER_WINDOWS && \
-    !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
+    !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
+    !SANITIZER_SOLARIS
 # error "Interception doesn't work on this operating system."
 #endif
 

@@ -130,11 +130,6 @@ const interpose_substitution substitution_##func_name[] \
     extern "C" ret_type func(__VA_ARGS__);
 # define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
     extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif SANITIZER_RTEMS
-# define WRAP(x) x
-# define WRAPPER_NAME(x) #x
-# define INTERCEPTOR_ATTRIBUTE
-# define DECLARE_WRAPPER(ret_type, func, ...)
 #elif SANITIZER_FREEBSD || SANITIZER_NETBSD
 # define WRAP(x) __interceptor_ ## x
 # define WRAPPER_NAME(x) "__interceptor_" #x

@@ -162,10 +157,6 @@ const interpose_substitution substitution_##func_name[] \
 # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
 # define REAL(x) __unsanitized_##x
 # define DECLARE_REAL(ret_type, func, ...)
-#elif SANITIZER_RTEMS
-# define REAL(x) __real_ ## x
-# define DECLARE_REAL(ret_type, func, ...) \
-    extern "C" ret_type REAL(func)(__VA_ARGS__);
 #elif !SANITIZER_MAC
 # define PTR_TO_REAL(x) real_##x
 # define REAL(x) __interception::PTR_TO_REAL(x)

@@ -184,7 +175,7 @@ const interpose_substitution substitution_##func_name[] \
 # define ASSIGN_REAL(x, y)
 #endif  // SANITIZER_MAC
 
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
 # define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
   DECLARE_REAL(ret_type, func, __VA_ARGS__) \
   extern "C" ret_type WRAP(func)(__VA_ARGS__);

@@ -202,7 +193,7 @@ const interpose_substitution substitution_##func_name[] \
 // macros does its job. In exceptional cases you may need to call REAL(foo)
 // without defining INTERCEPTOR(..., foo, ...). For example, if you override
 // foo with an interceptor for other function.
-#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_MAC && !SANITIZER_FUCHSIA
 # define DEFINE_REAL(ret_type, func, ...) \
     typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
     namespace __interception { \

@@ -35,18 +35,14 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
     uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
   using namespace __lsan;
   uptr stack_top = 0, stack_bottom = 0;
-  ThreadContext *t;
-  if (StackTrace::WillUseFastUnwind(request_fast) &&
-      (t = CurrentThreadContext())) {
+  if (ThreadContext *t = CurrentThreadContext()) {
     stack_top = t->stack_end();
     stack_bottom = t->stack_begin();
   }
-  if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
-    if (StackTrace::WillUseFastUnwind(request_fast))
-      Unwind(max_depth, pc, bp, nullptr, stack_top, stack_bottom, true);
-    else
-      Unwind(max_depth, pc, 0, context, 0, 0, false);
-  }
+  if (SANITIZER_MIPS && !IsValidFrame(bp, stack_top, stack_bottom))
+    return;
+  bool fast = StackTrace::WillUseFastUnwind(request_fast);
+  Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
 }
 
 using namespace __lsan;

@@ -221,8 +221,8 @@ void UnlockAllocator();
 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
 bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
-void LockThreadRegistry();
-void UnlockThreadRegistry();
+void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
 ThreadRegistry *GetThreadRegistryLocked();
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,

@@ -30,13 +30,10 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
   return new (mem) ThreadContext(tid);
 }
 
-static const uptr kMaxThreads = 1 << 13;
-static const uptr kThreadQuarantineSize = 64;
-
 void InitializeThreadRegistry() {
   static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
-  thread_registry = new (thread_registry_placeholder)
-      ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+  thread_registry =
+      new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
 }
 
 ThreadContextLsanBase::ThreadContextLsanBase(int tid)

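LockThreadRegistry and UnlockThreadRegistry acquire and release a mutex in separate functions, a pattern Clang's -Wthread-safety analysis cannot follow, which is why the declarations now opt out with NO_THREAD_SAFETY_ANALYSIS. A stand-in illustration using std::mutex rather than the sanitizer's own mutex types:

// sketch only: names and mutex type are illustrative, not the runtime's.
#include <mutex>

std::mutex registry_mutex;

// The lock is taken in one function and dropped in another; annotating these
// for thread-safety analysis would produce false positives, so the real
// wrappers carry NO_THREAD_SAFETY_ANALYSIS instead.
void LockRegistry() { registry_mutex.lock(); }
void UnlockRegistry() { registry_mutex.unlock(); }

int main() {
  LockRegistry();
  // ... walk the thread registry while holding the lock ...
  UnlockRegistry();
  return 0;
}
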
@@ -41,6 +41,7 @@ sanitizer_common_files = \
 	sanitizer_linux_s390.cpp \
 	sanitizer_mac.cpp \
 	sanitizer_mac_libcdep.cpp \
+	sanitizer_mutex.cpp \
 	sanitizer_netbsd.cpp \
 	sanitizer_openbsd.cpp \
 	sanitizer_persistent_allocator.cpp \

@@ -57,7 +58,6 @@ sanitizer_common_files = \
 	sanitizer_procmaps_linux.cpp \
 	sanitizer_procmaps_mac.cpp \
 	sanitizer_procmaps_solaris.cpp \
-	sanitizer_rtems.cpp \
 	sanitizer_solaris.cpp \
 	sanitizer_stackdepot.cpp \
 	sanitizer_stacktrace.cpp \

@@ -128,8 +128,9 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
 	sanitizer_file.lo sanitizer_flags.lo sanitizer_flag_parser.lo \
 	sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
 	sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
-	sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_netbsd.lo \
-	sanitizer_openbsd.lo sanitizer_persistent_allocator.lo \
+	sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_mutex.lo \
+	sanitizer_netbsd.lo sanitizer_openbsd.lo \
+	sanitizer_persistent_allocator.lo \
 	sanitizer_platform_limits_freebsd.lo \
 	sanitizer_platform_limits_linux.lo \
 	sanitizer_platform_limits_openbsd.lo \

@@ -138,11 +139,11 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
 	sanitizer_posix_libcdep.lo sanitizer_printf.lo \
 	sanitizer_procmaps_bsd.lo sanitizer_procmaps_common.lo \
 	sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
-	sanitizer_procmaps_solaris.lo sanitizer_rtems.lo \
-	sanitizer_solaris.lo sanitizer_stackdepot.lo \
-	sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
-	sanitizer_stacktrace_sparc.lo sanitizer_symbolizer_mac.lo \
-	sanitizer_symbolizer_report.lo sanitizer_stacktrace_printer.lo \
+	sanitizer_procmaps_solaris.lo sanitizer_solaris.lo \
+	sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
+	sanitizer_stacktrace_libcdep.lo sanitizer_stacktrace_sparc.lo \
+	sanitizer_symbolizer_mac.lo sanitizer_symbolizer_report.lo \
+	sanitizer_stacktrace_printer.lo \
 	sanitizer_stoptheworld_linux_libcdep.lo \
 	sanitizer_stoptheworld_mac.lo sanitizer_suppressions.lo \
 	sanitizer_symbolizer.lo sanitizer_symbolizer_libbacktrace.lo \

@@ -400,6 +401,7 @@ sanitizer_common_files = \
 	sanitizer_linux_s390.cpp \
 	sanitizer_mac.cpp \
 	sanitizer_mac_libcdep.cpp \
+	sanitizer_mutex.cpp \
 	sanitizer_netbsd.cpp \
 	sanitizer_openbsd.cpp \
 	sanitizer_persistent_allocator.cpp \

@@ -416,7 +418,6 @@ sanitizer_common_files = \
 	sanitizer_procmaps_linux.cpp \
 	sanitizer_procmaps_mac.cpp \
 	sanitizer_procmaps_solaris.cpp \
-	sanitizer_rtems.cpp \
 	sanitizer_solaris.cpp \
 	sanitizer_stackdepot.cpp \
 	sanitizer_stacktrace.cpp \

@@ -557,6 +558,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mutex.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_netbsd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_openbsd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@

@@ -573,7 +575,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_solaris.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_rtems.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_solaris.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@

diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
@@ -163,7 +163,7 @@ AddrHashMap<T, kSize>::AddrHashMap() {
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
   uptr addr = h->addr_;
   uptr hash = calcHash(addr);
   Bucket *b = &table_[hash];
@@ -292,7 +292,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
+void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
   if (!h->cell_)
     return;
   Bucket *b = h->bucket_;
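Note: this hunk and several below attach NO_THREAD_SAFETY_ANALYSIS to functions where lock ownership crosses a function boundary (acquire() returns with a bucket mutex held; release() unlocks it), which Clang's -Wthread-safety static analysis cannot model. A minimal sketch of the mechanism, assuming Clang's thread-safety attributes; the macro names below are local stand-ins, not the runtime's:

    // thread_safety_demo.cpp -- illustrative only.
    #include <mutex>

    #if defined(__clang__)
    #  define GUARDED_BY_DEMO(m) __attribute__((guarded_by(m)))
    #  define NO_TSA_DEMO __attribute__((no_thread_safety_analysis))
    #else
    #  define GUARDED_BY_DEMO(m)
    #  define NO_TSA_DEMO
    #endif

    class Counter {
     public:
      // Lock ownership is split across acquire()/release(), which the
      // analysis cannot follow, so both functions opt out of checking.
      void acquire() NO_TSA_DEMO { mu_.lock(); }
      void release() NO_TSA_DEMO {
        ++value_;
        mu_.unlock();
      }

     private:
      std::mutex mu_;
      int value_ GUARDED_BY_DEMO(mu_) = 0;
    };

    int main() {
      Counter c;
      c.acquire();
      c.release();
    }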
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cpp b/libsanitizer/sanitizer_common/sanitizer_allocator.cpp
@@ -137,14 +137,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
 
 #endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
 
-namespace {
-const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-
-struct BlockHeader {
-  u64 magic;
-};
-}  // namespace
-
 static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
   SetAllocatorOutOfMemory();
   Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -153,28 +145,17 @@ static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
 }
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
-  uptr s = size + sizeof(BlockHeader);
-  if (s < size)
-    return nullptr;
-  BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
+  void *p = RawInternalAlloc(size, cache, alignment);
   if (UNLIKELY(!p))
-    ReportInternalAllocatorOutOfMemory(s);
-  p->magic = kBlockMagic;
-  return p + 1;
+    ReportInternalAllocatorOutOfMemory(size);
+  return p;
 }
 
 void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
-  if (!addr)
-    return InternalAlloc(size, cache);
-  uptr s = size + sizeof(BlockHeader);
-  if (s < size)
-    return nullptr;
-  BlockHeader *p = (BlockHeader *)addr - 1;
-  CHECK_EQ(kBlockMagic, p->magic);
-  p = (BlockHeader *)RawInternalRealloc(p, s, cache);
+  void *p = RawInternalRealloc(addr, size, cache);
   if (UNLIKELY(!p))
-    ReportInternalAllocatorOutOfMemory(s);
-  return p + 1;
+    ReportInternalAllocatorOutOfMemory(size);
+  return p;
 }
 
 void *InternalReallocArray(void *addr, uptr count, uptr size,
@@ -203,12 +184,7 @@ void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
 }
 
 void InternalFree(void *addr, InternalAllocatorCache *cache) {
-  if (!addr)
-    return;
-  BlockHeader *p = (BlockHeader *)addr - 1;
-  CHECK_EQ(kBlockMagic, p->magic);
-  p->magic = 0;
-  RawInternalFree(p, cache);
+  RawInternalFree(addr, cache);
 }
 
 // LowLevelAllocator
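Note: for context, here is a compilable sketch of the magic-header bookkeeping that this hunk removes from the internal allocator. Each allocation was prefixed with a tagged header so that frees of foreign pointers and double frees could be caught; the hunk drops that layer and forwards directly to the raw allocator. DemoAlloc/DemoFree and the use of malloc/free are stand-ins for illustration:

    // magic_header_demo.cpp -- sketch of the removed scheme.
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    namespace {
    const uint64_t kBlockMagic = 0x6A6CB03ABCEBC041ull;
    struct BlockHeader { uint64_t magic; };
    }  // namespace

    void *DemoAlloc(size_t size) {
      size_t s = size + sizeof(BlockHeader);
      if (s < size) return nullptr;  // overflow check, as in the old code
      auto *p = static_cast<BlockHeader *>(malloc(s));
      if (!p) return nullptr;
      p->magic = kBlockMagic;
      return p + 1;  // caller sees the bytes just past the header
    }

    void DemoFree(void *addr) {
      if (!addr) return;
      BlockHeader *p = static_cast<BlockHeader *>(addr) - 1;
      assert(p->magic == kBlockMagic);  // catches foreign pointers
      p->magic = 0;                     // catches double frees
      free(p);
    }

    int main() {
      void *m = DemoAlloc(32);
      DemoFree(m);
    }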
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
@@ -177,12 +177,12 @@ class CombinedAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     primary_.ForceLock();
     secondary_.ForceLock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     secondary_.ForceUnlock();
     primary_.ForceUnlock();
   }
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -17,6 +17,7 @@
 template <class SizeClassAllocator>
 struct SizeClassAllocator64LocalCache {
   typedef SizeClassAllocator Allocator;
+  typedef MemoryMapper<Allocator> MemoryMapperT;
 
   void Init(AllocatorGlobalStats *s) {
     stats_.Init();
@@ -53,7 +54,7 @@ struct SizeClassAllocator64LocalCache {
     PerClass *c = &per_class_[class_id];
     InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(c, allocator, class_id, c->max_count / 2);
+      DrainHalfMax(c, allocator, class_id);
     CompactPtrT chunk = allocator->PointerToCompactPtr(
         allocator->GetRegionBeginBySizeClass(class_id),
         reinterpret_cast<uptr>(p));
@@ -62,10 +63,10 @@ struct SizeClassAllocator64LocalCache {
   }
 
   void Drain(SizeClassAllocator *allocator) {
+    MemoryMapperT memory_mapper(*allocator);
     for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      while (c->count > 0)
-        Drain(c, allocator, i, c->count);
+      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
     }
   }
 
@@ -106,12 +107,18 @@ struct SizeClassAllocator64LocalCache {
     return true;
   }
 
-  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
-                      uptr count) {
+  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
+                             uptr class_id) {
+    MemoryMapperT memory_mapper(*allocator);
+    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
+  }
+
+  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
-    allocator->ReturnToAllocator(&stats_, class_id,
+    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                  &c->chunks[first_idx_to_drain], count);
   }
 };
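Note: the policy itself is unchanged by this refactor: when a per-class cache fills, half of max_count chunks go back to the shared allocator in one out-of-line slow-path call, keeping the hot Deallocate path small; what changes is that a MemoryMapper is now constructed once per drain and threaded through. A rough sketch of the drain-half shape, with SharedPool/LocalCache as hypothetical stand-ins:

    // drain_half_demo.cpp -- sketch only.
    #include <cstdio>
    #include <vector>

    struct SharedPool {
      void TakeBack(const std::vector<int> &chunks) {
        printf("returned %zu chunks\n", chunks.size());
      }
    };

    class LocalCache {
     public:
      explicit LocalCache(size_t max) : max_(max) {}
      void Put(SharedPool *pool, int chunk) {
        if (chunks_.size() == max_) DrainHalfMax(pool);  // slow path
        chunks_.push_back(chunk);
      }

     private:
      void DrainHalfMax(SharedPool *pool) {
        size_t keep = chunks_.size() - max_ / 2;
        std::vector<int> back(chunks_.begin() + keep, chunks_.end());
        pool->TakeBack(back);
        chunks_.resize(keep);
      }
      size_t max_;
      std::vector<int> chunks_;
    };

    int main() {
      SharedPool pool;
      LocalCache cache(8);
      for (int i = 0; i < 20; i++) cache.Put(&pool, i);
    }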
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
@@ -237,13 +237,13 @@ class SizeClassAllocator32 {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetSizeClassInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     for (int i = kNumClasses - 1; i >= 0; i--) {
       GetSizeClassInfo(i)->mutex.Unlock();
     }
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
@@ -42,6 +42,44 @@ struct SizeClassAllocator64FlagMasks {  // Bit masks.
   };
 };
 
+template <typename Allocator>
+class MemoryMapper {
+ public:
+  typedef typename Allocator::CompactPtrT CompactPtrT;
+
+  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
+
+  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+    ranges = released_ranges_count_;
+    released_ranges_count_ = 0;
+    bytes = released_bytes_;
+    released_bytes_ = 0;
+    return ranges != 0;
+  }
+
+  u64 *MapPackedCounterArrayBuffer(uptr count) {
+    buffer_.clear();
+    buffer_.resize(count);
+    return buffer_.data();
+  }
+
+  // Releases [from, to) range of pages back to OS.
+  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
+    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
+    ReleaseMemoryPagesToOS(from_page, to_page);
+    released_ranges_count_++;
+    released_bytes_ += to_page - from_page;
+  }
+
+ private:
+  const Allocator &allocator_;
+  uptr released_ranges_count_ = 0;
+  uptr released_bytes_ = 0;
+  InternalMmapVector<u64> buffer_;
+};
+
 template <class Params>
 class SizeClassAllocator64 {
  public:
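Note: the new top-level MemoryMapper replaces the per-class_id mapper nested inside SizeClassAllocator64 (removed in a later hunk). It accumulates released-range statistics that callers drain with GetAndResetStats, and it hands out a reusable InternalMmapVector-backed buffer instead of mmap/unmap per release cycle. A sketch of how such a mapper is driven, with StatsMapper as a hypothetical stand-in that only counts:

    // memory_mapper_usage.cpp -- sketch only.
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    struct StatsMapper {
      uptr ranges = 0, bytes = 0;
      void ReleasePageRangeToOS(uptr cls, uptr from, uptr to) {
        // The real mapper calls ReleaseMemoryPagesToOS(); we only count.
        (void)cls;
        ranges++;
        bytes += to - from;
      }
      bool GetAndResetStats(uptr &r, uptr &b) {
        r = ranges; ranges = 0;
        b = bytes; bytes = 0;
        return r != 0;
      }
    };

    int main() {
      StatsMapper m;
      m.ReleasePageRangeToOS(1, 0x1000, 0x3000);
      uptr r, b;
      if (m.GetAndResetStats(r, b))
        printf("released %zu range(s), %zu bytes\n", (size_t)r, (size_t)b);
    }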
@@ -57,6 +95,7 @@ class SizeClassAllocator64 {
 
   typedef SizeClassAllocator64<Params> ThisT;
   typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+  typedef MemoryMapper<ThisT> MemoryMapperT;
 
   // When we know the size class (the region base) we can represent a pointer
   // as a 4-byte integer (offset from the region start shifted right by 4).
@@ -120,9 +159,10 @@ class SizeClassAllocator64 {
   }
 
   void ForceReleaseToOS() {
+    MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
-      MaybeReleaseToOS(class_id, true /*force*/);
+      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }
 
@@ -131,7 +171,8 @@ class SizeClassAllocator64 {
            alignment <= SizeClassMap::kMaxSize;
   }
 
-  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+                                  AllocatorStats *stat, uptr class_id,
                                   const CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -154,7 +195,7 @@ class SizeClassAllocator64 {
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id, false /*force*/);
+    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -312,13 +353,13 @@ class SizeClassAllocator64 {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetRegionInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
       GetRegionInfo(i)->mutex.Unlock();
     }
@@ -362,11 +403,11 @@ class SizeClassAllocator64 {
   // For the performance sake, none of the accessors check the validity of the
   // arguments, it is assumed that index is always in [0, n) range and the value
   // is not incremented past max_value.
-  template<class MemoryMapperT>
   class PackedCounterArray {
    public:
-    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
-        : n(num_counters), memory_mapper(mapper) {
+    template <typename MemoryMapper>
+    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
+        : n(num_counters) {
       CHECK_GT(num_counters, 0);
       CHECK_GT(max_value, 0);
       constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
@@ -383,16 +424,8 @@ class SizeClassAllocator64 {
       packing_ratio_log = Log2(packing_ratio);
       bit_offset_mask = packing_ratio - 1;
 
-      buffer_size =
-          (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
-          sizeof(*buffer);
-      buffer = reinterpret_cast<u64*>(
-          memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
-    }
-    ~PackedCounterArray() {
-      if (buffer) {
-        memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
-      }
+      buffer = mapper->MapPackedCounterArrayBuffer(
+          RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
     }
 
     bool IsAllocated() const {
@@ -429,19 +462,16 @@ class SizeClassAllocator64 {
     u64 counter_mask;
     u64 packing_ratio_log;
     u64 bit_offset_mask;
-
-    MemoryMapperT* const memory_mapper;
-    u64 buffer_size;
     u64* buffer;
   };
 
   template <class MemoryMapperT>
   class FreePagesRangeTracker {
    public:
-    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
         : memory_mapper(mapper),
-          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
-          in_the_range(false), current_page(0), current_range_start_page(0) {}
+          class_id(class_id),
+          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
 
     void NextPage(bool freed) {
       if (freed) {
@@ -463,28 +493,30 @@ class SizeClassAllocator64 {
     void CloseOpenedRange() {
       if (in_the_range) {
         memory_mapper->ReleasePageRangeToOS(
-            current_range_start_page << page_size_scaled_log,
+            class_id, current_range_start_page << page_size_scaled_log,
             current_page << page_size_scaled_log);
         in_the_range = false;
       }
     }
 
-    MemoryMapperT* const memory_mapper;
-    const uptr page_size_scaled_log;
-    bool in_the_range;
-    uptr current_page;
-    uptr current_range_start_page;
+    MemoryMapperT *const memory_mapper = nullptr;
+    const uptr class_id = 0;
+    const uptr page_size_scaled_log = 0;
+    bool in_the_range = false;
+    uptr current_page = 0;
+    uptr current_range_start_page = 0;
   };
 
   // Iterates over the free_array to identify memory pages containing freed
   // chunks only and returns these pages back to OS.
   // allocated_pages_count is the total number of pages allocated for the
   // current bucket.
-  template<class MemoryMapperT>
+  template <typename MemoryMapper>
   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                     uptr free_array_count, uptr chunk_size,
                                     uptr allocated_pages_count,
-                                    MemoryMapperT *memory_mapper) {
+                                    MemoryMapper *memory_mapper,
+                                    uptr class_id) {
     const uptr page_size = GetPageSizeCached();
 
     // Figure out the number of chunks per page and whether we can take a fast
@@ -520,9 +552,8 @@ class SizeClassAllocator64 {
       UNREACHABLE("All chunk_size/page_size ratios must be handled.");
     }
 
-    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
-                                               full_pages_chunk_count_max,
-                                               memory_mapper);
+    PackedCounterArray counters(allocated_pages_count,
+                                full_pages_chunk_count_max, memory_mapper);
     if (!counters.IsAllocated())
       return;
 
@@ -547,7 +578,7 @@ class SizeClassAllocator64 {
 
     // Iterate over pages detecting ranges of pages with chunk counters equal
     // to the expected number of chunks for the particular page.
-    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
     if (same_chunk_count_per_page) {
       // Fast path, every page has the same number of chunks affecting it.
       for (uptr i = 0; i < counters.GetCount(); i++)
@@ -586,7 +617,7 @@ class SizeClassAllocator64 {
   }
 
  private:
-  friend class MemoryMapper;
+  friend class MemoryMapper<ThisT>;
 
   ReservedAddressRange address_range;
 
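Note: PackedCounterArray is the page-release workhorse: one small saturating counter per page, packed into u64 cells at a power-of-two packing ratio, tallies how many freed chunks touch each page. This change makes the buffer come from the mapper's reusable vector, so the destructor no longer unmaps anything. A self-contained sketch of the packing arithmetic (PackedCounters is a hypothetical simplification; as in the real class, callers must not increment a counter past max_value):

    // packed_counters_demo.cpp -- sketch only.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class PackedCounters {
     public:
      PackedCounters(uint64_t n, uint64_t max_value) : n_(n) {
        uint64_t bits = 1;
        while (((1ull << bits) - 1) < max_value) bits *= 2;  // pow-2 width
        per_cell_ = 64 / bits;
        mask_ = (1ull << bits) - 1;
        bits_log_ = __builtin_ctzll(bits);  // GCC/Clang builtin
        cells_.resize((n + per_cell_ - 1) / per_cell_);
      }
      void Inc(uint64_t i) {
        uint64_t shift = (i % per_cell_) << bits_log_;
        cells_[i / per_cell_] += 1ull << shift;
      }
      uint64_t Get(uint64_t i) const {
        uint64_t shift = (i % per_cell_) << bits_log_;
        return (cells_[i / per_cell_] >> shift) & mask_;
      }

     private:
      uint64_t n_, per_cell_, mask_, bits_log_;
      std::vector<uint64_t> cells_;
    };

    int main() {
      PackedCounters c(/*n=*/100, /*max_value=*/7);  // 4-bit counters
      for (int k = 0; k < 5; k++) c.Inc(42);
      printf("counter[42] = %llu\n", (unsigned long long)c.Get(42));  // 5
    }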
@@ -820,57 +851,13 @@ class SizeClassAllocator64 {
     return true;
   }
 
-  class MemoryMapper {
-   public:
-    MemoryMapper(const ThisT& base_allocator, uptr class_id)
-        : allocator(base_allocator),
-          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
-          released_ranges_count(0),
-          released_bytes(0) {
-    }
-
-    uptr GetReleasedRangesCount() const {
-      return released_ranges_count;
-    }
-
-    uptr GetReleasedBytes() const {
-      return released_bytes;
-    }
-
-    void *MapPackedCounterArrayBuffer(uptr buffer_size) {
-      // TODO(alekseyshl): The idea to explore is to check if we have enough
-      // space between num_freed_chunks*sizeof(CompactPtrT) and
-      // mapped_free_array to fit buffer_size bytes and use that space instead
-      // of mapping a temporary one.
-      return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
-    }
-
-    void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
-      UnmapOrDie(buffer, buffer_size);
-    }
-
-    // Releases [from, to) range of pages back to OS.
-    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
-      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
-      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
-      ReleaseMemoryPagesToOS(from_page, to_page);
-      released_ranges_count++;
-      released_bytes += to_page - from_page;
-    }
-
-   private:
-    const ThisT& allocator;
-    const uptr region_base;
-    uptr released_ranges_count;
-    uptr released_bytes;
-  };
-
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
   //
   // TODO(morehouse): Support a callback on memory release so HWASan can release
   // aliases as well.
-  void MaybeReleaseToOS(uptr class_id, bool force) {
+  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+                        bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -894,17 +881,16 @@ class SizeClassAllocator64 {
       }
     }
 
-    MemoryMapper memory_mapper(*this, class_id);
-
-    ReleaseFreeMemoryToOS<MemoryMapper>(
+    ReleaseFreeMemoryToOS(
         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
-        RoundUpTo(region->allocated_user, page_size) / page_size,
-        &memory_mapper);
+        RoundUpTo(region->allocated_user, page_size) / page_size,
+        memory_mapper, class_id);
 
-    if (memory_mapper.GetReleasedRangesCount() > 0) {
+    uptr ranges, bytes;
+    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
       region->rtoi.n_freed_at_last_release = region->stats.n_freed;
-      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
-      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+      region->rtoi.num_releases += ranges;
+      region->rtoi.last_released_bytes = bytes;
     }
     region->rtoi.last_release_at_ns = MonotonicNanoTime();
   }
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,13 +267,9 @@ class LargeMmapAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
-    mutex_.Lock();
-  }
+  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
 
-  void ForceUnlock() {
-    mutex_.Unlock();
-  }
+  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
 
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
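Note: unlike the NO_THREAD_SAFETY_ANALYSIS escape hatch used earlier, here the lock/unlock split can be described precisely: ACQUIRE(mutex_) tells Clang the function returns with mutex_ held, RELEASE(mutex_) that it drops it. A sketch of the underlying attributes, assuming Clang's -Wthread-safety; macro spellings below are local stand-ins:

    // acquire_release_demo.cpp -- sketch only.
    #include <mutex>

    #if defined(__clang__)
    #  define CAPABILITY_DEMO(x) __attribute__((capability(x)))
    #  define ACQUIRE_DEMO(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #  define RELEASE_DEMO(...) __attribute__((release_capability(__VA_ARGS__)))
    #else
    #  define CAPABILITY_DEMO(x)
    #  define ACQUIRE_DEMO(...)
    #  define RELEASE_DEMO(...)
    #endif

    class CAPABILITY_DEMO("mutex") Mutex {
     public:
      void Lock() ACQUIRE_DEMO() { mu_.lock(); }
      void Unlock() RELEASE_DEMO() { mu_.unlock(); }

     private:
      std::mutex mu_;
    };

    class Registry {
     public:
      // The analysis now knows ForceLock leaves mutex_ held and
      // ForceUnlock releases it, instead of flagging both functions.
      void ForceLock() ACQUIRE_DEMO(mutex_) { mutex_.Lock(); }
      void ForceUnlock() RELEASE_DEMO(mutex_) { mutex_.Unlock(); }

     private:
      Mutex mutex_;
    };

    int main() {
      Registry r;
      r.ForceLock();
      r.ForceUnlock();
    }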
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cpp b/libsanitizer/sanitizer_common/sanitizer_common.cpp
@@ -37,10 +37,9 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                       const char *mmap_type, error_t err,
                                       bool raw_report) {
   static int recursion_count;
-  if (SANITIZER_RTEMS || raw_report || recursion_count) {
-    // If we are on RTEMS or raw report is requested or we went into recursion,
-    // just die. The Report() and CHECK calls below may call mmap recursively
-    // and fail.
+  if (raw_report || recursion_count) {
+    // If raw report is requested or we went into recursion just die. The
+    // Report() and CHECK calls below may call mmap recursively and fail.
     RawWrite("ERROR: Failed to mmap\n");
     Die();
   }
@@ -331,6 +330,14 @@ static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
   return 0;
 }
 
+void internal_sleep(unsigned seconds) {
+  internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForSeconds(unsigned seconds) {
+  internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
+
 }  // namespace __sanitizer
 
 using namespace __sanitizer;
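Note: this consolidates the sleep primitives: every per-platform file now only has to provide internal_usleep, and the seconds/millis wrappers live in one place. A sketch of the resulting layering, using POSIX nanosleep as a stand-in for the platform primitive:

    // usleep_consolidation_demo.cpp -- sketch only.
    #include <cstdint>
    #include <time.h>

    using u64 = uint64_t;

    // Platform-specific primitive (the real tree implements this per OS).
    void internal_usleep(u64 useconds) {
      timespec ts;
      ts.tv_sec = static_cast<time_t>(useconds / 1000000);
      ts.tv_nsec = static_cast<long>((useconds % 1000000) * 1000);
      nanosleep(&ts, nullptr);
    }

    // Shared wrappers, exactly as added by this hunk.
    void internal_sleep(unsigned seconds) {
      internal_usleep((u64)seconds * 1000 * 1000);
    }
    void SleepForSeconds(unsigned seconds) {
      internal_usleep((u64)seconds * 1000 * 1000);
    }
    void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }

    int main() { SleepForMillis(1); }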
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -237,10 +237,16 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
 // Lock sanitizer error reporting and protects against nested errors.
 class ScopedErrorReportLock {
  public:
-  ScopedErrorReportLock();
-  ~ScopedErrorReportLock();
+  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
+  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
 
-  static void CheckLocked();
+  static void Lock() ACQUIRE(mutex_);
+  static void Unlock() RELEASE(mutex_);
+  static void CheckLocked() CHECK_LOCKED(mutex_);
+
+ private:
+  static atomic_uintptr_t reporting_thread_;
+  static StaticSpinMutex mutex_;
 };
 
 extern uptr stoptheworld_tracer_pid;
@@ -288,8 +294,8 @@ void InitTlsSize();
 uptr GetTlsSize();
 
 // Other
-void SleepForSeconds(int seconds);
-void SleepForMillis(int millis);
+void SleepForSeconds(unsigned seconds);
+void SleepForMillis(unsigned millis);
 u64 NanoTime();
 u64 MonotonicNanoTime();
 int Atexit(void (*function)(void));
@@ -1057,6 +1063,13 @@ class ArrayRef {
   T *end_ = nullptr;
 };
 
+#define PRINTF_128(v)                                                         \
+  (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
+      (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)),                \
+      (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)),                \
+      (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)),             \
+      (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
+
 }  // namespace __sanitizer
 
 inline void *operator new(__sanitizer::operator_new_size_type size,
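Note: ScopedErrorReportLock becomes a header-visible RAII wrapper over static Lock()/Unlock(), so the constructor/destructor inline down to the annotated lock calls. A sketch of the shape, with std::mutex standing in for the StaticSpinMutex member and the thread-safety annotations omitted:

    // scoped_report_lock_demo.cpp -- sketch only.
    #include <mutex>

    class ScopedReportLock {
     public:
      ScopedReportLock() { Lock(); }
      ~ScopedReportLock() { Unlock(); }

      static void Lock() { mutex().lock(); }
      static void Unlock() { mutex().unlock(); }

     private:
      static std::mutex &mutex() {
        static std::mutex m;  // stands in for the static StaticSpinMutex
        return m;
      }
    };

    int main() {
      ScopedReportLock lock;  // held to end of scope; serializes reporting
    }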
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -134,11 +134,11 @@ extern const short *_tolower_tab_;
 
 // Platform-specific options.
 #if SANITIZER_MAC
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
 #elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
 #else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
 #endif  // SANITIZER_MAC
 
 #ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -823,11 +823,11 @@ INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
   // N.B.: If we switch this to internal_ we'll have to use internal_memmove
   // due to memcpy being an alias of memmove on OS X.
   void *ctx;
-  if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
   COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
-  } else {
+#else
   COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-  }
+#endif
 }
 
 #define INIT_MEMCPY \
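Note: the macro flips from true/false to 1/0 so the interceptor can select its body with the preprocessor rather than a runtime branch on a constant; only one of the two implementations is even compiled per platform. A sketch of the pattern (HAS_DISTINCT_MEMCPY is a hypothetical stand-in for the platform macro):

    // compile_time_dispatch_demo.cpp -- sketch only.
    #include <cstdio>
    #include <cstring>

    #define HAS_DISTINCT_MEMCPY 1

    void *demo_copy(void *dst, const void *src, size_t n) {
    #if HAS_DISTINCT_MEMCPY
      return memcpy(dst, src, n);   // overlap is undefined behavior
    #else
      return memmove(dst, src, n);  // memcpy aliases memmove on this platform
    #endif
    }

    int main() {
      char buf[16] = "hello";
      char out[16];
      demo_copy(out, buf, 6);
      puts(out);
    }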
@@ -957,6 +957,7 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
   // Assuming frexp() always writes to |exp|.
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
   double res = REAL(frexp)(x, exp);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
   return res;
 }
 
@@ -969,22 +970,18 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
 INTERCEPTOR(float, frexpf, float x, int *exp) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
-  // FIXME: under ASan the call below may write to freed memory and corrupt
-  // its metadata. See
-  // https://github.com/google/sanitizers/issues/321.
-  float res = REAL(frexpf)(x, exp);
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+  float res = REAL(frexpf)(x, exp);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
   return res;
 }
 
 INTERCEPTOR(long double, frexpl, long double x, int *exp) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
-  // FIXME: under ASan the call below may write to freed memory and corrupt
-  // its metadata. See
-  // https://github.com/google/sanitizers/issues/321.
-  long double res = REAL(frexpl)(x, exp);
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+  long double res = REAL(frexpl)(x, exp);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
   return res;
 }
 
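Note: the reordering matters for the bug the old FIXME described: the write-range check now runs before the real call (so a write into freed memory is reported instead of corrupting metadata), and the output is marked initialized afterwards. A sketch of the resulting interceptor shape; WriteRangeCheck/MarkInitialized are hypothetical stand-ins for the sanitizer callbacks:

    // interceptor_order_demo.cpp -- sketch only.
    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    static void WriteRangeCheck(void *p, size_t n) { (void)p; (void)n; }
    static void MarkInitialized(void *p, size_t n) { (void)p; (void)n; }

    double demo_frexp(double x, int *exp) {
      WriteRangeCheck(exp, sizeof(*exp));  // check before the real call
      double res = frexp(x, exp);          // real call writes *exp
      MarkInitialized(exp, sizeof(*exp));  // *exp is now defined (MSan view)
      return res;
    }

    int main() {
      int e;
      double m = demo_frexp(8.0, &e);
      printf("8 = %g * 2^%d\n", m, e);
    }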
@@ -5303,6 +5300,12 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
 #define INIT_TIMES
 #endif
 
+#if SANITIZER_S390 && \
+    (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+#endif
+
 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
 #if !SANITIZER_S390
 #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
@@ -5342,11 +5345,7 @@ void *__tls_get_addr_opt(void *arg);
 // descriptor offset as an argument instead of a pointer. GOT address
 // is passed in r12, so it's necessary to write it in assembly. This is
 // the function used by the compiler.
-extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
 #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
-DEFINE_REAL(uptr, __tls_get_offset, void *arg)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
 INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
@@ -5362,6 +5361,15 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
   }
   return res;
 }
+#endif  // SANITIZER_S390
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_S390 && \
+    (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
 // We need a hidden symbol aliasing the above, so that we can jump
 // directly to it from the assembly below.
 extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
@@ -5400,9 +5408,6 @@ asm(
     "br %r3\n"
     ".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
 );
-#endif  // SANITIZER_S390
-#else
-#define INIT_TLS_GET_ADDR
 #endif
 
 #if SANITIZER_INTERCEPT_LISTXATTR
@@ -6099,6 +6104,40 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
 #define INIT_FOPEN
 #endif
 
+#if SANITIZER_INTERCEPT_FLOPEN
+INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
+  void *ctx;
+  va_list ap;
+  va_start(ap, flags);
+  u16 mode = static_cast<u16>(va_arg(ap, u32));
+  va_end(ap);
+  COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+  if (path) {
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  }
+  return REAL(flopen)(path, flags, mode);
+}
+
+INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
+  void *ctx;
+  va_list ap;
+  va_start(ap, flags);
+  u16 mode = static_cast<u16>(va_arg(ap, u32));
+  va_end(ap);
+  COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+  if (path) {
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  }
+  return REAL(flopenat)(dirfd, path, flags, mode);
+}
+
+#define INIT_FLOPEN                  \
+  COMMON_INTERCEPT_FUNCTION(flopen); \
+  COMMON_INTERCEPT_FUNCTION(flopenat);
+#else
+#define INIT_FLOPEN
+#endif
+
 #if SANITIZER_INTERCEPT_FOPEN64
 INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
   void *ctx;
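Note on the va_arg dance in the new flopen/flopenat interceptors: integer arguments narrower than int are promoted when passed through varargs, so the optional mode must be read at promoted width and then narrowed, as the interceptors do with va_arg(ap, u32) and a cast to u16. A sketch of the idiom; demo_open is a hypothetical stand-in:

    // varargs_mode_demo.cpp -- sketch only.
    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>

    int demo_open(const char *path, int flags, ...) {
      va_list ap;
      va_start(ap, flags);
      // The mode arrives promoted; read it wide, then narrow.
      uint16_t mode = static_cast<uint16_t>(va_arg(ap, uint32_t));
      va_end(ap);
      printf("open(%s, %d, 0%o)\n", path, flags, mode);
      return 0;
    }

    int main() { return demo_open("/tmp/x", 0, 0644); }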
@@ -6463,7 +6502,7 @@ INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
 INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
-  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+  int res = REAL(sem_trywait)(s);
   if (res == 0) {
     COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
   }
@@ -10264,6 +10303,7 @@ static void InitializeCommonInterceptors() {
   INIT_LIBIO_INTERNALS;
   INIT_FOPEN;
   INIT_FOPEN64;
+  INIT_FLOPEN;
   INIT_OPEN_MEMSTREAM;
   INIT_OBSTACK;
   INIT_FFLUSH;
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -138,7 +138,7 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
   return start;
 }
 
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
 
 // Reserve memory range [beg, end].
 // We need to use inclusive range because end+1 may not be representable.
@@ -189,7 +189,7 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
   Die();
 }
 
-#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif  // !SANITIZER_FUCHSIA
 
 }  // namespace __sanitizer
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp b/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -25,7 +25,6 @@ void LogMessageOnPrintf(const char *str) {}
 #endif
 void WriteToSyslog(const char *buffer) {}
 void Abort() { internal__exit(1); }
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
 #endif  // !SANITIZER_WINDOWS
 
 #if !SANITIZER_WINDOWS && !SANITIZER_MAC
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp
@@ -136,7 +136,7 @@ void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
   DDMutex *m0 = (DDMutex*)dd.getData(from);
   DDMutex *m1 = (DDMutex*)dd.getData(to);
 
-  u32 stk_from = -1U, stk_to = -1U;
+  u32 stk_from = 0, stk_to = 0;
   int unique_tid = 0;
   dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
   // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp
@@ -73,7 +73,7 @@ struct DDLogicalThread {
   int nlocked;
 };
 
-struct Mutex {
+struct MutexState {
   StaticSpinMutex mtx;
   u32 seq;
   int nlink;
@@ -101,12 +101,12 @@ struct DD final : public DDetector {
   void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
   void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
   u32 allocateId(DDCallback *cb);
-  Mutex *getMutex(u32 id);
-  u32 getMutexId(Mutex *m);
+  MutexState *getMutex(u32 id);
+  u32 getMutexId(MutexState *m);
 
   DDFlags flags;
 
-  Mutex* mutex[kL1Size];
+  MutexState *mutex[kL1Size];
 
   SpinMutex mtx;
   InternalMmapVector<u32> free_id;
@@ -152,13 +152,11 @@ void DD::MutexInit(DDCallback *cb, DDMutex *m) {
   atomic_store(&m->owner, 0, memory_order_relaxed);
 }
 
-Mutex *DD::getMutex(u32 id) {
-  return &mutex[id / kL2Size][id % kL2Size];
-}
+MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }
 
-u32 DD::getMutexId(Mutex *m) {
+u32 DD::getMutexId(MutexState *m) {
   for (int i = 0; i < kL1Size; i++) {
-    Mutex *tab = mutex[i];
+    MutexState *tab = mutex[i];
     if (tab == 0)
       break;
     if (m >= tab && m < tab + kL2Size)
@@ -176,8 +174,8 @@ u32 DD::allocateId(DDCallback *cb) {
   } else {
     CHECK_LT(id_gen, kMaxMutex);
     if ((id_gen % kL2Size) == 0) {
-      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
-          "deadlock detector (mutex table)");
+      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
+          kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
     }
     id = id_gen++;
   }
@@ -216,11 +214,11 @@ void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
   }
 
   bool added = false;
-  Mutex *mtx = getMutex(m->id);
+  MutexState *mtx = getMutex(m->id);
   for (int i = 0; i < lt->nlocked - 1; i++) {
     u32 id1 = lt->locked[i].id;
     u32 stk1 = lt->locked[i].stk;
-    Mutex *mtx1 = getMutex(id1);
+    MutexState *mtx1 = getMutex(id1);
     SpinMutexLock l(&mtx1->mtx);
     if (mtx1->nlink == kMaxLink) {
       // FIXME(dvyukov): check stale links
@@ -342,7 +340,7 @@ void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
 
   // Clear and invalidate the mutex descriptor.
   {
-    Mutex *mtx = getMutex(m->id);
+    MutexState *mtx = getMutex(m->id);
     SpinMutexLock l(&mtx->mtx);
     mtx->seq++;
     mtx->nlink = 0;
@@ -361,7 +359,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
   int npath = 0;
   int npending = 0;
   {
-    Mutex *mtx = getMutex(m->id);
+    MutexState *mtx = getMutex(m->id);
     SpinMutexLock l(&mtx->mtx);
     for (int li = 0; li < mtx->nlink; li++)
       pt->pending[npending++] = mtx->link[li];
@@ -374,7 +372,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
     }
     if (pt->visited[link.id])
      continue;
-    Mutex *mtx1 = getMutex(link.id);
+    MutexState *mtx1 = getMutex(link.id);
     SpinMutexLock l(&mtx1->mtx);
     if (mtx1->seq != link.seq)
       continue;
@@ -387,7 +385,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
       return Report(pt, lt, npath);  // Bingo!
     for (int li = 0; li < mtx1->nlink; li++) {
       Link *link1 = &mtx1->link[li];
-      // Mutex *mtx2 = getMutex(link->id);
+      // MutexState *mtx2 = getMutex(link->id);
       // FIXME(dvyukov): fast seq check
       // FIXME(dvyukov): fast nlink != 0 check
       // FIXME(dvyukov): fast pending check?
diff --git a/libsanitizer/sanitizer_common/sanitizer_errno.h b/libsanitizer/sanitizer_common/sanitizer_errno.h
@@ -23,8 +23,7 @@
 
 #if SANITIZER_FREEBSD || SANITIZER_MAC
 #  define __errno_location __error
-#elif SANITIZER_ANDROID || SANITIZER_NETBSD || \
-      SANITIZER_RTEMS
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD
 #  define __errno_location __errno
 #elif SANITIZER_SOLARIS
 #  define __errno_location ___errno
diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp
@@ -36,16 +36,11 @@ uptr internal_sched_yield() {
   return 0;  // Why doesn't this return void?
 }
 
-static void internal_nanosleep(zx_time_t ns) {
-  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+void internal_usleep(u64 useconds) {
+  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
   CHECK_EQ(status, ZX_OK);
 }
 
-unsigned int internal_sleep(unsigned int seconds) {
-  internal_nanosleep(ZX_SEC(seconds));
-  return 0;
-}
-
 u64 NanoTime() {
   zx_handle_t utc_clock = _zx_utc_reference_get();
   CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
@@ -78,10 +73,6 @@ void Abort() { abort(); }
 
 int Atexit(void (*function)(void)) { return atexit(function); }
 
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
-
-void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
-
 void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
   pthread_attr_t attr;
   CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
@@ -109,6 +100,18 @@ bool SignalContext::IsStackOverflow() const { return false; }
 void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
 const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
 
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
+                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+  if (status != ZX_ERR_BAD_STATE)  // Normal race.
+    CHECK_EQ(status, ZX_OK);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
+  CHECK_EQ(status, ZX_OK);
+}
+
 enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
 
 BlockingMutex::BlockingMutex() {
@@ -145,8 +148,8 @@ void BlockingMutex::Unlock() {
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
 
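Note: FutexWait/FutexWake introduce a portable park/unpark layer under the runtime's three-state mutex (MtxUnlocked/MtxLocked/MtxSleeping). A compilable sketch of that mutex shape, with the futex calls reduced to a yield loop for illustration (a real implementation parks in FutexWait and wakes in FutexWake):

    // futex_mutex_demo.cpp -- sketch only.
    #include <atomic>
    #include <thread>

    enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

    class DemoMutex {
     public:
      void Lock() {
        int expected = MtxUnlocked;
        // Fast path: uncontended acquire.
        if (state_.compare_exchange_strong(expected, MtxLocked,
                                           std::memory_order_acquire))
          return;
        // Slow path: mark contended; the real code calls FutexWait here.
        while (state_.exchange(MtxSleeping, std::memory_order_acquire) !=
               MtxUnlocked)
          std::this_thread::yield();
      }
      void Unlock() {
        if (state_.exchange(MtxUnlocked, std::memory_order_release) ==
            MtxSleeping) {
          // Waiters may be parked: the real code calls FutexWake here.
        }
      }

     private:
      std::atomic<int> state_{MtxUnlocked};
    };

    int main() {
      DemoMutex m;
      m.Lock();
      m.Unlock();
    }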
@@ -156,8 +159,10 @@ uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
 
 sanitizer_shadow_bounds_t ShadowBounds;
 
+void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }
+
 uptr GetMaxUserVirtualAddress() {
-  ShadowBounds = __sanitizer_shadow_bounds();
+  InitShadowBounds();
   return ShadowBounds.memory_limit - 1;
 }
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.h b/libsanitizer/sanitizer_common/sanitizer_fuchsia.h
@@ -30,6 +30,8 @@ struct MemoryMappingLayoutData {
   size_t current; // Current index into the vector.
 };
 
+void InitShadowBounds();
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_FUCHSIA
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -67,7 +67,8 @@ uptr internal_ftruncate(fd_t fd, uptr size);
 
 // OS
 void NORETURN internal__exit(int exitcode);
-unsigned int internal_sleep(unsigned int seconds);
+void internal_sleep(unsigned seconds);
+void internal_usleep(u64 useconds);
 
 uptr internal_getpid();
 uptr internal_getppid();
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cpp b/libsanitizer/sanitizer_common/sanitizer_libignore.cpp
@@ -84,6 +84,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
         ignored_code_ranges_[idx].begin = range.beg;
         ignored_code_ranges_[idx].end = range.end;
         atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
+        atomic_store(&enabled_, 1, memory_order_release);
         break;
       }
     }
@@ -114,6 +115,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
       instrumented_code_ranges_[idx].end = range.end;
       atomic_store(&instrumented_ranges_count_, idx + 1,
                    memory_order_release);
+      atomic_store(&enabled_, 1, memory_order_release);
     }
   }
 }
@@ -123,6 +125,29 @@ void LibIgnore::OnLibraryUnloaded() {
   OnLibraryLoaded(nullptr);
 }
 
+bool LibIgnore::IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const {
+  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+  for (uptr i = 0; i < n; i++) {
+    if (IsInRange(pc, ignored_code_ranges_[i])) {
+      *pc_in_ignored_lib = true;
+      return true;
+    }
+  }
+  *pc_in_ignored_lib = false;
+  if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+    return true;
+  return false;
+}
+
+bool LibIgnore::IsPcInstrumented(uptr pc) const {
+  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
+  for (uptr i = 0; i < n; i++) {
+    if (IsInRange(pc, instrumented_code_ranges_[i]))
+      return true;
+  }
+  return false;
+}
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
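Note: together with the header hunk below, this is a hot/cold split: IsIgnored stays inline but shrinks to a single acquire-load of a new enabled_ flag, and the range scans move out of line into IsIgnoredSlow/IsPcInstrumented, which only run once something has actually been registered. A sketch of the pattern (IgnoreSet is a hypothetical single-range simplification):

    // libignore_fastpath_demo.cpp -- sketch only.
    #include <atomic>
    #include <cstdint>

    using uptr = uintptr_t;

    class IgnoreSet {
     public:
      void Add(uptr beg, uptr end) {
        beg_ = beg;
        end_ = end;
        enabled_.store(1, std::memory_order_release);
      }
      // Hot path: one load and no scan while nothing is ignored.
      bool IsIgnored(uptr pc) const {
        if (enabled_.load(std::memory_order_acquire) == 0)  // likely
          return false;
        return IsIgnoredSlow(pc);
      }

     private:
      __attribute__((noinline)) bool IsIgnoredSlow(uptr pc) const {
        return pc >= beg_ && pc < end_;
      }
      std::atomic<uptr> enabled_{0};
      uptr beg_ = 0, end_ = 0;
    };

    int main() {
      IgnoreSet s;
      bool a = s.IsIgnored(0x1234);  // fast path
      s.Add(0x1000, 0x2000);
      bool b = s.IsIgnored(0x1234);  // slow path, now true
      return (!a && b) ? 0 : 1;
    }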
@ -45,9 +45,6 @@ class LibIgnore {
|
||||||
// "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
|
// "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
|
||||||
bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
|
bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
|
||||||
|
|
||||||
// Checks whether the provided PC belongs to an instrumented module.
|
|
||||||
bool IsPcInstrumented(uptr pc) const;
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
struct Lib {
|
struct Lib {
|
||||||
char *templ;
|
char *templ;
|
||||||
|
@@ -61,6 +58,10 @@ class LibIgnore {
     uptr end;
   };
 
+  // Checks whether the provided PC belongs to an instrumented module.
+  bool IsPcInstrumented(uptr pc) const;
+  bool IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const;
+
   inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
     return (pc >= range.begin && pc < range.end);
   }
@@ -70,6 +71,8 @@ class LibIgnore {
   static const uptr kMaxLibs = 1024;
 
   // Hot part:
+  atomic_uintptr_t enabled_;
+
   atomic_uintptr_t ignored_ranges_count_;
   LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];
 
@@ -87,27 +90,11 @@ class LibIgnore {
   void operator = (const LibIgnore&);  // not implemented
 };
 
-inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
-  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
-  for (uptr i = 0; i < n; i++) {
-    if (IsInRange(pc, ignored_code_ranges_[i])) {
-      *pc_in_ignored_lib = true;
-      return true;
-    }
-  }
-  *pc_in_ignored_lib = false;
-  if (track_instrumented_libs_ && !IsPcInstrumented(pc))
-    return true;
-  return false;
-}
-
-inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
-  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
-  for (uptr i = 0; i < n; i++) {
-    if (IsInRange(pc, instrumented_code_ranges_[i]))
-      return true;
-  }
+ALWAYS_INLINE
+bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+  if (LIKELY(atomic_load(&enabled_, memory_order_acquire) == 0))
+    return false;
+  return IsIgnoredSlow(pc, pc_in_ignored_lib);
 }
 
 }  // namespace __sanitizer
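The rewritten IsIgnored above reduces the common case to one acquire load of enabled_, which OnLibraryLoaded sets with a release store only after publishing a range. A reduced sketch of that publish/check pattern using standard atomics (FastPathGate and its members are illustrative names, not runtime symbols):

    #include <atomic>

    // The writer fills in its data, then sets the flag with release; readers
    // check the flag with acquire and only walk the data after seeing it set.
    struct FastPathGate {
      std::atomic<unsigned> enabled{0};

      void Publish() {
        // ... fill in ranges here, then make them visible:
        enabled.store(1, std::memory_order_release);
      }

      bool MaybeScan() {
        if (enabled.load(std::memory_order_acquire) == 0)
          return false;  // common case: nothing registered, skip the scan
        return true;     // slow path would walk the published ranges here
      }
    };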
@@ -430,13 +430,11 @@ uptr internal_sched_yield() {
   return internal_syscall(SYSCALL(sched_yield));
 }
 
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
   struct timespec ts;
-  ts.tv_sec = seconds;
-  ts.tv_nsec = 0;
-  int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
-  if (res) return ts.tv_sec;
-  return 0;
+  ts.tv_sec = useconds / 1000000;
+  ts.tv_nsec = (useconds % 1000000) * 1000;
+  internal_syscall(SYSCALL(nanosleep), &ts, &ts);
 }
 
 uptr internal_execve(const char *filename, char *const argv[],
@@ -641,11 +639,27 @@ char **GetEnviron() {
 }
 
 #if !SANITIZER_SOLARIS
-enum MutexState {
-  MtxUnlocked = 0,
-  MtxLocked = 1,
-  MtxSleeping = 2
-};
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+# if SANITIZER_FREEBSD
+  _umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
+# elif SANITIZER_NETBSD
+  sched_yield(); /* No userspace futex-like synchronization */
+# else
+  internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
+# endif
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+# if SANITIZER_FREEBSD
+  _umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
+# elif SANITIZER_NETBSD
+  /* No userspace futex-like synchronization */
+# else
+  internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
+# endif
+}
+
+enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
 
 BlockingMutex::BlockingMutex() {
   internal_memset(this, 0, sizeof(*this));
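The FutexWait/FutexWake pair added above parks and unparks threads on a 32-bit word; FUTEX_WAIT_PRIVATE blocks only while the word still holds the expected value, so a wake that lands between the caller's check and its sleep cannot be lost. A hedged sketch of the same pair using the raw Linux syscall (the runtime uses its internal_syscall wrapper rather than libc's syscall):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <atomic>
    #include <cstdint>

    // Block while *addr == expected. The kernel re-checks *addr under its own
    // lock, so a missed wakeup is impossible as long as the waker changes
    // *addr before calling futex_wake.
    static void futex_wait(std::atomic<uint32_t> *addr, uint32_t expected) {
      syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
    }

    // Wake up to `count` threads blocked on addr.
    static void futex_wake(std::atomic<uint32_t> *addr, int count) {
      syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, count, nullptr, nullptr, 0);
    }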
@@ -683,8 +697,8 @@ void BlockingMutex::Unlock() {
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
 # endif // !SANITIZER_SOLARIS
@@ -884,7 +898,7 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
   __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
   const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
   const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
-  k_set->sig[idx] &= ~(1 << bit);
+  k_set->sig[idx] &= ~((uptr)1 << bit);
 }
 
 bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {

@@ -894,7 +908,7 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
   __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
   const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
   const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
-  return k_set->sig[idx] & (1 << bit);
+  return k_set->sig[idx] & ((uptr)1 << bit);
 }
 #elif SANITIZER_FREEBSD
 void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
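The (uptr)1 casts above fix a width bug: sig[0] is a word-sized element, but the literal 1 is a 32-bit int, so the un-widened shift misbehaves for bit indices past the low word. A small illustration (mask_bad/mask_good are illustrative names):

    #include <cstdint>

    using uptr = uintptr_t;

    // With 64-bit words, bit indices run 0..63. The literal 1 is a 32-bit int,
    // so the un-cast shift is undefined for bit >= 31 and typically loses the
    // high half of the mask; widening first keeps all 64 bits.
    uint64_t mask_bad(unsigned bit)  { return 1 << bit; }        // UB for bit >= 31
    uint64_t mask_good(unsigned bit) { return (uptr)1 << bit; }  // correct on LP64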
@@ -203,7 +203,7 @@ void InitTlsSize() {
   g_use_dlpi_tls_data =
       GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
 
-#if defined(__x86_64__) || defined(__powerpc64__)
+#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
   void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
   size_t tls_align;
   ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
@@ -317,21 +317,44 @@ struct TlsBlock {
 };
 }  // namespace
 
+#ifdef __s390__
+extern "C" uptr __tls_get_offset(void *arg);
+
+static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
+  // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
+  // offset of a struct tls_index inside GOT. We don't possess either of the
+  // two, so violate the letter of the "ELF Handling For Thread-Local
+  // Storage" document and assume that the implementation just dereferences
+  // %r2 + %r12.
+  uptr tls_index[2] = {ti_module, ti_offset};
+  register uptr r2 asm("2") = 0;
+  register void *r12 asm("12") = tls_index;
+  asm("basr %%r14, %[__tls_get_offset]"
+      : "+r"(r2)
+      : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
+      : "memory", "cc", "0", "1", "3", "4", "5", "14");
+  return r2;
+}
+#else
 extern "C" void *__tls_get_addr(size_t *);
+#endif
 
 static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
                                   void *data) {
   if (!info->dlpi_tls_modid)
     return 0;
   uptr begin = (uptr)info->dlpi_tls_data;
-#ifndef __s390__
   if (!g_use_dlpi_tls_data) {
     // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
     // and FreeBSD.
+#ifdef __s390__
+    begin = (uptr)__builtin_thread_pointer() +
+            TlsGetOffset(info->dlpi_tls_modid, 0);
+#else
     size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
     begin = (uptr)__tls_get_addr(mod_and_off);
-  }
 #endif
+  }
   for (unsigned i = 0; i != info->dlpi_phnum; ++i)
     if (info->dlpi_phdr[i].p_type == PT_TLS) {
       static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
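CollectStaticTlsBlocks above runs as a dl_iterate_phdr callback: modules owning static TLS report dlpi_tls_modid != 0 and carry a PT_TLS program header. A standalone sketch of the same enumeration, printing instead of collecting (ShowTlsModule is an illustrative name; glibc exposes these dl_phdr_info fields):

    #include <link.h>  // dl_iterate_phdr, dl_phdr_info, PT_TLS
    #include <cstdio>

    // List every loaded module that owns a static TLS block.
    static int ShowTlsModule(struct dl_phdr_info *info, size_t, void *) {
      if (!info->dlpi_tls_modid)
        return 0;  // module has no TLS; keep iterating
      for (unsigned i = 0; i != info->dlpi_phnum; ++i)
        if (info->dlpi_phdr[i].p_type == PT_TLS)
          std::printf("modid %zu: %s (PT_TLS memsz=%zu)\n",
                      (size_t)info->dlpi_tls_modid, info->dlpi_name,
                      (size_t)info->dlpi_phdr[i].p_memsz);
      return 0;  // nonzero would stop the iteration early
    }

    int main() { dl_iterate_phdr(ShowTlsModule, nullptr); }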
@@ -427,12 +450,16 @@ static void GetTls(uptr *addr, uptr *size) {
     *size = 0;
   }
 #elif SANITIZER_GLIBC && defined(__x86_64__)
-  // For x86-64, use an O(1) approach which requires precise
-  // ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
+  // For aarch64 and x86-64, use an O(1) approach which requires relatively
+  // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
   asm("mov %%fs:16,%0" : "=r"(*addr));
   *size = g_tls_size;
   *addr -= *size;
   *addr += ThreadDescriptorSize();
+#elif SANITIZER_GLIBC && defined(__aarch64__)
+  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+          ThreadDescriptorSize();
+  *size = g_tls_size + ThreadDescriptorSize();
 #elif SANITIZER_GLIBC && defined(__powerpc64__)
   // Workaround for glibc<2.25(?). 2.27 is known to not need this.
   uptr tp;
@@ -732,13 +759,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
 
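The surviving branch of GetNumberOfCPUs above counts bits set in the scheduling affinity mask, so it reports CPUs the process may actually use, not CPUs installed. Minimal usage sketch of the same two calls:

    #include <sched.h>  // cpu_set_t, CPU_COUNT, sched_getaffinity
    #include <cstdio>

    int main() {
      cpu_set_t cpus;
      if (sched_getaffinity(0 /* this process */, sizeof(cpus), &cpus) != 0)
        return 1;
      // Can be less than the machine total under taskset or cgroup cpusets.
      std::printf("usable CPUs: %d\n", CPU_COUNT(&cpus));
    }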
@@ -37,7 +37,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -70,15 +70,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
 #include <os/log.h>
-#else
-  /* Without support for __builtin_os_log_format, fall back to the older
-     method. */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
-  asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
 #include <pthread.h>
 #include <sched.h>
 #include <signal.h>
@@ -227,9 +219,7 @@ void internal__exit(int exitcode) {
   _exit(exitcode);
 }
 
-unsigned int internal_sleep(unsigned int seconds) {
-  return sleep(seconds);
-}
+void internal_usleep(u64 useconds) { usleep(useconds); }
 
 uptr internal_getpid() {
   return getpid();
@@ -519,6 +509,13 @@ void MprotectMallocZones(void *addr, int prot) {
   }
 }
 
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+  // FIXME: implement actual blocking.
+  sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
 BlockingMutex::BlockingMutex() {
   internal_memset(this, 0, sizeof(*this));
 }
@@ -534,7 +531,7 @@ void BlockingMutex::Unlock() {
   OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
+void BlockingMutex::CheckLocked() const {
   CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
 }
 
@@ -14,26 +14,6 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
 
 #if SANITIZER_MAC
 #include "sanitizer_posix.h"
 
libsanitizer/sanitizer_common/sanitizer_mutex.cpp (new file, 39 lines)
@@ -0,0 +1,39 @@
+//===-- sanitizer_mutex.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_mutex.h"
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+void Semaphore::Wait() {
+  u32 count = atomic_load(&state_, memory_order_relaxed);
+  for (;;) {
+    if (count == 0) {
+      FutexWait(&state_, 0);
+      count = atomic_load(&state_, memory_order_relaxed);
+      continue;
+    }
+    if (atomic_compare_exchange_weak(&state_, &count, count - 1,
+                                     memory_order_acquire))
+      break;
+  }
+}
+
+void Semaphore::Post(u32 count) {
+  CHECK_NE(count, 0);
+  atomic_fetch_add(&state_, count, memory_order_release);
+  FutexWake(&state_, count);
+}
+
+}  // namespace __sanitizer
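The new Semaphore is a counting semaphore built directly on the futex pair: Post publishes with a release fetch-add and wakes waiters, while Wait CASes the count down with acquire and parks on FutexWait(&state_, 0) only when it observes zero. A hypothetical handoff in the runtime's style (Producer/Consumer/items are illustrative names, not runtime code; assumes the declarations above are in scope):

    // One-item handoff between two runtime threads.
    __sanitizer::Semaphore items;

    void Producer() {
      // ... enqueue one work item ...
      items.Post();  // release: publishes the item and wakes one waiter
    }

    void Consumer() {
      items.Wait();  // acquire: returns once an item has been posted
      // ... dequeue and process the item ...
    }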
@@ -16,30 +16,29 @@
 #include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_thread_safety.h"
 
 namespace __sanitizer {
 
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
  public:
   void Init() {
     atomic_store(&state_, 0, memory_order_relaxed);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     if (TryLock())
       return;
     LockSlow();
   }
 
-  bool TryLock() {
+  bool TryLock() TRY_ACQUIRE(true) {
     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
   }
 
-  void Unlock() {
-    atomic_store(&state_, 0, memory_order_release);
-  }
+  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED() {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
 
@@ -59,24 +58,223 @@ class StaticSpinMutex {
   }
 };
 
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
  public:
   SpinMutex() {
     Init();
   }
 
 private:
-  SpinMutex(const SpinMutex&);
-  void operator=(const SpinMutex&);
+  SpinMutex(const SpinMutex &) = delete;
+  void operator=(const SpinMutex &) = delete;
 };
 
-class BlockingMutex {
+// Semaphore provides an OS-dependent way to park/unpark threads.
+// The last thread returned from Wait can destroy the object
+// (destruction-safety).
+class Semaphore {
+ public:
+  constexpr Semaphore() {}
+  Semaphore(const Semaphore &) = delete;
+  void operator=(const Semaphore &) = delete;
+
+  void Wait();
+  void Post(u32 count = 1);
+
+ private:
+  atomic_uint32_t state_ = {0};
+};
+
+// Reader-writer mutex.
+class MUTEX Mutex2 {
+ public:
+  constexpr Mutex2() {}
+
+  void Lock() ACQUIRE() {
+    u64 reset_mask = ~0ull;
+    u64 state = atomic_load_relaxed(&state_);
+    const uptr kMaxSpinIters = 1500;
+    for (uptr spin_iters = 0;; spin_iters++) {
+      u64 new_state;
+      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
+      if (LIKELY(!locked)) {
+        // The mutex is not read-/write-locked, try to lock.
+        new_state = (state | kWriterLock) & reset_mask;
+      } else if (spin_iters > kMaxSpinIters) {
+        // We've spun enough, increment waiting writers count and block.
+        // The counter will be decremented by whoever wakes us.
+        new_state = (state + kWaitingWriterInc) & reset_mask;
+      } else if ((state & kWriterSpinWait) == 0) {
+        // Active spinning, but denote our presence so that unlocking
+        // thread does not wake up other threads.
+        new_state = state | kWriterSpinWait;
+      } else {
+        // Active spinning.
+        state = atomic_load(&state_, memory_order_relaxed);
+        continue;
+      }
+      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                 memory_order_acquire)))
+        continue;
+      if (LIKELY(!locked))
+        return;  // We've locked the mutex.
+      if (spin_iters > kMaxSpinIters) {
+        // We've incremented waiting writers, so now block.
+        writers_.Wait();
+        spin_iters = 0;
+        state = atomic_load(&state_, memory_order_relaxed);
+        DCHECK_NE(state & kWriterSpinWait, 0);
+      } else {
+        // We've set kWriterSpinWait, but we are still in active spinning.
+      }
+      // We either blocked and were unblocked,
+      // or we just spun but set kWriterSpinWait.
+      // Either way we need to reset kWriterSpinWait
+      // next time we take the lock or block again.
+      reset_mask = ~kWriterSpinWait;
+    }
+  }
+
+  void Unlock() RELEASE() {
+    bool wake_writer;
+    u64 wake_readers;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      DCHECK_NE(state & kWriterLock, 0);
+      DCHECK_EQ(state & kReaderLockMask, 0);
+      new_state = state & ~kWriterLock;
+      wake_writer =
+          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+      if (wake_writer)
+        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+      wake_readers =
+          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+              ? 0
+              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
+      if (wake_readers)
+        new_state = (new_state & ~kWaitingReaderMask) +
+                    (wake_readers << kReaderLockShift);
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_release)));
+    if (UNLIKELY(wake_writer))
+      writers_.Post();
+    else if (UNLIKELY(wake_readers))
+      readers_.Post(wake_readers);
+  }
+
+  void ReadLock() ACQUIRE_SHARED() {
+    bool locked;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      locked =
+          (state & kReaderLockMask) == 0 &&
+          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+      if (LIKELY(!locked))
+        new_state = state + kReaderLockInc;
+      else
+        new_state = state + kWaitingReaderInc;
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_acquire)));
+    if (UNLIKELY(locked))
+      readers_.Wait();
+    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
+    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+  }
+
+  void ReadUnlock() RELEASE_SHARED() {
+    bool wake;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      DCHECK_NE(state & kReaderLockMask, 0);
+      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+      new_state = state - kReaderLockInc;
+      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+             (new_state & kWaitingWriterMask) != 0;
+      if (wake)
+        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_release)));
+    if (UNLIKELY(wake))
+      writers_.Post();
+  }
+
+  // This function does not guarantee an explicit check that the calling thread
+  // is the thread which owns the mutex. This behavior, while more strictly
+  // correct, causes problems in cases like StopTheWorld, where a parent thread
+  // owns the mutex but a child checks that it is locked. Rather than
+  // maintaining complex state to work around those situations, the check only
+  // checks that the mutex is owned.
+  void CheckWriteLocked() const CHECK_LOCKED() {
+    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
+  }
+
+  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+
+  void CheckReadLocked() const CHECK_LOCKED() {
+    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
+  }
+
+ private:
+  atomic_uint64_t state_ = {0};
+  Semaphore writers_;
+  Semaphore readers_;
+
+  // The state has 3 counters:
+  //  - number of readers holding the lock,
+  //    if non zero, the mutex is read-locked
+  //  - number of waiting readers,
+  //    if not zero, the mutex is write-locked
+  //  - number of waiting writers,
+  //    if non zero, the mutex is read- or write-locked
+  // And 2 flags:
+  //  - writer lock
+  //    if set, the mutex is write-locked
+  //  - a writer is awake and spin-waiting
+  //    the flag is used to prevent thundering herd problem
+  //    (new writers are not woken if this flag is set)
+  //
+  // Writer support active spinning, readers does not.
+  // But readers are more aggressive and always take the mutex
+  // if there are any other readers.
+  // Writers hand off the mutex to readers: after wake up readers
+  // already assume ownership of the mutex (don't need to do any
+  // state updates). But the mutex is not handed off to writers,
+  // after wake up writers compete to lock the mutex again.
+  // This is needed to allow repeated write locks even in presence
+  // of other blocked writers.
+  static constexpr u64 kCounterWidth = 20;
+  static constexpr u64 kReaderLockShift = 0;
+  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
+  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
+                                         << kReaderLockShift;
+  static constexpr u64 kWaitingReaderShift = kCounterWidth;
+  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
+  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
+                                            << kWaitingReaderShift;
+  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
+  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
+  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
+                                            << kWaitingWriterShift;
+  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
+  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+
+  Mutex2(const Mutex2 &) = delete;
+  void operator=(const Mutex2 &) = delete;
+};
+
+void FutexWait(atomic_uint32_t *p, u32 cmp);
+void FutexWake(atomic_uint32_t *p, u32 count);
+
+class MUTEX BlockingMutex {
  public:
   explicit constexpr BlockingMutex(LinkerInitialized)
       : opaque_storage_ {0, }, owner_ {0} {}
   BlockingMutex();
-  void Lock();
-  void Unlock();
+  void Lock() ACQUIRE();
+  void Unlock() RELEASE();
 
   // This function does not guarantee an explicit check that the calling thread
   // is the thread which owns the mutex. This behavior, while more strictly
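Mutex2 above packs its whole protocol into one u64 so that every transition is a single CAS: three 20-bit counters (reader lock count, waiting readers, waiting writers) followed by the writer-lock and writer-spin-wait flags at bits 60 and 61. A sketch verifying that layout, with the constants mirrored here purely for illustration:

    #include <cstdint>

    // Mirror of the Mutex2 state layout.
    constexpr uint64_t kCounterWidth = 20;
    constexpr uint64_t kReaderLockMask    = (1ull << kCounterWidth) - 1;           // bits  0..19
    constexpr uint64_t kWaitingReaderMask = kReaderLockMask << kCounterWidth;      // bits 20..39
    constexpr uint64_t kWaitingWriterMask = kReaderLockMask << (2 * kCounterWidth); // bits 40..59
    constexpr uint64_t kWriterLock     = 1ull << (3 * kCounterWidth);              // bit  60
    constexpr uint64_t kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);          // bit  61

    // Example decode: 3 readers hold the lock and 1 writer is queued.
    constexpr uint64_t state = 3 | (1ull << (2 * kCounterWidth));
    static_assert((state & kReaderLockMask) == 3, "3 readers hold the lock");
    static_assert(((state & kWaitingWriterMask) >> (2 * kCounterWidth)) == 1,
                  "1 writer queued");
    static_assert((state & kWriterLock) == 0, "not write-locked");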
@@ -85,7 +283,7 @@ class BlockingMutex {
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned, and assumes callers to be generally
   // well-behaved.
-  void CheckLocked();
+  void CheckLocked() const CHECK_LOCKED();
 
  private:
   // Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +292,7 @@ class BlockingMutex {
 };
 
 // Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
  public:
   RWMutex() {
     atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +302,7 @@ class RWMutex {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     u32 cmp = kUnlocked;
     if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                        memory_order_acquire))
@@ -112,27 +310,27 @@ class RWMutex {
     LockSlow();
   }
 
-  void Unlock() {
+  void Unlock() RELEASE() {
     u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
     DCHECK_NE(prev & kWriteLock, 0);
     (void)prev;
   }
 
-  void ReadLock() {
+  void ReadLock() ACQUIRE_SHARED() {
     u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
     if ((prev & kWriteLock) == 0)
       return;
     ReadLockSlow();
   }
 
-  void ReadUnlock() {
+  void ReadUnlock() RELEASE_SHARED() {
     u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
     DCHECK_EQ(prev & kWriteLock, 0);
     DCHECK_GT(prev & ~kWriteLock, 0);
     (void)prev;
   }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED() {
     CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
RWMutex(const RWMutex&);
|
RWMutex(const RWMutex &) = delete;
|
||||||
void operator = (const RWMutex&);
|
void operator=(const RWMutex &) = delete;
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename MutexType>
|
template <typename MutexType>
|
||||||
class GenericScopedLock {
|
class SCOPED_LOCK GenericScopedLock {
|
||||||
public:
|
public:
|
||||||
explicit GenericScopedLock(MutexType *mu)
|
explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
|
||||||
: mu_(mu) {
|
|
||||||
mu_->Lock();
|
mu_->Lock();
|
||||||
}
|
}
|
||||||
|
|
||||||
~GenericScopedLock() {
|
~GenericScopedLock() RELEASE() { mu_->Unlock(); }
|
||||||
mu_->Unlock();
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
MutexType *mu_;
|
MutexType *mu_;
|
||||||
|
|
||||||
GenericScopedLock(const GenericScopedLock&);
|
GenericScopedLock(const GenericScopedLock &) = delete;
|
||||||
void operator=(const GenericScopedLock&);
|
void operator=(const GenericScopedLock &) = delete;
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename MutexType>
|
template <typename MutexType>
|
||||||
class GenericScopedReadLock {
|
class SCOPED_LOCK GenericScopedReadLock {
|
||||||
public:
|
public:
|
||||||
explicit GenericScopedReadLock(MutexType *mu)
|
explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
|
||||||
: mu_(mu) {
|
|
||||||
mu_->ReadLock();
|
mu_->ReadLock();
|
||||||
}
|
}
|
||||||
|
|
||||||
~GenericScopedReadLock() {
|
~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
|
||||||
mu_->ReadUnlock();
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
MutexType *mu_;
|
MutexType *mu_;
|
||||||
|
|
||||||
GenericScopedReadLock(const GenericScopedReadLock&);
|
GenericScopedReadLock(const GenericScopedReadLock &) = delete;
|
||||||
void operator=(const GenericScopedReadLock&);
|
void operator=(const GenericScopedReadLock &) = delete;
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
|
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
|
||||||
|
|
|
@@ -215,15 +215,12 @@ void internal__exit(int exitcode) {
   Die();  // Unreachable.
 }
 
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
   struct timespec ts;
-  ts.tv_sec = seconds;
-  ts.tv_nsec = 0;
+  ts.tv_sec = useconds / 1000000;
+  ts.tv_nsec = (useconds % 1000000) * 1000;
   CHECK(&_sys___nanosleep50);
-  int res = _sys___nanosleep50(&ts, &ts);
-  if (res)
-    return ts.tv_sec;
-  return 0;
+  _sys___nanosleep50(&ts, &ts);
 }
 
 uptr internal_execve(const char *filename, char *const argv[],
@@ -13,8 +13,7 @@
 #define SANITIZER_PLATFORM_H
 
 #if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
-    !defined(__APPLE__) && !defined(_WIN32) && \
-    !defined(__Fuchsia__) && !defined(__rtems__) && \
+    !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \
     !(defined(__sun__) && defined(__svr4__))
 # error "This operating system is not supported"
 #endif
@@ -117,12 +116,6 @@
 # define SANITIZER_FUCHSIA 0
 #endif
 
-#if defined(__rtems__)
-# define SANITIZER_RTEMS 1
-#else
-# define SANITIZER_RTEMS 0
-#endif
-
 #define SANITIZER_POSIX \
   (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
    SANITIZER_NETBSD || SANITIZER_SOLARIS)
@@ -226,12 +219,6 @@
 # define SANITIZER_SOLARIS32 0
 #endif
 
-#if defined(__myriad2__)
-# define SANITIZER_MYRIAD2 1
-#else
-# define SANITIZER_MYRIAD2 0
-#endif
-
 #if defined(__riscv) && (__riscv_xlen == 64)
 #define SANITIZER_RISCV64 1
 #else
@@ -374,8 +361,8 @@
 # define SANITIZER_CACHE_LINE_SIZE 64
 #endif
 
-// Enable offline markup symbolizer for Fuchsia and RTEMS.
-#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+// Enable offline markup symbolizer for Fuchsia.
+#if SANITIZER_FUCHSIA
 # define SANITIZER_SYMBOLIZER_MARKUP 1
 #else
 #define SANITIZER_SYMBOLIZER_MARKUP 0
@@ -114,12 +114,6 @@
 #define SI_NOT_FUCHSIA 1
 #endif
 
-#if SANITIZER_RTEMS
-#define SI_NOT_RTEMS 0
-#else
-#define SI_NOT_RTEMS 1
-#endif
-
 #if SANITIZER_SOLARIS
 #define SI_SOLARIS 1
 #else
@@ -482,13 +476,12 @@
 #define SANITIZER_INTERCEPT_MMAP SI_POSIX
 #define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
 #define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
-#define SANITIZER_INTERCEPT_MEMALIGN \
-  (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
 #define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
 #define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
 #define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64)
 #define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
-#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
 #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
 #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
 #define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
@@ -584,6 +577,7 @@
   (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
 #define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
 #define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
+#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
 
 // This macro gives a way for downstream users to override the above
 // interceptor macros irrespective of the platform they are on. They have
@@ -26,12 +26,9 @@
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
 #include <sys/stat.h>
 #else
 #define ino_t __kernel_ino_t
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__mips__)
 const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
                                            ? FIRST_32_SECOND_64(104, 128)
-                                           : FIRST_32_SECOND_64(144, 216);
+                                           : FIRST_32_SECOND_64(160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
@@ -650,15 +650,15 @@ struct __sanitizer_sigaction {
 #endif // !SANITIZER_ANDROID
 
 #if defined(__mips__)
-struct __sanitizer_kernel_sigset_t {
-  uptr sig[2];
-};
+#define __SANITIZER_KERNEL_NSIG 128
 #else
-struct __sanitizer_kernel_sigset_t {
-  u8 sig[8];
-};
+#define __SANITIZER_KERNEL_NSIG 64
 #endif
 
+struct __sanitizer_kernel_sigset_t {
+  uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)];
+};
+
 // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
 #if SANITIZER_MIPS
 struct __sanitizer_kernel_sigaction_t {
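The unified __sanitizer_kernel_sigset_t above sizes its array from the kernel signal count: 128 signals on mips, 64 elsewhere, packed into word-sized elements. A sketch checking the arithmetic (KernelSigset is an illustrative stand-in, not the runtime type):

    #include <cstdint>

    using uptr = uintptr_t;

    // sig[kNSig / (sizeof(uptr) * 8)]:
    //   mips:   128 bits -> 2 words on LP64 (the old explicit uptr sig[2])
    //   others:  64 bits -> 1 word on LP64 (same 8 bytes as the old u8 sig[8])
    template <unsigned kNSig>
    struct KernelSigset {
      uptr sig[kNSig / (sizeof(uptr) * 8)];
    };

    static_assert(sizeof(KernelSigset<128>) == 16, "128 bits = 16 bytes");
    static_assert(sizeof(KernelSigset<64>) == 8, "64 bits = 8 bytes");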
@@ -128,14 +128,6 @@ void SetAddressSpaceUnlimited() {
   CHECK(AddressSpaceIsUnlimited());
 }
 
-void SleepForSeconds(int seconds) {
-  sleep(seconds);
-}
-
-void SleepForMillis(int millis) {
-  usleep(millis * 1000);
-}
-
 void Abort() {
 #if !SANITIZER_GO
   // If we are handling SIGABRT, unhandle it first.
@@ -166,9 +158,10 @@ bool SupportsColoredOutput(fd_t fd) {
 #if !SANITIZER_GO
 // TODO(glider): different tools may require different altstack size.
 static uptr GetAltStackSize() {
-  // SIGSTKSZ is not enough.
-  static const uptr kAltStackSize = SIGSTKSZ * 4;
-  return kAltStackSize;
+  // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
+  // more costly that you think. However GetAltStackSize is only call 2-3 times
+  // per thread so don't cache the evaluation.
+  return SIGSTKSZ * 4;
 }
 
 void SetAlternateSignalStack() {
@@ -20,6 +20,10 @@
 #include <stdio.h>
 #include <stdarg.h>
 
+#if defined(__x86_64__)
+#  include <emmintrin.h>
+#endif
+
 #if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
     !defined(va_copy)
 # define va_copy(dst, src) ((dst) = (src))
@@ -128,7 +132,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
 int VSNPrintf(char *buff, int buff_length,
               const char *format, va_list args) {
   static const char *kPrintfFormatsHelp =
-      "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+      "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
       "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
   RAW_CHECK(format);
   RAW_CHECK(buff_length > 0);
|
||||||
cur += have_z;
|
cur += have_z;
|
||||||
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
|
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
|
||||||
cur += have_ll * 2;
|
cur += have_ll * 2;
|
||||||
s64 dval;
|
|
||||||
u64 uval;
|
|
||||||
const bool have_length = have_z || have_ll;
|
const bool have_length = have_z || have_ll;
|
||||||
const bool have_flags = have_width || have_length;
|
const bool have_flags = have_width || have_length;
|
||||||
// At the moment only %s supports precision and left-justification.
|
// At the moment only %s supports precision and left-justification.
|
||||||
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
|
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
|
||||||
switch (*cur) {
|
switch (*cur) {
|
||||||
case 'd': {
|
case 'd': {
|
||||||
dval = have_ll ? va_arg(args, s64)
|
s64 dval = have_ll ? va_arg(args, s64)
|
||||||
: have_z ? va_arg(args, sptr)
|
: have_z ? va_arg(args, sptr)
|
||||||
: va_arg(args, int);
|
: va_arg(args, int);
|
||||||
result += AppendSignedDecimal(&buff, buff_end, dval, width,
|
result += AppendSignedDecimal(&buff, buff_end, dval, width,
|
||||||
|
@ -180,7 +182,7 @@ int VSNPrintf(char *buff, int buff_length,
|
||||||
case 'u':
|
case 'u':
|
||||||
case 'x':
|
case 'x':
|
||||||
case 'X': {
|
case 'X': {
|
||||||
uval = have_ll ? va_arg(args, u64)
|
u64 uval = have_ll ? va_arg(args, u64)
|
||||||
: have_z ? va_arg(args, uptr)
|
: have_z ? va_arg(args, uptr)
|
||||||
: va_arg(args, unsigned);
|
: va_arg(args, unsigned);
|
||||||
bool uppercase = (*cur == 'X');
|
bool uppercase = (*cur == 'X');
|
||||||
|
@@ -188,6 +190,13 @@ int VSNPrintf(char *buff, int buff_length,
                                   width, pad_with_zero, uppercase);
         break;
       }
+      case 'V': {
+        for (uptr i = 0; i < 16; i++) {
+          unsigned x = va_arg(args, unsigned);
+          result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
+        }
+        break;
+      }
       case 'p': {
         RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
         result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
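The new %V directive above pulls sixteen unsigned arguments and appends each as a two-digit, zero-padded lowercase hex value, i.e. a 16-byte blob printed byte by byte. A standalone sketch of the same formatting loop (PrintV is an illustrative name; the runtime routes through AppendUnsigned instead of printf):

    #include <cstdio>

    // Print 16 bytes the way the %V directive does: each byte as exactly two
    // lowercase hex digits, no separators -- 32 hex digits total.
    static void PrintV(const unsigned char (&bytes)[16]) {
      for (unsigned i = 0; i < 16; i++)
        std::printf("%02x", bytes[i]);
      std::printf("\n");
    }

    int main() {
      unsigned char tag[16] = {0xde, 0xad, 0xbe, 0xef};  // rest zero-filled
      PrintV(tag);  // prints "deadbeef" followed by 24 zeros
    }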
@@ -149,7 +149,8 @@ class Quarantine {
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(uptr min_size, Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+      RELEASE(recycle_mutex_) {
     Cache tmp;
     {
       SpinMutexLock l(&cache_mutex_);
@@ -1,281 +0,0 @@
-//===-- sanitizer_rtems.cpp -----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// implements RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_rtems.h"
-#if SANITIZER_RTEMS
-
-#define posix_memalign __real_posix_memalign
-#define free __real_free
-#define memset __real_memset
-
-#include "sanitizer_file.h"
-#include "sanitizer_symbolizer.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-// There is no mmap on RTEMS. Use memalign, etc.
-#define __mmap_alloc_aligned posix_memalign
-#define __mmap_free free
-#define __mmap_memset memset
-
-namespace __sanitizer {
-
-#include "sanitizer_syscall_generic.inc"
-
-void NORETURN internal__exit(int exitcode) {
-  _exit(exitcode);
-}
-
-uptr internal_sched_yield() {
-  return sched_yield();
-}
-
-uptr internal_getpid() {
-  return getpid();
-}
-
-int internal_dlinfo(void *handle, int request, void *p) {
-  UNIMPLEMENTED();
-}
-
-bool FileExists(const char *filename) {
-  struct stat st;
-  if (stat(filename, &st))
-    return false;
-  // Sanity check: filename is a regular file.
-  return S_ISREG(st.st_mode);
-}
-
-uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }
-
-tid_t GetTid() { return GetThreadSelf(); }
-
-void Abort() { abort(); }
-
-int Atexit(void (*function)(void)) { return atexit(function); }
-
-void SleepForSeconds(int seconds) { sleep(seconds); }
-
-void SleepForMillis(int millis) { usleep(millis * 1000); }
-
-bool SupportsColoredOutput(fd_t fd) { return false; }
-
-void GetThreadStackTopAndBottom(bool at_initialization,
-                                uptr *stack_top, uptr *stack_bottom) {
-  pthread_attr_t attr;
-  pthread_attr_init(&attr);
-  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
-  void *base = nullptr;
-  size_t size = 0;
-  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
-  CHECK_EQ(pthread_attr_destroy(&attr), 0);
-
-  *stack_bottom = reinterpret_cast<uptr>(base);
-  *stack_top = *stack_bottom + size;
-}
-
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
-                          uptr *tls_addr, uptr *tls_size) {
-  uptr stack_top, stack_bottom;
-  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
-  *stk_addr = stack_bottom;
-  *stk_size = stack_top - stack_bottom;
-  *tls_addr = *tls_size = 0;
-}
-
-void InitializePlatformEarly() {}
-void MaybeReexec() {}
-void CheckASLR() {}
-void CheckMPROTECT() {}
-void DisableCoreDumperIfNecessary() {}
-void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
-void SetAlternateSignalStack() {}
-void UnsetAlternateSignalStack() {}
-void InitTlsSize() {}
-
-void SignalContext::DumpAllRegisters(void *context) {}
-const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
-
-enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK_EQ(owner_, 0);
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
-    return;
-  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-    internal_sched_yield();
-  }
-}
-
-void BlockingMutex::Unlock() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
-  CHECK_NE(v, MtxUnlocked);
-}
-
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
-uptr GetPageSize() { return getpagesize(); }
-
-uptr GetMmapGranularity() { return GetPageSize(); }
-
-uptr GetMaxVirtualAddress() {
-  return (1ULL << 32) - 1;  // 0xffffffff
-}
-
-void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
-  void* ptr = 0;
-  int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
-  if (UNLIKELY(res))
-    ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report);
-  __mmap_memset(ptr, 0, size);
-  IncreaseTotalMmap(size);
-  return ptr;
-}
-
-void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
-  void* ptr = 0;
-  int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
-  if (UNLIKELY(res)) {
-    if (res == ENOMEM)
-      return nullptr;
-    ReportMmapFailureAndDie(size, mem_type, "allocate", false);
-  }
-  __mmap_memset(ptr, 0, size);
-  IncreaseTotalMmap(size);
-  return ptr;
-}
-
-void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
-                                   const char *mem_type) {
-  CHECK(IsPowerOfTwo(size));
-  CHECK(IsPowerOfTwo(alignment));
-  void* ptr = 0;
-  int res = __mmap_alloc_aligned(&ptr, alignment, size);
-  if (res)
-    ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false);
-  __mmap_memset(ptr, 0, size);
-  IncreaseTotalMmap(size);
-  return ptr;
-}
-
-void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
-  return MmapOrDie(size, mem_type, false);
-}
-
-void UnmapOrDie(void *addr, uptr size) {
-  if (!addr || !size) return;
-  __mmap_free(addr);
-  DecreaseTotalMmap(size);
-}
-
-fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
-  int flags;
-  switch (mode) {
-    case RdOnly: flags = O_RDONLY; break;
-    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
-    case RdWr: flags = O_RDWR | O_CREAT; break;
-  }
-  fd_t res = open(filename, flags, 0660);
-  if (internal_iserror(res, errno_p))
-    return kInvalidFd;
-  return res;
-}
-
-void CloseFile(fd_t fd) {
-  close(fd);
-}
-
-bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
-                  error_t *error_p) {
-  uptr res = read(fd, buff, buff_size);
-  if (internal_iserror(res, error_p))
-    return false;
-  if (bytes_read)
-    *bytes_read = res;
-  return true;
-}
-
-bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
-                 error_t *error_p) {
-  uptr res = write(fd, buff, buff_size);
-  if (internal_iserror(res, error_p))
-    return false;
-  if (bytes_written)
-    *bytes_written = res;
-  return true;
-}
-
-void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
-void DumpProcessMap() {}
-
-// There is no page protection so everything is "accessible."
-bool IsAccessibleMemoryRange(uptr beg, uptr size) {
-  return true;
-}
-
-char **GetArgv() { return nullptr; }
-char **GetEnviron() { return nullptr; }
-
-const char *GetEnv(const char *name) {
-  return getenv(name);
-}
-
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
-  internal_strncpy(buf, "StubBinaryName", buf_len);
-  return internal_strlen(buf);
-}
-
-uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
-  internal_strncpy(buf, "StubProcessName", buf_len);
-  return internal_strlen(buf);
-}
-
-bool IsPathSeparator(const char c) {
-  return c == '/';
-}
-
-bool IsAbsolutePath(const char *path) {
-  return path != nullptr && IsPathSeparator(path[0]);
-}
-
-void ReportFile::Write(const char *buffer, uptr length) {
-  SpinMutexLock l(mu);
-  static const char *kWriteError =
-      "ReportFile::Write() can't output requested buffer!\n";
-  ReopenIfNecessary();
-  if (length != write(fd, buffer, length)) {
-    write(fd, kWriteError, internal_strlen(kWriteError));
-    Die();
-  }
-}
-
-uptr MainThreadStackBase, MainThreadStackSize;
-uptr MainThreadTlsBase, MainThreadTlsSize;
-
-} // namespace __sanitizer
-
-#endif  // SANITIZER_RTEMS
@@ -1,20 +0,0 @@
-//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// provides definitions for RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_RTEMS_H
-#define SANITIZER_RTEMS_H
-
-#include "sanitizer_platform.h"
-#if SANITIZER_RTEMS
-#include "sanitizer_common.h"
-
-#endif  // SANITIZER_RTEMS
-#endif  // SANITIZER_RTEMS_H
@@ -160,6 +160,13 @@ DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
   return sched_yield();
 }

+DECLARE__REAL_AND_INTERNAL(void, usleep, u64 useconds) {
+  struct timespec ts;
+  ts.tv_sec = useconds / 1000000;
+  ts.tv_nsec = (useconds % 1000000) * 1000;
+  nanosleep(&ts, nullptr);
+}
+
 DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
                            char *const argv[], char *const envp[]) {
   return _REAL(execve)(filename, argv, envp);
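The added internal usleep is just the usual microsecond-to-timespec split. A standalone sketch of the same conversion in plain POSIX C++ (the function name here is illustrative, not the library's macro machinery):

#include <time.h>

// Sleep for 'useconds' microseconds via nanosleep, as the hunk above does:
// whole seconds go to tv_sec, the remainder is scaled to nanoseconds.
void usleep_via_nanosleep(unsigned long long useconds) {
  timespec ts;
  ts.tv_sec = static_cast<time_t>(useconds / 1000000);
  ts.tv_nsec = static_cast<long>((useconds % 1000000) * 1000);
  nanosleep(&ts, nullptr);
}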
@@ -211,6 +218,13 @@ uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
 }

 // ----------------- sanitizer_common.h
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+  // FIXME: implement actual blocking.
+  sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
 BlockingMutex::BlockingMutex() {
   CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
   internal_memset(this, 0, sizeof(*this));
@@ -231,9 +245,7 @@ void BlockingMutex::Unlock() {
   CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
 }

-void BlockingMutex::CheckLocked() {
-  CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }

 }  // namespace __sanitizer
@@ -85,8 +85,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
   // Nope, this does not look right either. This means the frame after next does
   // not have a valid frame pointer, but we can still extract the caller PC.
   // Unfortunately, there is no way to decide between GCC and LLVM frame
-  // layouts. Assume GCC.
-  return bp_prev - 1;
+  // layouts. Assume LLVM.
+  return bp_prev;
 #else
   return (uhwptr*)bp;
 #endif
@@ -109,21 +109,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
          IsAligned((uptr)frame, sizeof(*frame)) &&
          size < max_depth) {
 #ifdef __powerpc__
-    // PowerPC ABIs specify that the return address is saved on the
-    // *caller's* stack frame.  Thus we must dereference the back chain
-    // to find the caller frame before extracting it.
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame.  Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
     uhwptr *caller_frame = (uhwptr*)frame[0];
     if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
         !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
       break;
-    // For most ABIs the offset where the return address is saved is two
-    // register sizes.  The exception is the SVR4 ABI, which uses an
-    // offset of only one register size.
-#ifdef _CALL_SYSV
-    uhwptr pc1 = caller_frame[1];
-#else
     uhwptr pc1 = caller_frame[2];
-#endif
 #elif defined(__s390__)
     uhwptr pc1 = frame[14];
 #elif defined(__riscv)
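The comment above describes the whole trick of the PowerPC fast unwind: the saved return address lives in the *caller's* frame, so each step first follows the back chain at frame[0] and only then reads the saved LR two register slots (16 bytes on 64-bit) into that frame. A minimal standalone sketch of that walk, with hypothetical helper names standing in for the library's IsValidFrame/IsAligned checks:

#include <cstddef>
#include <cstdint>

// Rough stand-in for the real frame-validity checks.
static bool PlausibleFrame(uintptr_t p, uintptr_t top, uintptr_t bottom) {
  return p > bottom && p + 2 * sizeof(uintptr_t) < top &&
         p % sizeof(uintptr_t) == 0;
}

size_t WalkPpc64BackChain(uintptr_t bp, uintptr_t stack_top,
                          uintptr_t stack_bottom, uintptr_t *pcs,
                          size_t max_depth) {
  size_t n = 0;
  uintptr_t *frame = reinterpret_cast<uintptr_t *>(bp);
  while (n < max_depth && PlausibleFrame(reinterpret_cast<uintptr_t>(frame),
                                         stack_top, stack_bottom)) {
    uintptr_t *caller = reinterpret_cast<uintptr_t *>(frame[0]);  // back chain
    if (!PlausibleFrame(reinterpret_cast<uintptr_t>(caller), stack_top,
                        stack_bottom))
      break;
    pcs[n++] = caller[2];  // saved LR at offset 16 of the caller's frame
    frame = caller;        // continue the walk from the caller
  }
  return n;
}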
@@ -12,6 +12,7 @@
 #ifndef SANITIZER_STACKTRACE_H
 #define SANITIZER_STACKTRACE_H

+#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_platform.h"
@@ -32,7 +33,7 @@ static const u32 kStackTraceMax = 256;
 // Fast unwind is the only option on Mac for now; we will need to
 // revisit this macro when slow unwind works on Mac, see
 // https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC || SANITIZER_RTEMS
+#if SANITIZER_MAC
 # define SANITIZER_CAN_SLOW_UNWIND 0
 #else
 # define SANITIZER_CAN_SLOW_UNWIND 1
@@ -56,6 +57,16 @@ struct StackTrace {
   // Prints a symbolized stacktrace, followed by an empty line.
   void Print() const;

+  // Prints a symbolized stacktrace to the output string, followed by an empty
+  // line.
+  void PrintTo(InternalScopedString *output) const;
+
+  // Prints a symbolized stacktrace to the output buffer, followed by an empty
+  // line. Returns the number of symbols that should have been written to buffer
+  // (not including trailing '\0'). Thus, the string is truncated iff return
+  // value is not less than "out_buf_size".
+  uptr PrintTo(char *out_buf, uptr out_buf_size) const;
+
   static bool WillUseFastUnwind(bool request_fast_unwind) {
     if (!SANITIZER_CAN_FAST_UNWIND)
       return false;
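The truncation contract of the buffer-taking PrintTo is the same as snprintf's: a return value not less than the buffer size means the output was cut short, and the value tells you how big the buffer must be. A small sketch of the resulting size-then-retry pattern, demonstrated with snprintf since it shares the contract (names here are illustrative):

#include <cstdio>
#include <string>
#include <vector>

std::string RenderWithRetry(const char *msg) {
  std::vector<char> buf(64);
  size_t needed = (size_t)snprintf(buf.data(), buf.size(), "%s", msg);
  if (needed >= buf.size()) {  // truncated: grow to the reported size, retry
    buf.resize(needed + 1);
    snprintf(buf.data(), buf.size(), "%s", msg);
  }
  return std::string(buf.data());
}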
@@ -185,5 +196,26 @@ static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
   uptr local_stack;                     \
   uptr sp = (uptr)&local_stack

+// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
+// Optimized x86 version is faster than GetCurrentPc because
+// it does not involve a function call, instead it reads RIP register.
+// Reads of RIP by an instruction return RIP pointing to the next
+// instruction, which is exactly what we want here, thus 0 offset.
+// It needs to be a macro because otherwise we will get the name
+// of this function on the top of most stacks. Attribute artificial
+// does not do what it claims to do, unfortunately. And attribute
+// __nodebug__ is clang-only. If we would have an attribute that
+// would remove this function from debug info, we could simply make
+// StackTrace::GetCurrentPc() faster.
+#if defined(__x86_64__)
+#  define GET_CURRENT_PC()                \
+    ({                                    \
+      uptr pc;                            \
+      asm("lea 0(%%rip), %0" : "=r"(pc)); \
+      pc;                                 \
+    })
+#else
+#  define GET_CURRENT_PC() StackTrace::GetCurrentPc()
+#endif
+
 #endif  // SANITIZER_STACKTRACE_H
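The macro's whole effect can be seen in isolation: 'lea 0(%rip)' materializes the address of the next instruction with no call, so nothing extra lands on the stack or in backtraces. A self-contained illustration (x86-64 with GCC or Clang only):

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t pc;
  // Same technique as GET_CURRENT_PC: read RIP directly via lea.
  asm("lea 0(%%rip), %0" : "=r"(pc));
  printf("current pc: %#lx\n", (unsigned long)pc);
  return 0;
}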
@@ -18,46 +18,119 @@
 namespace __sanitizer {

-void StackTrace::Print() const {
-  if (trace == nullptr || size == 0) {
-    Printf("    <empty stack>\n\n");
-    return;
-  }
-  InternalScopedString frame_desc;
-  InternalScopedString dedup_token;
-  int dedup_frames = common_flags()->dedup_token_length;
-  bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
-  uptr frame_num = 0;
-  for (uptr i = 0; i < size && trace[i]; i++) {
-    // PCs in stack traces are actually the return addresses, that is,
-    // addresses of the next instructions after the call.
-    uptr pc = GetPreviousInstructionPc(trace[i]);
-    SymbolizedStack *frames;
-    if (symbolize)
-      frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
-    else
-      frames = SymbolizedStack::New(pc);
-    CHECK(frames);
-    for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
-      frame_desc.clear();
-      RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
-                  cur->info.address, symbolize ? &cur->info : nullptr,
-                  common_flags()->symbolize_vs_style,
-                  common_flags()->strip_path_prefix);
-      Printf("%s\n", frame_desc.data());
-      if (dedup_frames-- > 0) {
-        if (dedup_token.length())
-          dedup_token.append("--");
-        if (cur->info.function != nullptr)
-          dedup_token.append(cur->info.function);
-      }
-    }
-    frames->ClearAll();
-  }
-  // Always print a trailing empty line after stack trace.
-  Printf("\n");
-  if (dedup_token.length())
-    Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
-}
+namespace {
+
+class StackTraceTextPrinter {
+ public:
+  StackTraceTextPrinter(const char *stack_trace_fmt, char frame_delimiter,
+                        InternalScopedString *output,
+                        InternalScopedString *dedup_token)
+      : stack_trace_fmt_(stack_trace_fmt),
+        frame_delimiter_(frame_delimiter),
+        output_(output),
+        dedup_token_(dedup_token),
+        symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
+
+  bool ProcessAddressFrames(uptr pc) {
+    SymbolizedStack *frames = symbolize_
+                                  ? Symbolizer::GetOrInit()->SymbolizePC(pc)
+                                  : SymbolizedStack::New(pc);
+    if (!frames)
+      return false;
+
+    for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+      uptr prev_len = output_->length();
+      RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
+                  symbolize_ ? &cur->info : nullptr,
+                  common_flags()->symbolize_vs_style,
+                  common_flags()->strip_path_prefix);
+
+      if (prev_len != output_->length())
+        output_->append("%c", frame_delimiter_);
+
+      ExtendDedupToken(cur);
+    }
+    frames->ClearAll();
+    return true;
+  }
+
+ private:
+  // Extend the dedup token by appending a new frame.
+  void ExtendDedupToken(SymbolizedStack *stack) {
+    if (!dedup_token_)
+      return;
+
+    if (dedup_frames_-- > 0) {
+      if (dedup_token_->length())
+        dedup_token_->append("--");
+      if (stack->info.function != nullptr)
+        dedup_token_->append(stack->info.function);
+    }
+  }
+
+  const char *stack_trace_fmt_;
+  const char frame_delimiter_;
+  int dedup_frames_ = common_flags()->dedup_token_length;
+  uptr frame_num_ = 0;
+  InternalScopedString *output_;
+  InternalScopedString *dedup_token_;
+  const bool symbolize_ = false;
+};
+
+static void CopyStringToBuffer(const InternalScopedString &str, char *out_buf,
+                               uptr out_buf_size) {
+  if (!out_buf_size)
+    return;
+
+  CHECK_GT(out_buf_size, 0);
+  uptr copy_size = Min(str.length(), out_buf_size - 1);
+  internal_memcpy(out_buf, str.data(), copy_size);
+  out_buf[copy_size] = '\0';
+}
+
+}  // namespace
+
+void StackTrace::PrintTo(InternalScopedString *output) const {
+  CHECK(output);
+
+  InternalScopedString dedup_token;
+  StackTraceTextPrinter printer(common_flags()->stack_trace_format, '\n',
+                                output, &dedup_token);
+
+  if (trace == nullptr || size == 0) {
+    output->append("    <empty stack>\n\n");
+    return;
+  }
+
+  for (uptr i = 0; i < size && trace[i]; i++) {
+    // PCs in stack traces are actually the return addresses, that is,
+    // addresses of the next instructions after the call.
+    uptr pc = GetPreviousInstructionPc(trace[i]);
+    CHECK(printer.ProcessAddressFrames(pc));
+  }
+
+  // Always add a trailing empty line after stack trace.
+  output->append("\n");
+
+  // Append deduplication token, if non-empty.
+  if (dedup_token.length())
+    output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
+}
+
+uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
+  CHECK(out_buf);
+
+  InternalScopedString output;
+  PrintTo(&output);
+  CopyStringToBuffer(output, out_buf, out_buf_size);
+
+  return output.length();
+}
+
+void StackTrace::Print() const {
+  InternalScopedString output;
+  PrintTo(&output);
+  Printf("%s", output.data());
+}

 void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
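The DEDUP_TOKEN logic above simply joins the function names of the first dedup_token_length frames with "--". A toy model of the same construction (std::string/std::vector in place of the library's internal types, function name hypothetical):

#include <string>
#include <vector>

std::string MakeDedupToken(const std::vector<std::string> &frame_funcs,
                           int dedup_token_length) {
  std::string token;
  for (int i = 0;
       i < (int)frame_funcs.size() && i < dedup_token_length; ++i) {
    if (!token.empty()) token += "--";  // frame separator, as in the diff
    token += frame_funcs[i];
  }
  return token;
}
// MakeDedupToken({"LeakyAlloc", "main"}, 2) yields "LeakyAlloc--main".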
@@ -82,12 +155,15 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
       UnwindSlow(pc, context, max_depth);
     else
       UnwindSlow(pc, max_depth);
+    // If there are too few frames, the program may be built with
+    // -fno-asynchronous-unwind-tables. Fall back to fast unwinder below.
+    if (size > 2 || size >= max_depth)
+      return;
 #else
     UNREACHABLE("slow unwind requested but not available");
 #endif
-  } else {
-    UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
   }
+
+  UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
 }

 static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
@@ -112,41 +188,18 @@ extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
                               uptr out_buf_size) {
-  if (!out_buf_size) return;
-  pc = StackTrace::GetPreviousInstructionPc(pc);
-  SymbolizedStack *frame;
-  bool symbolize = RenderNeedsSymbolization(fmt);
-  if (symbolize)
-    frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
-  else
-    frame = SymbolizedStack::New(pc);
-  if (!frame) {
-    internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
-    out_buf[out_buf_size - 1] = 0;
+  if (!out_buf_size)
     return;
+
+  pc = StackTrace::GetPreviousInstructionPc(pc);
+
+  InternalScopedString output;
+  StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
+  if (!printer.ProcessAddressFrames(pc)) {
+    output.clear();
+    output.append("<can't symbolize>");
   }
-  InternalScopedString frame_desc;
-  uptr frame_num = 0;
-  // Reserve one byte for the final 0.
-  char *out_end = out_buf + out_buf_size - 1;
-  for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
-       cur = cur->next) {
-    frame_desc.clear();
-    RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
-                symbolize ? &cur->info : nullptr,
-                common_flags()->symbolize_vs_style,
-                common_flags()->strip_path_prefix);
-    if (!frame_desc.length())
-      continue;
-    // Reserve one byte for the terminating 0.
-    uptr n = out_end - out_buf - 1;
-    internal_strncpy(out_buf, frame_desc.data(), n);
-    out_buf += __sanitizer::Min<uptr>(n, frame_desc.length());
-    *out_buf++ = 0;
-  }
-  CHECK(out_buf <= out_end);
-  *out_buf = 0;
-  frame->ClearAll();
+  CopyStringToBuffer(output, out_buf, out_buf_size);
 }

 SANITIZER_INTERFACE_ATTRIBUTE
@@ -16,15 +16,14 @@
 #if SANITIZER_FUCHSIA
 #include "sanitizer_symbolizer_fuchsia.h"
-#elif SANITIZER_RTEMS
-#include "sanitizer_symbolizer_rtems.h"
 # endif
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"

 # include <limits.h>
 # include <unwind.h>

+# include "sanitizer_stacktrace.h"
+# include "sanitizer_symbolizer.h"
+
 namespace __sanitizer {

 // This generic support for offline symbolizing is based on the
@@ -120,7 +120,7 @@ void ReportMmapWriteExec(int prot) {
 #endif
 }

-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
+#if !SANITIZER_FUCHSIA && !SANITIZER_GO
 void StartReportDeadlySignal() {
   // Write the first message using fd=2, just in case.
   // It may actually fail to write in case stderr is closed.
@@ -250,17 +250,17 @@ void HandleDeadlySignal(void *siginfo, void *context, u32 tid,

 #endif  // !SANITIZER_FUCHSIA && !SANITIZER_GO

-static atomic_uintptr_t reporting_thread = {0};
-static StaticSpinMutex CommonSanitizerReportMutex;
+atomic_uintptr_t ScopedErrorReportLock::reporting_thread_ = {0};
+StaticSpinMutex ScopedErrorReportLock::mutex_;

-ScopedErrorReportLock::ScopedErrorReportLock() {
+void ScopedErrorReportLock::Lock() {
   uptr current = GetThreadSelf();
   for (;;) {
     uptr expected = 0;
-    if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+    if (atomic_compare_exchange_strong(&reporting_thread_, &expected, current,
                                        memory_order_relaxed)) {
       // We've claimed reporting_thread so proceed.
-      CommonSanitizerReportMutex.Lock();
+      mutex_.Lock();
       return;
     }
@@ -282,13 +282,11 @@ ScopedErrorReportLock::ScopedErrorReportLock() {
   }
 }

-ScopedErrorReportLock::~ScopedErrorReportLock() {
-  CommonSanitizerReportMutex.Unlock();
-  atomic_store_relaxed(&reporting_thread, 0);
+void ScopedErrorReportLock::Unlock() {
+  mutex_.Unlock();
+  atomic_store_relaxed(&reporting_thread_, 0);
 }

-void ScopedErrorReportLock::CheckLocked() {
-  CommonSanitizerReportMutex.CheckLocked();
-}
+void ScopedErrorReportLock::CheckLocked() { mutex_.CheckLocked(); }

 }  // namespace __sanitizer
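The pattern above is claim-by-CAS: the reporting thread publishes its identity in an atomic before taking the real mutex, so the (elided) middle of the loop can recognize a reentrant report from the same thread instead of self-deadlocking. A minimal sketch of just the claim/release protocol, modeled with std::atomic and std::mutex (names hypothetical, not the library's code):

#include <atomic>
#include <mutex>
#include <thread>

std::atomic<std::thread::id> g_reporting_thread{};
std::mutex g_report_mutex;

void LockReporting() {
  const std::thread::id self = std::this_thread::get_id();
  for (;;) {
    std::thread::id expected{};  // default-constructed id means "no owner"
    if (g_reporting_thread.compare_exchange_strong(expected, self)) {
      g_report_mutex.lock();  // claimed the slot; now take the real lock
      return;
    }
    std::this_thread::yield();  // another thread is reporting; retry
  }
}

void UnlockReporting() {
  g_report_mutex.unlock();
  g_reporting_thread.store(std::thread::id{});
}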
@@ -1,40 +0,0 @@
-//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries.
-//
-// Define RTEMS's string formats and limits for the markup symbolizer.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_SYMBOLIZER_RTEMS_H
-#define SANITIZER_SYMBOLIZER_RTEMS_H
-
-#include "sanitizer_internal_defs.h"
-
-namespace __sanitizer {
-
-// The Myriad RTEMS symbolizer currently only parses backtrace lines,
-// so use a format that the symbolizer understands.  For other
-// markups, keep them the same as the Fuchsia's.
-
-// This is used by UBSan for type names, and by ASan for global variable names.
-constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
-constexpr uptr kFormatDemangleMax = 1024;  // Arbitrary.
-
-// Function name or equivalent from PC location.
-constexpr const char *kFormatFunction = "{{{pc:%p}}}";
-constexpr uptr kFormatFunctionMax = 64;  // More than big enough for 64-bit hex.
-
-// Global variable name or equivalent from data memory address.
-constexpr const char *kFormatData = "{{{data:%p}}}";
-
-// One frame in a backtrace (printed on a line by itself).
-constexpr const char *kFormatFrame = "  [%u] IP: %p";
-
-}  // namespace __sanitizer
-
-#endif  // SANITIZER_SYMBOLIZER_RTEMS_H
@@ -99,6 +99,9 @@ void ThreadContextBase::Reset() {

 // ThreadRegistry implementation.

+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory)
+    : ThreadRegistry(factory, UINT32_MAX, UINT32_MAX, 0) {}
+
 ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
                                u32 thread_quarantine_size, u32 max_reuse)
     : context_factory_(factory),
@@ -106,13 +109,10 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
       thread_quarantine_size_(thread_quarantine_size),
       max_reuse_(max_reuse),
       mtx_(),
-      n_contexts_(0),
       total_threads_(0),
       alive_threads_(0),
       max_alive_threads_(0),
       running_threads_(0) {
-  threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
-                                             "ThreadRegistry");
   dead_threads_.clear();
   invalid_threads_.clear();
 }
@@ -120,7 +120,8 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
 void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
                                         uptr *alive) {
   BlockingMutexLock l(&mtx_);
-  if (total) *total = n_contexts_;
+  if (total)
+    *total = threads_.size();
   if (running) *running = running_threads_;
   if (alive) *alive = alive_threads_;
 }
@@ -137,11 +138,11 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
   ThreadContextBase *tctx = QuarantinePop();
   if (tctx) {
     tid = tctx->tid;
-  } else if (n_contexts_ < max_threads_) {
+  } else if (threads_.size() < max_threads_) {
     // Allocate new thread context and tid.
-    tid = n_contexts_++;
+    tid = threads_.size();
     tctx = context_factory_(tid);
-    threads_[tid] = tctx;
+    threads_.push_back(tctx);
   } else {
 #if !SANITIZER_GO
     Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
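The hunk above replaces the preallocated context array and its separate n_contexts_ counter with a growable vector: a context's tid is simply its index at the moment it is appended. A toy model of the new allocation scheme (std::vector standing in for InternalMmapVector, struct name hypothetical):

#include <vector>

struct ToyContext { unsigned tid; };

unsigned AllocateTid(std::vector<ToyContext *> &threads) {
  unsigned tid = static_cast<unsigned>(threads.size());
  threads.push_back(new ToyContext{tid});  // size() now tracks the count
  return tid;
}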
@@ -169,7 +170,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
 void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
                                                     void *arg) {
   CheckLocked();
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx == 0)
       continue;
@@ -179,7 +180,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,

 u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
   BlockingMutexLock l(&mtx_);
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && cb(tctx, arg))
       return tctx->tid;
@@ -190,7 +191,7 @@ u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
 ThreadContextBase *
 ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
   CheckLocked();
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && cb(tctx, arg))
       return tctx;
@@ -211,7 +212,6 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {

 void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -221,7 +221,7 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {

 void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
   BlockingMutexLock l(&mtx_);
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && tctx->user_id == user_id &&
         tctx->status != ThreadStatusInvalid) {
@@ -233,7 +233,6 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {

 void ThreadRegistry::DetachThread(u32 tid, void *arg) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   if (tctx->status == ThreadStatusInvalid) {
@@ -254,7 +253,6 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
   do {
     {
       BlockingMutexLock l(&mtx_);
-      CHECK_LT(tid, n_contexts_);
       ThreadContextBase *tctx = threads_[tid];
       CHECK_NE(tctx, 0);
       if (tctx->status == ThreadStatusInvalid) {
@@ -280,7 +278,6 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
   BlockingMutexLock l(&mtx_);
   CHECK_GT(alive_threads_, 0);
   alive_threads_--;
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   bool dead = tctx->detached;
@@ -306,7 +303,6 @@ void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
                                  void *arg) {
   BlockingMutexLock l(&mtx_);
   running_threads_++;
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(ThreadStatusCreated, tctx->status);
@@ -339,7 +335,6 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() {

 void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_NE(tctx->status, ThreadStatusInvalid);
@@ -85,22 +85,22 @@ class ThreadContextBase {

 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);

-class ThreadRegistry {
+class MUTEX ThreadRegistry {
  public:
+  ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
-                 u32 thread_quarantine_size, u32 max_reuse = 0);
+                 u32 thread_quarantine_size, u32 max_reuse);
   void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
                           uptr *alive = nullptr);
   uptr GetMaxAliveThreads();

-  void Lock() { mtx_.Lock(); }
-  void CheckLocked() { mtx_.CheckLocked(); }
-  void Unlock() { mtx_.Unlock(); }
+  void Lock() ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
+  void Unlock() RELEASE() { mtx_.Unlock(); }

   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {
-    DCHECK_LT(tid, n_contexts_);
-    return threads_[tid];
+    return threads_.empty() ? nullptr : threads_[tid];
   }

   u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
@@ -137,15 +137,13 @@ class ThreadRegistry {

   BlockingMutex mtx_;

-  u32 n_contexts_;      // Number of created thread contexts,
-                        // at most max_threads_.
   u64 total_threads_;   // Total number of created threads. May be greater than
                         // max_threads_ if contexts were reused.
   uptr alive_threads_;  // Created or running.
   uptr max_alive_threads_;
   uptr running_threads_;

-  ThreadContextBase **threads_;  // Array of thread contexts is leaked.
+  InternalMmapVector<ThreadContextBase *> threads_;
   IntrusiveList<ThreadContextBase> dead_threads_;
   IntrusiveList<ThreadContextBase> invalid_threads_;
libsanitizer/sanitizer_common/sanitizer_thread_safety.h (new file, 42 lines)
@@ -0,0 +1,42 @@
+//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Wrappers around thread safety annotations.
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_SAFETY_H
+#define SANITIZER_THREAD_SAFETY_H
+
+#if defined(__clang__)
+# define THREAD_ANNOTATION(x) __attribute__((x))
+#else
+# define THREAD_ANNOTATION(x)
+#endif
+
+#define MUTEX THREAD_ANNOTATION(capability("mutex"))
+#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
+#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
+#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define REQUIRES_SHARED(...) \
+  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) \
+  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+
+#endif
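These wrappers expand to clang's thread-safety attributes (and to nothing elsewhere), which is why ThreadRegistry above gains MUTEX/ACQUIRE/RELEASE/CHECK_LOCKED markers. An illustrative sketch of how they are meant to be used (the class is hypothetical; compiling with clang -Wthread-safety flags callers that touch value_ without holding the lock):

#include "sanitizer_thread_safety.h"

class MUTEX ToyLock {
 public:
  void Lock() ACQUIRE() { /* acquire the underlying lock */ }
  void Unlock() RELEASE() { /* release the underlying lock */ }
  void Set(int v) REQUIRES(this) { value_ = v; }  // callers must hold the lock

 private:
  int value_ GUARDED_BY(this) = 0;
};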
@@ -44,6 +44,9 @@ TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
 #define TraceLoggingUnregister(x)
 #endif

+// For WaitOnAddress
+# pragma comment(lib, "synchronization.lib")
+
 // A macro to tell the compiler that this part of the code cannot be reached,
 // if the compiler supports this feature. Since we're using this in
 // code that is called when terminating the process, the expansion of the
@@ -541,13 +544,7 @@ bool IsAbsolutePath(const char *path) {
          IsPathSeparator(path[2]);
 }

-void SleepForSeconds(int seconds) {
-  Sleep(seconds * 1000);
-}
-
-void SleepForMillis(int millis) {
-  Sleep(millis);
-}
+void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }

 u64 NanoTime() {
   static LARGE_INTEGER frequency = {};
@@ -819,6 +816,17 @@ uptr GetRSS() {
 void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
 void internal_join_thread(void *th) { }

+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+  if (count == 1)
+    WakeByAddressSingle(p);
+  else
+    WakeByAddressAll(p);
+}
+
 // ---------------------- BlockingMutex ---------------- {{{1

 BlockingMutex::BlockingMutex() {
@@ -838,9 +846,7 @@ void BlockingMutex::Unlock() {
   ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
 }

-void BlockingMutex::CheckLocked() {
-  CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }

 uptr GetTlsSize() {
   return 0;
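WaitOnAddress sleeps only while the watched word still equals the compared value, which is exactly futex semantics; wakeups can be spurious, so the value must be re-checked in a loop. A hedged sketch of typical usage on Windows 8+ (helper names are illustrative; requires linking synchronization.lib, as the pragma above arranges):

#include <windows.h>
#include <atomic>
#pragma comment(lib, "synchronization.lib")

void WaitUntilNonZero(std::atomic<unsigned> *state) {
  unsigned observed = state->load(std::memory_order_acquire);
  while (observed == 0) {
    // Blocks only while *state still equals 'observed'; may wake spuriously.
    WaitOnAddress(state, &observed, sizeof(observed), INFINITE);
    observed = state->load(std::memory_order_acquire);
  }
}

void Signal(std::atomic<unsigned> *state) {
  state->store(1, std::memory_order_release);
  WakeByAddressSingle(state);  // WakeByAddressAll would wake every waiter
}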
Some files were not shown because too many files have changed in this diff.