libsanitizer: merge from upstream (61a6439f35b6de28)

Kito Cheng 2024-11-01 15:33:03 +08:00
parent b8ecd96aea
commit fa321004f3
238 changed files with 7250 additions and 4738 deletions

View File

@ -1,4 +1,4 @@
c425db2eb558c26377edc04e062c0c1f999b2770
61a6439f35b6de28ff4aff4450d6fca970292fd5
The first line of this file holds the git revision number of the
last merge done from the master library sources.

View File

@ -46,7 +46,6 @@ asan_files = \
asan_suppressions.cpp \
asan_thread.cpp \
asan_win.cpp \
asan_win_dll_thunk.cpp \
asan_win_dynamic_runtime_thunk.cpp \
asan_interceptors_vfork.S

View File

@ -160,8 +160,7 @@ am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
asan_posix.lo asan_premap_shadow.lo asan_report.lo asan_rtl.lo \
asan_shadow_setup.lo asan_stack.lo asan_stats.lo \
asan_suppressions.lo asan_thread.lo asan_win.lo \
asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo \
asan_interceptors_vfork.lo
asan_win_dynamic_runtime_thunk.lo asan_interceptors_vfork.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
@ -457,7 +456,6 @@ asan_files = \
asan_suppressions.cpp \
asan_thread.cpp \
asan_win.cpp \
asan_win_dll_thunk.cpp \
asan_win_dynamic_runtime_thunk.cpp \
asan_interceptors_vfork.S
@ -619,7 +617,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dynamic_runtime_thunk.Plo@am__quote@
.S.o:

View File

@ -717,7 +717,15 @@ struct Allocator {
return;
}
RunFreeHooks(ptr);
if (RunFreeHooks(ptr)) {
// Someone used __sanitizer_ignore_free_hook() and decided that they
// didn't want the memory to be freed right now.
// When they call free() on this pointer again at a later time, we should
// ignore the alloc-type mismatch and allow them to deallocate the pointer
// through free(), rather than the initial alloc type.
m->alloc_type = FROM_MALLOC;
return;
}
// Must mark the chunk as quarantined before any changes to its metadata.
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
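For context, the `RunFreeHooks(ptr)` branch above is driven by a program-supplied hook. A minimal sketch of the user side, assuming the declaration matches compiler-rt's <sanitizer/allocator_interface.h>; IsCachedPointer is a hypothetical application predicate:

// Hypothetical policy deciding whether the application is caching this
// pointer rather than really releasing it.
static bool IsCachedPointer(const volatile void *p) {
  (void)p;
  return false;  // placeholder: never ignore a free
}

// Returning non-zero makes ASan skip the deallocation; per the hunk above,
// the chunk's alloc type is reset to FROM_MALLOC so a later plain free()
// of the same pointer is accepted.
extern "C" int __sanitizer_ignore_free_hook(const volatile void *ptr) {
  return IsCachedPointer(ptr) ? 1 : 0;
}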

View File

@ -182,42 +182,44 @@ static_assert(SizeClassMap::kNumClassesRounded <= 32,
"allocator size and SizeClassMap tunings that allows us to "
"reliably run all bringup tests in a sanitized environment.");
# else
# else // SANITIZER_RISCV64
// These are the default allocator tunings for non-RISCV environments where the
// VMA is usually 48 bits and we have lots of space.
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# endif
# elif defined(__powerpc64__)
# endif // SANITIZER_RISCV64
# else // SANITIZER_FUCHSIA
# if SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
# else // SANITIZER_APPLE
const uptr kAllocatorSpace = ~(uptr)0;
# endif // SANITIZER_APPLE
# if defined(__powerpc64__)
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
# elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
# elif SANITIZER_RISCV64
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
# elif defined(__sparc__)
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
# elif SANITIZER_APPLE
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x500000000000ULL;
# else
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# endif
# endif // defined(__powerpc64__) etc.
# endif // SANITIZER_FUCHSIA
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
@ -232,7 +234,7 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else // Fallback to SizeClassAllocator32.
#else // SANITIZER_CAN_USE_ALLOCATOR64. Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {
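As a quick sanity check on the size comments in this hunk, the constants are powers of two; a small standalone program (values taken from the hunk above) decodes them, showing that 0x8000000000 is 512 GiB, which the "500G" comment rounds down:

#include <cstdint>
#include <cstdio>

int main() {
  // kAllocatorSize values used above: 128G, 512G ("500G"), 2T, 4T.
  constexpr uint64_t kSizes[] = {0x2000000000ULL, 0x8000000000ULL,
                                 0x20000000000ULL, 0x40000000000ULL};
  for (uint64_t s : kSizes)
    std::printf("0x%llx = %llu GiB\n", (unsigned long long)s,
                (unsigned long long)(s >> 30));  // prints 128, 512, 2048, 4096
  return 0;
}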

View File

@ -20,24 +20,20 @@
namespace __asan {
AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) {
Init(t->tid, t->name);
}
AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) {
if (tid == kInvalidTid) {
Init(tid, "");
} else {
asanThreadRegistry().CheckLocked();
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
Init(tid, t->name);
if (!t) {
internal_snprintf(name, sizeof(name), "T-1");
return;
}
int len = internal_snprintf(name, sizeof(name), "T%llu", t->unique_id);
CHECK(((unsigned int)len) < sizeof(name));
if (internal_strlen(t->name))
internal_snprintf(&name[len], sizeof(name) - len, " (%s)", t->name);
}
void AsanThreadIdAndName::Init(u32 tid, const char *tname) {
int len = internal_snprintf(name, sizeof(name), "T%d", tid);
CHECK(((unsigned int)len) < sizeof(name));
if (tname[0] != '\0')
internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname);
AsanThreadIdAndName::AsanThreadIdAndName(u32 tid)
: AsanThreadIdAndName(
tid == kInvalidTid ? nullptr : GetThreadContextByTidLocked(tid)) {
asanThreadRegistry().CheckLocked();
}
void DescribeThread(AsanThreadContext *context) {
@ -48,9 +44,20 @@ void DescribeThread(AsanThreadContext *context) {
return;
}
context->announced = true;
AsanThreadContext *parent_context =
context->parent_tid == kInvalidTid
? nullptr
: GetThreadContextByTidLocked(context->parent_tid);
// `context->parent_tid` may point to a reused slot. Check `unique_id`, which
// is always smaller for the parent and always greater for a new user.
if (parent_context && context->unique_id <= parent_context->unique_id)
parent_context = nullptr;
InternalScopedString str;
str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
if (!parent_context) {
str.Append(" created by unknown thread\n");
Printf("%s", str.data());
return;
@ -60,11 +67,8 @@ void DescribeThread(AsanThreadContext *context) {
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
GetThreadContextByTidLocked(context->parent_tid);
if (flags()->print_full_thread_history)
DescribeThread(parent_context);
}
}
// Shadow descriptions
@ -245,11 +249,11 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
InternalScopedString str;
str.AppendF(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
str.AppendF(" '");
str.Append(" '");
for (uptr i = 0; i < var.name_len; ++i) {
str.AppendF("%c", var.name_pos[i]);
}
str.AppendF("'");
str.Append("'");
if (var.line > 0) {
str.AppendF(" (line %zd)", var.line);
}
@ -260,7 +264,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
str.AppendF("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.Default());
} else {
str.AppendF("\n");
str.Append("\n");
}
Printf("%s", str.data());
}
@ -292,7 +296,7 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
str.AppendF(" global variable '%s' defined in '",
MaybeDemangleGlobalName(g.name));
PrintGlobalLocation(&str, g, /*print_module_name=*/false);
str.AppendF("' (0x%zx) of size %zu\n", g.beg, g.size);
str.AppendF("' (%p) of size %zu\n", (void *)g.beg, g.size);
str.Append(d.Default());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());

View File

@ -35,8 +35,6 @@ class AsanThreadIdAndName {
const char *c_str() const { return &name[0]; }
private:
void Init(u32 tid, const char *tname);
char name[128];
};

View File

@ -327,9 +327,6 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
" old_mid : %p\n"
" new_mid : %p\n",
(void *)beg, (void *)end, (void *)old_mid, (void *)new_mid);
uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %zu\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
@ -347,9 +344,20 @@ void ErrorBadParamsToAnnotateDoubleEndedContiguousContainer::Print() {
(void *)storage_beg, (void *)storage_end, (void *)old_container_beg,
(void *)old_container_end, (void *)new_container_beg,
(void *)new_container_end);
uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!IsAligned(storage_beg, granularity))
Report("ERROR: storage_beg is not aligned by %zu\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorBadParamsToCopyContiguousContainerAnnotations::Print() {
Report(
"ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_copy_contiguous_container_annotations:\n"
" src_storage_beg : %p\n"
" src_storage_end : %p\n"
" dst_storage_beg : %p\n"
" new_storage_end : %p\n",
(void *)old_storage_beg, (void *)old_storage_end, (void *)new_storage_beg,
(void *)new_storage_end);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}

View File

@ -353,6 +353,24 @@ struct ErrorBadParamsToAnnotateDoubleEndedContiguousContainer : ErrorBase {
void Print();
};
struct ErrorBadParamsToCopyContiguousContainerAnnotations : ErrorBase {
const BufferedStackTrace *stack;
uptr old_storage_beg, old_storage_end, new_storage_beg, new_storage_end;
ErrorBadParamsToCopyContiguousContainerAnnotations() = default; // (*)
ErrorBadParamsToCopyContiguousContainerAnnotations(
u32 tid, BufferedStackTrace *stack_, uptr old_storage_beg_,
uptr old_storage_end_, uptr new_storage_beg_, uptr new_storage_end_)
: ErrorBase(tid, 10,
"bad-__sanitizer_annotate_double_ended_contiguous_container"),
stack(stack_),
old_storage_beg(old_storage_beg_),
old_storage_end(old_storage_end_),
new_storage_beg(new_storage_beg_),
new_storage_end(new_storage_end_) {}
void Print();
};
struct ErrorODRViolation : ErrorBase {
__asan_global global1, global2;
u32 stack_id1, stack_id2;
@ -421,6 +439,7 @@ struct ErrorGeneric : ErrorBase {
macro(StringFunctionSizeOverflow) \
macro(BadParamsToAnnotateContiguousContainer) \
macro(BadParamsToAnnotateDoubleEndedContiguousContainer) \
macro(BadParamsToCopyContiguousContainerAnnotations) \
macro(ODRViolation) \
macro(InvalidPointerPair) \
macro(Generic)

View File

@ -11,14 +11,16 @@
// ASan flag parsing logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_flags.h"
#include "asan_activation.h"
#include "asan_interface_internal.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_win_interception.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_platform.h"
@ -47,7 +49,21 @@ static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
#undef ASAN_FLAG
}
void InitializeFlags() {
static void DisplayHelpMessages(FlagParser *parser) {
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) {
ReportUnrecognizedFlags();
}
if (common_flags()->help) {
parser->PrintFlagDescriptions();
}
}
static void InitializeDefaultFlags() {
Flags *f = flags();
FlagParser asan_parser;
// Set the default values and prepare for parsing ASan and common flags.
SetCommonFlagsDefaults();
{
@ -60,10 +76,8 @@ void InitializeFlags() {
cf.exitcode = 1;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser asan_parser;
RegisterAsanFlags(&asan_parser, f);
RegisterCommonFlags(&asan_parser);
@ -126,13 +140,12 @@ void InitializeFlags() {
InitializeCommonFlags();
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
// TODO(samsonov): print all of the flags (ASan, LSan, common).
DisplayHelpMessages(&asan_parser);
}
if (common_flags()->help) {
// TODO(samsonov): print all of the flags (ASan, LSan, common).
asan_parser.PrintFlagDescriptions();
}
static void ProcessFlags() {
Flags *f = flags();
// Flag validation:
if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
@ -199,6 +212,67 @@ void InitializeFlags() {
}
}
void InitializeFlags() {
InitializeDefaultFlags();
ProcessFlags();
#if SANITIZER_WINDOWS
// On Windows, weak symbols are emulated by having the user program
// register which weak functions are defined.
// The ASAN DLL will initialize flags prior to user module initialization,
// so __asan_default_options will not point to the user definition yet.
// We still want to ensure we capture when options are passed via
// __asan_default_options, so we add a callback to be run
// when it is registered with the runtime.
// There is theoretically time between the initial ProcessFlags and
// registering the weak callback where a weak function could be added and we
// would miss it, but in practice, InitializeFlags will always happen under
// the loader lock (if built as a DLL) and so will any calls to
// __sanitizer_register_weak_function.
AddRegisterWeakFunctionCallback(
reinterpret_cast<uptr>(__asan_default_options), []() {
FlagParser asan_parser;
RegisterAsanFlags(&asan_parser, flags());
RegisterCommonFlags(&asan_parser);
asan_parser.ParseString(__asan_default_options());
DisplayHelpMessages(&asan_parser);
ProcessFlags();
});
# if CAN_SANITIZE_UB
AddRegisterWeakFunctionCallback(
reinterpret_cast<uptr>(__ubsan_default_options), []() {
FlagParser ubsan_parser;
__ubsan::RegisterUbsanFlags(&ubsan_parser, __ubsan::flags());
RegisterCommonFlags(&ubsan_parser);
ubsan_parser.ParseString(__ubsan_default_options());
// To match normal behavior, do not print UBSan help.
ProcessFlags();
});
# endif
# if CAN_SANITIZE_LEAKS
AddRegisterWeakFunctionCallback(
reinterpret_cast<uptr>(__lsan_default_options), []() {
FlagParser lsan_parser;
__lsan::RegisterLsanFlags(&lsan_parser, __lsan::flags());
RegisterCommonFlags(&lsan_parser);
lsan_parser.ParseString(__lsan_default_options());
// To match normal behavior, do not print LSan help.
ProcessFlags();
});
# endif
#endif
}
} // namespace __asan
SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
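For reference, `__asan_default_options` is the weak function the Windows callbacks above re-parse once a user module registers its override; on other platforms the runtime simply calls it during flag initialization. A typical override compiled into the instrumented program (the flag values are illustrative):

extern "C" const char *__asan_default_options() {
  // Parsed during InitializeDefaultFlags(); on Windows, re-parsed by the
  // AddRegisterWeakFunctionCallback hook once this module registers it.
  return "verbosity=1:check_initialization_order=1";
}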

View File

@ -57,8 +57,6 @@ void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
void InitializePlatformExceptionHandlers() {}
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
@ -123,8 +121,7 @@ static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
// In lieu of AsanThread::Create.
AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
u32 tid = asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
u32 tid = asanThreadRegistry().CreateThread(0, detached, parent_tid, thread);
asanThreadRegistry().SetThreadName(tid, name);
return thread;
@ -240,6 +237,8 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// So this doesn't install any atexit hook like on other platforms.
void InstallAtExitCheckLeaks() {}
void InstallAtForkHandler() {}
} // namespace __asan
namespace __lsan {

View File

@ -21,31 +21,32 @@
#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_dense_map.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_thread_safety.h"
namespace __asan {
typedef __asan_global Global;
struct ListOfGlobals {
const Global *g;
ListOfGlobals *next;
struct GlobalListNode {
const Global *g = nullptr;
GlobalListNode *next = nullptr;
};
typedef IntrusiveList<GlobalListNode> ListOfGlobals;
static Mutex mu_for_globals;
static ListOfGlobals *list_of_all_globals;
static ListOfGlobals list_of_all_globals SANITIZER_GUARDED_BY(mu_for_globals);
static const int kDynamicInitGlobalsInitialCapacity = 512;
struct DynInitGlobal {
Global g;
bool initialized;
Global g = {};
bool initialized = false;
DynInitGlobal *next = nullptr;
};
typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
// We want to remember where a certain range of globals was registered.
struct GlobalRegistrationSite {
@ -55,6 +56,39 @@ struct GlobalRegistrationSite {
typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
static GlobalRegistrationSiteVector *global_registration_site_vector;
static ListOfGlobals &GlobalsByIndicator(uptr odr_indicator)
SANITIZER_REQUIRES(mu_for_globals) {
using MapOfGlobals = DenseMap<uptr, ListOfGlobals>;
static MapOfGlobals *globals_by_indicator = nullptr;
if (!globals_by_indicator) {
alignas(
alignof(MapOfGlobals)) static char placeholder[sizeof(MapOfGlobals)];
globals_by_indicator = new (placeholder) MapOfGlobals();
}
return (*globals_by_indicator)[odr_indicator];
}
static const char *current_dynamic_init_module_name
SANITIZER_GUARDED_BY(mu_for_globals) = nullptr;
using DynInitGlobalsByModule =
DenseMap<const char *, IntrusiveList<DynInitGlobal>>;
// TODO: Add a NoDestroy helper; this pattern is very common in sanitizers.
static DynInitGlobalsByModule &DynInitGlobals()
SANITIZER_REQUIRES(mu_for_globals) {
static DynInitGlobalsByModule *globals_by_module = nullptr;
if (!globals_by_module) {
alignas(alignof(DynInitGlobalsByModule)) static char
placeholder[sizeof(DynInitGlobalsByModule)];
globals_by_module = new (placeholder) DynInitGlobalsByModule();
}
return *globals_by_module;
}
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
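The two map accessors earlier in this hunk (GlobalsByIndicator, DynInitGlobals) hand-roll the same construct-once, never-destroy idiom that the TODO above wants factored out. A generic sketch of that pattern; the helper name is hypothetical:

#include <new>

// Construct T in static storage on first use and never run its destructor,
// so the object remains usable during late process shutdown.
template <class T>
T &NoDestroySingleton() {
  alignas(T) static char storage[sizeof(T)];
  static T *instance = new (storage) T();  // placement-new, never deleted
  return *instance;
}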
@ -73,6 +107,35 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
const uptr kMinimalDistanceFromAnotherGlobal = 64;
static void AddGlobalToList(ListOfGlobals &list, const Global *g) {
list.push_front(new (GetGlobalLowLevelAllocator()) GlobalListNode{g});
}
static void UnpoisonDynamicGlobals(IntrusiveList<DynInitGlobal> &dyn_globals,
bool mark_initialized) {
for (auto &dyn_g : dyn_globals) {
const Global *g = &dyn_g.g;
if (dyn_g.initialized)
continue;
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
if (mark_initialized)
dyn_g.initialized = true;
}
}
static void PoisonDynamicGlobals(
const IntrusiveList<DynInitGlobal> &dyn_globals) {
for (auto &dyn_g : dyn_globals) {
const Global *g = &dyn_g.g;
if (dyn_g.initialized)
continue;
PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
}
}
static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
if (addr >= g.beg + g.size_with_redzone) return false;
@ -114,8 +177,8 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
if (!flags()->report_globals) return 0;
Lock lock(&mu_for_globals);
int res = 0;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
for (const auto &l : list_of_all_globals) {
const Global &g = *l.g;
if (flags()->report_globals >= 2)
ReportGlobal(g, "Search");
if (IsAddressNearGlobal(addr, g)) {
@ -138,23 +201,47 @@ enum GlobalSymbolState {
// Check ODR violation for given global G via special ODR indicator. We use
// this method in case compiler instruments global variables through their
// local aliases.
static void CheckODRViolationViaIndicator(const Global *g) {
static void CheckODRViolationViaIndicator(const Global *g)
SANITIZER_REQUIRES(mu_for_globals) {
// Instrumentation requests to skip ODR check.
if (g->odr_indicator == UINTPTR_MAX)
return;
ListOfGlobals &relevant_globals = GlobalsByIndicator(g->odr_indicator);
u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
if (*odr_indicator == UNREGISTERED) {
if (*odr_indicator == REGISTERED) {
// If *odr_indicator is REGISTERED, some module has already registered an
// externally visible symbol with the same name. This is an ODR violation.
for (const auto &l : relevant_globals) {
if ((flags()->detect_odr_violation >= 2 || g->size != l.g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g), l.g,
FindRegistrationSite(l.g));
}
} else { // UNREGISTERED
*odr_indicator = REGISTERED;
return;
}
// If *odr_indicator is DEFINED, some module has already registered an
// externally visible symbol with the same name. This is an ODR violation.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->odr_indicator == l->g->odr_indicator &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
AddGlobalToList(relevant_globals, g);
}
// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g)
SANITIZER_REQUIRES(mu_for_globals) {
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger, the
// entire redzone of the second global may be within the first global.
for (const auto &l : list_of_all_globals) {
if (g->beg == l.g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l.g->size) &&
!IsODRViolationSuppressed(g->name)) {
ReportODRViolation(g, FindRegistrationSite(g), l.g,
FindRegistrationSite(l.g));
}
}
}
}
@ -181,7 +268,7 @@ static inline bool UseODRIndicator(const Global *g) {
// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
static void RegisterGlobal(const Global *g) {
static void RegisterGlobal(const Global *g) SANITIZER_REQUIRES(mu_for_globals) {
CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Added");
@ -203,24 +290,22 @@ static void RegisterGlobal(const Global *g) {
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
ListOfGlobals *l = new (GetGlobalLowLevelAllocator()) ListOfGlobals;
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
AddGlobalToList(list_of_all_globals, g);
if (g->has_dynamic_init) {
if (!dynamic_init_globals) {
dynamic_init_globals = new (GetGlobalLowLevelAllocator()) VectorOfGlobals;
dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };
dynamic_init_globals->push_back(dyn_global);
DynInitGlobals()[g->module_name].push_back(
new (GetGlobalLowLevelAllocator()) DynInitGlobal{*g, false});
}
}
static void UnregisterGlobal(const Global *g) {
static void UnregisterGlobal(const Global *g)
SANITIZER_REQUIRES(mu_for_globals) {
CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Removed");
@ -242,18 +327,14 @@ static void UnregisterGlobal(const Global *g) {
}
void StopInitOrderChecking() {
Lock lock(&mu_for_globals);
if (!flags()->check_initialization_order || !dynamic_init_globals)
if (!flags()->check_initialization_order)
return;
Lock lock(&mu_for_globals);
flags()->check_initialization_order = false;
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
DynInitGlobals().forEach([&](auto &kv) {
UnpoisonDynamicGlobals(kv.second, /*mark_initialized=*/false);
return true;
});
}
static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
@ -325,8 +406,8 @@ void __asan_unregister_image_globals(uptr *flag) {
}
void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
if (*flag) return;
if (!start) return;
if (*flag || start == stop)
return;
CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
__asan_global *globals_start = (__asan_global*)start;
__asan_global *globals_stop = (__asan_global*)stop;
@ -335,8 +416,8 @@ void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
}
void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
if (!*flag) return;
if (!start) return;
if (!*flag || start == stop)
return;
CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
__asan_global *globals_start = (__asan_global*)start;
__asan_global *globals_stop = (__asan_global*)stop;
@ -408,47 +489,94 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// poisons all global variables not defined in this TU, so that a dynamic
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!CanPoisonMemory() ||
!dynamic_init_globals)
if (!flags()->check_initialization_order || !CanPoisonMemory())
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(module_name);
CHECK(AsanInited());
Lock lock(&mu_for_globals);
if (current_dynamic_init_module_name == module_name)
return;
if (flags()->report_globals >= 3)
Printf("DynInitPoison module: %s\n", module_name);
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (dyn_g.initialized)
continue;
if (g->module_name != module_name)
PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
else if (!strict_init_order)
dyn_g.initialized = true;
if (current_dynamic_init_module_name == nullptr) {
// First call, poison all globals from other modules.
DynInitGlobals().forEach([&](auto &kv) {
if (kv.first != module_name) {
PoisonDynamicGlobals(kv.second);
} else {
UnpoisonDynamicGlobals(kv.second,
/*mark_initialized=*/!strict_init_order);
}
return true;
});
} else {
// Module changed.
PoisonDynamicGlobals(DynInitGlobals()[current_dynamic_init_module_name]);
UnpoisonDynamicGlobals(DynInitGlobals()[module_name],
/*mark_initialized=*/!strict_init_order);
}
current_dynamic_init_module_name = module_name;
}
// Maybe SANITIZER_CAN_USE_PREINIT_ARRAY is too conservative for `.init_array`,
// but we should not make a mistake here. If `UnpoisonBeforeMain` is not
// executed at all, we will have false reports on globals.
#if SANITIZER_CAN_USE_PREINIT_ARRAY
// This optimization aims to reduce the overhead of `__asan_after_dynamic_init`
// calls by leveraging incremental unpoisoning/poisoning in
// `__asan_before_dynamic_init`. We expect most `__asan_after_dynamic_init`
// calls to be no-ops. However, to ensure all globals are unpoisoned before
// `main`, we force `UnpoisonBeforeMain` to fully execute
// `__asan_after_dynamic_init`.
// With lld, `UnpoisonBeforeMain` runs after standard `.init_array`, making it
// the final `__asan_after_dynamic_init` call for the static runtime. In
// contrast, GNU ld executes it earlier, causing subsequent
// `__asan_after_dynamic_init` calls to perform full unpoisoning, losing the
// optimization.
bool allow_after_dynamic_init SANITIZER_GUARDED_BY(mu_for_globals) = false;
static void UnpoisonBeforeMain(void) {
{
Lock lock(&mu_for_globals);
if (allow_after_dynamic_init)
return;
allow_after_dynamic_init = true;
}
if (flags()->report_globals >= 3)
Printf("UnpoisonBeforeMain\n");
__asan_after_dynamic_init();
}
__attribute__((section(".init_array.65537"), used)) static void (
*asan_after_init_array)(void) = UnpoisonBeforeMain;
#else
// Incremental poisoning is disabled, unpoison globals immediately.
static constexpr bool allow_after_dynamic_init = true;
#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
// This method runs immediately after dynamic initialization in each TU, when
// all dynamically initialized globals except for those defined in the current
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
!CanPoisonMemory() ||
!dynamic_init_globals)
if (!flags()->check_initialization_order || !CanPoisonMemory())
return;
CHECK(AsanInited());
Lock lock(&mu_for_globals);
// FIXME: Optionally report that we're unpoisoning globals from a module.
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (!dyn_g.initialized) {
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
}
if (!allow_after_dynamic_init)
return;
if (!current_dynamic_init_module_name)
return;
if (flags()->report_globals >= 3)
Printf("DynInitUnpoison\n");
DynInitGlobals().forEach([&](auto &kv) {
UnpoisonDynamicGlobals(kv.second, /*mark_initialized=*/false);
return true;
});
current_dynamic_init_module_name = nullptr;
}
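The module-by-module poisoning above is what implements `check_initialization_order=1`: while one TU's dynamic initializers run, dynamically initialized globals of every other TU stay poisoned, so a cross-TU read during startup is reported. A classic two-TU reproducer (hypothetical file names), run with ASAN_OPTIONS=check_initialization_order=1:

// b.cpp
int Compute() { return 42; }
int b = Compute();  // dynamic initializer for b

// a.cpp
extern int b;
int a = b + 1;  // if a.cpp happens to be initialized first, this reads b
                // while its shadow is poisoned -> initialization-order-fiasco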

View File

@ -17,10 +17,10 @@ namespace __asan {
#pragma section(".ASAN$GA", read, write)
#pragma section(".ASAN$GZ", read, write)
extern "C" __declspec(allocate(".ASAN$GA"))
ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {};
extern "C" __declspec(allocate(".ASAN$GZ"))
ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {};
extern "C" alignas(sizeof(__asan_global))
__declspec(allocate(".ASAN$GA")) __asan_global __asan_globals_start = {};
extern "C" alignas(sizeof(__asan_global))
__declspec(allocate(".ASAN$GZ")) __asan_global __asan_globals_end = {};
#pragma comment(linker, "/merge:.ASAN=.data")
static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
@ -28,7 +28,9 @@ static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
__asan_global *end = &__asan_globals_end;
uptr bytediff = (uptr)end - (uptr)start;
if (bytediff % sizeof(__asan_global) != 0) {
#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
# if defined(SANITIZER_DLL_THUNK) || \
defined(SANITIZER_DYNAMIC_RUNTIME_THUNK) || \
defined(SANITIZER_STATIC_RUNTIME_THUNK)
__debugbreak();
#else
CHECK("corrupt asan global array");

View File

@ -96,14 +96,16 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \
if (AsanInitIsRunning()) \
return REAL(func)(__VA_ARGS__); \
if (SANITIZER_APPLE && UNLIKELY(!AsanInited())) \
return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \
if constexpr (SANITIZER_APPLE) { \
if (UNLIKELY(!AsanInited())) \
return REAL(func)(__VA_ARGS__); \
} else { \
if (!TryAsanInitFromRtl()) \
return REAL(func)(__VA_ARGS__); \
} \
} while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
@ -194,10 +196,13 @@ static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
__lsan::ScopedInterceptorDisabler disabler
#endif
#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_ASAN_INITED()
# define SIGNAL_INTERCEPTOR_ENTER() \
do { \
AsanInitFromRtl(); \
} while (false)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
# include "sanitizer_common/sanitizer_common_interceptors.inc"
# include "sanitizer_common/sanitizer_signal_interceptors.inc"
// Syscall interceptors don't have contexts, so we don't support suppressions
// for them.
@ -328,7 +333,7 @@ INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
}
# endif
DEFINE_REAL_PTHREAD_FUNCTIONS
DEFINE_INTERNAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SWAPCONTEXT
@ -506,7 +511,7 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char *, strcat, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcat);
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_length = internal_strlen(from);
ASAN_READ_RANGE(ctx, from, from_length + 1);
@ -527,7 +532,7 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncat);
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
@ -546,16 +551,16 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char *, strcpy, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcpy);
#if SANITIZER_APPLE
if (UNLIKELY(!AsanInited()))
return REAL(strcpy)(to, from);
#endif
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (AsanInitIsRunning()) {
return REAL(strcpy)(to, from);
if constexpr (SANITIZER_APPLE) {
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (UNLIKELY(!AsanInited()))
return REAL(strcpy)(to, from);
} else {
if (!TryAsanInitFromRtl())
return REAL(strcpy)(to, from);
}
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_size = internal_strlen(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
@ -565,12 +570,22 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
return REAL(strcpy)(to, from);
}
// Windows doesn't always define the strdup identifier,
// and when it does it's a macro defined to either _strdup
// or _strdup_dbg; _strdup_dbg ends up calling _strdup, so
// we want to intercept that. push/pop_macro are used to avoid problems
// if this file ends up including <string.h> in the future.
# if SANITIZER_WINDOWS
# pragma push_macro("strdup")
# undef strdup
# define strdup _strdup
# endif
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!AsanInited()))
if (UNLIKELY(!TryAsanInitFromRtl()))
return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
@ -583,13 +598,12 @@ INTERCEPTOR(char*, strdup, const char *s) {
return reinterpret_cast<char*>(new_mem);
}
#if ASAN_INTERCEPT___STRDUP
# if ASAN_INTERCEPT___STRDUP
INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!AsanInited()))
if (UNLIKELY(!TryAsanInitFromRtl()))
return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
@ -606,7 +620,7 @@ INTERCEPTOR(char*, __strdup, const char *s) {
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
@ -632,13 +646,38 @@ static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
void *ctx; \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
ENSURE_ASAN_INITED(); \
AsanInitFromRtl(); \
return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \
}
INTERCEPTOR_STRTO_BASE(long, strtol)
INTERCEPTOR_STRTO_BASE(long long, strtoll)
# if SANITIZER_WINDOWS
INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
// REAL(strtol) may be ntdll!strtol, which doesn't set errno. Instead,
// call REAL(strtoll) and do the range check ourselves.
COMPILER_CHECK(sizeof(long) == sizeof(u32));
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strtol);
AsanInitFromRtl();
long long result = StrtolImpl(ctx, REAL(strtoll), nptr, endptr, base);
if (result > INT32_MAX) {
errno = errno_ERANGE;
return INT32_MAX;
}
if (result < INT32_MIN) {
errno = errno_ERANGE;
return INT32_MIN;
}
return (long)result;
}
# else
INTERCEPTOR_STRTO_BASE(long, strtol)
# endif
# if SANITIZER_GLIBC
INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol)
INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
@ -647,11 +686,9 @@ INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
INTERCEPTOR(int, atoi, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_APPLE
if (UNLIKELY(!AsanInited()))
if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
return REAL(atoi)(nptr);
# endif
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoi)(nptr);
}
@ -669,11 +706,9 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_APPLE
if (UNLIKELY(!AsanInited()))
if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
return REAL(atol)(nptr);
# endif
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atol)(nptr);
}
@ -687,7 +722,7 @@ INTERCEPTOR(long, atol, const char *nptr) {
INTERCEPTOR(long long, atoll, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
ENSURE_ASAN_INITED();
AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoll)(nptr);
}
@ -708,12 +743,10 @@ static void AtCxaAtexit(void *unused) {
#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_APPLE
if (UNLIKELY(!AsanInited()))
if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
return REAL(__cxa_atexit)(func, arg, dso_handle);
# endif
ENSURE_ASAN_INITED();
#if CAN_SANITIZE_LEAKS
AsanInitFromRtl();
# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@ -724,8 +757,8 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#if ASAN_INTERCEPT_ATEXIT
INTERCEPTOR(int, atexit, void (*func)()) {
ENSURE_ASAN_INITED();
#if CAN_SANITIZE_LEAKS
AsanInitFromRtl();
# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
// Avoid calling real atexit as it is unreachable, at least on Linux.
@ -739,7 +772,7 @@ INTERCEPTOR(int, atexit, void (*func)()) {
extern "C" {
extern int _pthread_atfork(void (*prepare)(), void (*parent)(),
void (*child)());
};
}
INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
void (*child)()) {
@ -753,8 +786,8 @@ INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
#endif
#if ASAN_INTERCEPT_VFORK
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
DEFINE_REAL(int, vfork,)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)
#endif
// ---------------------- InitializeAsanInterceptors ---------------- {{{1
@ -773,7 +806,7 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
ASAN_INTERCEPT_FUNC(strdup);
#if ASAN_INTERCEPT___STRDUP
# if ASAN_INTERCEPT___STRDUP
ASAN_INTERCEPT_FUNC(__strdup);
#endif
#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
@ -869,6 +902,10 @@ void InitializeAsanInterceptors() {
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
# if SANITIZER_WINDOWS
# pragma pop_macro("strdup")
# endif
} // namespace __asan
#endif // !SANITIZER_FUCHSIA
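One behavioral note from this hunk: because `long` is 32-bit on Windows, the new `strtol` interceptor widens through `strtoll` and clamps with `errno_ERANGE`, matching standard `strtol` overflow semantics even where the underlying `ntdll!strtol` would not set `errno`. A sketch of what a caller then observes (Windows-specific values noted in the comments):

#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>

int main() {
  errno = 0;
  long v = std::strtol("999999999999", nullptr, 10);  // exceeds 32-bit LONG_MAX
  // Under the interceptor (and per the C standard on LLP64 Windows):
  // v == LONG_MAX and errno == ERANGE.
  std::printf("%ld %d\n", v, errno == ERANGE);
  return 0;
}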

View File

@ -24,14 +24,6 @@ namespace __asan {
void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
#define ENSURE_ASAN_INITED() \
do { \
CHECK(!AsanInitIsRunning()); \
if (UNLIKELY(!AsanInited())) { \
AsanInitFromRtl(); \
} \
} while (0)
} // namespace __asan
// There is no general interception at all on Fuchsia.
@ -79,12 +71,7 @@ void InitializePlatformInterceptors();
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \
(!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__)))
# define ASAN_INTERCEPT___CXA_THROW 1
# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
|| ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# else
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
# endif
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else

View File

@ -60,6 +60,7 @@ class AsanThread;
using __sanitizer::StackTrace;
void AsanInitFromRtl();
bool TryAsanInitFromRtl();
// asan_win.cpp
void InitializePlatformExceptionHandlers();
@ -79,7 +80,6 @@ void ReplaceSystemMalloc();
// asan_linux.cpp / asan_mac.cpp / asan_win.cpp
uptr FindDynamicShadowStart();
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
@ -125,13 +125,13 @@ void *AsanDlSymNext(const char *sym);
bool HandleDlopenInit();
void InstallAtExitCheckLeaks();
void InstallAtForkHandler();
#define ASAN_ON_ERROR() \
if (&__asan_on_error) \
__asan_on_error()
bool AsanInited();
bool AsanInitIsRunning(); // Used to avoid infinite recursion in __asan_init().
extern bool replace_intrin_cached;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error

View File

@ -33,7 +33,6 @@
# include "asan_premap_shadow.h"
# include "asan_thread.h"
# include "sanitizer_common/sanitizer_flags.h"
# include "sanitizer_common/sanitizer_freebsd.h"
# include "sanitizer_common/sanitizer_hash.h"
# include "sanitizer_common/sanitizer_libc.h"
# include "sanitizer_common/sanitizer_procmaps.h"
@ -48,22 +47,12 @@
# if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
# include <ucontext.h>
extern "C" void *_DYNAMIC;
# elif SANITIZER_NETBSD
# include <link_elf.h>
# include <ucontext.h>
extern Elf_Dyn _DYNAMIC;
# else
# include <link.h>
# include <sys/ucontext.h>
extern ElfW(Dyn) _DYNAMIC[];
# endif
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
// 32-bit mode.
# if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
__FreeBSD_version <= 902001 // v9.2
# define ucontext_t xucontext_t
# endif
typedef enum {
@ -84,11 +73,6 @@ void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool IsSystemHeapAddress(uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC;
}
# if ASAN_PREMAP_SHADOW
uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
@ -109,7 +93,8 @@ uptr FindDynamicShadowStart() {
# endif
return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
/*min_shadow_base_alignment*/ 0, kHighMemEnd,
GetMmapGranularity());
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@ -148,6 +133,11 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
return 0;
# endif
# if SANITIZER_FREEBSD
// Ignore vDSO.
if (internal_strcmp(info->dlpi_name, "[vdso]") == 0)
return 0;
# endif
*name = info->dlpi_name;
return 1;
@ -197,10 +187,7 @@ void AsanCheckIncompatibleRT() {
MemoryMappedSegment segment(filename, sizeof(filename));
while (proc_maps.Next(&segment)) {
if (IsDynamicRTName(segment.filename)) {
Report(
"Your application is linked against "
"incompatible ASan runtimes.\n");
Die();
ReportIncompatibleRT();
}
}
__asan_rt_version = ASAN_RT_VERSION_STATIC;

View File

@ -49,14 +49,10 @@ void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool IsSystemHeapAddress (uptr addr) { return false; }
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
uptr FindDynamicShadowStart() {
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
/*min_shadow_base_alignment*/ 0, kHighMemEnd,
GetMmapGranularity());
}
// No-op. Mac does not support static linkage anyway.
@ -139,11 +135,11 @@ typedef void (*dispatch_mach_handler_function_t)(void *context,
dispatch_mach_reason reason,
dispatch_mach_msg_t message,
mach_error_t error);
#if !defined(MISSING_BLOCKS_SUPPORT)
# if !defined(MISSING_BLOCKS_SUPPORT)
typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
dispatch_mach_msg_t message,
mach_error_t error);
#endif
# endif
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {

View File

@ -25,13 +25,12 @@
# include "sanitizer_common/sanitizer_allocator_checks.h"
# include "sanitizer_common/sanitizer_allocator_dlsym.h"
# include "sanitizer_common/sanitizer_errno.h"
# include "sanitizer_common/sanitizer_tls_get_addr.h"
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan;
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
static bool UseImpl() { return AsanInitIsRunning(); }
static bool UseImpl() { return !TryAsanInitFromRtl(); }
static void OnAllocate(const void *ptr, uptr size) {
# if CAN_SANITIZE_LEAKS
// Suppress leaks from dlerror(). Previously dlsym hack on global array was
@ -65,7 +64,6 @@ INTERCEPTOR(void, cfree, void *ptr) {
INTERCEPTOR(void*, malloc, uptr size) {
if (DlsymAlloc::Use())
return DlsymAlloc::Allocate(size);
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@ -73,7 +71,6 @@ INTERCEPTOR(void*, malloc, uptr size) {
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (DlsymAlloc::Use())
return DlsymAlloc::Callocate(nmemb, size);
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
@ -81,14 +78,13 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
return DlsymAlloc::Realloc(ptr, size);
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
#if SANITIZER_INTERCEPT_REALLOCARRAY
INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) {
ENSURE_ASAN_INITED();
AsanInitFromRtl();
GET_STACK_TRACE_MALLOC;
return asan_reallocarray(ptr, nmemb, size, &stack);
}
@ -102,9 +98,7 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
DTLS_on_libc_memalign(res, size);
return res;
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_MEMALIGN
@ -188,11 +182,11 @@ struct MallocDebugL {
void* (*valloc)(uptr size);
};
ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = {
alignas(32) const MallocDebugK asan_malloc_dispatch_k = {
WRAP(malloc), WRAP(free), WRAP(calloc),
WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)};
ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = {
alignas(32) const MallocDebugL asan_malloc_dispatch_l = {
WRAP(calloc), WRAP(free), WRAP(mallinfo),
WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign),
WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc),

View File

@ -22,7 +22,10 @@
using namespace __asan;
#define COMMON_MALLOC_ZONE_NAME "asan"
#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
# define COMMON_MALLOC_ENTER() \
do { \
AsanInitFromRtl(); \
} while (false)
# define COMMON_MALLOC_SANITIZER_INITIALIZED AsanInited()
# define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
# define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()

View File

@ -58,97 +58,69 @@ using namespace __asan;
// MD: Memory allocation functions are defined in the CRT .dll,
// so we have to intercept them before they are called for the first time.
#if ASAN_DYNAMIC
# define ALLOCATION_FUNCTION_ATTRIBUTE
#else
# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#endif
extern "C" {
ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
__declspec(noinline) size_t _msize(void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize_base(void *ptr) {
return _msize(ptr);
}
__declspec(noinline) size_t _msize_base(void *ptr) { return _msize(ptr); }
ALLOCATION_FUNCTION_ATTRIBUTE
void free(void *ptr) {
__declspec(noinline) void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void _free_dbg(void *ptr, int) {
free(ptr);
}
__declspec(noinline) void _free_dbg(void *ptr, int) { free(ptr); }
ALLOCATION_FUNCTION_ATTRIBUTE
void _free_base(void *ptr) {
free(ptr);
}
__declspec(noinline) void _free_base(void *ptr) { free(ptr); }
ALLOCATION_FUNCTION_ATTRIBUTE
void *malloc(size_t size) {
__declspec(noinline) void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_base(size_t size) {
__declspec(noinline) void *_malloc_base(size_t size) { return malloc(size); }
__declspec(noinline) void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
__declspec(noinline) void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_base(size_t nmemb, size_t size) {
__declspec(noinline) void *_calloc_base(size_t nmemb, size_t size) {
return calloc(nmemb, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
__declspec(noinline) void *_calloc_dbg(size_t nmemb, size_t size, int,
const char *, int) {
return calloc(nmemb, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
__declspec(noinline) void *_calloc_impl(size_t nmemb, size_t size,
int *errno_tmp) {
return calloc(nmemb, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
__declspec(noinline) void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
__declspec(noinline) void *_realloc_dbg(void *ptr, size_t size, int) {
UNREACHABLE("_realloc_dbg should not exist!");
return 0;
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_base(void *ptr, size_t size) {
__declspec(noinline) void *_realloc_base(void *ptr, size_t size) {
return realloc(ptr, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc(void *p, size_t n, size_t elem_size) {
__declspec(noinline) void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
const size_t size = n * elem_size;
@ -166,23 +138,41 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
return new_alloc;
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc_base(void *p, size_t n, size_t elem_size) {
__declspec(noinline) void *_recalloc_base(void *p, size_t n, size_t elem_size) {
return _recalloc(p, n, elem_size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_expand(void *memblock, size_t size) {
__declspec(noinline) void *_expand(void *memblock, size_t size) {
// _expand is used in realloc-like functions to resize the buffer if possible.
// We don't want memory to stand still while resizing buffers, so return 0.
return 0;
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_expand_dbg(void *memblock, size_t size) {
__declspec(noinline) void *_expand_dbg(void *memblock, size_t size) {
return _expand(memblock, size);
}
__declspec(dllexport) size_t __cdecl __asan_msize(void *ptr) {
return _msize(ptr);
}
__declspec(dllexport) void __cdecl __asan_free(void *const ptr) { free(ptr); }
__declspec(dllexport) void *__cdecl __asan_malloc(const size_t size) {
return malloc(size);
}
__declspec(dllexport) void *__cdecl __asan_calloc(const size_t nmemb,
const size_t size) {
return calloc(nmemb, size);
}
__declspec(dllexport) void *__cdecl __asan_realloc(void *const ptr,
const size_t size) {
return realloc(ptr, size);
}
__declspec(dllexport) void *__cdecl __asan_recalloc(void *const ptr,
const size_t nmemb,
const size_t size) {
return _recalloc(ptr, nmemb, size);
}
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
@ -487,7 +477,6 @@ static void TryToOverrideFunction(const char *fname, uptr new_func) {
}
void ReplaceSystemMalloc() {
#if defined(ASAN_DYNAMIC)
TryToOverrideFunction("free", (uptr)free);
TryToOverrideFunction("_free_base", (uptr)free);
TryToOverrideFunction("malloc", (uptr)malloc);
@ -543,8 +532,6 @@ void ReplaceSystemMalloc() {
// allocation API will be directed to ASan's heap. We don't currently
// intercept all calls to HeapAlloc. If we did, we would have to check on
// HeapFree whether the pointer came from ASan of from the system.
#endif // defined(ASAN_DYNAMIC)
}
} // namespace __asan

View File

@ -0,0 +1,229 @@
//===-- asan_malloc_win_thunk.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific malloc interception.
// This is included statically for projects statically linking
// with the C Runtime (/MT, /MTd) in order to provide ASAN-aware
// versions of the C allocation functions.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_STATIC_RUNTIME_THUNK
# include "..\sanitizer_common\sanitizer_allocator_interface.h"
// #include "asan_win_thunk_common.h"
// Preserve stack traces with noinline.
# define STATIC_MALLOC_INTERFACE __declspec(noinline)
extern "C" {
__declspec(dllimport) size_t __cdecl __asan_msize(void *ptr);
__declspec(dllimport) void __cdecl __asan_free(void *const ptr);
__declspec(dllimport) void *__cdecl __asan_malloc(const size_t size);
__declspec(dllimport) void *__cdecl __asan_calloc(const size_t nmemb,
const size_t size);
__declspec(dllimport) void *__cdecl __asan_realloc(void *const ptr,
const size_t size);
__declspec(dllimport) void *__cdecl __asan_recalloc(void *const ptr,
const size_t nmemb,
const size_t size);
// Avoid tailcall optimization to preserve stack frames.
# pragma optimize("", off)
// _msize
STATIC_MALLOC_INTERFACE size_t _msize(void *ptr) { return __asan_msize(ptr); }
STATIC_MALLOC_INTERFACE size_t _msize_base(void *ptr) {
return __asan_msize(ptr);
}
STATIC_MALLOC_INTERFACE size_t _msize_dbg(void *ptr) {
return __asan_msize(ptr);
}
// free
STATIC_MALLOC_INTERFACE void free(void *const ptr) { return __asan_free(ptr); }
STATIC_MALLOC_INTERFACE void _free_base(void *const ptr) {
return __asan_free(ptr);
}
STATIC_MALLOC_INTERFACE void _free_dbg(void *const ptr) {
return __asan_free(ptr);
}
// malloc
STATIC_MALLOC_INTERFACE void *malloc(const size_t size) {
return __asan_malloc(size);
}
STATIC_MALLOC_INTERFACE void *_malloc_base(const size_t size) {
return __asan_malloc(size);
}
STATIC_MALLOC_INTERFACE void *_malloc_dbg(const size_t size) {
return __asan_malloc(size);
}
// calloc
STATIC_MALLOC_INTERFACE void *calloc(const size_t nmemb, const size_t size) {
return __asan_calloc(nmemb, size);
}
STATIC_MALLOC_INTERFACE void *_calloc_base(const size_t nmemb,
const size_t size) {
return __asan_calloc(nmemb, size);
}
STATIC_MALLOC_INTERFACE void *_calloc_impl(const size_t nmemb,
const size_t size,
int *const errno_tmp) {
// Provided by legacy msvcrt.
(void)errno_tmp;
return __asan_calloc(nmemb, size);
}
STATIC_MALLOC_INTERFACE void *_calloc_dbg(const size_t nmemb, const size_t size,
int, const char *, int) {
return __asan_calloc(nmemb, size);
}
// realloc
STATIC_MALLOC_INTERFACE void *realloc(void *const ptr, const size_t size) {
return __asan_realloc(ptr, size);
}
STATIC_MALLOC_INTERFACE void *_realloc_base(void *const ptr,
const size_t size) {
return __asan_realloc(ptr, size);
}
STATIC_MALLOC_INTERFACE void *_realloc_dbg(void *const ptr, const size_t size,
int, const char *, int) {
return __asan_realloc(ptr, size);
}
// recalloc
STATIC_MALLOC_INTERFACE void *_recalloc(void *const ptr, const size_t nmemb,
const size_t size) {
return __asan_recalloc(ptr, nmemb, size);
}
STATIC_MALLOC_INTERFACE void *_recalloc_base(void *const ptr,
const size_t nmemb,
const size_t size) {
return __asan_recalloc(ptr, nmemb, size);
}
STATIC_MALLOC_INTERFACE void *_recalloc_dbg(void *const ptr, const size_t nmemb,
const size_t size, int,
const char *, int) {
return __asan_recalloc(ptr, nmemb, size);
}
// expand
STATIC_MALLOC_INTERFACE void *_expand(void *, size_t) {
// _expand is used in realloc-like functions to resize the buffer if possible.
// We don't want memory to stand still while resizing buffers, so return
// nullptr.
return nullptr;
}
STATIC_MALLOC_INTERFACE void *_expand_dbg(void *, size_t, int, const char *,
int) {
return nullptr;
}
// We need to provide symbols for all the debug CRT functions if we decide to
// provide any. Most of these functions make no sense under ASan and so we
// make them no-ops.
long _CrtSetBreakAlloc(long const) { return ~0; }
void _CrtSetDbgBlockType(void *const, int const) { return; }
typedef int(__cdecl *CRT_ALLOC_HOOK)(int, void *, size_t, int, long,
const unsigned char *, int);
CRT_ALLOC_HOOK _CrtGetAllocHook() { return nullptr; }
CRT_ALLOC_HOOK _CrtSetAllocHook(CRT_ALLOC_HOOK const hook) { return hook; }
int _CrtCheckMemory() { return 1; }
int _CrtSetDbgFlag(int const new_bits) { return new_bits; }
typedef void (*CrtDoForAllClientObjectsCallback)(void *, void *);
void _CrtDoForAllClientObjects(CrtDoForAllClientObjectsCallback const,
void *const) {
return;
}
int _CrtIsValidPointer(void const *const p, unsigned int const, int const) {
return p != nullptr;
}
int _CrtIsValidHeapPointer(void const *const block) {
if (!block) {
return 0;
}
return __sanitizer_get_ownership(block);
}
int _CrtIsMemoryBlock(void const *const, unsigned const, long *const,
char **const, int *const) {
return 0;
}
int _CrtReportBlockType(void const *const) { return -1; }
typedef void(__cdecl *CRT_DUMP_CLIENT)(void *, size_t);
CRT_DUMP_CLIENT _CrtGetDumpClient() { return nullptr; }
CRT_DUMP_CLIENT _CrtSetDumpClient(CRT_DUMP_CLIENT new_client) {
return new_client;
}
void _CrtMemCheckpoint(void *const) { return; }
int _CrtMemDifference(void *const, void const *const, void const *const) {
return 0;
}
void _CrtMemDumpAllObjectsSince(void const *const) { return; }
int _CrtDumpMemoryLeaks() { return 0; }
void _CrtMemDumpStatistics(void const *const) { return; }
int _crtDbgFlag{0};
long _crtBreakAlloc{-1};
CRT_DUMP_CLIENT _pfnDumpClient{nullptr};
int *__p__crtDbgFlag() { return &_crtDbgFlag; }
long *__p__crtBreakAlloc() { return &_crtBreakAlloc; }
// TODO: These were added upstream but conflict with definitions in ucrtbased.
// int _CrtDbgReport(int, const char *, int, const char *, const char *, ...) {
// ShowStatsAndAbort();
// }
//
// int _CrtDbgReportW(int reportType, const wchar_t *, int, const wchar_t *,
// const wchar_t *, ...) {
// ShowStatsAndAbort();
// }
//
// int _CrtSetReportMode(int, int) { return 0; }
} // extern "C"
#endif // SANITIZER_STATIC_RUNTIME_THUNK
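
For reference, a small program (illustrative only, not part of this commit) exercising the thunked entry points above; when built with a static CRT (/MT) and ASan, these CRT calls land in `__asan_msize`/`__asan_recalloc`/`__asan_free` via the wrappers in this file.

#include <cstdio>
#include <cstdlib>
#include <malloc.h>  // _msize, _recalloc in the Windows CRTs

int main() {
  void *p = calloc(4, 8);            // -> __asan_calloc
  printf("size: %zu\n", _msize(p));  // -> __asan_msize
  p = _recalloc(p, 8, 8);            // -> __asan_recalloc; new bytes zeroed
  printf("size: %zu\n", _msize(p));
  free(p);                           // -> __asan_free
  return 0;
}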

View File

@ -72,7 +72,10 @@
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
// Default Linux/RISCV64 Sv39 mapping:
// Default Linux/RISCV64 Sv39 mapping with SHADOW_OFFSET == 0xd55550000;
// (the exact location of SHADOW_OFFSET may vary depending on the dynamic
// by FindDynamicShadowStart).
//
// || `[0x1555550000, 0x3fffffffff]` || HighMem ||
// || `[0x0fffffa000, 0x1555555fff]` || HighShadow ||
// || `[0x0effffa000, 0x0fffff9fff]` || ShadowGap ||
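
The layout above follows the usual ASan formula, shadow = (addr >> SHADOW_SCALE) + SHADOW_OFFSET. A standalone sketch with the Sv39 value quoted above (illustrative only; with the dynamic offset, the runtime may probe a different base at startup):

#include <cstdint>
#include <cstdio>

constexpr uint64_t kShadowScale = 3;                // ASAN_SHADOW_SCALE
constexpr uint64_t kShadowOffset = 0xd55550000ULL;  // dynamically probed default

uint64_t MemToShadow(uint64_t addr) {
  return (addr >> kShadowScale) + kShadowOffset;
}

int main() {
  uint64_t addr = 0x1555550000ULL;  // start of HighMem in the layout above
  printf("mem %#llx -> shadow %#llx\n", (unsigned long long)addr,
         (unsigned long long)MemToShadow(addr));
}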
@ -186,11 +189,11 @@
# elif SANITIZER_FREEBSD && defined(__aarch64__)
# define ASAN_SHADOW_OFFSET_CONST 0x0000800000000000
# elif SANITIZER_RISCV64
# define ASAN_SHADOW_OFFSET_CONST 0x0000000d55550000
# define ASAN_SHADOW_OFFSET_DYNAMIC
# elif defined(__aarch64__)
# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
# elif defined(__powerpc64__)
# define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__s390x__)
# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
# elif SANITIZER_FREEBSD

View File

@ -48,15 +48,6 @@ COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
using namespace __asan;
// FreeBSD prior v9.2 have wrong definition of 'size_t'.
// http://svnweb.freebsd.org/base?view=revision&revision=232261
#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
#include <sys/param.h>
#if __FreeBSD_version <= 902001 // v9.2
#define size_t unsigned
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.

View File

@ -16,6 +16,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
@ -410,7 +411,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *new_mid_p) {
if (!flags()->detect_container_overflow)
return;
VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
VPrintf(3, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
uptr storage_beg = reinterpret_cast<uptr>(beg_p);
uptr storage_end = reinterpret_cast<uptr>(end_p);
@ -479,7 +480,7 @@ void __sanitizer_annotate_double_ended_contiguous_container(
if (!flags()->detect_container_overflow)
return;
VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
VPrintf(3, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
storage_end_p, old_container_beg_p, old_container_end_p,
new_container_beg_p, new_container_end_p);
@ -576,6 +577,185 @@ void __sanitizer_annotate_double_ended_contiguous_container(
}
}
// Marks the specified number of bytes in a granule as accessible or poisons
// the whole granule with the kAsanContiguousContainerOOBMagic value.
static void SetContainerGranule(uptr ptr, u8 n) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
u8 s = (n == granularity) ? 0 : (n ? n : kAsanContiguousContainerOOBMagic);
*(u8 *)MemToShadow(ptr) = s;
}
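
To make the encoding concrete: a granule's shadow byte is 0 when all ASAN_SHADOW_GRANULARITY bytes are addressable, 1..granularity-1 when only a prefix is, and the container-overflow magic when none are. A standalone sketch of the same rule (assuming the common 8-byte granularity and 0xfc for kAsanContiguousContainerOOBMagic):

#include <cstdint>
#include <cstdio>

constexpr uint8_t kGranularity = 8;  // ASAN_SHADOW_GRANULARITY
constexpr uint8_t kOOBMagic = 0xfc;  // kAsanContiguousContainerOOBMagic

uint8_t ShadowValueFor(uint8_t accessible_bytes) {
  if (accessible_bytes == kGranularity)
    return 0;  // fully addressable granule
  return accessible_bytes ? accessible_bytes : kOOBMagic;  // prefix or poisoned
}

int main() {
  for (uint8_t n = 0; n <= kGranularity; n++)
    printf("n=%u -> shadow=0x%02x\n", (unsigned)n, (unsigned)ShadowValueFor(n));
}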
// Performs a byte-by-byte copy of ASan annotations (shadow memory values).
// Result may be different due to ASan limitations, but result cannot lead
// to false positives (more memory than requested may get unpoisoned).
static void SlowCopyContainerAnnotations(uptr src_beg, uptr src_end,
uptr dst_beg, uptr dst_end) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
uptr dst_end_down = RoundDownTo(dst_end, granularity);
uptr src_ptr = src_beg;
uptr dst_ptr = dst_beg;
while (dst_ptr < dst_end) {
uptr granule_beg = RoundDownTo(dst_ptr, granularity);
uptr granule_end = granule_beg + granularity;
uptr unpoisoned_bytes = 0;
uptr end = Min(granule_end, dst_end);
for (; dst_ptr != end; ++dst_ptr, ++src_ptr)
if (!AddressIsPoisoned(src_ptr))
unpoisoned_bytes = dst_ptr - granule_beg + 1;
if (dst_ptr == dst_end && dst_end != dst_end_down &&
!AddressIsPoisoned(dst_end))
continue;
if (unpoisoned_bytes != 0 || granule_beg >= dst_beg)
SetContainerGranule(granule_beg, unpoisoned_bytes);
else if (!AddressIsPoisoned(dst_beg))
SetContainerGranule(granule_beg, dst_beg - granule_beg);
}
}
// Performs a byte-by-byte copy of ASan annotations (shadow memory values),
// going through bytes in reversed order, but not reversing annotations.
// Result may be different due to ASan limitations, but result cannot lead
// to false positives (more memory than requested may get unpoisoned).
static void SlowReversedCopyContainerAnnotations(uptr src_beg, uptr src_end,
uptr dst_beg, uptr dst_end) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
uptr dst_end_down = RoundDownTo(dst_end, granularity);
uptr src_ptr = src_end;
uptr dst_ptr = dst_end;
while (dst_ptr > dst_beg) {
uptr granule_beg = RoundDownTo(dst_ptr - 1, granularity);
uptr unpoisoned_bytes = 0;
uptr end = Max(granule_beg, dst_beg);
for (; dst_ptr != end; --dst_ptr, --src_ptr)
if (unpoisoned_bytes == 0 && !AddressIsPoisoned(src_ptr - 1))
unpoisoned_bytes = dst_ptr - granule_beg;
if (dst_ptr >= dst_end_down && !AddressIsPoisoned(dst_end))
continue;
if (granule_beg == dst_ptr || unpoisoned_bytes != 0)
SetContainerGranule(granule_beg, unpoisoned_bytes);
else if (!AddressIsPoisoned(dst_beg))
SetContainerGranule(granule_beg, dst_beg - granule_beg);
}
}
// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container.
// Should not be used stand-alone.
static void CopyContainerFirstGranuleAnnotation(uptr src_beg, uptr dst_beg) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
// First granule
uptr src_beg_down = RoundDownTo(src_beg, granularity);
uptr dst_beg_down = RoundDownTo(dst_beg, granularity);
if (dst_beg_down == dst_beg)
return;
if (!AddressIsPoisoned(src_beg))
*(u8 *)MemToShadow(dst_beg_down) = *(u8 *)MemToShadow(src_beg_down);
else if (!AddressIsPoisoned(dst_beg))
SetContainerGranule(dst_beg_down, dst_beg - dst_beg_down);
}
// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container.
// Should not be used stand-alone.
static void CopyContainerLastGranuleAnnotation(uptr src_end, uptr dst_end) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
// Last granule
uptr src_end_down = RoundDownTo(src_end, granularity);
uptr dst_end_down = RoundDownTo(dst_end, granularity);
if (dst_end_down == dst_end || !AddressIsPoisoned(dst_end))
return;
if (AddressIsPoisoned(src_end))
*(u8 *)MemToShadow(dst_end_down) = *(u8 *)MemToShadow(src_end_down);
else
SetContainerGranule(dst_end_down, src_end - src_end_down);
}
// This function copies ASan memory annotations (poisoned/unpoisoned states)
// from one buffer to another.
// Its main purpose is to help with relocating trivially relocatable objects
// whose memory may be poisoned, without calling the copy constructor.
// However, it does not move the memory contents themselves, only annotations.
// If the buffers aren't aligned (the distance between buffers isn't
// granule-aligned)
//     src_beg % granularity != dst_beg % granularity
// the function handles this by going byte by byte, slowing down performance.
// The old buffer annotations are not removed. If necessary,
// the user can unpoison the old buffer with __asan_unpoison_memory_region.
void __sanitizer_copy_contiguous_container_annotations(const void *src_beg_p,
const void *src_end_p,
const void *dst_beg_p,
const void *dst_end_p) {
if (!flags()->detect_container_overflow)
return;
VPrintf(3, "contiguous_container_src: %p %p\n", src_beg_p, src_end_p);
VPrintf(3, "contiguous_container_dst: %p %p\n", dst_beg_p, dst_end_p);
uptr src_beg = reinterpret_cast<uptr>(src_beg_p);
uptr src_end = reinterpret_cast<uptr>(src_end_p);
uptr dst_beg = reinterpret_cast<uptr>(dst_beg_p);
uptr dst_end = reinterpret_cast<uptr>(dst_end_p);
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
if (src_beg > src_end || (dst_end - dst_beg) != (src_end - src_beg)) {
GET_STACK_TRACE_FATAL_HERE;
ReportBadParamsToCopyContiguousContainerAnnotations(
src_beg, src_end, dst_beg, dst_end, &stack);
}
if (src_beg == src_end || src_beg == dst_beg)
return;
// Due to support for overlapping buffers, we may have to copy elements
// in reversed order, when the destination buffer starts in the middle of
// the source buffer (or shares its first granule with it).
//
// When the buffers are not granule-aligned (or, to be specific, the distance
// between them is not), annotations have to be copied byte by byte.
//
// The only remaining edge cases involve edge granules,
// when the container starts or ends within a granule.
uptr src_beg_up = RoundUpTo(src_beg, granularity);
uptr src_end_up = RoundUpTo(src_end, granularity);
bool copy_in_reversed_order = src_beg < dst_beg && dst_beg <= src_end_up;
if (src_beg % granularity != dst_beg % granularity ||
RoundDownTo(dst_end - 1, granularity) <= dst_beg) {
if (copy_in_reversed_order)
SlowReversedCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
else
SlowCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
return;
}
// As buffers are granule-aligned, we can just copy annotations of granules
// from the middle.
uptr dst_beg_up = RoundUpTo(dst_beg, granularity);
uptr dst_end_down = RoundDownTo(dst_end, granularity);
if (copy_in_reversed_order)
CopyContainerLastGranuleAnnotation(src_end, dst_end);
else
CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
if (dst_beg_up < dst_end_down) {
internal_memmove((u8 *)MemToShadow(dst_beg_up),
(u8 *)MemToShadow(src_beg_up),
(dst_end_down - dst_beg_up) / granularity);
}
if (copy_in_reversed_order)
CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
else
CopyContainerLastGranuleAnnotation(src_end, dst_end);
}
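
A hypothetical relocation helper showing the intended call pattern (sketch under the assumption that the entry point is declared in <sanitizer/common_interface_defs.h> like the other container annotation hooks; `RelocateRaw`, `live`, and `cap` are names invented here): copy the valid bytes first, then mirror the poisoning onto the new buffer.

#include <sanitizer/common_interface_defs.h>
#include <cstddef>
#include <cstring>

// live = number of valid (unpoisoned) bytes, cap = full buffer capacity.
void RelocateRaw(char *src, char *dst, size_t live, size_t cap) {
  memcpy(dst, src, live);  // move only the valid prefix; no constructors run
  // Mirror the poisoned/unpoisoned state onto the new buffer. The old
  // buffer's annotations stay in place; unpoison them explicitly if the
  // buffer is returned to a pool:
  //   __asan_unpoison_memory_region(src, cap);
  __sanitizer_copy_contiguous_container_annotations(src, src + cap,
                                                    dst, dst + cap);
}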
static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
CHECK_LE(begin, end);
constexpr uptr kMaxRangeToCheck = 32;

View File

@ -59,10 +59,10 @@ bool PlatformUnpoisonStacks() {
// Since we're on the signal alternate stack, we cannot find the DEFAULT
// stack bottom using a local variable.
uptr default_bottom, tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
&tls_size);
UnpoisonStack(default_bottom, default_bottom + stack_size, "default");
uptr stack_begin, stack_end, tls_begin, tls_end;
GetThreadStackAndTls(/*main=*/false, &stack_begin, &stack_end, &tls_begin,
&tls_end);
UnpoisonStack(stack_begin, stack_end, "default");
return true;
}
@ -146,7 +146,44 @@ void PlatformTSDDtor(void *tsd) {
# endif
AsanThread::TSDDtor(tsd);
}
#endif
# endif
static void BeforeFork() {
VReport(2, "BeforeFork tid: %llu\n", GetTid());
if (CAN_SANITIZE_LEAKS) {
__lsan::LockGlobal();
}
// `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and lock
// the stuff we need.
__lsan::LockThreads();
__lsan::LockAllocator();
StackDepotLockBeforeFork();
}
static void AfterFork(bool fork_child) {
StackDepotUnlockAfterFork(fork_child);
// `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and unlock
// the stuff we need.
__lsan::UnlockAllocator();
__lsan::UnlockThreads();
if (CAN_SANITIZE_LEAKS) {
__lsan::UnlockGlobal();
}
VReport(2, "AfterFork tid: %llu\n", GetTid());
}
void InstallAtForkHandler() {
# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE || \
(SANITIZER_LINUX && SANITIZER_SPARC)
// While other Linux targets use clone in internal_fork which doesn't
// trigger pthread_atfork handlers, Linux/sparc64 uses __fork, causing a
// hang.
return; // FIXME: Implement FutexWait.
# endif
pthread_atfork(
&BeforeFork, []() { AfterFork(/* fork_child= */ false); },
[]() { AfterFork(/* fork_child= */ true); });
}
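
The classic pattern the handlers above implement — acquire every lock the child might need before fork(), release in both processes afterwards — in miniature (a generic sketch, not ASan code):

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;

// Take the lock before fork(), so the child never snapshots it mid-update.
static void Before() { pthread_mutex_lock(&g_mu); }
static void AfterParent() { pthread_mutex_unlock(&g_mu); }
static void AfterChild() { pthread_mutex_unlock(&g_mu); }

int main() {
  pthread_atfork(Before, AfterParent, AfterChild);
  if (fork() == 0)
    _exit(0);  // the child can safely lock g_mu; no thread holds it
  return 0;
}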
void InstallAtExitCheckLeaks() {
if (CAN_SANITIZE_LEAKS) {

View File

@ -15,10 +15,8 @@
using namespace __asan;
#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
// This code linked into the main executable when -fsanitize=address is in
// the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init;
// This section is linked into the main executable when -fsanitize=address is
// specified to perform initialization at a very early stage.
__attribute__((section(".preinit_array"), used)) static auto preinit =
__asan_init;
#endif
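
For comparison, the same registration mechanism in user code (an illustrative sketch; on glibc, ld.so runs .preinit_array entries in the main executable before the initializers of every loaded module):

#include <cstdio>

static void early_init() { puts("before any C++ constructor"); }

// Placing a function pointer in .preinit_array registers it for very early
// initialization, exactly as the runtime does with __asan_init above.
__attribute__((section(".preinit_array"), used))
static void (*preinit_entry)(void) = early_init;

int main() { return 0; }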

View File

@ -33,7 +33,8 @@ uptr PremapShadowSize() {
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
/*min_shadow_base_alignment*/ 0, kHighMemEnd,
GetMmapGranularity());
}
bool PremapShadowFailed() {

View File

@ -24,6 +24,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@ -32,8 +33,11 @@ namespace __asan {
// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
using ErrorMessageBuffer = InternalMmapVectorNoCtor<char, true>;
alignas(
alignof(ErrorMessageBuffer)) static char error_message_buffer_placeholder
[sizeof(ErrorMessageBuffer)];
static ErrorMessageBuffer *error_message_buffer = nullptr;
static Mutex error_message_buf_mutex;
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
@ -42,17 +46,14 @@ void AppendToErrorMessageBuffer(const char *buffer) {
Lock l(&error_message_buf_mutex);
if (!error_message_buffer) {
error_message_buffer =
(char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
error_message_buffer_pos = 0;
new (error_message_buffer_placeholder) ErrorMessageBuffer();
error_message_buffer->Initialize(kErrorMessageBufferSize);
}
uptr length = internal_strlen(buffer);
RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
internal_strncpy(error_message_buffer + error_message_buffer_pos,
buffer, remaining);
error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message.
error_message_buffer_pos += Min(remaining, length);
uptr error_message_buffer_len = error_message_buffer->size();
uptr buffer_len = internal_strlen(buffer);
error_message_buffer->resize(error_message_buffer_len + buffer_len);
internal_memcpy(error_message_buffer->data() + error_message_buffer_len,
buffer, buffer_len);
}
// ---------------------- Helper functions ----------------------- {{{1
@ -158,14 +159,14 @@ class ScopedInErrorReport {
// Copy the message buffer so that we could start logging without holding a
// lock that gets acquired during printing.
InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
InternalScopedString buffer_copy;
{
Lock l(&error_message_buf_mutex);
internal_memcpy(buffer_copy.data(),
error_message_buffer, kErrorMessageBufferSize);
error_message_buffer->push_back('\0');
buffer_copy.Append(error_message_buffer->data());
// Clear error_message_buffer so that if we find other errors
// we don't re-log this error.
error_message_buffer_pos = 0;
error_message_buffer->clear();
}
LogFullErrorReport(buffer_copy.data());
@ -366,6 +367,16 @@ void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
in_report.ReportError(error);
}
void ReportBadParamsToCopyContiguousContainerAnnotations(
uptr old_storage_beg, uptr old_storage_end, uptr new_storage_beg,
uptr new_storage_end, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
ErrorBadParamsToCopyContiguousContainerAnnotations error(
GetCurrentTidOrInvalid(), stack, old_storage_beg, old_storage_end,
new_storage_beg, new_storage_end);
in_report.ReportError(error);
}
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;

View File

@ -88,6 +88,9 @@ void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
uptr storage_beg, uptr storage_end, uptr old_container_beg,
uptr old_container_end, uptr new_container_beg, uptr new_container_end,
BufferedStackTrace *stack);
void ReportBadParamsToCopyContiguousContainerAnnotations(
uptr old_storage_beg, uptr old_storage_end, uptr new_storage_beg,
uptr new_storage_end, BufferedStackTrace *stack);
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2);

View File

@ -71,16 +71,16 @@ static void CheckUnwind() {
}
// -------------------------- Globals --------------------- {{{1
static int asan_inited = 0;
static int asan_init_is_running = 0;
static StaticSpinMutex asan_inited_mutex;
static atomic_uint8_t asan_inited = {0};
void SetAsanInited(u32 val) { asan_inited = val; }
static void SetAsanInited() {
atomic_store(&asan_inited, 1, memory_order_release);
}
void SetAsanInitIsRunning(u32 val) { asan_init_is_running = val; }
bool AsanInited() { return asan_inited == 1; }
bool AsanInitIsRunning() { return asan_init_is_running == 1; }
bool AsanInited() {
return atomic_load(&asan_inited, memory_order_acquire) == 1;
}
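
The equivalent of this flag in portable C++: the release store publishes everything initialization wrote, and the paired acquire load makes those writes visible to the reader (a generic sketch):

#include <atomic>

static std::atomic<bool> inited{false};
static int runtime_state;  // stands in for everything init sets up

void Init() {
  runtime_state = 42;                             // all initialization writes...
  inited.store(true, std::memory_order_release);  // ...are published here
}

bool IsInited() {
  // Pairs with the release store: observing true makes init writes visible.
  return inited.load(std::memory_order_acquire);
}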
bool replace_intrin_cached;
@ -382,7 +382,7 @@ void PrintAddressSpaceLayout() {
Printf("SHADOW_SCALE: %d\n", (int)ASAN_SHADOW_SCALE);
Printf("SHADOW_GRANULARITY: %d\n", (int)ASAN_SHADOW_GRANULARITY);
Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)ASAN_SHADOW_OFFSET);
Printf("SHADOW_OFFSET: %p\n", (void *)ASAN_SHADOW_OFFSET);
CHECK(ASAN_SHADOW_SCALE >= 3 && ASAN_SHADOW_SCALE <= 7);
if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd &&
@ -390,12 +390,10 @@ void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
static void AsanInitInternal() {
static bool AsanInitInternal() {
if (LIKELY(AsanInited()))
return;
return true;
SanitizerToolName = "AddressSanitizer";
CHECK(!AsanInitIsRunning() && "ASan init calls itself!");
SetAsanInitIsRunning(1);
CacheBinaryName();
@ -408,11 +406,12 @@ static void AsanInitInternal() {
// Stop performing init at this point if we are being loaded via
// dlopen() and the platform supports it.
if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) {
SetAsanInitIsRunning(0);
VReport(1, "AddressSanitizer init is being performed for dlopen().\n");
return;
return false;
}
// Make sure we are not statically linked.
__interception::DoesNotSupportStaticLinking();
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
AvoidCVE_2016_2143();
@ -424,9 +423,6 @@ static void AsanInitInternal() {
InitializeHighMemEnd();
// Make sure we are not statically linked.
AsanDoesNotSupportStaticLinkage();
// Install tool-specific callbacks in sanitizer_common.
AddDieCallback(AsanDie);
SetCheckUnwindCallback(CheckUnwind);
@ -470,8 +466,7 @@ static void AsanInitInternal() {
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
replace_intrin_cached = flags()->replace_intrin;
SetAsanInited(1);
SetAsanInitIsRunning(0);
SetAsanInited();
if (flags()->atexit)
Atexit(asan_atexit);
@ -483,9 +478,6 @@ static void AsanInitInternal() {
if (flags()->start_deactivated)
AsanDeactivate();
// interceptors
InitTlsSize();
// Create main thread.
AsanThread *main_thread = CreateMainThread();
CHECK_EQ(0, main_thread->tid());
@ -497,6 +489,8 @@ static void AsanInitInternal() {
InstallAtExitCheckLeaks();
}
InstallAtForkHandler();
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
#endif
@ -515,14 +509,29 @@ static void AsanInitInternal() {
VReport(1, "AddressSanitizer Init done\n");
WaitForDebugger(flags()->sleep_after_init, "after init");
return true;
}
// Initialize as requested from some part of the ASan runtime library
// (interceptors, allocator, etc.).
void AsanInitFromRtl() {
if (LIKELY(AsanInited()))
return;
SpinMutexLock lock(&asan_inited_mutex);
AsanInitInternal();
}
bool TryAsanInitFromRtl() {
if (LIKELY(AsanInited()))
return true;
if (!asan_inited_mutex.TryLock())
return false;
bool result = AsanInitInternal();
asan_inited_mutex.Unlock();
return result;
}
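
The same structure in standard C++ — a fast path on the flag, a blocking entry point, and a non-blocking one that backs off if another thread is mid-initialization (a generic sketch of the pattern, not the runtime's code):

#include <atomic>
#include <mutex>

static std::atomic<bool> inited{false};
static std::mutex init_mu;

static bool InitBody() {  // caller must hold init_mu
  if (!inited.load(std::memory_order_acquire)) {
    // ... set up state ...
    inited.store(true, std::memory_order_release);
  }
  return true;
}

bool InitBlocking() {
  if (inited.load(std::memory_order_acquire))
    return true;
  std::lock_guard<std::mutex> l(init_mu);
  return InitBody();
}

bool TryInit() {  // never blocks: bails out if init is already in flight
  if (inited.load(std::memory_order_acquire))
    return true;
  if (!init_mu.try_lock())
    return false;
  bool ok = InitBody();
  init_mu.unlock();
  return ok;
}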
#if ASAN_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
// (and thus normal initializers from .preinit_array or modules haven't run).
@ -568,10 +577,8 @@ static void UnpoisonDefaultStack() {
} else {
CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds.
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
&tls_size);
top = bottom + stack_size;
uptr tls_begin, tls_end;
GetThreadStackAndTls(/*main=*/false, &bottom, &top, &tls_begin, &tls_end);
}
UnpoisonStack(bottom, top, "default");
@ -593,7 +600,7 @@ static void UnpoisonFakeStack() {
using namespace __asan;
void NOINLINE __asan_handle_no_return() {
if (AsanInitIsRunning())
if (UNLIKELY(!AsanInited()))
return;
if (!PlatformUnpoisonStacks())
@ -623,7 +630,7 @@ void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
// We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() {
AsanActivate();
AsanInitInternal();
AsanInitFromRtl();
}
void __asan_version_mismatch_check() {

View File

@ -27,7 +27,12 @@ FNAME(reg, op, s, i): ;\
#define ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, s) \
mov %##reg,%r10 ;\
shr $0x3,%r10 ;\
.if ASAN_SHADOW_OFFSET_CONST < 0x80000000 ;\
movsbl ASAN_SHADOW_OFFSET_CONST(%r10),%r10d ;\
.else ;\
movabsq $ASAN_SHADOW_OFFSET_CONST,%r11 ;\
movsbl (%r10,%r11),%r10d ;\
.endif ;\
test %r10d,%r10d ;\
jne CLABEL(reg, op, s, add) ;\
RLABEL(reg, op, s, add): ;\
@ -84,7 +89,12 @@ ENDF
#define ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, s, c) \
mov %##reg,%r10 ;\
shr $0x3,%r10 ;\
.if ASAN_SHADOW_OFFSET_CONST < 0x80000000 ;\
##c $0x0,ASAN_SHADOW_OFFSET_CONST(%r10) ;\
.else ;\
movabsq $ASAN_SHADOW_OFFSET_CONST,%r11 ;\
##c $0x0,(%r10,%r11) ;\
.endif ;\
jne FLABEL(reg, op, s, add) ;\
retq ;\

View File

@ -20,7 +20,7 @@
namespace __asan {
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
alignas(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kInterceptorName[] = "interceptor_name";
static const char kInterceptorViaFunction[] = "interceptor_via_fun";
@ -39,8 +39,7 @@ void InitializeSuppressions() {
suppression_ctx = new (suppression_placeholder)
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__asan_default_suppressions)
suppression_ctx->Parse(__asan_default_suppressions());
suppression_ctx->Parse(__asan_default_suppressions());
}
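
With the address check gone, user code supplies suppressions by overriding the hook; e.g. (an illustrative sketch — the function name in the suppression is hypothetical):

extern "C" const char *__asan_default_suppressions() {
  // Same syntax as an ASan suppressions file passed via ASAN_OPTIONS.
  return "interceptor_via_fun:FunctionWithBenignOverflow\n"
         "interceptor_name:strcpy\n";
}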
bool IsInterceptorSuppressed(const char *interceptor_name) {
@ -81,9 +80,10 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
}
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr));
const SymbolizedStack *frames = symbolized_stack.get();
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
continue;
@ -91,11 +91,9 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
// Match "interceptor_via_fun" suppressions.
if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
&s)) {
frames->ClearAll();
return true;
}
}
frames->ClearAll();
}
}
return false;

View File

@ -21,6 +21,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_thread_history.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __asan {
@ -28,10 +29,7 @@ namespace __asan {
// AsanThreadContext implementation.
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
if (args->stack)
stack_id = StackDepotPut(*args->stack);
thread = args->thread;
thread = static_cast<AsanThread *>(arg);
thread->set_context(this);
}
@ -44,10 +42,15 @@ static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;
static Mutex mu_for_thread_context;
// TODO(leonardchan@): It should be possible to make LowLevelAllocator
// thread-safe and consolidate this one into the GlobalLowLevelAllocator.
// We should be able to do something similar to what's in
// sanitizer_stack_store.cpp.
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
Lock lock(&mu_for_thread_context);
return new (GetGlobalLowLevelAllocator()) AsanThreadContext(tid);
return new (allocator_for_thread_context) AsanThreadContext(tid);
}
static void InitThreads() {
@ -62,10 +65,10 @@ static void InitThreads() {
// thread before all TSD destructors will be called for it.
// MIPS requires aligned address
static ALIGNED(alignof(
ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ALIGNED(alignof(
ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
alignas(alignof(ThreadRegistry)) static char
thread_registry_placeholder[sizeof(ThreadRegistry)];
alignas(alignof(ThreadArgRetval)) static char
thread_data_placeholder[sizeof(ThreadArgRetval)];
asan_thread_registry =
new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
@ -101,8 +104,8 @@ AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
CHECK_LE(data_size, availible_size);
internal_memcpy(thread->start_data_, start_data, data_size);
}
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
asanThreadRegistry().CreateThread(0, detached, parent_tid,
stack ? StackDepotPut(*stack) : 0, thread);
return thread;
}
@ -301,13 +304,10 @@ AsanThread *CreateMainThread() {
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_top_,
&tls_begin_, &tls_end_);
stack_top_ = RoundDownTo(stack_top_, ASAN_SHADOW_GRANULARITY);
stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
if (stack_top_ != stack_bottom_) {
@ -556,6 +556,12 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
threads);
}
void PrintThreads() {
InternalScopedString out;
PrintThreadHistory(__asan::asanThreadRegistry(), out);
Report("%s\n", out.data());
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1

View File

@ -36,21 +36,16 @@ class AsanThread;
class AsanThreadContext final : public ThreadContextBase {
public:
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid), announced(false),
destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
: ThreadContextBase(tid),
announced(false),
destructor_iterations(GetPthreadDestructorIterations()),
thread(nullptr) {}
bool announced;
u8 destructor_iterations;
u32 stack_id;
AsanThread *thread;
void OnCreated(void *arg) override;
void OnFinished() override;
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
};
// AsanThreadContext objects are never freed, so we need many of them.

View File

@ -203,6 +203,8 @@ void InitializePlatformInterceptors() {
void InstallAtExitCheckLeaks() {}
void InstallAtForkHandler() {}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
@ -264,16 +266,10 @@ void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); }
// }}}
// ---------------------- Various stuff ---------------- {{{
void *AsanDoesNotSupportStaticLinkage() {
#if defined(_DEBUG)
#error Please build the runtime with a non-debug CRT: /MD or /MT
#endif
return 0;
}
uptr FindDynamicShadowStart() {
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
/*min_shadow_base_alignment*/ 0, kHighMemEnd,
GetMmapGranularity());
}
void AsanCheckDynamicRTPrereqs() {}

View File

@ -0,0 +1,112 @@
//===-- asan_win_common_runtime_thunk.cpp ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines things that need to be present in the application modules
// to interact with the ASan DLL runtime correctly and can't be implemented
// using the default "import library" generated when linking the DLL.
//
// This includes:
// - Cloning shadow memory dynamic address from ASAN DLL
// - Creating weak aliases to default implementation imported from asan dll
// - Forwarding the detect_stack_use_after_return runtime option
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
#if defined(SANITIZER_DYNAMIC_RUNTIME_THUNK) || \
defined(SANITIZER_STATIC_RUNTIME_THUNK)
# define SANITIZER_IMPORT_INTERFACE 1
# define WIN32_LEAN_AND_MEAN
# include "asan_win_common_runtime_thunk.h"
# include <windows.h>
# include "sanitizer_common/sanitizer_win_defs.h"
# include "sanitizer_common/sanitizer_win_thunk_interception.h"
// Define weak alias for all weak functions imported from asan dll.
# define INTERFACE_FUNCTION(Name)
# define INTERFACE_WEAK_FUNCTION(Name) REGISTER_WEAK_FUNCTION(Name)
# include "asan_interface.inc"
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows.
//
// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
// so normally we would just dllimport it. Unfortunately, the dllimport
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with an MT or MD runtime and we don't want to use ugly __imp_ names on
// Windows
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return;
__declspec(dllimport) void *__asan_get_shadow_memory_dynamic_address();
void *__asan_shadow_memory_dynamic_address;
static void __asan_initialize_cloned_variables() {
__asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
__asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
}
}
static int asan_thunk_init() {
__asan_initialize_cloned_variables();
# ifdef SANITIZER_STATIC_RUNTIME_THUNK
__asan_initialize_static_thunk();
# endif
return 0;
}
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == DLL_PROCESS_ATTACH) {
asan_thunk_init();
}
}
// Our cloned variables must be initialized before C/C++ constructors. If TLS
// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
// initializer is needed as a backup.
extern "C" __declspec(allocate(".CRT$XIB")) int (*__asan_thunk_init)() =
asan_thunk_init;
WIN_FORCE_LINK(__asan_thunk_init)
extern "C" __declspec(allocate(".CRT$XLAB")) void(WINAPI *__asan_tls_init)(
void *, unsigned long, void *) = asan_thread_init;
WIN_FORCE_LINK(__asan_tls_init)
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
// of each module (see also asan_win.cpp).
extern "C" {
__declspec(dllimport) int __asan_set_seh_filter();
static int SetSEHFilter() { return __asan_set_seh_filter(); }
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
extern "C" __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
WIN_FORCE_LINK(__asan_seh_interceptor)
}
WIN_FORCE_LINK(__asan_dso_reg_hook)
#endif // defined(SANITIZER_DYNAMIC_RUNTIME_THUNK) ||
// defined(SANITIZER_STATIC_RUNTIME_THUNK)

View File

@ -0,0 +1,38 @@
//===-- asan_win_common_runtime_thunk.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines things that need to be present in the application modules
// to interact with the ASan DLL runtime correctly and can't be implemented
// using the default "import library" generated when linking the DLL.
//
//===----------------------------------------------------------------------===//
#if defined(SANITIZER_STATIC_RUNTIME_THUNK) || \
defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
# include "sanitizer_common/sanitizer_win_defs.h"
# pragma section(".CRT$XIB", long, \
read) // C initializer (during C init before dyninit)
# pragma section(".CRT$XID", long, \
read) // First C initializer after CRT initializers
# pragma section(".CRT$XCAB", long, \
read) // First C++ initializer after startup initializers
# pragma section(".CRT$XTW", long, read) // First ASAN globals terminator
# pragma section(".CRT$XTY", long, read) // Last ASAN globals terminator
# pragma section(".CRT$XLAB", long, read) // First TLS initializer
# ifdef SANITIZER_STATIC_RUNTIME_THUNK
extern "C" void __asan_initialize_static_thunk();
# endif
#endif // defined(SANITIZER_STATIC_RUNTIME_THUNK) ||
// defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
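
The general mechanism behind these pragmas, in a standalone MSVC-style sketch (names here are hypothetical): declare the section, then place a function pointer in it; the CRT walks the .CRT$X* sections in alphabetical order during startup, which is what gives the initializers above their ordering guarantees.

#pragma section(".CRT$XID", long, read)

// C initializers return int; a nonzero value aborts startup.
static int early_c_init() { return 0; }

extern "C" __declspec(allocate(".CRT$XID"))
int (*my_init_entry)(void) = early_c_init;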

View File

@ -1,165 +0,0 @@
//===-- asan_win_dll_thunk.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DLL_THUNK
#include "asan_init_version.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_win_defs.h"
#include "sanitizer_common/sanitizer_win_dll_thunk.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// ASan own interface functions.
#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "asan_interface.inc"
// Memory allocation functions.
INTERCEPT_WRAP_V_W(free)
INTERCEPT_WRAP_V_W(_free_base)
INTERCEPT_WRAP_V_WW(_free_dbg)
INTERCEPT_WRAP_W_W(malloc)
INTERCEPT_WRAP_W_W(_malloc_base)
INTERCEPT_WRAP_W_WWWW(_malloc_dbg)
INTERCEPT_WRAP_W_WW(calloc)
INTERCEPT_WRAP_W_WW(_calloc_base)
INTERCEPT_WRAP_W_WWWWW(_calloc_dbg)
INTERCEPT_WRAP_W_WWW(_calloc_impl)
INTERCEPT_WRAP_W_WW(realloc)
INTERCEPT_WRAP_W_WW(_realloc_base)
INTERCEPT_WRAP_W_WWW(_realloc_dbg)
INTERCEPT_WRAP_W_WWW(_recalloc)
INTERCEPT_WRAP_W_WWW(_recalloc_base)
INTERCEPT_WRAP_W_W(_msize)
INTERCEPT_WRAP_W_W(_msize_base)
INTERCEPT_WRAP_W_W(_expand)
INTERCEPT_WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cpp)
# if defined(_MSC_VER) && !defined(__clang__)
// Disable warnings such as: 'void memchr(void)': incorrect number of arguments
// for intrinsic function, expected '3' arguments.
# pragma warning(push)
# pragma warning(disable : 4392)
# endif
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
INTERCEPT_LIBRARY_FUNCTION(atoll);
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION(memchr);
#endif
INTERCEPT_LIBRARY_FUNCTION(memcmp);
INTERCEPT_LIBRARY_FUNCTION(memcpy);
INTERCEPT_LIBRARY_FUNCTION(memmove);
INTERCEPT_LIBRARY_FUNCTION(memset);
INTERCEPT_LIBRARY_FUNCTION(strcat);
INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy);
INTERCEPT_LIBRARY_FUNCTION(strcspn);
INTERCEPT_LIBRARY_FUNCTION(strdup);
INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(strtoll);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
# if defined(_MSC_VER) && !defined(__clang__)
# pragma warning(pop)
# endif
#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
// _except_handler4 checks -GS cookie which is different for each module, so we
// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
#endif
// Windows specific functions not included in asan_interface.inc.
INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
using namespace __sanitizer;
extern "C" {
int __asan_option_detect_stack_use_after_return;
uptr __asan_shadow_memory_dynamic_address;
} // extern "C"
static int asan_dll_thunk_init() {
typedef void (*fntype)();
static fntype fn = 0;
// asan_dll_thunk_init is expected to be called by only one thread.
if (fn) return 0;
// Ensure all interception was executed.
__dll_thunk_init();
fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
__asan_shadow_memory_dynamic_address =
(uptr)__asan_get_shadow_memory_dynamic_address();
#ifndef _WIN64
INTERCEPT_FUNCTION(_except_handler4);
#endif
// In DLLs, the callbacks are expected to return 0,
// otherwise CRT initialization fails.
return 0;
}
#pragma section(".CRT$XIB", long, read)
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init;
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init();
}
#pragma section(".CRT$XLAB", long, read)
__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
WIN_FORCE_LINK(__asan_dso_reg_hook)
#endif // SANITIZER_DLL_THUNK

View File

@ -8,76 +8,17 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines things that need to be present in the application modules
// to interact with the ASan DLL runtime correctly and can't be implemented
// using the default "import library" generated when linking the DLL RTL.
//
// This includes:
// - creating weak aliases to default implementation imported from asan dll.
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handler
// This file defines things that need to be present for application modules
// that are dynamic linked with the C Runtime.
//
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
#define SANITIZER_IMPORT_INTERFACE 1
#include "sanitizer_common/sanitizer_win_defs.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
// Define weak alias for all weak functions imported from asan dll.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
#include "asan_interface.inc"
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XIB", long, read)
#pragma section(".CRT$XID", long, read)
#pragma section(".CRT$XCAB", long, read)
#pragma section(".CRT$XTW", long, read)
#pragma section(".CRT$XTY", long, read)
#pragma section(".CRT$XLAB", long, read)
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows.
//
// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
// so normally we would just dllimport it. Unfortunately, the dllimport
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return;
__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
void* __asan_shadow_memory_dynamic_address;
}
static int InitializeClonedVariables() {
__asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
__asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
return 0;
}
static void NTAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
}
// Our cloned variables must be initialized before C/C++ constructors. If TLS
// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
// initializer is needed as a backup.
__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
InitializeClonedVariables;
__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
# include "asan_win_common_runtime_thunk.h"
# include "sanitizer_common/sanitizer_win_defs.h"
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
@ -88,43 +29,26 @@ __declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
extern "C" int __cdecl atexit(void (__cdecl *f)(void));
extern "C" int __cdecl atexit(void(__cdecl *f)(void));
extern "C" void __cdecl _initterm(void *a, void *b);
namespace {
__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;
__declspec(allocate(".CRT$XTW")) void *before_global_dtors = 0;
__declspec(allocate(".CRT$XTY")) void *after_global_dtors = 0;
void UnregisterGlobals() {
_initterm(&before_global_dtors, &after_global_dtors);
}
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
int ScheduleUnregisterGlobals() { return atexit(UnregisterGlobals); }
} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
// initializers (think ctors for globals), UnregisterGlobals gets executed after
// dtors for C++ globals.
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
extern "C" __declspec(allocate(".CRT$XID")) int (
*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
WIN_FORCE_LINK(__asan_schedule_unregister_globals)
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
// of each module (see also asan_win.cpp).
extern "C" {
__declspec(dllimport) int __asan_set_seh_filter();
static int SetSEHFilter() { return __asan_set_seh_filter(); }
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
WIN_FORCE_LINK(__asan_dso_reg_hook)
#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK

View File

@ -0,0 +1,113 @@
//===-- asan_win_static_runtime_thunk.cpp ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines a family of thunks that should be statically linked into
// modules that are statically linked with the C Runtime in order to delegate
// the calls to the ASAN runtime DLL.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_STATIC_RUNTIME_THUNK
# include "asan_init_version.h"
# include "asan_interface_internal.h"
# include "asan_win_common_runtime_thunk.h"
# include "sanitizer_common/sanitizer_platform_interceptors.h"
# include "sanitizer_common/sanitizer_win_defs.h"
# include "sanitizer_common/sanitizer_win_thunk_interception.h"
# if defined(_MSC_VER) && !defined(__clang__)
// Disable warnings such as: 'void memchr(void)': incorrect number of arguments
// for intrinsic function, expected '3' arguments.
# pragma warning(push)
# pragma warning(disable : 4392)
# endif
# define INTERCEPT_LIBRARY_FUNCTION_ASAN(X) \
INTERCEPT_LIBRARY_FUNCTION(X, "__asan_wrap_" #X)
INTERCEPT_LIBRARY_FUNCTION_ASAN(atoi);
INTERCEPT_LIBRARY_FUNCTION_ASAN(atol);
INTERCEPT_LIBRARY_FUNCTION_ASAN(atoll);
INTERCEPT_LIBRARY_FUNCTION_ASAN(frexp);
INTERCEPT_LIBRARY_FUNCTION_ASAN(longjmp);
# if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION_ASAN(memchr);
# endif
INTERCEPT_LIBRARY_FUNCTION_ASAN(memcmp);
INTERCEPT_LIBRARY_FUNCTION_ASAN(memcpy);
# ifndef _WIN64
// memmove and memcpy share an implementation on amd64
INTERCEPT_LIBRARY_FUNCTION_ASAN(memmove);
# endif
INTERCEPT_LIBRARY_FUNCTION_ASAN(memset);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strcat);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strchr);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strcmp);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strcpy);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strcspn);
INTERCEPT_LIBRARY_FUNCTION_ASAN(_strdup);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strlen);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strncat);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strncmp);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strncpy);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strnlen);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strpbrk);
// INTERCEPT_LIBRARY_FUNCTION_ASAN(strrchr);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strspn);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strstr);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strtok);
INTERCEPT_LIBRARY_FUNCTION_ASAN(wcslen);
INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsnlen);
// Note: Don't intercept strtol(l). They are supposed to set errno for out-of-
// range values, but since the ASan runtime is linked against the dynamic CRT,
// its errno is different from the one in the current module.
# if defined(_MSC_VER) && !defined(__clang__)
# pragma warning(pop)
# endif
# ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION_ASAN(__C_specific_handler);
# else
extern "C" void abort();
INTERCEPT_LIBRARY_FUNCTION_ASAN(_except_handler3);
// _except_handler4 checks the -GS cookie, which is different for each module,
// so we can't use INTERCEPT_LIBRARY_FUNCTION_ASAN(_except_handler4); we need
// to apply the interception manually.
extern "C" int _except_handler4(void *, void *, void *, void *);
static int (*real_except_handler4)(void *, void *, void *,
void *) = &_except_handler4;
static int intercept_except_handler4(void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return real_except_handler4(a, b, c, d);
}
# endif
// Windows specific functions not included in asan_interface.inc.
// INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
// INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
// INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
extern "C" void __asan_initialize_static_thunk() {
# ifndef _WIN64
if (real_except_handler4 == &_except_handler4) {
// Single threaded, no need for synchronization.
if (!__sanitizer_override_function_by_addr(
reinterpret_cast<__sanitizer::uptr>(&intercept_except_handler4),
reinterpret_cast<__sanitizer::uptr>(&_except_handler4),
reinterpret_cast<__sanitizer::uptr*>(&real_except_handler4))) {
abort();
}
}
# endif
}
#endif // SANITIZER_STATIC_RUNTIME_THUNK

View File

@ -1,22 +0,0 @@
//===-- asan_win_weak_interception.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This module should be included in Address Sanitizer when it is implemented as
// a shared library on Windows (dll), in order to delegate the calls of weak
// functions to the implementation in the main executable when a strong
// definition is provided.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC
#include "sanitizer_common/sanitizer_win_weak_interception.h"
#include "asan_interface_internal.h"
// Check if strong definitions for weak functions are present in the main
// executable. If that is the case, override dll functions to point to strong
// implementations.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "asan_interface.inc"
#endif // SANITIZER_DYNAMIC

View File

@ -260,9 +260,10 @@
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
CFI_START SEPARATOR \
DECLARE_FUNC_ENCODING \
name: SEPARATOR BTI_C
name: \
SEPARATOR CFI_START \
SEPARATOR BTI_C
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \

View File

@ -357,8 +357,6 @@ __attribute__((constructor(0))) void __hwasan_init() {
hwasan_init_is_running = 1;
SanitizerToolName = "HWAddressSanitizer";
InitTlsSize();
CacheBinaryName();
InitializeFlags();
@ -367,6 +365,8 @@ __attribute__((constructor(0))) void __hwasan_init() {
__sanitizer_set_report_path(common_flags()->log_path);
InitializePlatformEarly();
AndroidTestTlsSlot();
DisableCoreDumperIfNecessary();
@ -678,6 +678,8 @@ uptr __hwasan_tag_pointer(uptr p, u8 tag) {
return AddTagToPointer(p, tag);
}
u8 __hwasan_get_tag_from_pointer(uptr p) { return GetTagFromPointer(p); }
void __hwasan_handle_longjmp(const void *sp_dst) {
uptr dst = (uptr)sp_dst;
// HWASan does not support tagged SP.
@ -690,7 +692,7 @@ void __hwasan_handle_longjmp(const void *sp_dst) {
"WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
"stack top: %p; target %p; distance: %p (%zd)\n"
"False positive error reports may follow\n",
(void *)sp, (void *)dst, dst - sp);
(void *)sp, (void *)dst, dst - sp, dst - sp);
return;
}
TagMemory(sp, dst - sp, 0);

View File

@ -104,9 +104,9 @@ static inline void *UntagPtr(const void *tagged_ptr) {
}
static inline uptr AddTagToPointer(uptr p, tag_t tag) {
return InTaggableRegion(p)
? ((p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift))
: p;
return InTaggableRegion(p) ? ((p & ~kAddressTagMask) |
((uptr)(tag & kTagMask) << kAddressTagShift))
: p;
}
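
Concretely, HWASan keeps an 8-bit tag in the top byte of the pointer (AArch64 TBI-style); the change above additionally masks the tag to its significant bits before shifting. A sketch of the arithmetic, assuming the usual shift of 56 (the real constants are kAddressTagShift/kAddressTagMask/kTagMask):

#include <cstdint>
#include <cstdio>

constexpr unsigned kTagShift = 56;                   // top byte of the pointer
constexpr uint64_t kTagBits = 0xffULL << kTagShift;  // tag bit positions

uint64_t AddTag(uint64_t p, uint8_t tag) {
  return (p & ~kTagBits) | ((uint64_t)tag << kTagShift);
}

uint8_t GetTag(uint64_t p) { return (uint8_t)(p >> kTagShift); }

int main() {
  uint64_t p = AddTag(0x00007fff12345678ULL, 0x2a);
  printf("tagged=%#llx tag=%#x\n", (unsigned long long)p, (unsigned)GetTag(p));
}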
namespace __hwasan {
@ -139,14 +139,14 @@ void hwasan_free(void *ptr, StackTrace *stack);
void InstallAtExitHandler();
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
UNINITIALIZED BufferedStackTrace stack; \
if (hwasan_inited) \
stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
nullptr, common_flags()->fast_unwind_on_malloc, \
common_flags()->malloc_context_size)
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
UNINITIALIZED BufferedStackTrace stack; \
if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)

View File

@ -17,7 +17,6 @@
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mallinfo.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
using namespace __hwasan;
@ -62,10 +61,7 @@ void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
void *ptr = hwasan_memalign(alignment, size, &stack);
if (ptr)
DTLS_on_libc_memalign(ptr, size);
return ptr;
return hwasan_memalign(alignment, size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
@ -184,7 +180,7 @@ INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
INTERCEPTOR_ALIAS(void, cfree, void *ptr);
INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo,);
INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
# endif

View File

@ -44,7 +44,7 @@ enum {
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
alignas(16) static u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;
bool HwasanChunkView::IsAllocated() const {
@ -289,6 +289,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
void *untagged_ptr = UntagPtr(tagged_ptr);
if (RunFreeHooks(tagged_ptr))
return;
if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
return;
@ -302,8 +305,6 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
return;
}
RunFreeHooks(tagged_ptr);
uptr orig_size = meta->GetRequestedSize();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->GetAllocStackId();

View File

@ -140,7 +140,6 @@ __attribute__((always_inline, nodebug)) static inline uptr ShortTagSize(
__attribute__((always_inline, nodebug)) static inline bool
PossiblyShortTagMatches(tag_t mem_tag, uptr ptr, uptr sz) {
DCHECK(IsAligned(ptr, kShadowAlignment));
tag_t ptr_tag = GetTagFromPointer(ptr);
if (ptr_tag == mem_tag)
return true;

View File

@ -36,15 +36,20 @@ decltype(__hwasan_shadow)* __hwasan_premap_shadow();
namespace __hwasan {
// We cannot call anything in libc here (see comment above), so we need to
// assume the biggest allowed page size.
// Android max page size is defined as 16k here:
// https://android.googlesource.com/platform/bionic/+/main/libc/platform/bionic/page.h#41
static constexpr uptr kMaxGranularity = 16384;
// Conservative upper limit.
static uptr PremapShadowSize() {
return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale,
GetMmapGranularity());
return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale, kMaxGranularity);
}
static uptr PremapShadow() {
return MapDynamicShadow(PremapShadowSize(), kShadowScale,
kShadowBaseAlignment, kHighMemEnd);
kShadowBaseAlignment, kHighMemEnd, kMaxGranularity);
}
static bool IsPremapShadowAvailable() {
@ -56,7 +61,7 @@ static bool IsPremapShadowAvailable() {
}
static uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
const uptr granularity = GetMmapGranularity();
const uptr granularity = kMaxGranularity;
const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow);
const uptr premap_shadow_size = PremapShadowSize();
const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
@ -109,7 +114,7 @@ uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
if (IsPremapShadowAvailable())
return FindPremappedShadowStart(shadow_size_bytes);
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
kHighMemEnd, kMaxGranularity);
}
} // namespace __hwasan
@ -135,7 +140,7 @@ uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
RingBufferSize());
# endif
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
kHighMemEnd, GetMmapGranularity());
}
} // namespace __hwasan

View File

@ -84,3 +84,10 @@ HWASAN_FLAG(bool, malloc_bisect_dump, false,
// are untagged before the call.
HWASAN_FLAG(bool, fail_without_syscall_abi, true,
"Exit if fail to request relaxed syscall ABI.")
HWASAN_FLAG(
uptr, fixed_shadow_base, -1,
"If not -1, HWASan will attempt to allocate the shadow at this address, "
"instead of choosing one dynamically."
"Tip: this can be combined with the compiler option, "
"-hwasan-mapping-offset, to optimize the instrumentation.")

View File

@ -334,10 +334,10 @@ INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
}
# endif
DEFINE_REAL_PTHREAD_FUNCTIONS
DEFINE_INTERNAL_PTHREAD_FUNCTIONS
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
DEFINE_REAL(int, vfork,)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
@ -520,6 +520,7 @@ void InitializeInterceptors() {
CHECK_EQ(inited, 0);
# if HWASAN_WITH_INTERCEPTORS
__interception::DoesNotSupportStaticLinking();
InitializeCommonInterceptors();
(void)(read_iovec);

View File

@ -160,6 +160,9 @@ void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __hwasan_tag_pointer(uptr p, u8 tag);
SANITIZER_INTERFACE_ATTRIBUTE
u8 __hwasan_get_tag_from_pointer(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_mismatch(uptr addr, u8 ts);

View File

@ -106,8 +106,22 @@ static uptr GetHighMemEnd() {
}
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
__hwasan_shadow_memory_dynamic_address =
FindDynamicShadowStart(shadow_size_bytes);
// FIXME: Android should init flags before shadow.
if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
__hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
uptr beg = __hwasan_shadow_memory_dynamic_address;
uptr end = beg + shadow_size_bytes;
if (!MemoryRangeIsAvailable(beg, end)) {
Report(
"FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
(void *)beg, (void *)end);
DumpProcessMap();
CHECK(MemoryRangeIsAvailable(beg, end));
}
} else {
__hwasan_shadow_memory_dynamic_address =
FindDynamicShadowStart(shadow_size_bytes);
}
}
static void MaybeDieIfNoTaggingAbi(const char *message) {
@ -246,9 +260,6 @@ bool InitShadow() {
CHECK_GT(kLowShadowEnd, kLowShadowStart);
CHECK_GT(kLowShadowStart, kLowMemEnd);
if (Verbosity())
PrintAddressSpaceLayout();
// Reserve shadow memory.
ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
@ -262,6 +273,9 @@ bool InitShadow() {
if (kHighShadowEnd + 1 < kHighMemStart)
ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
if (Verbosity())
PrintAddressSpaceLayout();
return true;
}
@ -294,25 +308,6 @@ void InstallAtExitHandler() { atexit(HwasanAtExit); }
// ---------------------- TSD ---------------- {{{1
extern "C" void __hwasan_thread_enter() {
hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}
extern "C" void __hwasan_thread_exit() {
Thread *t = GetCurrentThread();
// Make sure that signal handler can not see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
if (t) {
// Block async signals on the thread as the handler can be instrumented.
// After this point instrumented code can't access essential data from TLS
// and will crash.
// Bionic already calls __hwasan_thread_exit with blocked signals.
if (SANITIZER_GLIBC)
BlockSignals();
hwasanThreadList().ReleaseThread(t);
}
}
# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
@ -504,12 +499,8 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context) {
}
void Thread::InitStackAndTls(const InitState *) {
uptr tls_size;
uptr stack_size;
GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
&tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_top_, &tls_begin_,
&tls_end_);
}
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
@ -536,16 +527,34 @@ uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
return AddTagToPointer(p, tag);
}
static void BeforeFork() {
VReport(2, "BeforeFork tid: %llu\n", GetTid());
if (CAN_SANITIZE_LEAKS) {
__lsan::LockGlobal();
}
// `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and lock
// the stuff we need.
__lsan::LockThreads();
__lsan::LockAllocator();
StackDepotLockBeforeFork();
}
static void AfterFork(bool fork_child) {
StackDepotUnlockAfterFork(fork_child);
// `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and unlock
// the stuff we need.
__lsan::UnlockAllocator();
__lsan::UnlockThreads();
if (CAN_SANITIZE_LEAKS) {
__lsan::UnlockGlobal();
}
VReport(2, "AfterFork tid: %llu\n", GetTid());
}
void HwasanInstallAtForkHandler() {
auto before = []() {
HwasanAllocatorLock();
StackDepotLockAll();
};
auto after = []() {
StackDepotUnlockAll();
HwasanAllocatorUnlock();
};
pthread_atfork(before, after, after);
pthread_atfork(
&BeforeFork, []() { AfterFork(/* fork_child= */ false); },
[]() { AfterFork(/* fork_child= */ true); });
}
void InstallAtExitCheckLeaks() {
@ -561,4 +570,25 @@ void InstallAtExitCheckLeaks() {
} // namespace __hwasan
using namespace __hwasan;
extern "C" void __hwasan_thread_enter() {
hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}
extern "C" void __hwasan_thread_exit() {
Thread *t = GetCurrentThread();
// Make sure that signal handler can not see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
if (t) {
// Block async signals on the thread as the handler can be instrumented.
// After this point instrumented code can't access essential data from TLS
// and will crash.
// Bionic already calls __hwasan_thread_exit with blocked signals.
if (SANITIZER_GLIBC)
BlockSignals();
hwasanThreadList().ReleaseThread(t);
}
}
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

View File

@ -14,10 +14,8 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_hwasan_preinit, because it's not intended to
// be exported.
// This code linked into the main executable when -fsanitize=hwaddress is in
// the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used)) static void (
*__local_hwasan_preinit)(void) = __hwasan_init;
// This section is linked into the main executable when -fsanitize=hwaddress is
// specified to perform initialization at a very early stage.
__attribute__((section(".preinit_array"), used)) static auto preinit =
__hwasan_init;
#endif

View File

@ -27,6 +27,7 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
@ -40,7 +41,7 @@ class ScopedReport {
public:
explicit ScopedReport(bool fatal) : fatal(fatal) {
Lock lock(&error_message_lock_);
error_message_ptr_ = fatal ? &error_message_ : nullptr;
error_message_ptr_ = &error_message_;
++hwasan_report_count;
}
@ -205,6 +206,7 @@ static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
tag_t addr_tag, uptr untagged_addr) {
uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
bool found_local = false;
InternalScopedString location;
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
uptr record = *record_addr;
@ -212,35 +214,104 @@ static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
break;
tag_t base_tag =
reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
const uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
CHECK_LT(fp, kRecordFPModulus);
uptr pc_mask = (1ULL << kRecordFPShift) - 1;
uptr pc = record & pc_mask;
FrameInfo frame;
if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
for (LocalInfo &local : frame.locals) {
if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
continue;
tag_t obj_tag = base_tag ^ local.tag_offset;
if (obj_tag != addr_tag)
continue;
// Calculate the offset from the object address to the faulting
// address. Because we only store bits 4-19 of FP (bits 0-3 are
// guaranteed to be zero), the calculation is performed mod 2^20 and may
// harmlessly underflow if the address mod 2^20 is below the object
// address.
uptr obj_offset =
(untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
if (obj_offset >= local.size)
continue;
if (!found_local) {
Printf("Potentially referenced stack objects:\n");
found_local = true;
if (!Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame))
continue;
for (LocalInfo &local : frame.locals) {
if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
continue;
if (!(local.name && internal_strlen(local.name)) &&
!(local.function_name && internal_strlen(local.function_name)) &&
!(local.decl_file && internal_strlen(local.decl_file)))
continue;
tag_t obj_tag = base_tag ^ local.tag_offset;
if (obj_tag != addr_tag)
continue;
// We only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
// So we know only `FP % kRecordFPModulus`, and we can only calculate
// `local_beg % kRecordFPModulus`.
// Out of all possible `local_beg` we will only consider 2 candidates
// nearest to the `untagged_addr`.
uptr local_beg_mod = (fp + local.frame_offset) % kRecordFPModulus;
// Pick `local_beg` in the same 1 MiB block as `untagged_addr`.
uptr local_beg =
RoundDownTo(untagged_addr, kRecordFPModulus) + local_beg_mod;
// Pick the largest `local_beg <= untagged_addr`. It's either the current
// one or the one before.
if (local_beg > untagged_addr)
local_beg -= kRecordFPModulus;
uptr offset = -1ull;
const char *whence;
const char *cause = nullptr;
uptr best_beg;
// Try two 1 MiB blocks options and pick nearest one.
for (uptr i = 0; i < 2; ++i, local_beg += kRecordFPModulus) {
uptr local_end = local_beg + local.size;
if (local_beg > local_end)
continue; // This is a wraparound.
if (local_beg <= untagged_addr && untagged_addr < local_end) {
offset = untagged_addr - local_beg;
whence = "inside";
cause = "use-after-scope";
best_beg = local_beg;
break; // This is as close as it can be.
}
if (untagged_addr >= local_end) {
uptr new_offset = untagged_addr - local_end;
if (new_offset < offset) {
offset = new_offset;
whence = "after";
cause = "stack-buffer-overflow";
best_beg = local_beg;
}
} else {
uptr new_offset = local_beg - untagged_addr;
if (new_offset < offset) {
offset = new_offset;
whence = "before";
cause = "stack-buffer-overflow";
best_beg = local_beg;
}
}
Printf(" %s in %s %s:%d\n", local.name, local.function_name,
local.decl_file, local.decl_line);
}
frame.Clear();
// For this to fail, `untagged_addr` would have to be near nullptr, which is
// impossible with the Linux user-space memory layout.
if (!cause)
continue;
if (!found_local) {
Printf("\nPotentially referenced stack objects:\n");
found_local = true;
}
Decorator d;
Printf("%s", d.Error());
Printf("Cause: %s\n", cause);
Printf("%s", d.Default());
Printf("%s", d.Location());
StackTracePrinter::GetOrInit()->RenderSourceLocation(
&location, local.decl_file, local.decl_line, /* column= */ 0,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
Printf(
"%p is located %zd bytes %s a %zd-byte local variable %s "
"[%p,%p) "
"in %s %s\n",
untagged_addr, offset, whence, local.size, local.name, best_beg,
best_beg + local.size, local.function_name, location.data());
location.clear();
Printf("%s\n", d.Default());
}
frame.Clear();
}
if (found_local)
@ -257,14 +328,16 @@ static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
break;
uptr pc_mask = (1ULL << 48) - 1;
uptr pc = record & pc_mask;
frame_desc.AppendF(" record_addr:0x%zx record:0x%zx",
reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
frame_desc.AppendF(" record_addr:%p record:0x%zx",
reinterpret_cast<const void *>(record_addr), record);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
const SymbolizedStack *frame = symbolized_stack.get();
if (frame) {
StackTracePrinter::GetOrInit()->RenderFrame(
&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
frame->ClearAll();
}
Printf("%s\n", frame_desc.data());
frame_desc.clear();
@ -353,7 +426,7 @@ static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
print_tag(s, row + i);
s.Append(row + i == addr ? "]" : " ");
}
s.AppendF("\n");
s.Append("\n");
}
}
@ -363,7 +436,7 @@ static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
InternalScopedString s;
addr = MemToShadow(addr);
s.AppendF(
"Memory tags around the buggy address (one tag corresponds to %zd "
"\nMemory tags around the buggy address (one tag corresponds to %zd "
"bytes):\n",
kShadowAlignment);
PrintTagInfoAroundAddr(addr, kShadowLines, s,
@ -383,10 +456,10 @@ static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
tag_t short_tag = get_short_tag(tag_addr);
s.AppendF("%02x", short_tag);
} else {
s.AppendF("..");
s.Append("..");
}
});
s.AppendF(
s.Append(
"See "
"https://clang.llvm.org/docs/"
"HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
@ -745,8 +818,6 @@ void BaseReport::PrintAddressDescription() const {
// Check stack first. If the address is on the stack of a live thread, we
// know it cannot be a heap / global overflow.
for (const auto &sa : allocations.stack) {
// TODO(fmayer): figure out how to distinguish use-after-return and
// stack-buffer-overflow.
Printf("%s", d.Error());
Printf("\nCause: stack tag-mismatch\n");
Printf("%s", d.Location());
@ -803,8 +874,10 @@ void BaseReport::PrintAddressDescription() const {
}
// Print the remaining threads, as an extra information, 1 line per thread.
if (flags()->print_live_threads_info)
if (flags()->print_live_threads_info) {
Printf("\n");
hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
}
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
@ -912,16 +985,16 @@ TailOverwrittenReport::~TailOverwrittenReport() {
InternalScopedString s;
u8 *tail = tail_copy;
s.AppendF("Tail contains: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
s.Append("Tail contains: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
s.AppendF("\n");
s.AppendF("Expected: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
s.Append("\n");
s.Append("Expected: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
s.AppendF("\n");
s.AppendF(" ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(" ");
s.Append("\n");
s.Append(" ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(" ");
for (uptr i = 0; i < tail_size; i++)
s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
@ -1020,7 +1093,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
Printf("Registers where the failure occurred (pc %p):\n", pc);
Printf("\nRegisters where the failure occurred (pc %p):\n", pc);
// We explicitly print a single line (4 registers/line) each iteration to
// reduce the amount of logcat error messages printed. Each Printf() will

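To make the two-candidate search in PrintStackAllocations above concrete, a small worked example (all addresses invented):

// kRecordFPModulus = 1 << 20 (0x100000).
// untagged_addr = 0x7fffabc12345
// local_beg_mod = (fp + local.frame_offset) % 0x100000 = 0x34560
// candidate     = RoundDownTo(untagged_addr, 0x100000) + 0x34560
//               = 0x7fffabc34560       // > untagged_addr, so step back once
// candidate A   = 0x7fffabb34560; candidate B = A + 0x100000 = 0x7fffabc34560
// The loop keeps whichever [local_beg, local_beg + size) contains the address
// ("inside"), otherwise the nearest candidate "before" or "after" it.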
View File

@ -68,6 +68,7 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
}
Print("Creating : ");
}
ClearShadowForThreadStackAndTLS();
}
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
@ -217,6 +218,11 @@ void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
__hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
// TODO: implement.
}
void PrintThreads() {
// TODO: implement.
}
} // namespace __lsan

View File

@ -1,5 +1,6 @@
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@ -13,15 +14,15 @@ ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
void InitThreadList(uptr storage, uptr size) {
CHECK_EQ(hwasan_thread_list, nullptr);
static ALIGNED(alignof(
HwasanThreadList)) char thread_list_placeholder[sizeof(HwasanThreadList)];
alignas(alignof(HwasanThreadList)) static char
thread_list_placeholder[sizeof(HwasanThreadList)];
hwasan_thread_list =
new (thread_list_placeholder) HwasanThreadList(storage, size);
CHECK_EQ(thread_data, nullptr);
static ALIGNED(alignof(
ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
alignas(alignof(ThreadArgRetval)) static char
thread_data_placeholder[sizeof(ThreadArgRetval)];
thread_data = new (thread_data_placeholder) ThreadArgRetval();
}

View File

@ -18,7 +18,7 @@
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
// sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 7), and is
// aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
@ -47,7 +47,6 @@
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@ -56,7 +55,10 @@ static uptr RingBufferSize() {
uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
// FIXME: increase the limit to 8 once this bug is fixed:
// https://bugs.llvm.org/show_bug.cgi?id=39030
for (int shift = 1; shift < 7; ++shift) {
// Note that we *cannot* do that on Android, as the runtime will indefinitely
// have to support code that is compiled with ashr, which only works with
// shifts up to 6.
for (int shift = 0; shift < 7; ++shift) {
uptr size = 4096 * (1ULL << shift);
if (size >= desired_bytes)
return size;
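A worked instance of this loop, assuming an 8-byte uptr and the default stack_history_size of 1024 (both assumptions):

// desired_bytes = 1024 * sizeof(uptr) = 8192
// shift 0: size = 4096, too small; shift 1: size = 8192 >= 8192, returned.
// Per the layout comment above, the 8 KiB buffer is aligned to 16 KiB.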

View File

@ -62,13 +62,20 @@ size_t SANITIZER_CDECL __sanitizer_get_free_bytes(void);
size_t SANITIZER_CDECL __sanitizer_get_unmapped_bytes(void);
/* Malloc hooks that may be optionally provided by user.
__sanitizer_malloc_hook(ptr, size) is called immediately after
allocation of "size" bytes, which returned "ptr".
__sanitizer_free_hook(ptr) is called immediately before
deallocation of "ptr". */
- __sanitizer_malloc_hook(ptr, size) is called immediately after allocation
of "size" bytes, which returned "ptr".
- __sanitizer_free_hook(ptr) is called immediately before deallocation of
"ptr".
- __sanitizer_ignore_free_hook(ptr) is called immediately before deallocation
of "ptr", and if it returns a non-zero value, the deallocation of "ptr"
will not take place. This allows software to make free a no-op until it
calls free() again on the same pointer at a later time. Hint: read this as
"ignore the free" rather than "ignore the hook".
*/
void SANITIZER_CDECL __sanitizer_malloc_hook(const volatile void *ptr,
size_t size);
void SANITIZER_CDECL __sanitizer_free_hook(const volatile void *ptr);
int SANITIZER_CDECL __sanitizer_ignore_free_hook(const volatile void *ptr);
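A minimal user-side sketch of the new hook (application code, not part of the runtime; g_deferred is a hypothetical variable the program sets elsewhere):

static const volatile void *g_deferred;
int __sanitizer_ignore_free_hook(const volatile void *ptr) {
  // Returning non-zero makes the runtime skip this deallocation; a later
  // free() of the same pointer proceeds normally.
  return ptr == g_deferred;
}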
/* Installs a pair of hooks for malloc/free.
Several (currently, 5) hook pairs may be installed, they are executed

View File

@ -193,6 +193,43 @@ void SANITIZER_CDECL __sanitizer_annotate_double_ended_contiguous_container(
const void *old_container_beg, const void *old_container_end,
const void *new_container_beg, const void *new_container_end);
/// Copies memory annotations from a source storage region to a destination
/// storage region. After the operation, the destination region has the same
/// memory annotations as the source region, as long as sanitizer limitations
/// allow it (more bytes may be unpoisoned than in the source region, resulting
/// in more false negatives, but never false positives). If the source and
/// destination regions overlap, only the minimal required changes are made to
/// preserve the correct annotations. Old storage bytes that are not in the new
/// storage should have the same annotations, as long as sanitizer limitations
/// allow it.
///
/// This function is primarily designed to be used when moving trivially
/// relocatable objects that may have poisoned memory, making direct copying
/// problematic under a sanitizer. However, this function does not move memory
/// content itself, only annotations.
///
/// A contiguous container is a container that keeps all of its elements in a
/// contiguous region of memory. The container owns the region of memory
/// <c>[src_begin, src_end)</c> and <c>[dst_begin, dst_end)</c>. The memory
/// within these regions may be alternately poisoned and non-poisoned, with
/// possibly smaller poisoned and unpoisoned regions.
///
/// If this function fully poisons a granule, it is marked as "container
/// overflow".
///
/// Argument requirements: The destination container must have the same size as
/// the source container, which is inferred from the beginning and end of the
/// source region. Addresses may be granule-unaligned, but this may affect
/// performance.
///
/// \param src_begin Begin of the source container region.
/// \param src_end End of the source container region.
/// \param dst_begin Begin of the destination container region.
/// \param dst_end End of the destination container region.
void SANITIZER_CDECL __sanitizer_copy_contiguous_container_annotations(
const void *src_begin, const void *src_end, const void *dst_begin,
const void *dst_end);
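A hedged usage sketch, where old_buf, new_buf, and cap are invented names for two char buffers of equal capacity:

memcpy(new_buf, old_buf, cap);  // relocate the bytes; this copies no shadow
__sanitizer_copy_contiguous_container_annotations(
    old_buf, old_buf + cap, new_buf, new_buf + cap);  // now mirror poisoning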
/// Returns true if the contiguous container <c>[beg, end)</c> is properly
/// poisoned.
///
@ -293,7 +330,7 @@ void SANITIZER_CDECL __sanitizer_symbolize_global(void *data_ptr,
#define __sanitizer_return_address() \
__builtin_extract_return_addr(__builtin_return_address(0))
#else
void *SANITIZER_CDECL _ReturnAddress(void);
void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define __sanitizer_return_address() _ReturnAddress()
#endif

View File

@ -44,6 +44,10 @@ void SANITIZER_CDECL __hwasan_tag_memory(const volatile void *p,
void *SANITIZER_CDECL __hwasan_tag_pointer(const volatile void *p,
unsigned char tag);
/// Get tag from the pointer.
unsigned char SANITIZER_CDECL
__hwasan_get_tag_from_pointer(const volatile void *p);
// Set memory tag from the current SP address to the given address to zero.
// This is meant to annotate longjmp and other non-local jumps.
// This function needs to know the (almost) exact destination frame address;

View File

@ -1856,6 +1856,15 @@
__sanitizer_syscall_pre_impl_sigaltstack((long)ss, (long)oss)
#define __sanitizer_syscall_post_sigaltstack(res, ss, oss) \
__sanitizer_syscall_post_impl_sigaltstack(res, (long)ss, (long)oss)
#define __sanitizer_syscall_pre_futex(uaddr, futex_op, val, timeout, uaddr2, \
val3) \
__sanitizer_syscall_pre_impl_futex((long)uaddr, (long)futex_op, (long)val, \
(long)timeout, (long)uaddr2, (long)val3)
#define __sanitizer_syscall_post_futex(res, uaddr, futex_op, val, timeout, \
uaddr2, val3) \
__sanitizer_syscall_post_impl_futex(res, (long)uaddr, (long)futex_op, \
(long)val, (long)timeout, (long)uaddr2, \
(long)val3)
// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
@ -1875,7 +1884,6 @@
#define __sanitizer_syscall_pre_fchown32(...)
#define __sanitizer_syscall_pre_ftime(...)
#define __sanitizer_syscall_pre_ftruncate64(...)
#define __sanitizer_syscall_pre_futex(...)
#define __sanitizer_syscall_pre_getegid32(...)
#define __sanitizer_syscall_pre_geteuid32(...)
#define __sanitizer_syscall_pre_getgid32(...)
@ -1954,7 +1962,6 @@
#define __sanitizer_syscall_post_fchown32(res, ...)
#define __sanitizer_syscall_post_ftime(res, ...)
#define __sanitizer_syscall_post_ftruncate64(res, ...)
#define __sanitizer_syscall_post_futex(res, ...)
#define __sanitizer_syscall_post_getegid32(res, ...)
#define __sanitizer_syscall_post_geteuid32(res, ...)
#define __sanitizer_syscall_post_getgid32(res, ...)
@ -3093,6 +3100,11 @@ void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
long oldact, long sz);
void __sanitizer_syscall_pre_impl_sigaltstack(long ss, long oss);
void __sanitizer_syscall_post_impl_sigaltstack(long res, long ss, long oss);
void __sanitizer_syscall_pre_impl_futex(long uaddr, long futex_op, long val,
long timeout, long uaddr2, long val3);
void __sanitizer_syscall_post_impl_futex(long res, long uaddr, long futex_op,
long val, long timeout, long uaddr2,
long val3);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -59,6 +59,12 @@ const char *SANITIZER_CDECL __memprof_default_options(void);
/// \returns 0 on success.
int SANITIZER_CDECL __memprof_profile_dump(void);
/// Closes the existing file descriptor, if it is valid and not stdout or
/// stderr, and resets the internal state such that the profile filename is
/// reopened on the next profile dump attempt. This can be used to enable
/// multiple rounds of profiling on the same binary.
void SANITIZER_CDECL __memprof_profile_reset(void);
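A brief sketch of the intended dump/reset pairing (the call sites are an assumption):

__memprof_profile_dump();   // round 1 written to the current profile file
__memprof_profile_reset();  // close it; the filename is reopened on next dump
// ... run the next workload phase ...
__memprof_profile_dump();   // round 2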
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -0,0 +1,75 @@
//===-- sanitizer/nsan_interface.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Public interface for nsan.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_NSAN_INTERFACE_H
#define SANITIZER_NSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
/// User-provided default option settings.
///
/// You can provide your own implementation of this function to return a string
/// containing NSan runtime options (for example,
/// <c>verbosity=1:halt_on_error=0</c>).
///
/// \returns Default options string.
const char *__nsan_default_options(void);
// Dumps nsan shadow data for a block of `size_bytes` bytes of application
// memory at location `addr`.
//
// Each line contains application address, shadow types, then values.
// Unknown types are shown as `__`, while known values are shown as
// `f`, `d`, `l` for float, double, and long double respectively. Position is
// shown as a single hex digit. The shadow value itself appears on the line that
// contains the first byte of the value.
// FIXME: Show both shadow and application value.
//
// Example: `__nsan_dump_shadow_mem(addr, 32, 8, 0)` might print:
//
// 0x0add7359: __ f0 f1 f2 f3 __ __ __ (42.000)
// 0x0add7361: __ d1 d2 d3 d4 d5 d6 d7
// 0x0add7369: d8 f0 f1 f2 f3 __ __ f2 (-1.000) (12.5)
// 0x0add7371: f3 __ __ __ __ __ __ __
//
// This means that there is:
// - a shadow double for the float at address 0x0add7360, with value 42;
// - a shadow float128 for the double at address 0x0add7362, with value -1;
// - a shadow double for the float at address 0x0add736a, with value 12.5;
// There was also a shadow double for the float at address 0x0add736e, but bytes
// f0 and f1 were overwritten by one or several stores, so that the shadow value
// is no longer valid.
// The argument `reserved` can be any value. Its true value is provided by the
// instrumentation.
void __nsan_dump_shadow_mem(const char *addr, size_t size_bytes,
size_t bytes_per_line, size_t reserved);
// Explicitly dumps a value.
// FIXME: vector versions ?
void __nsan_dump_float(float value);
void __nsan_dump_double(double value);
void __nsan_dump_longdouble(long double value);
// Explicitly checks a value.
// FIXME: vector versions ?
void __nsan_check_float(float value);
void __nsan_check_double(double value);
void __nsan_check_longdouble(long double value);
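A hedged sketch of the check entry points; the function is invented and assumes a build with NSan instrumentation enabled:

double naive_sum(const float *values, size_t n) {
  double sum = 0.0;
  for (size_t i = 0; i < n; ++i) sum += values[i];
  __nsan_check_double(sum);  // report if the higher-precision shadow drifted
  return sum;
}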
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_NSAN_INTERFACE_H

View File

@ -0,0 +1,75 @@
//===-- sanitizer/rtsan_interface.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of RealtimeSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_RTSAN_INTERFACE_H
#define SANITIZER_RTSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// Disable all RTSan error reporting.
// Must be paired with a call to `__rtsan_enable`.
void SANITIZER_CDECL __rtsan_disable(void);
// Re-enable all RTSan error reporting.
// Must follow a call to `__rtsan_disable`.
void SANITIZER_CDECL __rtsan_enable(void);
#ifdef __cplusplus
} // extern "C"
namespace __rtsan {
#if defined(__has_feature) && __has_feature(realtime_sanitizer)
class ScopedDisabler {
public:
ScopedDisabler() { __rtsan_disable(); }
~ScopedDisabler() { __rtsan_enable(); }
#if __cplusplus >= 201103L
ScopedDisabler(const ScopedDisabler &) = delete;
ScopedDisabler &operator=(const ScopedDisabler &) = delete;
ScopedDisabler(ScopedDisabler &&) = delete;
ScopedDisabler &operator=(ScopedDisabler &&) = delete;
#else
private:
ScopedDisabler(const ScopedDisabler &);
ScopedDisabler &operator=(const ScopedDisabler &);
#endif // __cplusplus >= 201103L
};
#else
class ScopedDisabler {
public:
ScopedDisabler() {}
#if __cplusplus >= 201103L
ScopedDisabler(const ScopedDisabler &) = delete;
ScopedDisabler &operator=(const ScopedDisabler &) = delete;
ScopedDisabler(ScopedDisabler &&) = delete;
ScopedDisabler &operator=(ScopedDisabler &&) = delete;
#else
private:
ScopedDisabler(const ScopedDisabler &);
ScopedDisabler &operator=(const ScopedDisabler &);
#endif // __cplusplus >= 201103L
};
#endif // defined(__has_feature) && __has_feature(realtime_sanitizer)
} // namespace __rtsan
#endif // __cplusplus
#endif // SANITIZER_RTSAN_INTERFACE_H
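A short usage sketch (the callback is invented; assumes a build with RealtimeSanitizer enabled):

#include <sanitizer/rtsan_interface.h>
#include <cstdlib>

void audio_callback() {
  __rtsan::ScopedDisabler disabler;  // suppresses reports for this scope
  void *scratch = std::malloc(256);  // would otherwise be flagged here
  std::free(scratch);
}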

View File

@ -13,6 +13,8 @@
#ifndef SANITIZER_UBSAN_INTERFACE_H
#define SANITIZER_UBSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -25,8 +25,19 @@
// These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
// On Windows the system headers (basetsd.h) provide conflicting definitions
// of SIZE_T/SSIZE_T that do not match the real size_t/ssize_t for 32-bit
// systems (using long instead of the expected int). Work around the typedef
// redefinition by #defining SIZE_T instead of using a typedef.
// TODO: We should be using __sanitizer::usize (and a new ssize) instead of
// these new macros as long as we ensure they match the real system definitions.
#if SANITIZER_WINDOWS
// Ensure that (S)SIZE_T were already defined as we are about to override them.
# include <basetsd.h>
#endif
#define SIZE_T __sanitizer::usize
#define SSIZE_T __sanitizer::sptr
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::u64 UINTMAX_T;
@ -338,16 +349,20 @@ const interpose_substitution substitution_##func_name[] \
#endif
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
// so we use casting via an integral type __interception::uptr,
// assuming that system is POSIX-compliant. Using other hacks seem
// challenging, as we don't even pass function type to
// INTERCEPT_FUNCTION macro, only its name.
// so we use casts via uintptr_t (the local __sanitizer::uptr equivalent).
namespace __interception {
#if defined(_WIN64)
typedef unsigned long long uptr;
#if defined(__ELF__) && !SANITIZER_FUCHSIA
// The use of interceptors makes many sanitizers unusable for static linking.
// Define a function that, if called, will cause a linker error (undefined _DYNAMIC).
// However, -static-pie (which is not common) cannot be detected at link time.
extern uptr kDynamic[] asm("_DYNAMIC");
inline void DoesNotSupportStaticLinking() {
[[maybe_unused]] volatile auto x = &kDynamic;
}
#else
typedef unsigned long uptr;
#endif // _WIN64
inline void DoesNotSupportStaticLinking() {}
#endif
} // namespace __interception
#define INCLUDED_FROM_INTERCEPTION_LIB

View File

@ -28,12 +28,14 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
// Cast func to type of REAL(func) before casting to uptr in case it is an
// overloaded function, which is the case for some glibc functions when
// _FORTIFY_SOURCE is used. This disambiguates which overload to use.
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, (::__interception::uptr *)&REAL(func), \
(::__interception::uptr)(decltype(REAL(func)))&(func), \
(::__interception::uptr) &TRAMPOLINE(func))
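A miniature illustration of why the extra cast is needed (names invented; real fortified glibc declarations differ):

int f(int);
int f(double);                 // an overload set, as with _FORTIFY_SOURCE
// (uptr)&f alone is ill-formed: &f is ambiguous. Selecting through the known
// function type first resolves it, which is what decltype(REAL(func)) does:
typedef int (*fn_t)(int);
__sanitizer::uptr p = (__sanitizer::uptr)(fn_t)&f;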
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
@ -41,7 +43,7 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
::__interception::InterceptFunction( \
#func, symver, \
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)(decltype(REAL(func)))&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \

View File

@ -12,28 +12,35 @@
//===----------------------------------------------------------------------===//
#include "interception.h"
#include "sanitizer_common/sanitizer_type_traits.h"
#if SANITIZER_LINUX || SANITIZER_APPLE
#include <sys/types.h>
#if __has_include(<sys/types.h>)
# include <sys/types.h>
#endif
#include <stddef.h>
#include <stdint.h>
COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK((__sanitizer::is_same<__sanitizer::uptr, ::uintptr_t>::value));
COMPILER_CHECK((__sanitizer::is_same<__sanitizer::sptr, ::intptr_t>::value));
COMPILER_CHECK((__sanitizer::is_same<__sanitizer::usize, ::size_t>::value));
COMPILER_CHECK((__sanitizer::is_same<::PTRDIFF_T, ::ptrdiff_t>::value));
COMPILER_CHECK((__sanitizer::is_same<::SIZE_T, ::size_t>::value));
#if !SANITIZER_WINDOWS
// No ssize_t on Windows.
COMPILER_CHECK((__sanitizer::is_same<::SSIZE_T, ::ssize_t>::value));
#endif
// TODO: These are not actually the same type on Linux (long vs long long)
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
COMPILER_CHECK(sizeof(::UINTMAX_T) == sizeof(uintmax_t));
# if SANITIZER_GLIBC || SANITIZER_ANDROID
#if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
# endif
#endif
// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
// rest (they depend on the _FILE_OFFSET_BITS setting when building an application).
# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
#if !SANITIZER_WINDOWS && (SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64)
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif

View File

@ -27,7 +27,7 @@
//
// 1) Detour
//
// The Detour hooking technique is assuming the presence of an header with
// The Detour hooking technique is assuming the presence of a header with
// padding and an overridable 2-bytes nop instruction (mov edi, edi). The
// nop instruction can safely be replaced by a 2-bytes jump without any need
// to save the instruction. A jump to the target is encoded in the function
@ -47,7 +47,7 @@
//
// func: jmp <label> --> func: jmp <hook>
//
// On an 64-bit architecture, a trampoline is inserted.
// On a 64-bit architecture, a trampoline is inserted.
//
// func: jmp <label> --> func: jmp <tramp>
// [...]
@ -60,7 +60,7 @@
//
// 3) HotPatch
//
// The HotPatch hooking is assuming the presence of an header with padding
// The HotPatch hooking is assuming the presence of a header with padding
// and a first instruction with at least 2-bytes.
//
// The reason to enforce the 2-bytes limitation is to provide the minimal
@ -80,7 +80,7 @@
// real: <instr>
// jmp <body>
//
// On an 64-bit architecture:
// On a 64-bit architecture:
//
// head: 6 x nop head: jmp QWORD [addr1]
// func: <instr> --> func: jmp short <head>
@ -110,7 +110,7 @@
// <instr>
// jmp <body>
//
// On an 64-bit architecture:
// On a 64-bit architecture:
//
// func: <instr> --> func: jmp QWORD [addr1]
// <instr>
@ -130,6 +130,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <psapi.h>
namespace __interception {
@ -339,7 +340,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
UNUSED static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
UNUSED static const uptr kTrampolineScanLimitRange = 1ull << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
@ -385,7 +386,30 @@ void TestOnlyReleaseTrampolineRegions() {
}
}
static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
static uptr AllocateMemoryForTrampoline(uptr func_address, size_t size) {
uptr image_address = func_address;
#if SANITIZER_WINDOWS64
// Allocate memory after the module (DLL or EXE file), but within 2GB
// of the start of the module so that any address within the module can be
// referenced with PC-relative operands.
// This allows us to not just jump to the trampoline with a PC-relative
// offset, but to relocate any instructions that we copy to the trampoline
// which have references to the original module. If we can't find the base
// address of the module (e.g. if func_address is in mmap'ed memory), just
// use func_address as is.
HMODULE module;
if (::GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
(LPCWSTR)func_address, &module)) {
MODULEINFO module_info;
if (::GetModuleInformation(::GetCurrentProcess(), module,
&module_info, sizeof(module_info))) {
image_address = (uptr)module_info.lpBaseOfDll;
}
}
#endif
// Find a region within 2G with enough space to allocate |size| bytes.
TrampolineMemoryRegion *region = nullptr;
for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
@ -431,7 +455,8 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
// The following prologues cannot be patched because of the short jump
// jumping to the patching region.
#if SANITIZER_WINDOWS64
// Short jump patterns below are only for x86_64.
# if SANITIZER_WINDOWS_x64
// ntdll!wcslen in Win11
// 488bc1 mov rax,rcx
// 0fb710 movzx edx,word ptr [rax]
@ -462,7 +487,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
return 4;
#endif
#if SANITIZER_WINDOWS64
# if SANITIZER_WINDOWS_x64
if (memcmp((u8*)address, kPrologueWithShortJump1,
sizeof(kPrologueWithShortJump1)) == 0 ||
memcmp((u8*)address, kPrologueWithShortJump2,
@ -478,6 +503,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u8*)address) {
case 0x90: // 90 : nop
case 0xC3: // C3 : ret (for small/empty function interception
case 0xCC: // CC : int 3 i.e. registering weak functions)
return 1;
case 0x50: // push eax / rax
@ -494,6 +521,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x6A: // 6A XX = push XX
return 2;
// This instruction can be encoded with a 16-bit immediate but that is
// incredibly unlikely.
case 0x68: // 68 XX XX XX XX : push imm32
return 5;
case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
return 5;
@ -501,7 +533,6 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
// Cannot overwrite control-instruction. Return 0 to indicate failure.
case 0xE9: // E9 XX XX XX XX : jmp <label>
case 0xE8: // E8 XX XX XX XX : call <func>
case 0xC3: // C3 : ret
case 0xEB: // EB XX : jmp XX (short jump)
case 0x70: // 7Y YY : jy XX (short conditional jump)
case 0x71:
@ -532,6 +563,9 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
case 0xD233: // 33 D2 : xor edx, edx
case 0xDB84: // 84 DB : test bl,bl
case 0xC984: // 84 C9 : test cl,cl
case 0xD284: // 84 D2 : test dl,dl
return 2;
// Cannot overwrite control-instruction. Return 0 to indicate failure.
@ -540,15 +574,38 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
switch (0x00FFFFFF & *(u32*)address) {
case 0xF8E483: // 83 E4 F8 : and esp, 0xFFFFFFF8
case 0x64EC83: // 83 EC 64 : sub esp, 64h
return 3;
case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
return 7;
}
#if SANITIZER_WINDOWS64
switch (0x000000FF & *(u32 *)address) {
case 0xc2: // C2 XX XX : ret XX (needed for registering weak functions)
return 3;
}
# if SANITIZER_WINDOWS_x64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 9;
case 0xF2:
switch (*(u32 *)(address + 1)) {
case 0x2444110f: // f2 0f 11 44 24 XX movsd QWORD PTR
// [rsp + XX], xmm0
case 0x244c110f: // f2 0f 11 4c 24 XX movsd QWORD PTR
// [rsp + XX], xmm1
case 0x2454110f: // f2 0f 11 54 24 XX movsd QWORD PTR
// [rsp + XX], xmm2
case 0x245c110f: // f2 0f 11 5c 24 XX movsd QWORD PTR
// [rsp + XX], xmm3
case 0x2464110f: // f2 0f 11 64 24 XX movsd QWORD PTR
// [rsp + XX], xmm4
return 6;
}
break;
case 0x83:
const u8 next_byte = *(u8*)(address + 1);
@ -577,51 +634,126 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x018a: // mov al, byte ptr [rcx]
return 2;
case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x7E80: // 80 7E YY XX cmp BYTE PTR [rsi+YY], XX
case 0x7D80: // 80 7D YY XX cmp BYTE PTR [rbp+YY], XX
case 0x7A80: // 80 7A YY XX cmp BYTE PTR [rdx+YY], XX
case 0x7880: // 80 78 YY XX cmp BYTE PTR [rax+YY], XX
case 0x7B80: // 80 7B YY XX cmp BYTE PTR [rbx+YY], XX
case 0x7980: // 80 79 YY XX cmp BYTE ptr [rcx+YY], XX
return 4;
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
return 6;
case 0x7E81: // 81 7E YY XX XX XX XX cmp DWORD PTR [rsi+YY], XX XX XX XX
case 0x7D81: // 81 7D YY XX XX XX XX cmp DWORD PTR [rbp+YY], XX XX XX XX
case 0x7A81: // 81 7A YY XX XX XX XX cmp DWORD PTR [rdx+YY], XX XX XX XX
case 0x7881: // 81 78 YY XX XX XX XX cmp DWORD PTR [rax+YY], XX XX XX XX
case 0x7B81: // 81 7B YY XX XX XX XX cmp DWORD PTR [rbx+YY], XX XX XX XX
case 0x7981: // 81 79 YY XX XX XX XX cmp dword ptr [rcx+YY], XX XX XX XX
return 7;
}
switch (0x00FFFFFF & *(u32*)address) {
case 0xe58948: // 48 8b c4 : mov rbp, rsp
case 0xc18b48: // 48 8b c1 : mov rax, rcx
case 0xc48b48: // 48 8b c4 : mov rax, rsp
case 0xd9f748: // 48 f7 d9 : neg rcx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
case 0xc98548: // 48 85 C9 : test rcx, rcx
case 0xd28548: // 48 85 d2 : test rdx, rdx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
case 0xc93345: // 45 33 c9 : xor r9d, r9d
case 0xdb3345: // 45 33 DB : xor r11d, r11d
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
case 0xc18b4c: // 4C 8B C1 : mov r8, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 3d 0b c0 : or r8, r8
case 0xc00b4d: // 4d 0b c0 : or r8, r8
case 0xc03345: // 45 33 c0 : xor r8d, r8d
case 0xc08548: // 48 85 c0 : test rax, rax
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc08b41: // 41 8b c0 : mov eax, r8d
case 0xc0ff48: // 48 ff c0 : inc rax
case 0xc0ff49: // 49 ff c0 : inc r8
case 0xc18b41: // 41 8b c1 : mov eax, r9d
case 0xc18b48: // 48 8b c1 : mov rax, rcx
case 0xc18b4c: // 4c 8b c1 : mov r8, rcx
case 0xc1ff48: // 48 ff c1 : inc rcx
case 0xc1ff49: // 49 ff c1 : inc r9
case 0xc28b41: // 41 8b c2 : mov eax, r10d
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc2ff48: // 48 ff c2 : inc rdx
case 0xc2ff49: // 49 ff c2 : inc r10
case 0xc38b41: // 41 8b c3 : mov eax, r11d
case 0xc3ff48: // 48 ff c3 : inc rbx
case 0xc3ff49: // 49 ff c3 : inc r11
case 0xc48b41: // 41 8b c4 : mov eax, r12d
case 0xc48b48: // 48 8b c4 : mov rax, rsp
case 0xc4ff49: // 49 ff c4 : inc r12
case 0xc5ff49: // 49 ff c5 : inc r13
case 0xc6ff48: // 48 ff c6 : inc rsi
case 0xc6ff49: // 49 ff c6 : inc r14
case 0xc7ff48: // 48 ff c7 : inc rdi
case 0xc7ff49: // 49 ff c7 : inc r15
case 0xc93345: // 45 33 c9 : xor r9d, r9d
case 0xc98548: // 48 85 c9 : test rcx, rcx
case 0xc9854d: // 4d 85 c9 : test r9, r9
case 0xc98b4c: // 4c 8b c9 : mov r9, rcx
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0xca3b48: // 48 3b ca : cmp rcx, rdx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
case 0xE0E483: // 83 E4 E0 : and esp, 0xFFFFFFE0
case 0xd28548: // 48 85 d2 : test rdx, rdx
case 0xd2854d: // 4d 85 d2 : test r10, r10
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd9f748: // 48 f7 d9 : neg rcx
case 0xdb3345: // 45 33 db : xor r11d, r11d
case 0xdb8548: // 48 85 db : test rbx, rbx
case 0xdb854d: // 4d 85 db : test r11, r11
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xe0e483: // 83 e4 e0 : and esp, 0xFFFFFFE0
case 0xe48548: // 48 85 e4 : test rsp, rsp
case 0xe4854d: // 4d 85 e4 : test r12, r12
case 0xe58948: // 48 89 e5 : mov rbp, rsp
case 0xed8548: // 48 85 ed : test rbp, rbp
case 0xed854d: // 4d 85 ed : test r13, r13
case 0xf6854d: // 4d 85 f6 : test r14, r14
case 0xff854d: // 4d 85 ff : test r15, r15
return 3;
case 0x245489: // 89 54 24 XX : mov DWORD PTR[rsp + XX], edx
case 0x428d44: // 44 8d 42 XX : lea r8d , [rdx + XX]
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
case 0xec8348: // 48 83 ec XX : sub rsp, XX
case 0xf88349: // 49 83 f8 XX : cmp r8, XX
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
return 4;
case 0x246483: // 83 64 24 XX YY : and DWORD PTR [rsp+XX], YY
return 5;
case 0x788166: // 66 81 78 XX YY YY cmp WORD PTR [rax+XX], YY YY
case 0x798166: // 66 81 79 XX YY YY cmp WORD PTR [rcx+XX], YY YY
case 0x7a8166: // 66 81 7a XX YY YY cmp WORD PTR [rdx+XX], YY YY
case 0x7b8166: // 66 81 7b XX YY YY cmp WORD PTR [rbx+XX], YY YY
case 0x7e8166: // 66 81 7e XX YY YY cmp WORD PTR [rsi+XX], YY YY
case 0x7f8166: // 66 81 7f XX YY YY cmp WORD PTR [rdi+XX], YY YY
return 6;
case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
return 7;
// clang-format off
case 0x788141: // 41 81 78 XX YY YY YY YY : cmp DWORD PTR [r8+YY], XX XX XX XX
case 0x798141: // 41 81 79 XX YY YY YY YY : cmp DWORD PTR [r9+YY], XX XX XX XX
case 0x7a8141: // 41 81 7a XX YY YY YY YY : cmp DWORD PTR [r10+YY], XX XX XX XX
case 0x7b8141: // 41 81 7b XX YY YY YY YY : cmp DWORD PTR [r11+YY], XX XX XX XX
case 0x7c8141: // 41 81 7c XX YY YY YY YY : cmp DWORD PTR [r12+YY], XX XX XX XX
case 0x7d8141: // 41 81 7d XX YY YY YY YY : cmp DWORD PTR [r13+YY], XX XX XX XX
case 0x7e8141: // 41 81 7e XX YY YY YY YY : cmp DWORD PTR [r14+YY], XX XX XX XX
case 0x7f8141: // 41 81 7f YY XX XX XX XX : cmp DWORD PTR [r15+YY], XX XX XX XX
case 0x247c81: // 81 7c 24 YY XX XX XX XX : cmp DWORD PTR [rsp+YY], XX XX XX XX
return 8;
// clang-format on
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x058d48: // 48 8d 05 XX XX XX XX :
// lea rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
@ -636,6 +768,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
switch (*(u32*)(address)) {
case 0x1ab60f44: // 44 0f b6 1a : movzx r11d, BYTE PTR [rdx]
return 4;
case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
@ -645,8 +779,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
case 0x2444894c: // 4c 89 44 24 XX : mov QWORD PTR [rsp + XX], r8
case 0x244c8944: // 44 89 4c 24 XX mov DWORD PTR [rsp + XX], r9d
case 0x24448944: // 44 89 44 24 XX mov DWORD PTR [rsp + XX], r8d
case 0x246c8d48: // 48 8d 6c 24 XX : lea rbp, [rsp + XX]
return 5;
case 0x24648348: // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY
case 0x24648348: // 48 83 64 24 XX YY : and QWORD PTR [rsp + XX], YY
return 6;
}
@ -660,6 +797,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
case 0x758B: // 8B 75 XX : mov esi, dword ptr [ebp + XX]
case 0xEC83: // 83 EC XX : sub esp, XX
case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
return 3;
@ -943,19 +1081,26 @@ bool OverrideFunction(
static void **InterestingDLLsAvailable() {
static const char *InterestingDLLs[] = {
"kernel32.dll",
"msvcr100.dll", // VS2010
"msvcr110.dll", // VS2012
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
#if (defined(__MINGW32__) && defined(__i386__))
"libc++.dll", // libc++
"libunwind.dll", // libunwind
#endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll", NULL};
"kernel32.dll",
"msvcr100d.dll", // VS2010
"msvcr110d.dll", // VS2012
"msvcr120d.dll", // VS2013
"vcruntime140d.dll", // VS2015
"ucrtbased.dll", // Universal CRT
"msvcr100.dll", // VS2010
"msvcr110.dll", // VS2012
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
# if (defined(__MINGW32__) && defined(__i386__))
"libc++.dll", // libc++
"libunwind.dll", // libunwind
# endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll",
NULL
};
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
@ -1126,4 +1271,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
} // namespace __interception
#endif // SANITIZER_APPLE
#endif // SANITIZER_WINDOWS

View File

@ -92,15 +92,16 @@ extern "C" void __lsan_init() {
CacheBinaryName();
AvoidCVE_2016_2143();
InitializeFlags();
InitializePlatformEarly();
InitCommonLsan();
InitializeAllocator();
ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreads();
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
InitializeMainThread();
InstallAtExitCheckLeaks();
InstallAtForkHandler();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

View File

@ -40,6 +40,7 @@ void InitializeInterceptors();
void ReplaceSystemMalloc();
void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
void InstallAtExitCheckLeaks();
void InstallAtForkHandler();
#define ENSURE_LSAN_INITED \
do { \

View File

@ -31,7 +31,7 @@ static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
static const uptr kMaxAllowedMallocSize = 1ULL << 40;
#endif
static Allocator allocator;

View File

@ -42,6 +42,9 @@ namespace __lsan {
// also to protect the global list of root regions.
static Mutex global_mutex;
void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }
Flags lsan_flags;
void DisableCounterUnderflow() {
@ -105,7 +108,7 @@ class LeakSuppressionContext {
void PrintMatchedSuppressions();
};
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
@ -152,14 +155,15 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
return s;
// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(addr));
const SymbolizedStack *frames = symbolized_stack.get();
for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
context.Match(cur->info.file, kSuppressionLeak, &s)) {
break;
}
}
frames->ClearAll();
return s;
}
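The change above swaps a manual frames->ClearAll() for a SymbolizedStackHolder, so the frame list is released on every path out of the function, including the early break. The pattern in miniature (types and names here are illustrative stand-ins, not the runtime's):

struct FrameDemo {
  FrameDemo *next = nullptr;
  void ClearAll() {
    // In the real runtime this walks the list and releases every node.
  }
};

// Owns a frame list for a scope; any early return still runs the
// destructor, so the cleanup can no longer be skipped.
class FrameListHolderDemo {
 public:
  explicit FrameListHolderDemo(FrameDemo *f) : f_(f) {}
  ~FrameListHolderDemo() {
    if (f_)
      f_->ClearAll();
  }
  const FrameDemo *get() const { return f_; }

 private:
  FrameDemo *f_;
};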
@ -284,23 +288,54 @@ static inline bool MaybeUserPointer(uptr p) {
# endif
}
namespace {
struct DirectMemoryAccessor {
void Init(uptr begin, uptr end) {};
void *LoadPtr(uptr p) const { return *reinterpret_cast<void **>(p); }
};
struct CopyMemoryAccessor {
void Init(uptr begin, uptr end) {
this->begin = begin;
buffer.clear();
buffer.resize(end - begin);
MemCpyAccessible(buffer.data(), reinterpret_cast<void *>(begin),
buffer.size());
};
void *LoadPtr(uptr p) const {
uptr offset = p - begin;
CHECK_LE(offset + sizeof(void *), reinterpret_cast<uptr>(buffer.size()));
return *reinterpret_cast<void **>(offset +
reinterpret_cast<uptr>(buffer.data()));
}
private:
uptr begin;
InternalMmapVector<char> buffer;
};
} // namespace
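Both accessors expose the same Init/LoadPtr shape: the direct one dereferences in place, the copying one snapshots the range with MemCpyAccessible and reads from the snapshot. That shared shape is what lets the scanning code below be a single template. A small usage sketch under that assumption (the counting function itself is invented for illustration):

#include <cstdint>

using uptr_demo = uintptr_t;  // stand-in for the runtime's uptr typedef

template <class Accessor>
static int CountNonNullPointers(uptr_demo begin, uptr_demo end,
                                Accessor &accessor) {
  accessor.Init(begin, end);  // no-op for direct, snapshot for copying
  int n = 0;
  for (uptr_demo p = begin; p + sizeof(void *) <= end; p += sizeof(void *))
    if (accessor.LoadPtr(p) != nullptr)
      ++n;
  return n;
}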
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag) {
template <class Accessor>
void ScanForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag,
Accessor &accessor) {
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
(void *)end);
accessor.Init(begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
for (; pp + sizeof(void *) <= end; pp += alignment) {
void *p = *reinterpret_cast<void **>(pp);
void *p = accessor.LoadPtr(pp);
# if SANITIZER_APPLE
p = TransformPointer(p);
# endif
@ -335,6 +370,12 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
}
}
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag) {
DirectMemoryAccessor accessor;
ScanForPointers(begin, end, frontier, region_type, tag, accessor);
}
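The round-up at the top of ScanForPointers is plain modular arithmetic; a runnable worked instance (assuming pointer_alignment() returns 8):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t pp = 0x1003, alignment = 8;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;  // same round-up as the scanner
  assert(pp == 0x1008);  // 0x1003 % 8 == 3, so scanning starts 5 bytes later
  return 0;
}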
// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
uptr allocator_begin = 0, allocator_end = 0;
@ -352,12 +393,19 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
}
}
template <class Accessor>
void ScanRanges(const InternalMmapVector<Range> &ranges, Frontier *frontier,
const char *region_type, Accessor &accessor) {
for (uptr i = 0; i < ranges.size(); i++) {
ScanForPointers(ranges[i].begin, ranges[i].end, frontier, region_type,
kReachable, accessor);
}
}
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
Frontier *frontier) {
for (uptr i = 0; i < ranges.size(); i++) {
ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
kReachable);
}
DirectMemoryAccessor accessor;
ScanRanges(ranges, frontier, "FAKE STACK", accessor);
}
# if SANITIZER_FUCHSIA
@ -395,26 +443,129 @@ static void ProcessThreadRegistry(Frontier *frontier) {
}
// Scans thread data (stacks and TLS) for heap pointers.
template <class Accessor>
static void ProcessThread(tid_t os_id, uptr sp,
const InternalMmapVector<uptr> &registers,
InternalMmapVector<Range> &extra_ranges,
Frontier *frontier, Accessor &accessor) {
// `extra_ranges` is declared outside of the function and the loop so that
// its mapped memory can be reused across threads.
CHECK(extra_ranges.empty());
LOG_THREADS("Processing thread %llu.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
bool thread_found =
GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
&tls_end, &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
LOG_THREADS("Thread %llu not found in registry.\n", os_id);
return;
}
if (!sp)
sp = stack_begin;
if (flags()->use_registers) {
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end =
reinterpret_cast<uptr>(registers.data() + registers.size());
ScanForPointers(registers_begin, registers_end, frontier, "REGISTERS",
kReachable, accessor);
}
if (flags()->use_stacks) {
LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
(void *)stack_end, (void *)sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack, or swapcontext was used).
// Again, consider the entire stack range to be reachable.
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
uptr page_size = GetPageSizeCached();
int skipped = 0;
while (stack_begin < stack_end &&
!IsAccessibleMemoryRange(stack_begin, 1)) {
skipped++;
stack_begin += page_size;
}
LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped,
(void *)stack_begin, (void *)stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
}
ScanForPointers(stack_begin, stack_end, frontier, "STACK", kReachable,
accessor);
GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
ScanRanges(extra_ranges, frontier, "FAKE STACK", accessor);
}
if (flags()->use_tls) {
if (tls_begin) {
LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
// If the tls and cache ranges don't overlap, scan full tls range,
// otherwise, only scan the non-overlapping portions
if (cache_begin == cache_end || tls_end < cache_begin ||
tls_begin > cache_end) {
ScanForPointers(tls_begin, tls_end, frontier, "TLS", kReachable,
accessor);
} else {
if (tls_begin < cache_begin)
ScanForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable,
accessor);
if (tls_end > cache_end)
ScanForPointers(cache_end, tls_end, frontier, "TLS", kReachable,
accessor);
}
}
# if SANITIZER_ANDROID
extra_ranges.clear();
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
void *arg) -> void {
reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
{reinterpret_cast<uptr>(dtls_begin),
reinterpret_cast<uptr>(dtls_end)});
};
// FIXME: There might be a race-condition here (and in Bionic) if the
// thread is suspended in the middle of updating its DTLS. IOWs, we
// could scan already freed memory. (probably fine for now)
__libc_iterate_dynamic_tls(os_id, cb, &extra_ranges);
ScanRanges(extra_ranges, frontier, "DTLS", accessor);
# else
if (dtls && !DTLSInDestruction(dtls)) {
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtv.beg;
uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) {
LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
(void *)dtls_end);
ScanForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable,
accessor);
}
});
} else {
// We are handling a thread with DTLS under destruction. Log about
// this and continue.
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
}
# endif
}
}
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier, tid_t caller_tid,
uptr caller_sp) {
InternalMmapVector<tid_t> done_threads;
InternalMmapVector<uptr> registers;
InternalMmapVector<Range> extra_ranges;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %llu.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
bool thread_found =
GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
&tls_end, &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
LOG_THREADS("Thread %llu not found in registry.\n", os_id);
continue;
}
uptr sp;
registers.clear();
extra_ranges.clear();
const tid_t os_id = suspended_threads.GetThreadID(i);
uptr sp = 0;
PtraceRegistersStatus have_registers =
suspended_threads.GetRegistersAndSP(i, &registers, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
@ -423,96 +574,32 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// GetRegistersAndSP failed with ESRCH.
if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
continue;
sp = stack_begin;
sp = 0;
}
if (suspended_threads.GetThreadID(i) == caller_tid) {
if (os_id == caller_tid)
sp = caller_sp;
}
if (flags()->use_registers && have_registers) {
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end =
reinterpret_cast<uptr>(registers.data() + registers.size());
ScanRangeForPointers(registers_begin, registers_end, frontier,
"REGISTERS", kReachable);
}
DirectMemoryAccessor accessor;
ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
if (flags()->use_detached)
done_threads.push_back(os_id);
}
if (flags()->use_stacks) {
LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
(void *)stack_end, (void *)sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack, or swapcontext was used).
// Again, consider the entire stack range to be reachable.
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
uptr page_size = GetPageSizeCached();
int skipped = 0;
while (stack_begin < stack_end &&
!IsAccessibleMemoryRange(stack_begin, 1)) {
skipped++;
stack_begin += page_size;
}
LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
skipped, (void *)stack_begin, (void *)stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
}
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable);
if (flags()->use_detached) {
CopyMemoryAccessor accessor;
InternalMmapVector<tid_t> known_threads;
GetRunningThreadsLocked(&known_threads);
Sort(done_threads.data(), done_threads.size());
for (tid_t os_id : known_threads) {
registers.clear();
extra_ranges.clear();
GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
ScanExtraStackRanges(extra_ranges, frontier);
}
if (flags()->use_tls) {
if (tls_begin) {
LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
// If the tls and cache ranges don't overlap, scan full tls range,
// otherwise, only scan the non-overlapping portions
if (cache_begin == cache_end || tls_end < cache_begin ||
tls_begin > cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
if (tls_begin < cache_begin)
ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
kReachable);
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
kReachable);
}
uptr i = InternalLowerBound(done_threads, os_id);
if (i >= done_threads.size() || done_threads[i] != os_id) {
uptr sp = (os_id == caller_tid) ? caller_sp : 0;
ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
}
# if SANITIZER_ANDROID
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
void *arg) -> void {
ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
reinterpret_cast<uptr>(dtls_end),
reinterpret_cast<Frontier *>(arg), "DTLS",
kReachable);
};
// FIXME: There might be a race-condition here (and in Bionic) if the
// thread is suspended in the middle of updating its DTLS. IOWs, we
// could scan already freed memory. (probably fine for now)
__libc_iterate_dynamic_tls(os_id, cb, frontier);
# else
if (dtls && !DTLSInDestruction(dtls)) {
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtv.beg;
uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) {
LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
(void *)dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
});
} else {
// We are handling a thread with DTLS under destruction. Log about
// this and continue.
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
}
# endif
}
}
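done_threads is sorted once and then probed with InternalLowerBound, so each known thread is scanned at most once even when the detached pass revisits the full thread list. The same membership test expressed with the standard library (a sketch, not the runtime's helper):

#include <algorithm>
#include <cstdint>
#include <vector>

static bool AlreadyProcessed(const std::vector<uint64_t> &sorted_done,
                             uint64_t os_id) {
  auto it = std::lower_bound(sorted_done.begin(), sorted_done.end(), os_id);
  return it != sorted_done.end() && *it == os_id;
}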
@ -694,11 +781,13 @@ void LeakSuppressionContext::PrintMatchedSuppressions() {
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
static bool ReportUnsuspendedThreads(const SuspendedThreadsList &) {
return true;
}
# else // !SANITIZER_FUCHSIA
static void ReportUnsuspendedThreads(
static bool ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
@ -706,16 +795,20 @@ static void ReportUnsuspendedThreads(
Sort(threads.data(), threads.size());
InternalMmapVector<tid_t> unsuspended;
GetRunningThreadsLocked(&unsuspended);
InternalMmapVector<tid_t> known_threads;
GetRunningThreadsLocked(&known_threads);
for (auto os_id : unsuspended) {
bool succeeded = true;
for (auto os_id : known_threads) {
uptr i = InternalLowerBound(threads, os_id);
if (i >= threads.size() || threads[i] != os_id)
if (i >= threads.size() || threads[i] != os_id) {
succeeded = false;
Report(
"Running thread %zu was not suspended. False leaks are possible.\n",
os_id);
}
}
return succeeded;
}
# endif // !SANITIZER_FUCHSIA
@ -725,7 +818,18 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param);
CHECK(!param->success);
ReportUnsuspendedThreads(suspended_threads);
if (!ReportUnsuspendedThreads(suspended_threads)) {
switch (flags()->thread_suspend_fail) {
case 0:
param->success = true;
return;
case 1:
break;
case 2:
// Will crash on return.
return;
}
}
ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
param->caller_sp);
ForEachChunk(CollectLeaksCb, &param->leaks);
@ -750,19 +854,20 @@ static bool PrintResults(LeakReport &report) {
}
if (common_flags()->print_suppressions)
GetSuppressionContext()->PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
if (unsuppressed_count)
report.PrintSummary();
return true;
}
return false;
if ((unsuppressed_count && common_flags()->verbosity >= 2) ||
flags()->log_threads)
PrintThreads();
return unsuppressed_count;
}
static bool CheckForLeaks() {
static bool CheckForLeaksOnce() {
if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
VReport(1, "LeakSanitizer is disabled");
VReport(1, "LeakSanitizer is disabled\n");
return false;
}
VReport(1, "LeakSanitizer: checking for leaks");
VReport(1, "LeakSanitizer: checking for leaks\n");
// Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
// suppressions. However if a stack id was previously suppressed, it should be
// suppressed in future checks as well.
@ -809,6 +914,12 @@ static bool CheckForLeaks() {
}
}
static bool CheckForLeaks() {
int leaking_tries = 0;
for (int i = 0; i < flags()->tries; ++i) leaking_tries += CheckForLeaksOnce();
return leaking_tries == flags()->tries;
}
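Note how the retry loop aggregates results: CheckForLeaksOnce() contributes one per pass that found leaks, so `leaking_tries == flags()->tries` reports a leak only when every pass observed it; a single clean pass discards the result as noise.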
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

View File

@ -111,6 +111,7 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
void PrintThreads();
//// --------------------------------------------------------------------------
//// Allocator prototypes.
@ -120,6 +121,10 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
void LockAllocator();
void UnlockAllocator();
// Lock/unlock global mutext.
void LockGlobal();
void UnlockGlobal();
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// If p points into a chunk that has been allocated to the user, returns its

View File

@ -28,7 +28,7 @@ namespace __lsan {
static const char kLinkerName[] = "ld";
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
alignas(64) static char linker_placeholder[sizeof(LoadedModule)];
static LoadedModule *linker = nullptr;
static bool IsLinker(const LoadedModule& module) {

View File

@ -41,6 +41,13 @@ LSAN_FLAG(bool, use_ld_allocations, true,
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
"Consider pointers found in poisoned memory to be valid.")
LSAN_FLAG(bool, use_detached, false,
"Scan threads even if attaching to them failed.")
LSAN_FLAG(bool, log_pointers, false, "Debug logging")
LSAN_FLAG(bool, log_threads, false, "Debug logging")
LSAN_FLAG(int, tries, 1, "Debug option to repeat leak checking multiple times")
LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
LSAN_FLAG(int, thread_suspend_fail, 1,
"Behaviour if thread suspendion all thread (0 - "
"abandon leak checking, 1 - continue with leak checking (reported "
"leaks can be false), 2 - crash (for debugging LSAN)).")

View File

@ -80,6 +80,7 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
// On Fuchsia, leak detection is done by a special hook after atexit hooks.
// So this doesn't install any atexit hook like on other platforms.
void InstallAtExitCheckLeaks() {}
void InstallAtForkHandler() {}
// ASan defines this to check its `halt_on_error` flag.
bool UseExitcodeOnLeak() { return true; }

View File

@ -26,7 +26,6 @@
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
@ -77,6 +76,8 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void, free, void *p) {
if (UNLIKELY(!p))
return;
if (DlsymAlloc::PointerIsMine(p))
return DlsymAlloc::Free(p);
ENSURE_LSAN_INITED;
@ -133,9 +134,7 @@ INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
#else
@ -389,7 +388,7 @@ INTERCEPTOR(int, atexit, void (*f)()) {
extern "C" {
extern int _pthread_atfork(void (*prepare)(), void (*parent)(),
void (*child)());
};
}
INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
void (*child)()) {
@ -525,7 +524,7 @@ INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN
# endif // SANITIZER_INTERCEPT_TIMEDJOIN
DEFINE_REAL_PTHREAD_FUNCTIONS
DEFINE_INTERNAL_PTHREAD_FUNCTIONS
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
@ -543,6 +542,7 @@ namespace __lsan {
void InitializeInterceptors() {
// Fuchsia doesn't use interceptors that require any setup.
#if !SANITIZER_FUCHSIA
__interception::DoesNotSupportStaticLinking();
InitializeSignalInterceptors();
INTERCEPT_FUNCTION(malloc);

View File

@ -14,11 +14,13 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
# include <pthread.h>
# include "lsan.h"
# include "lsan_allocator.h"
# include "lsan_thread.h"
# include "sanitizer_common/sanitizer_stacktrace.h"
# include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __lsan {
@ -48,12 +50,8 @@ void ThreadContext::OnStarted(void *arg) {
void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &args.stack_end,
&args.tls_begin, &args.tls_end);
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args);
@ -98,6 +96,31 @@ void InstallAtExitCheckLeaks() {
Atexit(DoLeakCheck);
}
static void BeforeFork() {
VReport(2, "BeforeFork tid: %llu\n", GetTid());
LockGlobal();
LockThreads();
LockAllocator();
StackDepotLockBeforeFork();
}
static void AfterFork(bool fork_child) {
StackDepotUnlockAfterFork(fork_child);
UnlockAllocator();
UnlockThreads();
UnlockGlobal();
VReport(2, "AfterFork tid: %llu\n", GetTid());
}
void InstallAtForkHandler() {
# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE
return; // FIXME: Implement FutexWait.
# endif
pthread_atfork(
&BeforeFork, []() { AfterFork(/* fork_child= */ false); },
[]() { AfterFork(/* fork_child= */ true); });
}
} // namespace __lsan
#endif // SANITIZER_POSIX
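The handler above follows the standard atfork discipline: acquire every runtime lock before fork() and release it in both the parent and the child, so the child, which inherits only the forking thread, never blocks on a lock owned by a thread that no longer exists. The same protocol with a single lock (a minimal sketch, not LSan's handler):

#include <pthread.h>

static pthread_mutex_t g_demo_mu = PTHREAD_MUTEX_INITIALIZER;

static void DemoBeforeFork() { pthread_mutex_lock(&g_demo_mu); }
static void DemoAfterFork() { pthread_mutex_unlock(&g_demo_mu); }

static void InstallDemoAtFork() {
  // prepare runs in the parent before fork(); the other two run after,
  // in the parent and in the child respectively.
  pthread_atfork(DemoBeforeFork, DemoAfterFork, DemoAfterFork);
}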

View File

@ -14,8 +14,8 @@
#include "lsan.h"
#if SANITIZER_CAN_USE_PREINIT_ARRAY
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
void (*__local_lsan_preinit)(void) = __lsan_init;
// This section is linked into the main executable when -fsanitize=leak is
// specified to perform initialization at a very early stage.
__attribute__((section(".preinit_array"), used)) static auto preinit =
__lsan_init;
#endif
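.preinit_array entries run before any .init_array constructors, including those of shared objects, which is what guarantees __lsan_init executes before user code can allocate. The mechanism in isolation (a sketch; the attribute syntax assumes GCC/Clang on an ELF target, and the pointer only takes effect in the main executable):

#include <cstdio>

static void RunsFirst() { std::puts("before all dynamic initializers"); }

// The linker collects such pointers into the executable's .preinit_array;
// the dynamic loader invokes them before ordinary constructors.
__attribute__((section(".preinit_array"), used))
static void (*demo_preinit)(void) = RunsFirst;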

View File

@ -18,6 +18,7 @@
#include "lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_history.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
@ -35,12 +36,12 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
}
void InitializeThreads() {
static ALIGNED(alignof(
ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
alignas(alignof(ThreadRegistry)) static char
thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry =
new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
static ALIGNED(alignof(ThreadArgRetval)) char
alignas(alignof(ThreadArgRetval)) static char
thread_arg_retval_placeholder[sizeof(ThreadArgRetval)];
thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval();
}
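The alignas-plus-placement-new pattern used here avoids any heap allocation during startup: correctly aligned raw static storage, constructed lazily. Reduced to its essentials (names are illustrative):

#include <new>

struct RegistryDemo {
  int num_threads = 0;
};

alignas(alignof(RegistryDemo)) static char
    g_registry_storage[sizeof(RegistryDemo)];
static RegistryDemo *g_registry = nullptr;

static void InitRegistryDemo() {
  // Construct the object inside the reserved static buffer; no malloc.
  g_registry = new (g_registry_storage) RegistryDemo();
}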
@ -109,6 +110,12 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
threads);
}
void PrintThreads() {
InternalScopedString out;
PrintThreadHistory(*thread_registry, out);
Report("%s\n", out.data());
}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
GetThreadArgRetval().GetAllPtrsLocked(ptrs);
}

View File

@ -47,7 +47,6 @@ sanitizer_common_files = \
sanitizer_netbsd.cpp \
sanitizer_platform_limits_freebsd.cpp \
sanitizer_platform_limits_linux.cpp \
sanitizer_platform_limits_openbsd.cpp \
sanitizer_platform_limits_posix.cpp \
sanitizer_platform_limits_solaris.cpp \
sanitizer_posix.cpp \
@ -74,15 +73,18 @@ sanitizer_common_files = \
sanitizer_symbolizer.cpp \
sanitizer_symbolizer_libbacktrace.cpp \
sanitizer_symbolizer_libcdep.cpp \
sanitizer_symbolizer_markup.cpp \
sanitizer_symbolizer_posix_libcdep.cpp \
sanitizer_symbolizer_win.cpp \
sanitizer_termination.cpp \
sanitizer_thread_arg_retval.cpp \
sanitizer_thread_history.cpp \
sanitizer_thread_registry.cpp \
sanitizer_tls_get_addr.cpp \
sanitizer_unwind_linux_libcdep.cpp \
sanitizer_unwind_win.cpp \
sanitizer_win.cpp
sanitizer_win.cpp \
sanitizer_win_interception.cpp
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)

View File

@ -133,7 +133,6 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_mutex.lo \
sanitizer_netbsd.lo sanitizer_platform_limits_freebsd.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_openbsd.lo \
sanitizer_platform_limits_posix.lo \
sanitizer_platform_limits_solaris.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
@ -148,12 +147,13 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_stoptheworld_mac.lo sanitizer_suppressions.lo \
sanitizer_symbolizer.lo sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_libcdep.lo sanitizer_symbolizer_markup.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_termination.lo \
sanitizer_thread_arg_retval.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
sanitizer_unwind_win.lo sanitizer_win.lo
sanitizer_thread_arg_retval.lo sanitizer_thread_history.lo \
sanitizer_thread_registry.lo sanitizer_tls_get_addr.lo \
sanitizer_unwind_linux_libcdep.lo sanitizer_unwind_win.lo \
sanitizer_win.lo sanitizer_win_interception.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
@ -409,7 +409,6 @@ sanitizer_common_files = \
sanitizer_netbsd.cpp \
sanitizer_platform_limits_freebsd.cpp \
sanitizer_platform_limits_linux.cpp \
sanitizer_platform_limits_openbsd.cpp \
sanitizer_platform_limits_posix.cpp \
sanitizer_platform_limits_solaris.cpp \
sanitizer_posix.cpp \
@ -436,15 +435,18 @@ sanitizer_common_files = \
sanitizer_symbolizer.cpp \
sanitizer_symbolizer_libbacktrace.cpp \
sanitizer_symbolizer_libcdep.cpp \
sanitizer_symbolizer_markup.cpp \
sanitizer_symbolizer_posix_libcdep.cpp \
sanitizer_symbolizer_win.cpp \
sanitizer_termination.cpp \
sanitizer_thread_arg_retval.cpp \
sanitizer_thread_history.cpp \
sanitizer_thread_registry.cpp \
sanitizer_tls_get_addr.cpp \
sanitizer_unwind_linux_libcdep.cpp \
sanitizer_unwind_win.cpp \
sanitizer_win.cpp
sanitizer_win.cpp \
sanitizer_win_interception.cpp
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
@ -568,7 +570,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_netbsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_freebsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_openbsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_solaris.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@ -594,16 +595,19 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_markup.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_termination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_arg_retval.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_history.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win_interception.Plo@am__quote@
.cpp.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<

View File

@ -37,10 +37,6 @@ static void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
#undef SANCOV_FLAG
}
static const char *MaybeCallSancovDefaultOptions() {
return (&__sancov_default_options) ? __sancov_default_options() : "";
}
void InitializeSancovFlags() {
SancovFlags *f = sancov_flags();
f->SetDefaults();
@ -48,7 +44,7 @@ void InitializeSancovFlags() {
FlagParser parser;
RegisterSancovFlags(&parser, f);
parser.ParseString(MaybeCallSancovDefaultOptions());
parser.ParseString(__sancov_default_options());
parser.ParseStringFromEnv("SANCOV_OPTIONS");
ReportUnrecognizedFlags();

View File

@ -25,7 +25,7 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@ -59,7 +59,7 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
uptr alignment = 8;
constexpr usize alignment = Max<usize>(8, sizeof(void *));
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
@ -137,7 +137,8 @@ void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
}
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
constexpr usize kLowLevelAllocatorDefaultAlignment =
Max<usize>(8, sizeof(void *));
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;

View File

@ -15,6 +15,8 @@
#define SANITIZER_ALLOCATOR_DLSYM_H
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __sanitizer {
@ -31,15 +33,15 @@ struct DlSymAllocator {
UNLIKELY(internal_allocator()->FromPrimary(ptr));
}
static void *Allocate(uptr size_in_bytes) {
void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);
static void *Allocate(uptr size_in_bytes, uptr align = kWordSize) {
void *ptr = InternalAlloc(size_in_bytes, nullptr, align);
CHECK(internal_allocator()->FromPrimary(ptr));
Details::OnAllocate(ptr,
internal_allocator()->GetActuallyAllocatedSize(ptr));
return ptr;
}
static void *Callocate(SIZE_T nmemb, SIZE_T size) {
static void *Callocate(usize nmemb, usize size) {
void *ptr = InternalCalloc(nmemb, size);
CHECK(internal_allocator()->FromPrimary(ptr));
Details::OnAllocate(ptr,
@ -70,6 +72,11 @@ struct DlSymAllocator {
return new_ptr;
}
static void *ReallocArray(void *ptr, uptr count, uptr size) {
CHECK(!CheckForCallocOverflow(count, size));
return Realloc(ptr, count * size);
}
static void OnAllocate(const void *ptr, uptr size) {}
static void OnFree(const void *ptr, uptr size) {}
};
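The new ReallocArray guards the count * size multiplication with CheckForCallocOverflow before delegating to Realloc. The check it relies on amounts to the following (a sketch; the runtime's helper has the same semantics but its own spelling):

#include <cstddef>
#include <limits>

// True when nmemb * size would overflow size_t.
static bool WouldCallocOverflow(size_t nmemb, size_t size) {
  return size != 0 && nmemb > std::numeric_limits<size_t>::max() / size;
}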

View File

@ -40,6 +40,8 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__sanitizer_ignore_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();
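These are weak interface hooks that a program may define. For the new __sanitizer_ignore_free_hook, a non-zero return value asks the runtime to keep the chunk alive instead of freeing it (a sketch based on how the allocator consumes the hook's result; KeepAliveForDebugging is a hypothetical user predicate, not part of the interface):

static bool KeepAliveForDebugging(void *ptr) {
  (void)ptr;
  return false;  // hypothetical: decide per pointer whether to veto the free
}

extern "C" int __sanitizer_ignore_free_hook(void *ptr) {
  return KeepAliveForDebugging(ptr) ? 1 : 0;  // non-zero: skip the free
}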

View File

@ -278,7 +278,7 @@ class SizeClassAllocator32 {
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
struct alignas(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
StaticSpinMutex mutex;
IntrusiveList<TransferBatch> free_list;
u32 rand_state;

View File

@ -316,13 +316,13 @@ class SizeClassAllocator64 {
Printf(
"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
"last released: %6lldK region: 0x%zx\n",
"last released: %6lldK region: %p\n",
region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
region->mapped_user >> 10, region->stats.n_allocated,
region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
rss >> 10, region->rtoi.num_releases,
region->rtoi.last_released_bytes >> 10,
SpaceBeg() + kRegionSize * class_id);
(void *)(SpaceBeg() + kRegionSize * class_id));
}
void PrintStats() {
@ -639,13 +639,14 @@ class SizeClassAllocator64 {
static_assert(kRegionSize >= SizeClassMap::kMaxSize,
"Region size exceed largest size");
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
COMPILER_CHECK((kRegionSize) <=
(1ULL << (sizeof(CompactPtrT) * 8 + kCompactPtrScale)));
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 16;
static const uptr kUserMapSize = 1 << 18;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
static const uptr kFreeArrayMapSize = 1 << 16;
static const uptr kFreeArrayMapSize = 1 << 18;
atomic_sint32_t release_to_os_interval_ms_;
@ -666,7 +667,7 @@ class SizeClassAllocator64 {
u64 last_released_bytes;
};
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
struct alignas(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
Mutex mutex;
uptr num_freed_chunks; // Number of elements in the freearray.
uptr mapped_free_array; // Bytes mapped for freearray.

View File

@ -18,12 +18,24 @@
namespace __sanitizer {
enum memory_order {
// If the __atomic atomic builtins are supported (Clang/GCC), use the
// compiler provided macro values so that we can map the atomic operations
// to __atomic_* directly.
#ifdef __ATOMIC_SEQ_CST
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
#else
memory_order_relaxed = 1 << 0,
memory_order_consume = 1 << 1,
memory_order_acquire = 1 << 2,
memory_order_release = 1 << 3,
memory_order_acq_rel = 1 << 4,
memory_order_seq_cst = 1 << 5
#endif
};
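With memory_order_* equal to the compiler's __ATOMIC_* values, the wrappers can pass the enum straight through to the builtins with no translation table. For example (a sketch, GCC/Clang builtins assumed):

#include <cstdint>

static uint32_t LoadAcquire(const volatile uint32_t *p) {
  // memory_order_acquire == __ATOMIC_ACQUIRE under the mapping above.
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}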
struct atomic_uint8_t {
@ -49,7 +61,7 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
// On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
volatile ALIGNED(8) Type val_dont_use;
alignas(8) volatile Type val_dont_use;
};
struct atomic_uintptr_t {

View File

@ -14,60 +14,63 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
#if defined(__i386__) || defined(__x86_64__)
# include "sanitizer_atomic_clang_x86.h"
#else
# include "sanitizer_atomic_clang_other.h"
#endif
namespace __sanitizer {
// We would like to just use compiler builtin atomic operations
// for loads and stores, but they are mostly broken in clang:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
// We use the compiler builtin atomic operations for loads and stores, which
// generates correct code for all architectures, but may require libatomic
// on platforms where e.g. 64-bit atomics are not supported natively.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
inline void atomic_signal_fence(memory_order) {
inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }
inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
#endif
}
inline void atomic_thread_fence(memory_order) {
__sync_synchronize();
template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
return __atomic_load_n(&a->val_dont_use, mo);
}
template<typename T>
inline typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
__atomic_store_n(&a->val_dont_use, v, mo);
}
template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
return __atomic_fetch_add(&a->val_dont_use, v, mo);
}
template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, v);
return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}
template<typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, -v);
}
template<typename T>
inline typename T::Type atomic_exchange(volatile T *a,
typename T::Type v, memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
__sync_synchronize();
v = __sync_lock_test_and_set(&a->val_dont_use, v);
if (mo == memory_order_seq_cst)
__sync_synchronize();
return v;
return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
template <typename T>
@ -82,9 +85,8 @@ inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template<typename T>
inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
@ -92,13 +94,6 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
} // namespace __sanitizer
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// come after the template definitions above.
#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
# include "sanitizer_atomic_clang_mips.h"
#endif
#undef ATOMIC_ORDER
#endif // SANITIZER_ATOMIC_CLANG_H
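After this rewrite every operation is a thin wrapper over a __atomic_* builtin; the strong compare-exchange above, for instance, maps onto the following (a sketch with fixed seq_cst ordering):

#include <cstdint>

static bool CasStrongDemo(volatile uint32_t *a, uint32_t *expected,
                          uint32_t desired) {
  // On failure the builtin writes the observed value back into *expected.
  return __atomic_compare_exchange_n(a, expected, desired,
                                     /*weak=*/false, __ATOMIC_SEQ_CST,
                                     __ATOMIC_SEQ_CST);
}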

View File

@ -1,117 +0,0 @@
//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H
namespace __sanitizer {
// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
while (*lock) {
}
}
static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
int lock;
char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
template <>
inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret;
__spin_lock(&lock.lock);
ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
ptr->val_dont_use = ret + val;
__spin_unlock(&lock.lock);
return ret;
}
template <>
inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
template <>
inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
typedef atomic_uint64_t::Type Type;
Type cmpv = *cmp;
Type prev;
bool ret = false;
__spin_lock(&lock.lock);
prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
if (prev == cmpv) {
ret = true;
ptr->val_dont_use = xchg;
}
__spin_unlock(&lock.lock);
return ret;
}
template <>
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type zero = 0;
volatile atomic_uint64_t *Newptr =
const_cast<volatile atomic_uint64_t *>(ptr);
return atomic_fetch_add(Newptr, zero, mo);
}
template <>
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
__spin_lock(&lock.lock);
ptr->val_dont_use = v;
__spin_unlock(&lock.lock);
}
} // namespace __sanitizer
#endif // SANITIZER_ATOMIC_CLANG_MIPS_H

View File

@ -1,85 +0,0 @@
//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H
namespace __sanitizer {
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__sync_synchronize();
} else { // seq_cst
// E.g. on POWER we need a hw fence even before the store.
__sync_synchronize();
v = a->val_dont_use;
__sync_synchronize();
}
} else {
__atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
__ATOMIC_SEQ_CST);
}
return v;
}
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
__sync_synchronize();
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
__sync_synchronize();
a->val_dont_use = v;
__sync_synchronize();
}
} else {
__atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H

View File

@ -1,113 +0,0 @@
//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H
namespace __sanitizer {
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
// On x86 loads are implicitly acquire.
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 plain MOV is enough for seq_cst store.
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
}
} else {
// 64-bit load on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;" // (ptr could be read-only)
"emms;" // Empty mmx state/Reset FP regs
: "=m" (v)
: "m" (a->val_dont_use)
: // mark the mmx registers as clobbered
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
}
return v;
}
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__sync_synchronize();
}
} else {
// 64-bit store on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;"
"emms;" // Empty mmx state/Reset FP regs
: "=m" (a->val_dont_use)
: "m" (v)
: // mark the mmx registers as clobbered
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
if (mo == memory_order_seq_cst)
__sync_synchronize();
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H

View File

@ -70,8 +70,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
@ -87,8 +87,8 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {

View File

@ -321,23 +321,23 @@ class TwoLevelBitVector {
};
private:
void check(uptr idx) const { CHECK_LE(idx, size()); }
void check(uptr idx) const { CHECK_LT(idx, size()); }
uptr idx0(uptr idx) const {
uptr res = idx / (BV::kSize * BV::kSize);
CHECK_LE(res, kLevel1Size);
CHECK_LT(res, kLevel1Size);
return res;
}
uptr idx1(uptr idx) const {
uptr res = (idx / BV::kSize) % BV::kSize;
CHECK_LE(res, BV::kSize);
CHECK_LT(res, BV::kSize);
return res;
}
uptr idx2(uptr idx) const {
uptr res = idx % BV::kSize;
CHECK_LE(res, BV::kSize);
CHECK_LT(res, BV::kSize);
return res;
}
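A worked instance of the three-way index split, assuming BV::kSize == 64 (so one level-1 bit covers 64 * 64 = 4096 indices):

#include <cassert>

int main() {
  const unsigned long kSize = 64, idx = 70000;
  unsigned long i0 = idx / (kSize * kSize);  // 70000 / 4096 = 17
  unsigned long i1 = (idx / kSize) % kSize;  // 1093 % 64    = 5
  unsigned long i2 = idx % kSize;            // 70000 % 64   = 48
  assert(i0 * kSize * kSize + i1 * kSize + i2 == idx);
  return 0;
}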

View File

@ -139,9 +139,11 @@ u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
return desc.here_id;
}
void ChainedOriginDepot::LockAll() { depot.LockAll(); }
void ChainedOriginDepot::LockBeforeFork() { depot.LockBeforeFork(); }
void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
void ChainedOriginDepot::UnlockAfterFork(bool fork_child) {
depot.UnlockAfterFork(fork_child);
}
void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }

View File

@ -32,8 +32,8 @@ class ChainedOriginDepot {
// Retrieves the stored StackDepot ID for the given origin ID.
u32 Get(u32 id, u32 *other);
void LockAll();
void UnlockAll();
void LockBeforeFork();
void UnlockAfterFork(bool fork_child);
void TestOnlyUnmap();
private:

Some files were not shown because too many files have changed in this diff