[scudo] Refactor the wrapper code into a single file. (#190857)

Allow using a define to create a prefix rather than trying to use
android/bionic checks.
This commit is contained in:
Christopher Ferris
2026-04-09 19:02:11 +00:00
committed by GitHub
parent 012f820073
commit b742ec91c0
5 changed files with 437 additions and 502 deletions

View File

@@ -8,12 +8,8 @@
#include "platform.h"
// Skip this compilation unit if compiled as part of Bionic.
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
#include "internal_defs.h"
#include "platform.h"
#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
@@ -21,20 +17,446 @@
#include <stdint.h>
#include <stdio.h>
// Select the symbol prefix for the exported C wrappers. When
// SCUDO_PREFIX_NAME is defined, every wrapper is emitted as
// <SCUDO_PREFIX_NAME><name> and the allocator instance is made static.
// NOTE(review): this region appears to interleave old and new diff lines —
// the SCUDO_PREFIX_NAME branch defines SCUDO_ALLOCATOR_STATIC but never
// SCUDO_ALLOCATOR, while line 47 below still uses SCUDO_ALLOCATOR; confirm
// against the upstream commit before building.
#if defined(SCUDO_PREFIX_NAME)
#define SCUDO_PREFIX(name) CONCATENATE(SCUDO_PREFIX_NAME, name)
#define SCUDO_ALLOCATOR_STATIC static
#else
#define SCUDO_PREFIX(name) name
#define SCUDO_ALLOCATOR Allocator
// Export the static allocator so that the C++ wrappers can access it.
// Technically we could have a completely separated heap for C & C++ but in
// reality the amount of cross pollination between the two is staggering.
#define SCUDO_ALLOCATOR_STATIC
#endif
// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
// Instantiate the global Scudo allocator. SCUDO_PREFIX(malloc_postinit) is
// registered as its post-init callback (defined later in this file: it wires
// up GWP-ASan and the fork handlers). SCUDO_REQUIRE_CONSTANT_INITIALIZATION
// demands the instance needs no dynamic initializer.
// NOTE(review): two instantiations are visible here (one named via
// SCUDO_ALLOCATOR, one named Allocator) — this looks like interleaved
// before/after diff lines; only one should survive. Confirm upstream.
extern "C" void SCUDO_PREFIX(malloc_postinit)();
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;
SCUDO_ALLOCATOR_STATIC
scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> Allocator;
#include "wrappers_c.inc"
// Forward a successful allocation to the user-installed allocate hook.
// Null results are never reported; everything compiles away when hooks are
// disabled at build time.
static void reportAllocation(void *ptr, size_t size) {
  if (SCUDO_ENABLE_HOOKS && __scudo_allocate_hook && ptr)
    __scudo_allocate_hook(ptr, size);
}
// Forward a deallocation to the user-installed deallocate hook, when hooks
// are compiled in and a hook is present.
static void reportDeallocation(void *ptr) {
  if (SCUDO_ENABLE_HOOKS && __scudo_deallocate_hook)
    __scudo_deallocate_hook(ptr);
}
// Report the allocation half of a realloc: prefer the dedicated realloc
// hook and fall back to the plain allocate hook. new_ptr must be non-null.
// FIX: removed the stray `#undef SCUDO_ALLOCATOR` / `#undef SCUDO_PREFIX`
// lines that had been spliced into this function body (diff garbling) —
// undefining SCUDO_PREFIX here would break every subsequent
// SCUDO_PREFIX(...) wrapper definition in this file.
static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
  DCHECK_NE(new_ptr, nullptr);
  if (SCUDO_ENABLE_HOOKS) {
    if (__scudo_realloc_allocate_hook)
      __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
    else if (__scudo_allocate_hook)
      __scudo_allocate_hook(new_ptr, size);
  }
}
// Report the deallocation half of a realloc: prefer the dedicated realloc
// hook and fall back to the plain deallocate hook.
static void reportReallocDeallocation(void *old_ptr) {
  if (!SCUDO_ENABLE_HOOKS)
    return;
  if (__scudo_realloc_deallocate_hook)
    __scudo_realloc_deallocate_hook(old_ptr);
  else if (__scudo_deallocate_hook)
    __scudo_deallocate_hook(old_ptr);
}
// NOTE(review): diff residue — __scudo_print_stats is defined again near the
// end of this file, and the `#endif` below pairs with no conditional opened
// at this point. One of the two definitions (and this #endif) should not
// survive the merge; confirm against the upstream commit.
extern "C" INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
extern "C" {
#endif // !SCUDO_ANDROID || !_BIONIC
// calloc(nmemb, size): allocate zero-initialized storage for an array.
// nmemb * size overflow is rejected with ENOMEM when the allocator is
// allowed to return null, or with a fatal report otherwise.
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (Allocator.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  // The trailing `true` requests zero-filled memory.
  void *Ptr = Allocator.allocate(Product, scudo::Chunk::Origin::Malloc,
                                 SCUDO_MALLOC_ALIGNMENT, true);
  reportAllocation(Ptr, Product);
  return scudo::setErrnoOnNull(Ptr);
}
// free(ptr): report the deallocation to hooks, then release the chunk.
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  reportDeallocation(ptr);
  Allocator.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}
// C23 free_sized(ptr, size): like free(), but forwards the caller-provided
// size to the allocator alongside the pointer.
INTERFACE WEAK void SCUDO_PREFIX(free_sized)(void *ptr, size_t size) {
  reportDeallocation(ptr);
  Allocator.deallocateSized(ptr, scudo::Chunk::Origin::Malloc, size);
}
// C23 free_aligned_sized(ptr, alignment, size): free() variant that also
// forwards the caller-provided alignment and size to the allocator.
INTERFACE WEAK void
SCUDO_PREFIX(free_aligned_sized)(void *ptr, size_t alignment, size_t size) {
  reportDeallocation(ptr);
  Allocator.deallocateSizedAligned(ptr, scudo::Chunk::Origin::Malloc, size,
                                   alignment);
}
// mallinfo(): legacy glibc statistics interface, populated from the
// allocator's stat counters. __scudo_mallinfo_data_t may be narrower than
// the counters, hence the explicit casts; fields with no Scudo equivalent
// stay zero from the {} initialization.
INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  Allocator.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}
// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
#if !SCUDO_ANDROID
// mallinfo2(): like mallinfo() but with full-width fields, so no narrowing
// casts are needed.
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
  struct __scudo_mallinfo2 Info = {};
  scudo::StatCounters Stats;
  Allocator.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = Stats[scudo::StatMapped];
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = Stats[scudo::StatFree];
  // Total allocated space (bytes)
  Info.uordblks = Stats[scudo::StatAllocated];
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}
#endif
// malloc(size): allocate `size` bytes aligned to SCUDO_MALLOC_ALIGNMENT.
// errno is set via setErrnoOnNull when the result is null.
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::Malloc,
                                 SCUDO_MALLOC_ALIGNMENT);
  reportAllocation(Ptr, size);
  return scudo::setErrnoOnNull(Ptr);
}
// malloc_usable_size(ptr): number of usable bytes in the chunk. Bionic's
// prototype takes `const void *`, other platforms take `void *`.
#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return Allocator.getUsableSize(ptr);
}
// memalign(alignment, size): legacy aligned allocation. On Android a
// non-power-of-two alignment is silently rounded up; elsewhere it is an
// error (EINVAL, or a fatal report when null returns are disallowed).
INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (Allocator.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  void *Ptr =
      Allocator.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  reportAllocation(Ptr, size);
  // NOTE: unlike the other wrappers, the result is returned without a
  // setErrnoOnNull() call.
  return Ptr;
}
// posix_memalign(memptr, alignment, size): returns 0 on success, EINVAL for
// an invalid alignment, ENOMEM on allocation failure. *memptr is written
// only on success; errno is left untouched, per POSIX.
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!Allocator.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      Allocator.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  reportAllocation(Ptr, size);
  *memptr = Ptr;
  return 0;
}
// pvalloc(size): allocate `size` rounded up to a whole number of pages,
// page-aligned; pvalloc(0) allocates one page. Overflow in the rounding is
// rejected with ENOMEM or a fatal report.
INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (Allocator.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  const scudo::uptr RoundedSize =
      size ? scudo::roundUp(size, PageSize) : PageSize;
  void *Ptr =
      Allocator.allocate(RoundedSize, scudo::Chunk::Origin::Memalign, PageSize);
  // FIX: report the size actually allocated. The old code passed
  // roundUp(size, PageSize), which is 0 for pvalloc(0) even though one full
  // page is allocated.
  reportAllocation(Ptr, RoundedSize);
  return scudo::setErrnoOnNull(Ptr);
}
// realloc(ptr, size): realloc(nullptr, n) behaves like malloc(n);
// realloc(p, 0) frees p and returns nullptr. On failure the original block
// is left intact and errno is set via setErrnoOnNull. The hook-reporting
// order below is deliberate — do not reorder.
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr) {
    void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::Malloc,
                                   SCUDO_MALLOC_ALIGNMENT);
    reportAllocation(Ptr, size);
    return scudo::setErrnoOnNull(Ptr);
  }
  if (size == 0) {
    reportDeallocation(ptr);
    Allocator.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }
  // Given that the reporting of deallocation and allocation are not atomic, we
  // always pretend the old pointer will be released so that the user doesn't
  // need to worry about the false double-use case from the view of hooks.
  //
  // For example, assume that `realloc` releases the old pointer and allocates a
  // new pointer. Before the reporting of both operations has been done, another
  // thread may get the old pointer from `malloc`. It may be misinterpreted as
  // double-use if it's not handled properly on the hook side.
  reportReallocDeallocation(ptr);
  void *NewPtr = Allocator.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
  if (NewPtr != nullptr) {
    // Note that even if NewPtr == ptr, the size has changed. We still need to
    // report the new size.
    reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
  } else {
    // If `realloc` fails, the old pointer is not released. Report the old
    // pointer as allocated again.
    reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
                            Allocator.getAllocSize(ptr));
  }
  return scudo::setErrnoOnNull(NewPtr);
}
// reallocarray(ptr, nmemb, size): overflow-checked realloc(ptr, nmemb*size).
INTERFACE WEAK void *SCUDO_PREFIX(reallocarray)(void *ptr, size_t nmemb,
                                                size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (Allocator.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportReallocarrayOverflow(nmemb, size);
  }
  // Delegates to realloc, which handles hook reporting and errno.
  return SCUDO_PREFIX(realloc)(ptr, Product);
}
// valloc(size): page-aligned allocation of `size` bytes.
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::Memalign,
                                 scudo::getPageSizeCached());
  reportAllocation(Ptr, size);
  return scudo::setErrnoOnNull(Ptr);
}
// malloc_iterate: invoke `callback` for every live chunk in
// [base, base + size). Always returns 0.
// NOTE(review): no disable/enable pairing here — presumably callers are
// expected to bracket this with malloc_disable()/malloc_enable() as
// malloc_info below does; confirm.
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  Allocator.iterateOverChunks(base, size, callback, arg);
  return 0;
}
// malloc_enable(): unlock the allocator after a malloc_disable() call.
INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { Allocator.enable(); }
// malloc_disable(): lock the allocator (used as the fork prepare handler).
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() { Allocator.disable(); }
// Post-initialization callback for the allocator: initializes GWP-ASan and
// registers fork handlers so the allocator is locked across fork() (disabled
// before fork, re-enabled in both parent and child afterwards).
void SCUDO_PREFIX(malloc_postinit)() {
  Allocator.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}
// mallopt(param, value): tune allocator behavior. Returns 1 for the
// explicitly handled parameters; for the generic options it returns
// Allocator.setOption()'s result, and 0 for unknown parameters.
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      // Before changing the interval, reset the memory usage status by doing a
      // M_PURGE call so that we can minimize the impact of any unreleased pages
      // introduced by interval transition.
      Allocator.releaseToOS(scudo::ReleaseToOS::Force);
      // The values allowed on Android are {-1, 0, 1}. "1" means the longest
      // interval.
      CHECK(value >= -1 && value <= 1);
      if (value == 1)
        value = INT32_MAX;
    }
    Allocator.setOption(scudo::Option::ReleaseInterval,
                        static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    Allocator.releaseToOS(scudo::ReleaseToOS::Force);
    return 1;
  } else if (param == M_PURGE_FAST) {
    Allocator.releaseToOS(scudo::ReleaseToOS::ForceFast);
    return 1;
  } else if (param == M_PURGE_ALL) {
    Allocator.releaseToOS(scudo::ReleaseToOS::ForceAll);
    return 1;
  } else if (param == M_LOG_STATS) {
    Allocator.printStats();
    Allocator.printFragmentationInfo();
    return 1;
  } else {
    // Map the remaining Android mallopt parameters onto generic options.
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return Allocator.setOption(option, static_cast<scudo::sptr>(value));
  }
}
// C11 aligned_alloc(alignment, size): the alignment/size combination is
// validated by scudo::checkAlignedAllocAlignmentAndSize; invalid inputs
// yield EINVAL or a fatal report.
INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (Allocator.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }
  void *Ptr =
      Allocator.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  reportAllocation(Ptr, size);
  return scudo::setErrnoOnNull(Ptr);
}
// malloc_info(options, stream): write an XML summary of live chunk counts
// per size to `stream`. Returns 0 on success, -1 if the scratch buffer
// cannot be allocated (errno is set by the failed calloc).
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(Allocator)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  // FIX: the previous code dereferenced `sizes` without checking that the
  // allocation succeeded.
  if (UNLIKELY(!sizes))
    return -1;
  // max_size is a constant expression, so the captureless lambda may read it.
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };
  // Lock the allocator while walking the chunk list.
  Allocator.disable();
  Allocator.iterateOverChunks(0, -1ul, callback, sizes);
  Allocator.enable();
  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}
// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  Allocator.disableMemoryTagging();
}
// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
// `track` is treated as a boolean (non-zero enables tracking).
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  Allocator.setTrackAllocationStacks(track);
}
// Sets whether scudo zero-initializes all allocated memory (non-zero
// enables ZeroFill, zero restores NoFill).
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  Allocator.setFillContents(zero_contents ? scudo::ZeroFill : scudo::NoFill);
}
// Sets whether scudo pattern-initializes all allocated memory (non-zero
// enables PatternOrZeroFill, zero restores NoFill).
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  Allocator.setFillContents(pattern_fill_contents ? scudo::PatternOrZeroFill
                                                  : scudo::NoFill);
}
// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
// `add_slack` is treated as a boolean.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  Allocator.setAddLargeAllocationSlack(add_slack);
}
// Extra Internal functions.
// NOTE(review): this duplicates the __scudo_print_stats definition that also
// appears near the top of this file (diff residue) — only one definition can
// survive; confirm against the upstream commit.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
// Fill `error_info` for the fault at `fault_addr` from the supplied
// metadata buffers (stack depot, region info, ring buffer, raw memory and
// its tags); forwards directly to Allocator.getErrorInfo.
INTERFACE void __scudo_get_error_info(
    struct scudo_error_info *error_info, uintptr_t fault_addr,
    const char *stack_depot, size_t stack_depot_size, const char *region_info,
    const char *ring_buffer, size_t ring_buffer_size, const char *memory,
    const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
                         region_info, ring_buffer, ring_buffer_size, memory,
                         memory_tags, memory_addr, memory_size);
}
// Expose the address of the allocator's stack depot buffer.
INTERFACE const char *__scudo_get_stack_depot_addr() {
  return Allocator.getStackDepotAddress();
}
// Expose the size of the allocator's stack depot buffer.
INTERFACE size_t __scudo_get_stack_depot_size() {
  return Allocator.getStackDepotSize();
}
// Expose the address of the allocator's region info array.
INTERFACE const char *__scudo_get_region_info_addr() {
  return Allocator.getRegionInfoArrayAddress();
}
// Expose the size of the allocator's region info array.
INTERFACE size_t __scudo_get_region_info_size() {
  return Allocator.getRegionInfoArraySize();
}
// Expose the address of the allocator's ring buffer.
INTERFACE const char *__scudo_get_ring_buffer_addr() {
  return Allocator.getRingBufferAddress();
}
// Expose the size of the allocator's ring buffer.
INTERFACE size_t __scudo_get_ring_buffer_size() {
  return Allocator.getRingBufferSize();
}
} // extern "C"

View File

@@ -54,9 +54,4 @@ struct __scudo_mallinfo2 {
#define SCUDO_MALLINFO __scudo_mallinfo
#endif
#if !SCUDO_ANDROID || !_BIONIC
extern "C" void malloc_postinit();
extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
#endif
#endif // SCUDO_WRAPPERS_C_H_

View File

@@ -1,405 +0,0 @@
//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif
// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
static void reportAllocation(void *ptr, size_t size) {
if (SCUDO_ENABLE_HOOKS)
if (__scudo_allocate_hook && ptr)
__scudo_allocate_hook(ptr, size);
}
static void reportDeallocation(void *ptr) {
if (SCUDO_ENABLE_HOOKS)
if (__scudo_deallocate_hook)
__scudo_deallocate_hook(ptr);
}
static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
DCHECK_NE(new_ptr, nullptr);
if (SCUDO_ENABLE_HOOKS) {
if (__scudo_realloc_allocate_hook)
__scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
else if (__scudo_allocate_hook)
__scudo_allocate_hook(new_ptr, size);
}
}
static void reportReallocDeallocation(void *old_ptr) {
if (SCUDO_ENABLE_HOOKS) {
if (__scudo_realloc_deallocate_hook)
__scudo_realloc_deallocate_hook(old_ptr);
else if (__scudo_deallocate_hook)
__scudo_deallocate_hook(old_ptr);
}
}
extern "C" {
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
scudo::uptr Product;
if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
if (SCUDO_ALLOCATOR.canReturnNull()) {
errno = ENOMEM;
return nullptr;
}
scudo::reportCallocOverflow(nmemb, size);
}
void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
SCUDO_MALLOC_ALIGNMENT, true);
reportAllocation(Ptr, Product);
return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}
INTERFACE WEAK void SCUDO_PREFIX(free_sized)(void *ptr, size_t size) {
reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocateSized(ptr, scudo::Chunk::Origin::Malloc, size);
}
INTERFACE WEAK void
SCUDO_PREFIX(free_aligned_sized)(void *ptr, size_t alignment, size_t size) {
reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocateSizedAligned(ptr, scudo::Chunk::Origin::Malloc,
size, alignment);
}
INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
struct SCUDO_MALLINFO Info = {};
scudo::StatCounters Stats;
SCUDO_ALLOCATOR.getStats(Stats);
// Space allocated in mmapped regions (bytes)
Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
// Maximum total allocated space (bytes)
Info.usmblks = Info.hblkhd;
// Space in freed fastbin blocks (bytes)
Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
// Total allocated space (bytes)
Info.uordblks =
static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
// Total free space (bytes)
Info.fordblks = Info.fsmblks;
return Info;
}
// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
#if !SCUDO_ANDROID
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
struct __scudo_mallinfo2 Info = {};
scudo::StatCounters Stats;
SCUDO_ALLOCATOR.getStats(Stats);
// Space allocated in mmapped regions (bytes)
Info.hblkhd = Stats[scudo::StatMapped];
// Maximum total allocated space (bytes)
Info.usmblks = Info.hblkhd;
// Space in freed fastbin blocks (bytes)
Info.fsmblks = Stats[scudo::StatFree];
// Total allocated space (bytes)
Info.uordblks = Stats[scudo::StatAllocated];
// Total free space (bytes)
Info.fordblks = Info.fsmblks;
return Info;
}
#endif
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
SCUDO_MALLOC_ALIGNMENT);
reportAllocation(Ptr, size);
return scudo::setErrnoOnNull(Ptr);
}
#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
return SCUDO_ALLOCATOR.getUsableSize(ptr);
}
INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
// Android rounds up the alignment to a power of two if it isn't one.
if (SCUDO_ANDROID) {
if (UNLIKELY(!alignment)) {
alignment = 1U;
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
alignment = scudo::roundUpPowerOfTwo(alignment);
}
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
if (SCUDO_ALLOCATOR.canReturnNull()) {
errno = EINVAL;
return nullptr;
}
scudo::reportAlignmentNotPowerOfTwo(alignment);
}
}
void *Ptr =
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
reportAllocation(Ptr, size);
return Ptr;
}
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
size_t size) {
if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
if (!SCUDO_ALLOCATOR.canReturnNull())
scudo::reportInvalidPosixMemalignAlignment(alignment);
return EINVAL;
}
void *Ptr =
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
if (UNLIKELY(!Ptr))
return ENOMEM;
reportAllocation(Ptr, size);
*memptr = Ptr;
return 0;
}
INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
const scudo::uptr PageSize = scudo::getPageSizeCached();
if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
if (SCUDO_ALLOCATOR.canReturnNull()) {
errno = ENOMEM;
return nullptr;
}
scudo::reportPvallocOverflow(size);
}
// pvalloc(0) should allocate one page.
void *Ptr =
SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
scudo::Chunk::Origin::Memalign, PageSize);
reportAllocation(Ptr, scudo::roundUp(size, PageSize));
return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
if (!ptr) {
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
SCUDO_MALLOC_ALIGNMENT);
reportAllocation(Ptr, size);
return scudo::setErrnoOnNull(Ptr);
}
if (size == 0) {
reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
return nullptr;
}
// Given that the reporting of deallocation and allocation are not atomic, we
// always pretend the old pointer will be released so that the user doesn't
// need to worry about the false double-use case from the view of hooks.
//
// For example, assume that `realloc` releases the old pointer and allocates a
// new pointer. Before the reporting of both operations has been done, another
// thread may get the old pointer from `malloc`. It may be misinterpreted as
// double-use if it's not handled properly on the hook side.
reportReallocDeallocation(ptr);
void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
if (NewPtr != nullptr) {
// Note that even if NewPtr == ptr, the size has changed. We still need to
// report the new size.
reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
} else {
// If `realloc` fails, the old pointer is not released. Report the old
// pointer as allocated again.
reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
SCUDO_ALLOCATOR.getAllocSize(ptr));
}
return scudo::setErrnoOnNull(NewPtr);
}
INTERFACE WEAK void *SCUDO_PREFIX(reallocarray)(void *ptr, size_t nmemb,
size_t size) {
scudo::uptr Product;
if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
if (SCUDO_ALLOCATOR.canReturnNull()) {
errno = ENOMEM;
return nullptr;
}
scudo::reportReallocarrayOverflow(nmemb, size);
}
return SCUDO_PREFIX(realloc)(ptr, Product);
}
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
scudo::getPageSizeCached());
reportAllocation(Ptr, size);
return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
uintptr_t base, size_t size,
void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
return 0;
}
INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
SCUDO_ALLOCATOR.disable();
}
void SCUDO_PREFIX(malloc_postinit)() {
SCUDO_ALLOCATOR.initGwpAsan();
pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
SCUDO_PREFIX(malloc_enable));
}
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
if (param == M_DECAY_TIME) {
if (SCUDO_ANDROID) {
// Before changing the interval, reset the memory usage status by doing a
// M_PURGE call so that we can minimize the impact of any unreleased pages
// introduced by interval transition.
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
// The values allowed on Android are {-1, 0, 1}. "1" means the longest
// interval.
CHECK(value >= -1 && value <= 1);
if (value == 1)
value = INT32_MAX;
}
SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
return 1;
} else if (param == M_PURGE_FAST) {
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceFast);
return 1;
} else if (param == M_PURGE_ALL) {
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
return 1;
} else if (param == M_LOG_STATS) {
SCUDO_ALLOCATOR.printStats();
SCUDO_ALLOCATOR.printFragmentationInfo();
return 1;
} else {
scudo::Option option;
switch (param) {
case M_MEMTAG_TUNING:
option = scudo::Option::MemtagTuning;
break;
case M_THREAD_DISABLE_MEM_INIT:
option = scudo::Option::ThreadDisableMemInit;
break;
case M_CACHE_COUNT_MAX:
option = scudo::Option::MaxCacheEntriesCount;
break;
case M_CACHE_SIZE_MAX:
option = scudo::Option::MaxCacheEntrySize;
break;
case M_TSDS_COUNT_MAX:
option = scudo::Option::MaxTSDsCount;
break;
default:
return 0;
}
return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
}
}
INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
size_t size) {
if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
if (SCUDO_ALLOCATOR.canReturnNull()) {
errno = EINVAL;
return nullptr;
}
scudo::reportInvalidAlignedAllocAlignment(alignment, size);
}
void *Ptr =
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
reportAllocation(Ptr, size);
return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
const scudo::uptr max_size =
decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
auto *sizes = static_cast<scudo::uptr *>(
SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
auto callback = [](uintptr_t, size_t size, void *arg) {
auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
if (size < max_size)
sizes[size]++;
};
SCUDO_ALLOCATOR.disable();
SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
SCUDO_ALLOCATOR.enable();
fputs("<malloc version=\"scudo-1\">\n", stream);
for (scudo::uptr i = 0; i != max_size; ++i)
if (sizes[i])
fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
fputs("</malloc>\n", stream);
SCUDO_PREFIX(free)(sizes);
return 0;
}
// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
SCUDO_ALLOCATOR.disableMemoryTagging();
}
// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}
// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
: scudo::NoFill);
}
// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
SCUDO_ALLOCATOR.setFillContents(
pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}
// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}
} // extern "C"

View File

@@ -1,75 +0,0 @@
//===-- wrappers_c_bionic.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "platform.h"
// This is only used when compiled as part of Bionic.
#if SCUDO_ANDROID && _BIONIC
#include "allocator_config.h"
#include "internal_defs.h"
#include "platform.h"
#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
#include <stdint.h>
#include <stdio.h>
// Regular MallocDispatch definitions.
#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
#define SCUDO_ALLOCATOR Allocator
extern "C" void SCUDO_PREFIX(malloc_postinit)();
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
SCUDO_ALLOCATOR;
#include "wrappers_c.inc"
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX
// TODO(kostyak): support both allocators.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
INTERFACE void __scudo_get_error_info(
struct scudo_error_info *error_info, uintptr_t fault_addr,
const char *stack_depot, size_t stack_depot_size, const char *region_info,
const char *ring_buffer, size_t ring_buffer_size, const char *memory,
const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
region_info, ring_buffer, ring_buffer_size, memory,
memory_tags, memory_addr, memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
return Allocator.getStackDepotAddress();
}
INTERFACE size_t __scudo_get_stack_depot_size() {
return Allocator.getStackDepotSize();
}
INTERFACE const char *__scudo_get_region_info_addr() {
return Allocator.getRegionInfoArrayAddress();
}
INTERFACE size_t __scudo_get_region_info_size() {
return Allocator.getRegionInfoArraySize();
}
INTERFACE const char *__scudo_get_ring_buffer_addr() {
return Allocator.getRingBufferAddress();
}
INTERFACE size_t __scudo_get_ring_buffer_size() {
return Allocator.getRingBufferSize();
}
#endif // SCUDO_ANDROID && _BIONIC

View File

@@ -8,9 +8,6 @@
#include "platform.h"
// Skip this compilation unit if compiled as part of Bionic.
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
#include "internal_defs.h"
#include "platform.h"
@@ -19,6 +16,9 @@
#include <stdint.h>
extern "C" void malloc_postinit();
extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
namespace std {
struct nothrow_t {};
enum class align_val_t : size_t {};
@@ -146,5 +146,3 @@ INTERFACE WEAK void operator delete[](void *ptr, size_t size,
Allocator.deallocateSizedAligned(ptr, scudo::Chunk::Origin::NewArray, size,
static_cast<scudo::uptr>(align));
}
#endif // !SCUDO_ANDROID || !_BIONIC