repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 364 classes)
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle.h |
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
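/*
 * Illustrative sketch, not part of the original header: it shows how an
 * application might opt into the JEMALLOC_MANGLE mapping defined above, so
 * plain allocator names expand to the je_-prefixed API. The surrounding
 * function and the guard are assumptions for demonstration only and the
 * block is disabled with #if 0.
 */
#if 0
#define JEMALLOC_MANGLE
#include "jemalloc.h"

static void mangle_example(void)
{
	void *p = malloc(128);	/* expands to je_malloc(128) */
	free(p);		/* expands to je_free(p) */
}
#endif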
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
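/*
 * Illustrative sketch (assumption, disabled with #if 0): with
 * JEMALLOC_NO_DEMANGLE defined, the je_* names above survive as stable
 * aliases, so code can target the jemalloc API without depending on the
 * prefix chosen at configure time.
 */
#if 0
#define JEMALLOC_NO_DEMANGLE
#include "jemalloc.h"

static void demangle_example(void)
{
	void *p = je_malloc(64);	/* resolves to the mangled public symbol */
	je_free(p);
}
#endif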
| 2,874 | 32.045977 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc.h |
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
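/*
 * Illustrative compile-time check (assumption, not part of the generated
 * header, disabled with #if 0): LG_SIZEOF_PTR is the base-2 log of the
 * pointer size, so sizeof(void *) == 1 << LG_SIZEOF_PTR must hold; on a
 * 64-bit MSVC build LG_SIZEOF_PTR_WIN is 3 and the array size is positive.
 */
#if 0
typedef char lg_sizeof_ptr_check
	[(sizeof(void *) == (1u << LG_SIZEOF_PTR)) ? 1 : -1];
#endif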
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
#include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
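/*
 * Illustrative sketch (assumption, disabled with #if 0): combining the
 * MALLOCX_* flags defined above in a je_mallocx() call. MALLOCX_ALIGN(64)
 * encodes a 64-byte alignment request and MALLOCX_ZERO asks for zeroed
 * memory; the helper function is hypothetical.
 */
#if 0
static void *mallocx_example(size_t n)
{
	/* 64-byte aligned, zero-filled allocation of n bytes. */
	return je_mallocx(n, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
}
#endif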
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed, int empty);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
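/*
 * Illustrative sketch (assumption, disabled with #if 0): a typical lifecycle
 * for the pool API declared above, running the allocator on top of a
 * caller-supplied memory region. The region size, the use of malloc() to
 * obtain it, and the helper function are placeholders for demonstration.
 */
#if 0
#include <stdlib.h>

static void pool_example(void)
{
	size_t region_size = 16 * 1024 * 1024;
	void *region = malloc(region_size);	/* could also be a mapped range */
	pool_t *pool = je_pool_create(region, region_size, 0, 0);
	if (pool != NULL) {
		void *obj = je_pool_malloc(pool, 256);
		je_pool_free(pool, obj);
		je_pool_delete(pool);
	}
	free(region);
}
#endif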
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
| 10,674 | 34 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos.h |
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,124 | 44.289855 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_typedefs.h |
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
| 150 | 49.333333 | 82 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_macros.h |
#include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
| 1,426 | 29.361702 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/size_classes.h |
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of
* SC(index, lg_delta, size, bin, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
*/
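/*
 * Worked example (illustrative, disabled with #if 0): each
 * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) tuple encodes
 * size == (1 << lg_grp) + (ndelta << lg_delta). For instance,
 * SC( 5, 5, 3, 2, yes, 3) in the first table below describes the 48-byte
 * class: (1 << 5) + (2 << 3) == 32 + 16 == 48. The helper macro is a
 * hypothetical restatement of that arithmetic, not part of this header.
 */
#if 0
#define size_from_sc(lg_grp, lg_delta, ndelta) \
	(((size_t)1 << (lg_grp)) + ((size_t)(ndelta) << (lg_delta)))
/* size_from_sc(5, 3, 2) == 48 */
#endif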
#define LG_SIZE_CLASS_GROUP 2
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, no, 9) \
\
SC( 32, 12, 10, 1, no, no) \
SC( 33, 12, 10, 2, no, no) \
SC( 34, 12, 10, 3, no, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 35
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, yes, no) \
\
SC( 36, 13, 11, 1, yes, no) \
SC( 37, 13, 11, 2, yes, no) \
SC( 38, 13, 11, 3, yes, no) \
SC( 39, 13, 11, 4, yes, no) \
\
SC( 40, 14, 12, 1, yes, no) \
SC( 41, 14, 12, 2, yes, no) \
SC( 42, 14, 12, 3, yes, no) \
SC( 43, 14, 12, 4, yes, no) \
\
SC( 44, 15, 13, 1, yes, no) \
SC( 45, 15, 13, 2, yes, no) \
SC( 46, 15, 13, 3, yes, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 47
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, no, 9) \
\
SC( 29, 12, 10, 1, no, no) \
SC( 30, 12, 10, 2, no, no) \
SC( 31, 12, 10, 3, no, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 28
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 32
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, yes, no) \
\
SC( 33, 13, 11, 1, yes, no) \
SC( 34, 13, 11, 2, yes, no) \
SC( 35, 13, 11, 3, yes, no) \
SC( 36, 13, 11, 4, yes, no) \
\
SC( 37, 14, 12, 1, yes, no) \
SC( 38, 14, 12, 2, yes, no) \
SC( 39, 14, 12, 3, yes, no) \
SC( 40, 14, 12, 4, yes, no) \
\
SC( 41, 15, 13, 1, yes, no) \
SC( 42, 15, 13, 2, yes, no) \
SC( 43, 15, 13, 3, yes, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 44
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, no, 9) \
\
SC( 28, 12, 10, 1, no, no) \
SC( 29, 12, 10, 2, no, no) \
SC( 30, 12, 10, 3, no, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 27
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, yes, no) \
\
SC( 32, 13, 11, 1, yes, no) \
SC( 33, 13, 11, 2, yes, no) \
SC( 34, 13, 11, 3, yes, no) \
SC( 35, 13, 11, 4, yes, no) \
\
SC( 36, 14, 12, 1, yes, no) \
SC( 37, 14, 12, 2, yes, no) \
SC( 38, 14, 12, 3, yes, no) \
SC( 39, 14, 12, 4, yes, no) \
\
SC( 40, 15, 13, 1, yes, no) \
SC( 41, 15, 13, 2, yes, no) \
SC( 42, 15, 13, 3, yes, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 43
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, no, 9) \
\
SC( 32, 12, 10, 1, no, no) \
SC( 33, 12, 10, 2, no, no) \
SC( 34, 12, 10, 3, no, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
SC(111, 31, 29, 4, no, no) \
\
SC(112, 32, 30, 1, no, no) \
SC(113, 32, 30, 2, no, no) \
SC(114, 32, 30, 3, no, no) \
SC(115, 32, 30, 4, no, no) \
\
SC(116, 33, 31, 1, no, no) \
SC(117, 33, 31, 2, no, no) \
SC(118, 33, 31, 3, no, no) \
SC(119, 33, 31, 4, no, no) \
\
SC(120, 34, 32, 1, no, no) \
SC(121, 34, 32, 2, no, no) \
SC(122, 34, 32, 3, no, no) \
SC(123, 34, 32, 4, no, no) \
\
SC(124, 35, 33, 1, no, no) \
SC(125, 35, 33, 2, no, no) \
SC(126, 35, 33, 3, no, no) \
SC(127, 35, 33, 4, no, no) \
\
SC(128, 36, 34, 1, no, no) \
SC(129, 36, 34, 2, no, no) \
SC(130, 36, 34, 3, no, no) \
SC(131, 36, 34, 4, no, no) \
\
SC(132, 37, 35, 1, no, no) \
SC(133, 37, 35, 2, no, no) \
SC(134, 37, 35, 3, no, no) \
SC(135, 37, 35, 4, no, no) \
\
SC(136, 38, 36, 1, no, no) \
SC(137, 38, 36, 2, no, no) \
SC(138, 38, 36, 3, no, no) \
SC(139, 38, 36, 4, no, no) \
\
SC(140, 39, 37, 1, no, no) \
SC(141, 39, 37, 2, no, no) \
SC(142, 39, 37, 3, no, no) \
SC(143, 39, 37, 4, no, no) \
\
SC(144, 40, 38, 1, no, no) \
SC(145, 40, 38, 2, no, no) \
SC(146, 40, 38, 3, no, no) \
SC(147, 40, 38, 4, no, no) \
\
SC(148, 41, 39, 1, no, no) \
SC(149, 41, 39, 2, no, no) \
SC(150, 41, 39, 3, no, no) \
SC(151, 41, 39, 4, no, no) \
\
SC(152, 42, 40, 1, no, no) \
SC(153, 42, 40, 2, no, no) \
SC(154, 42, 40, 3, no, no) \
SC(155, 42, 40, 4, no, no) \
\
SC(156, 43, 41, 1, no, no) \
SC(157, 43, 41, 2, no, no) \
SC(158, 43, 41, 3, no, no) \
SC(159, 43, 41, 4, no, no) \
\
SC(160, 44, 42, 1, no, no) \
SC(161, 44, 42, 2, no, no) \
SC(162, 44, 42, 3, no, no) \
SC(163, 44, 42, 4, no, no) \
\
SC(164, 45, 43, 1, no, no) \
SC(165, 45, 43, 2, no, no) \
SC(166, 45, 43, 3, no, no) \
SC(167, 45, 43, 4, no, no) \
\
SC(168, 46, 44, 1, no, no) \
SC(169, 46, 44, 2, no, no) \
SC(170, 46, 44, 3, no, no) \
SC(171, 46, 44, 4, no, no) \
\
SC(172, 47, 45, 1, no, no) \
SC(173, 47, 45, 2, no, no) \
SC(174, 47, 45, 3, no, no) \
SC(175, 47, 45, 4, no, no) \
\
SC(176, 48, 46, 1, no, no) \
SC(177, 48, 46, 2, no, no) \
SC(178, 48, 46, 3, no, no) \
SC(179, 48, 46, 4, no, no) \
\
SC(180, 49, 47, 1, no, no) \
SC(181, 49, 47, 2, no, no) \
SC(182, 49, 47, 3, no, no) \
SC(183, 49, 47, 4, no, no) \
\
SC(184, 50, 48, 1, no, no) \
SC(185, 50, 48, 2, no, no) \
SC(186, 50, 48, 3, no, no) \
SC(187, 50, 48, 4, no, no) \
\
SC(188, 51, 49, 1, no, no) \
SC(189, 51, 49, 2, no, no) \
SC(190, 51, 49, 3, no, no) \
SC(191, 51, 49, 4, no, no) \
\
SC(192, 52, 50, 1, no, no) \
SC(193, 52, 50, 2, no, no) \
SC(194, 52, 50, 3, no, no) \
SC(195, 52, 50, 4, no, no) \
\
SC(196, 53, 51, 1, no, no) \
SC(197, 53, 51, 2, no, no) \
SC(198, 53, 51, 3, no, no) \
SC(199, 53, 51, 4, no, no) \
\
SC(200, 54, 52, 1, no, no) \
SC(201, 54, 52, 2, no, no) \
SC(202, 54, 52, 3, no, no) \
SC(203, 54, 52, 4, no, no) \
\
SC(204, 55, 53, 1, no, no) \
SC(205, 55, 53, 2, no, no) \
SC(206, 55, 53, 3, no, no) \
SC(207, 55, 53, 4, no, no) \
\
SC(208, 56, 54, 1, no, no) \
SC(209, 56, 54, 2, no, no) \
SC(210, 56, 54, 3, no, no) \
SC(211, 56, 54, 4, no, no) \
\
SC(212, 57, 55, 1, no, no) \
SC(213, 57, 55, 2, no, no) \
SC(214, 57, 55, 3, no, no) \
SC(215, 57, 55, 4, no, no) \
\
SC(216, 58, 56, 1, no, no) \
SC(217, 58, 56, 2, no, no) \
SC(218, 58, 56, 3, no, no) \
SC(219, 58, 56, 4, no, no) \
\
SC(220, 59, 57, 1, no, no) \
SC(221, 59, 57, 2, no, no) \
SC(222, 59, 57, 3, no, no) \
SC(223, 59, 57, 4, no, no) \
\
SC(224, 60, 58, 1, no, no) \
SC(225, 60, 58, 2, no, no) \
SC(226, 60, 58, 3, no, no) \
SC(227, 60, 58, 4, no, no) \
\
SC(228, 61, 59, 1, no, no) \
SC(229, 61, 59, 2, no, no) \
SC(230, 61, 59, 3, no, no) \
SC(231, 61, 59, 4, no, no) \
\
SC(232, 62, 60, 1, no, no) \
SC(233, 62, 60, 2, no, no) \
SC(234, 62, 60, 3, no, no) \
SC(235, 62, 60, 4, no, no) \
\
SC(236, 63, 61, 1, no, no) \
SC(237, 63, 61, 2, no, no) \
SC(238, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
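/*
 * Each SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) entry above
 * denotes a size class of ((size_t)1 << lg_grp) + ndelta * ((size_t)1 << lg_delta)
 * bytes; for example, SC( 35, 12, 10, 4, ...) is 4096 + 4 * 1024 = 8192 bytes.
 * LOOKUP_MAXCLASS and SMALL_MAXCLASS in each configuration block are the sizes
 * of the largest entries whose lg_delta_lookup and bin columns are set, and
 * NBINS/NLBINS count those entries, as can be checked against the tables.
 */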
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
SC(111, 31, 29, 4, no, no) \
\
SC(112, 32, 30, 1, no, no) \
SC(113, 32, 30, 2, no, no) \
SC(114, 32, 30, 3, no, no) \
SC(115, 32, 30, 4, no, no) \
\
SC(116, 33, 31, 1, no, no) \
SC(117, 33, 31, 2, no, no) \
SC(118, 33, 31, 3, no, no) \
SC(119, 33, 31, 4, no, no) \
\
SC(120, 34, 32, 1, no, no) \
SC(121, 34, 32, 2, no, no) \
SC(122, 34, 32, 3, no, no) \
SC(123, 34, 32, 4, no, no) \
\
SC(124, 35, 33, 1, no, no) \
SC(125, 35, 33, 2, no, no) \
SC(126, 35, 33, 3, no, no) \
SC(127, 35, 33, 4, no, no) \
\
SC(128, 36, 34, 1, no, no) \
SC(129, 36, 34, 2, no, no) \
SC(130, 36, 34, 3, no, no) \
SC(131, 36, 34, 4, no, no) \
\
SC(132, 37, 35, 1, no, no) \
SC(133, 37, 35, 2, no, no) \
SC(134, 37, 35, 3, no, no) \
SC(135, 37, 35, 4, no, no) \
\
SC(136, 38, 36, 1, no, no) \
SC(137, 38, 36, 2, no, no) \
SC(138, 38, 36, 3, no, no) \
SC(139, 38, 36, 4, no, no) \
\
SC(140, 39, 37, 1, no, no) \
SC(141, 39, 37, 2, no, no) \
SC(142, 39, 37, 3, no, no) \
SC(143, 39, 37, 4, no, no) \
\
SC(144, 40, 38, 1, no, no) \
SC(145, 40, 38, 2, no, no) \
SC(146, 40, 38, 3, no, no) \
SC(147, 40, 38, 4, no, no) \
\
SC(148, 41, 39, 1, no, no) \
SC(149, 41, 39, 2, no, no) \
SC(150, 41, 39, 3, no, no) \
SC(151, 41, 39, 4, no, no) \
\
SC(152, 42, 40, 1, no, no) \
SC(153, 42, 40, 2, no, no) \
SC(154, 42, 40, 3, no, no) \
SC(155, 42, 40, 4, no, no) \
\
SC(156, 43, 41, 1, no, no) \
SC(157, 43, 41, 2, no, no) \
SC(158, 43, 41, 3, no, no) \
SC(159, 43, 41, 4, no, no) \
\
SC(160, 44, 42, 1, no, no) \
SC(161, 44, 42, 2, no, no) \
SC(162, 44, 42, 3, no, no) \
SC(163, 44, 42, 4, no, no) \
\
SC(164, 45, 43, 1, no, no) \
SC(165, 45, 43, 2, no, no) \
SC(166, 45, 43, 3, no, no) \
SC(167, 45, 43, 4, no, no) \
\
SC(168, 46, 44, 1, no, no) \
SC(169, 46, 44, 2, no, no) \
SC(170, 46, 44, 3, no, no) \
SC(171, 46, 44, 4, no, no) \
\
SC(172, 47, 45, 1, no, no) \
SC(173, 47, 45, 2, no, no) \
SC(174, 47, 45, 3, no, no) \
SC(175, 47, 45, 4, no, no) \
\
SC(176, 48, 46, 1, no, no) \
SC(177, 48, 46, 2, no, no) \
SC(178, 48, 46, 3, no, no) \
SC(179, 48, 46, 4, no, no) \
\
SC(180, 49, 47, 1, no, no) \
SC(181, 49, 47, 2, no, no) \
SC(182, 49, 47, 3, no, no) \
SC(183, 49, 47, 4, no, no) \
\
SC(184, 50, 48, 1, no, no) \
SC(185, 50, 48, 2, no, no) \
SC(186, 50, 48, 3, no, no) \
SC(187, 50, 48, 4, no, no) \
\
SC(188, 51, 49, 1, no, no) \
SC(189, 51, 49, 2, no, no) \
SC(190, 51, 49, 3, no, no) \
SC(191, 51, 49, 4, no, no) \
\
SC(192, 52, 50, 1, no, no) \
SC(193, 52, 50, 2, no, no) \
SC(194, 52, 50, 3, no, no) \
SC(195, 52, 50, 4, no, no) \
\
SC(196, 53, 51, 1, no, no) \
SC(197, 53, 51, 2, no, no) \
SC(198, 53, 51, 3, no, no) \
SC(199, 53, 51, 4, no, no) \
\
SC(200, 54, 52, 1, no, no) \
SC(201, 54, 52, 2, no, no) \
SC(202, 54, 52, 3, no, no) \
SC(203, 54, 52, 4, no, no) \
\
SC(204, 55, 53, 1, no, no) \
SC(205, 55, 53, 2, no, no) \
SC(206, 55, 53, 3, no, no) \
SC(207, 55, 53, 4, no, no) \
\
SC(208, 56, 54, 1, no, no) \
SC(209, 56, 54, 2, no, no) \
SC(210, 56, 54, 3, no, no) \
SC(211, 56, 54, 4, no, no) \
\
SC(212, 57, 55, 1, no, no) \
SC(213, 57, 55, 2, no, no) \
SC(214, 57, 55, 3, no, no) \
SC(215, 57, 55, 4, no, no) \
\
SC(216, 58, 56, 1, no, no) \
SC(217, 58, 56, 2, no, no) \
SC(218, 58, 56, 3, no, no) \
SC(219, 58, 56, 4, no, no) \
\
SC(220, 59, 57, 1, no, no) \
SC(221, 59, 57, 2, no, no) \
SC(222, 59, 57, 3, no, no) \
SC(223, 59, 57, 4, no, no) \
\
SC(224, 60, 58, 1, no, no) \
SC(225, 60, 58, 2, no, no) \
SC(226, 60, 58, 3, no, no) \
SC(227, 60, 58, 4, no, no) \
\
SC(228, 61, 59, 1, no, no) \
SC(229, 61, 59, 2, no, no) \
SC(230, 61, 59, 3, no, no) \
SC(231, 61, 59, 4, no, no) \
\
SC(232, 62, 60, 1, no, no) \
SC(233, 62, 60, 2, no, no) \
SC(234, 62, 60, 3, no, no) \
SC(235, 62, 60, 4, no, no) \
\
SC(236, 63, 61, 1, no, no) \
SC(237, 63, 61, 2, no, no) \
SC(238, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 35
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, yes, no) \
\
SC( 36, 13, 11, 1, yes, no) \
SC( 37, 13, 11, 2, yes, no) \
SC( 38, 13, 11, 3, yes, no) \
SC( 39, 13, 11, 4, yes, no) \
\
SC( 40, 14, 12, 1, yes, no) \
SC( 41, 14, 12, 2, yes, no) \
SC( 42, 14, 12, 3, yes, no) \
SC( 43, 14, 12, 4, yes, no) \
\
SC( 44, 15, 13, 1, yes, no) \
SC( 45, 15, 13, 2, yes, no) \
SC( 46, 15, 13, 3, yes, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
SC(111, 31, 29, 4, no, no) \
\
SC(112, 32, 30, 1, no, no) \
SC(113, 32, 30, 2, no, no) \
SC(114, 32, 30, 3, no, no) \
SC(115, 32, 30, 4, no, no) \
\
SC(116, 33, 31, 1, no, no) \
SC(117, 33, 31, 2, no, no) \
SC(118, 33, 31, 3, no, no) \
SC(119, 33, 31, 4, no, no) \
\
SC(120, 34, 32, 1, no, no) \
SC(121, 34, 32, 2, no, no) \
SC(122, 34, 32, 3, no, no) \
SC(123, 34, 32, 4, no, no) \
\
SC(124, 35, 33, 1, no, no) \
SC(125, 35, 33, 2, no, no) \
SC(126, 35, 33, 3, no, no) \
SC(127, 35, 33, 4, no, no) \
\
SC(128, 36, 34, 1, no, no) \
SC(129, 36, 34, 2, no, no) \
SC(130, 36, 34, 3, no, no) \
SC(131, 36, 34, 4, no, no) \
\
SC(132, 37, 35, 1, no, no) \
SC(133, 37, 35, 2, no, no) \
SC(134, 37, 35, 3, no, no) \
SC(135, 37, 35, 4, no, no) \
\
SC(136, 38, 36, 1, no, no) \
SC(137, 38, 36, 2, no, no) \
SC(138, 38, 36, 3, no, no) \
SC(139, 38, 36, 4, no, no) \
\
SC(140, 39, 37, 1, no, no) \
SC(141, 39, 37, 2, no, no) \
SC(142, 39, 37, 3, no, no) \
SC(143, 39, 37, 4, no, no) \
\
SC(144, 40, 38, 1, no, no) \
SC(145, 40, 38, 2, no, no) \
SC(146, 40, 38, 3, no, no) \
SC(147, 40, 38, 4, no, no) \
\
SC(148, 41, 39, 1, no, no) \
SC(149, 41, 39, 2, no, no) \
SC(150, 41, 39, 3, no, no) \
SC(151, 41, 39, 4, no, no) \
\
SC(152, 42, 40, 1, no, no) \
SC(153, 42, 40, 2, no, no) \
SC(154, 42, 40, 3, no, no) \
SC(155, 42, 40, 4, no, no) \
\
SC(156, 43, 41, 1, no, no) \
SC(157, 43, 41, 2, no, no) \
SC(158, 43, 41, 3, no, no) \
SC(159, 43, 41, 4, no, no) \
\
SC(160, 44, 42, 1, no, no) \
SC(161, 44, 42, 2, no, no) \
SC(162, 44, 42, 3, no, no) \
SC(163, 44, 42, 4, no, no) \
\
SC(164, 45, 43, 1, no, no) \
SC(165, 45, 43, 2, no, no) \
SC(166, 45, 43, 3, no, no) \
SC(167, 45, 43, 4, no, no) \
\
SC(168, 46, 44, 1, no, no) \
SC(169, 46, 44, 2, no, no) \
SC(170, 46, 44, 3, no, no) \
SC(171, 46, 44, 4, no, no) \
\
SC(172, 47, 45, 1, no, no) \
SC(173, 47, 45, 2, no, no) \
SC(174, 47, 45, 3, no, no) \
SC(175, 47, 45, 4, no, no) \
\
SC(176, 48, 46, 1, no, no) \
SC(177, 48, 46, 2, no, no) \
SC(178, 48, 46, 3, no, no) \
SC(179, 48, 46, 4, no, no) \
\
SC(180, 49, 47, 1, no, no) \
SC(181, 49, 47, 2, no, no) \
SC(182, 49, 47, 3, no, no) \
SC(183, 49, 47, 4, no, no) \
\
SC(184, 50, 48, 1, no, no) \
SC(185, 50, 48, 2, no, no) \
SC(186, 50, 48, 3, no, no) \
SC(187, 50, 48, 4, no, no) \
\
SC(188, 51, 49, 1, no, no) \
SC(189, 51, 49, 2, no, no) \
SC(190, 51, 49, 3, no, no) \
SC(191, 51, 49, 4, no, no) \
\
SC(192, 52, 50, 1, no, no) \
SC(193, 52, 50, 2, no, no) \
SC(194, 52, 50, 3, no, no) \
SC(195, 52, 50, 4, no, no) \
\
SC(196, 53, 51, 1, no, no) \
SC(197, 53, 51, 2, no, no) \
SC(198, 53, 51, 3, no, no) \
SC(199, 53, 51, 4, no, no) \
\
SC(200, 54, 52, 1, no, no) \
SC(201, 54, 52, 2, no, no) \
SC(202, 54, 52, 3, no, no) \
SC(203, 54, 52, 4, no, no) \
\
SC(204, 55, 53, 1, no, no) \
SC(205, 55, 53, 2, no, no) \
SC(206, 55, 53, 3, no, no) \
SC(207, 55, 53, 4, no, no) \
\
SC(208, 56, 54, 1, no, no) \
SC(209, 56, 54, 2, no, no) \
SC(210, 56, 54, 3, no, no) \
SC(211, 56, 54, 4, no, no) \
\
SC(212, 57, 55, 1, no, no) \
SC(213, 57, 55, 2, no, no) \
SC(214, 57, 55, 3, no, no) \
SC(215, 57, 55, 4, no, no) \
\
SC(216, 58, 56, 1, no, no) \
SC(217, 58, 56, 2, no, no) \
SC(218, 58, 56, 3, no, no) \
SC(219, 58, 56, 4, no, no) \
\
SC(220, 59, 57, 1, no, no) \
SC(221, 59, 57, 2, no, no) \
SC(222, 59, 57, 3, no, no) \
SC(223, 59, 57, 4, no, no) \
\
SC(224, 60, 58, 1, no, no) \
SC(225, 60, 58, 2, no, no) \
SC(226, 60, 58, 3, no, no) \
SC(227, 60, 58, 4, no, no) \
\
SC(228, 61, 59, 1, no, no) \
SC(229, 61, 59, 2, no, no) \
SC(230, 61, 59, 3, no, no) \
SC(231, 61, 59, 4, no, no) \
\
SC(232, 62, 60, 1, no, no) \
SC(233, 62, 60, 2, no, no) \
SC(234, 62, 60, 3, no, no) \
SC(235, 62, 60, 4, no, no) \
\
SC(236, 63, 61, 1, no, no) \
SC(237, 63, 61, 2, no, no) \
SC(238, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 47
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, no, 9) \
\
SC( 29, 12, 10, 1, no, no) \
SC( 30, 12, 10, 2, no, no) \
SC( 31, 12, 10, 3, no, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
SC(108, 31, 29, 4, no, no) \
\
SC(109, 32, 30, 1, no, no) \
SC(110, 32, 30, 2, no, no) \
SC(111, 32, 30, 3, no, no) \
SC(112, 32, 30, 4, no, no) \
\
SC(113, 33, 31, 1, no, no) \
SC(114, 33, 31, 2, no, no) \
SC(115, 33, 31, 3, no, no) \
SC(116, 33, 31, 4, no, no) \
\
SC(117, 34, 32, 1, no, no) \
SC(118, 34, 32, 2, no, no) \
SC(119, 34, 32, 3, no, no) \
SC(120, 34, 32, 4, no, no) \
\
SC(121, 35, 33, 1, no, no) \
SC(122, 35, 33, 2, no, no) \
SC(123, 35, 33, 3, no, no) \
SC(124, 35, 33, 4, no, no) \
\
SC(125, 36, 34, 1, no, no) \
SC(126, 36, 34, 2, no, no) \
SC(127, 36, 34, 3, no, no) \
SC(128, 36, 34, 4, no, no) \
\
SC(129, 37, 35, 1, no, no) \
SC(130, 37, 35, 2, no, no) \
SC(131, 37, 35, 3, no, no) \
SC(132, 37, 35, 4, no, no) \
\
SC(133, 38, 36, 1, no, no) \
SC(134, 38, 36, 2, no, no) \
SC(135, 38, 36, 3, no, no) \
SC(136, 38, 36, 4, no, no) \
\
SC(137, 39, 37, 1, no, no) \
SC(138, 39, 37, 2, no, no) \
SC(139, 39, 37, 3, no, no) \
SC(140, 39, 37, 4, no, no) \
\
SC(141, 40, 38, 1, no, no) \
SC(142, 40, 38, 2, no, no) \
SC(143, 40, 38, 3, no, no) \
SC(144, 40, 38, 4, no, no) \
\
SC(145, 41, 39, 1, no, no) \
SC(146, 41, 39, 2, no, no) \
SC(147, 41, 39, 3, no, no) \
SC(148, 41, 39, 4, no, no) \
\
SC(149, 42, 40, 1, no, no) \
SC(150, 42, 40, 2, no, no) \
SC(151, 42, 40, 3, no, no) \
SC(152, 42, 40, 4, no, no) \
\
SC(153, 43, 41, 1, no, no) \
SC(154, 43, 41, 2, no, no) \
SC(155, 43, 41, 3, no, no) \
SC(156, 43, 41, 4, no, no) \
\
SC(157, 44, 42, 1, no, no) \
SC(158, 44, 42, 2, no, no) \
SC(159, 44, 42, 3, no, no) \
SC(160, 44, 42, 4, no, no) \
\
SC(161, 45, 43, 1, no, no) \
SC(162, 45, 43, 2, no, no) \
SC(163, 45, 43, 3, no, no) \
SC(164, 45, 43, 4, no, no) \
\
SC(165, 46, 44, 1, no, no) \
SC(166, 46, 44, 2, no, no) \
SC(167, 46, 44, 3, no, no) \
SC(168, 46, 44, 4, no, no) \
\
SC(169, 47, 45, 1, no, no) \
SC(170, 47, 45, 2, no, no) \
SC(171, 47, 45, 3, no, no) \
SC(172, 47, 45, 4, no, no) \
\
SC(173, 48, 46, 1, no, no) \
SC(174, 48, 46, 2, no, no) \
SC(175, 48, 46, 3, no, no) \
SC(176, 48, 46, 4, no, no) \
\
SC(177, 49, 47, 1, no, no) \
SC(178, 49, 47, 2, no, no) \
SC(179, 49, 47, 3, no, no) \
SC(180, 49, 47, 4, no, no) \
\
SC(181, 50, 48, 1, no, no) \
SC(182, 50, 48, 2, no, no) \
SC(183, 50, 48, 3, no, no) \
SC(184, 50, 48, 4, no, no) \
\
SC(185, 51, 49, 1, no, no) \
SC(186, 51, 49, 2, no, no) \
SC(187, 51, 49, 3, no, no) \
SC(188, 51, 49, 4, no, no) \
\
SC(189, 52, 50, 1, no, no) \
SC(190, 52, 50, 2, no, no) \
SC(191, 52, 50, 3, no, no) \
SC(192, 52, 50, 4, no, no) \
\
SC(193, 53, 51, 1, no, no) \
SC(194, 53, 51, 2, no, no) \
SC(195, 53, 51, 3, no, no) \
SC(196, 53, 51, 4, no, no) \
\
SC(197, 54, 52, 1, no, no) \
SC(198, 54, 52, 2, no, no) \
SC(199, 54, 52, 3, no, no) \
SC(200, 54, 52, 4, no, no) \
\
SC(201, 55, 53, 1, no, no) \
SC(202, 55, 53, 2, no, no) \
SC(203, 55, 53, 3, no, no) \
SC(204, 55, 53, 4, no, no) \
\
SC(205, 56, 54, 1, no, no) \
SC(206, 56, 54, 2, no, no) \
SC(207, 56, 54, 3, no, no) \
SC(208, 56, 54, 4, no, no) \
\
SC(209, 57, 55, 1, no, no) \
SC(210, 57, 55, 2, no, no) \
SC(211, 57, 55, 3, no, no) \
SC(212, 57, 55, 4, no, no) \
\
SC(213, 58, 56, 1, no, no) \
SC(214, 58, 56, 2, no, no) \
SC(215, 58, 56, 3, no, no) \
SC(216, 58, 56, 4, no, no) \
\
SC(217, 59, 57, 1, no, no) \
SC(218, 59, 57, 2, no, no) \
SC(219, 59, 57, 3, no, no) \
SC(220, 59, 57, 4, no, no) \
\
SC(221, 60, 58, 1, no, no) \
SC(222, 60, 58, 2, no, no) \
SC(223, 60, 58, 3, no, no) \
SC(224, 60, 58, 4, no, no) \
\
SC(225, 61, 59, 1, no, no) \
SC(226, 61, 59, 2, no, no) \
SC(227, 61, 59, 3, no, no) \
SC(228, 61, 59, 4, no, no) \
\
SC(229, 62, 60, 1, no, no) \
SC(230, 62, 60, 2, no, no) \
SC(231, 62, 60, 3, no, no) \
SC(232, 62, 60, 4, no, no) \
\
SC(233, 63, 61, 1, no, no) \
SC(234, 63, 61, 2, no, no) \
SC(235, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 28
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
SC(108, 31, 29, 4, no, no) \
\
SC(109, 32, 30, 1, no, no) \
SC(110, 32, 30, 2, no, no) \
SC(111, 32, 30, 3, no, no) \
SC(112, 32, 30, 4, no, no) \
\
SC(113, 33, 31, 1, no, no) \
SC(114, 33, 31, 2, no, no) \
SC(115, 33, 31, 3, no, no) \
SC(116, 33, 31, 4, no, no) \
\
SC(117, 34, 32, 1, no, no) \
SC(118, 34, 32, 2, no, no) \
SC(119, 34, 32, 3, no, no) \
SC(120, 34, 32, 4, no, no) \
\
SC(121, 35, 33, 1, no, no) \
SC(122, 35, 33, 2, no, no) \
SC(123, 35, 33, 3, no, no) \
SC(124, 35, 33, 4, no, no) \
\
SC(125, 36, 34, 1, no, no) \
SC(126, 36, 34, 2, no, no) \
SC(127, 36, 34, 3, no, no) \
SC(128, 36, 34, 4, no, no) \
\
SC(129, 37, 35, 1, no, no) \
SC(130, 37, 35, 2, no, no) \
SC(131, 37, 35, 3, no, no) \
SC(132, 37, 35, 4, no, no) \
\
SC(133, 38, 36, 1, no, no) \
SC(134, 38, 36, 2, no, no) \
SC(135, 38, 36, 3, no, no) \
SC(136, 38, 36, 4, no, no) \
\
SC(137, 39, 37, 1, no, no) \
SC(138, 39, 37, 2, no, no) \
SC(139, 39, 37, 3, no, no) \
SC(140, 39, 37, 4, no, no) \
\
SC(141, 40, 38, 1, no, no) \
SC(142, 40, 38, 2, no, no) \
SC(143, 40, 38, 3, no, no) \
SC(144, 40, 38, 4, no, no) \
\
SC(145, 41, 39, 1, no, no) \
SC(146, 41, 39, 2, no, no) \
SC(147, 41, 39, 3, no, no) \
SC(148, 41, 39, 4, no, no) \
\
SC(149, 42, 40, 1, no, no) \
SC(150, 42, 40, 2, no, no) \
SC(151, 42, 40, 3, no, no) \
SC(152, 42, 40, 4, no, no) \
\
SC(153, 43, 41, 1, no, no) \
SC(154, 43, 41, 2, no, no) \
SC(155, 43, 41, 3, no, no) \
SC(156, 43, 41, 4, no, no) \
\
SC(157, 44, 42, 1, no, no) \
SC(158, 44, 42, 2, no, no) \
SC(159, 44, 42, 3, no, no) \
SC(160, 44, 42, 4, no, no) \
\
SC(161, 45, 43, 1, no, no) \
SC(162, 45, 43, 2, no, no) \
SC(163, 45, 43, 3, no, no) \
SC(164, 45, 43, 4, no, no) \
\
SC(165, 46, 44, 1, no, no) \
SC(166, 46, 44, 2, no, no) \
SC(167, 46, 44, 3, no, no) \
SC(168, 46, 44, 4, no, no) \
\
SC(169, 47, 45, 1, no, no) \
SC(170, 47, 45, 2, no, no) \
SC(171, 47, 45, 3, no, no) \
SC(172, 47, 45, 4, no, no) \
\
SC(173, 48, 46, 1, no, no) \
SC(174, 48, 46, 2, no, no) \
SC(175, 48, 46, 3, no, no) \
SC(176, 48, 46, 4, no, no) \
\
SC(177, 49, 47, 1, no, no) \
SC(178, 49, 47, 2, no, no) \
SC(179, 49, 47, 3, no, no) \
SC(180, 49, 47, 4, no, no) \
\
SC(181, 50, 48, 1, no, no) \
SC(182, 50, 48, 2, no, no) \
SC(183, 50, 48, 3, no, no) \
SC(184, 50, 48, 4, no, no) \
\
SC(185, 51, 49, 1, no, no) \
SC(186, 51, 49, 2, no, no) \
SC(187, 51, 49, 3, no, no) \
SC(188, 51, 49, 4, no, no) \
\
SC(189, 52, 50, 1, no, no) \
SC(190, 52, 50, 2, no, no) \
SC(191, 52, 50, 3, no, no) \
SC(192, 52, 50, 4, no, no) \
\
SC(193, 53, 51, 1, no, no) \
SC(194, 53, 51, 2, no, no) \
SC(195, 53, 51, 3, no, no) \
SC(196, 53, 51, 4, no, no) \
\
SC(197, 54, 52, 1, no, no) \
SC(198, 54, 52, 2, no, no) \
SC(199, 54, 52, 3, no, no) \
SC(200, 54, 52, 4, no, no) \
\
SC(201, 55, 53, 1, no, no) \
SC(202, 55, 53, 2, no, no) \
SC(203, 55, 53, 3, no, no) \
SC(204, 55, 53, 4, no, no) \
\
SC(205, 56, 54, 1, no, no) \
SC(206, 56, 54, 2, no, no) \
SC(207, 56, 54, 3, no, no) \
SC(208, 56, 54, 4, no, no) \
\
SC(209, 57, 55, 1, no, no) \
SC(210, 57, 55, 2, no, no) \
SC(211, 57, 55, 3, no, no) \
SC(212, 57, 55, 4, no, no) \
\
SC(213, 58, 56, 1, no, no) \
SC(214, 58, 56, 2, no, no) \
SC(215, 58, 56, 3, no, no) \
SC(216, 58, 56, 4, no, no) \
\
SC(217, 59, 57, 1, no, no) \
SC(218, 59, 57, 2, no, no) \
SC(219, 59, 57, 3, no, no) \
SC(220, 59, 57, 4, no, no) \
\
SC(221, 60, 58, 1, no, no) \
SC(222, 60, 58, 2, no, no) \
SC(223, 60, 58, 3, no, no) \
SC(224, 60, 58, 4, no, no) \
\
SC(225, 61, 59, 1, no, no) \
SC(226, 61, 59, 2, no, no) \
SC(227, 61, 59, 3, no, no) \
SC(228, 61, 59, 4, no, no) \
\
SC(229, 62, 60, 1, no, no) \
SC(230, 62, 60, 2, no, no) \
SC(231, 62, 60, 3, no, no) \
SC(232, 62, 60, 4, no, no) \
\
SC(233, 63, 61, 1, no, no) \
SC(234, 63, 61, 2, no, no) \
SC(235, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 32
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, yes, no) \
\
SC( 33, 13, 11, 1, yes, no) \
SC( 34, 13, 11, 2, yes, no) \
SC( 35, 13, 11, 3, yes, no) \
SC( 36, 13, 11, 4, yes, no) \
\
SC( 37, 14, 12, 1, yes, no) \
SC( 38, 14, 12, 2, yes, no) \
SC( 39, 14, 12, 3, yes, no) \
SC( 40, 14, 12, 4, yes, no) \
\
SC( 41, 15, 13, 1, yes, no) \
SC( 42, 15, 13, 2, yes, no) \
SC( 43, 15, 13, 3, yes, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
SC(108, 31, 29, 4, no, no) \
\
SC(109, 32, 30, 1, no, no) \
SC(110, 32, 30, 2, no, no) \
SC(111, 32, 30, 3, no, no) \
SC(112, 32, 30, 4, no, no) \
\
SC(113, 33, 31, 1, no, no) \
SC(114, 33, 31, 2, no, no) \
SC(115, 33, 31, 3, no, no) \
SC(116, 33, 31, 4, no, no) \
\
SC(117, 34, 32, 1, no, no) \
SC(118, 34, 32, 2, no, no) \
SC(119, 34, 32, 3, no, no) \
SC(120, 34, 32, 4, no, no) \
\
SC(121, 35, 33, 1, no, no) \
SC(122, 35, 33, 2, no, no) \
SC(123, 35, 33, 3, no, no) \
SC(124, 35, 33, 4, no, no) \
\
SC(125, 36, 34, 1, no, no) \
SC(126, 36, 34, 2, no, no) \
SC(127, 36, 34, 3, no, no) \
SC(128, 36, 34, 4, no, no) \
\
SC(129, 37, 35, 1, no, no) \
SC(130, 37, 35, 2, no, no) \
SC(131, 37, 35, 3, no, no) \
SC(132, 37, 35, 4, no, no) \
\
SC(133, 38, 36, 1, no, no) \
SC(134, 38, 36, 2, no, no) \
SC(135, 38, 36, 3, no, no) \
SC(136, 38, 36, 4, no, no) \
\
SC(137, 39, 37, 1, no, no) \
SC(138, 39, 37, 2, no, no) \
SC(139, 39, 37, 3, no, no) \
SC(140, 39, 37, 4, no, no) \
\
SC(141, 40, 38, 1, no, no) \
SC(142, 40, 38, 2, no, no) \
SC(143, 40, 38, 3, no, no) \
SC(144, 40, 38, 4, no, no) \
\
SC(145, 41, 39, 1, no, no) \
SC(146, 41, 39, 2, no, no) \
SC(147, 41, 39, 3, no, no) \
SC(148, 41, 39, 4, no, no) \
\
SC(149, 42, 40, 1, no, no) \
SC(150, 42, 40, 2, no, no) \
SC(151, 42, 40, 3, no, no) \
SC(152, 42, 40, 4, no, no) \
\
SC(153, 43, 41, 1, no, no) \
SC(154, 43, 41, 2, no, no) \
SC(155, 43, 41, 3, no, no) \
SC(156, 43, 41, 4, no, no) \
\
SC(157, 44, 42, 1, no, no) \
SC(158, 44, 42, 2, no, no) \
SC(159, 44, 42, 3, no, no) \
SC(160, 44, 42, 4, no, no) \
\
SC(161, 45, 43, 1, no, no) \
SC(162, 45, 43, 2, no, no) \
SC(163, 45, 43, 3, no, no) \
SC(164, 45, 43, 4, no, no) \
\
SC(165, 46, 44, 1, no, no) \
SC(166, 46, 44, 2, no, no) \
SC(167, 46, 44, 3, no, no) \
SC(168, 46, 44, 4, no, no) \
\
SC(169, 47, 45, 1, no, no) \
SC(170, 47, 45, 2, no, no) \
SC(171, 47, 45, 3, no, no) \
SC(172, 47, 45, 4, no, no) \
\
SC(173, 48, 46, 1, no, no) \
SC(174, 48, 46, 2, no, no) \
SC(175, 48, 46, 3, no, no) \
SC(176, 48, 46, 4, no, no) \
\
SC(177, 49, 47, 1, no, no) \
SC(178, 49, 47, 2, no, no) \
SC(179, 49, 47, 3, no, no) \
SC(180, 49, 47, 4, no, no) \
\
SC(181, 50, 48, 1, no, no) \
SC(182, 50, 48, 2, no, no) \
SC(183, 50, 48, 3, no, no) \
SC(184, 50, 48, 4, no, no) \
\
SC(185, 51, 49, 1, no, no) \
SC(186, 51, 49, 2, no, no) \
SC(187, 51, 49, 3, no, no) \
SC(188, 51, 49, 4, no, no) \
\
SC(189, 52, 50, 1, no, no) \
SC(190, 52, 50, 2, no, no) \
SC(191, 52, 50, 3, no, no) \
SC(192, 52, 50, 4, no, no) \
\
SC(193, 53, 51, 1, no, no) \
SC(194, 53, 51, 2, no, no) \
SC(195, 53, 51, 3, no, no) \
SC(196, 53, 51, 4, no, no) \
\
SC(197, 54, 52, 1, no, no) \
SC(198, 54, 52, 2, no, no) \
SC(199, 54, 52, 3, no, no) \
SC(200, 54, 52, 4, no, no) \
\
SC(201, 55, 53, 1, no, no) \
SC(202, 55, 53, 2, no, no) \
SC(203, 55, 53, 3, no, no) \
SC(204, 55, 53, 4, no, no) \
\
SC(205, 56, 54, 1, no, no) \
SC(206, 56, 54, 2, no, no) \
SC(207, 56, 54, 3, no, no) \
SC(208, 56, 54, 4, no, no) \
\
SC(209, 57, 55, 1, no, no) \
SC(210, 57, 55, 2, no, no) \
SC(211, 57, 55, 3, no, no) \
SC(212, 57, 55, 4, no, no) \
\
SC(213, 58, 56, 1, no, no) \
SC(214, 58, 56, 2, no, no) \
SC(215, 58, 56, 3, no, no) \
SC(216, 58, 56, 4, no, no) \
\
SC(217, 59, 57, 1, no, no) \
SC(218, 59, 57, 2, no, no) \
SC(219, 59, 57, 3, no, no) \
SC(220, 59, 57, 4, no, no) \
\
SC(221, 60, 58, 1, no, no) \
SC(222, 60, 58, 2, no, no) \
SC(223, 60, 58, 3, no, no) \
SC(224, 60, 58, 4, no, no) \
\
SC(225, 61, 59, 1, no, no) \
SC(226, 61, 59, 2, no, no) \
SC(227, 61, 59, 3, no, no) \
SC(228, 61, 59, 4, no, no) \
\
SC(229, 62, 60, 1, no, no) \
SC(230, 62, 60, 2, no, no) \
SC(231, 62, 60, 3, no, no) \
SC(232, 62, 60, 4, no, no) \
\
SC(233, 63, 61, 1, no, no) \
SC(234, 63, 61, 2, no, no) \
SC(235, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 44
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, no, 9) \
\
SC( 28, 12, 10, 1, no, no) \
SC( 29, 12, 10, 2, no, no) \
SC( 30, 12, 10, 3, no, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
SC(234, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 27
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
SC(234, 63, 61, 3, no, no) \

#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, yes, no) \
\
SC( 32, 13, 11, 1, yes, no) \
SC( 33, 13, 11, 2, yes, no) \
SC( 34, 13, 11, 3, yes, no) \
SC( 35, 13, 11, 4, yes, no) \
\
SC( 36, 14, 12, 1, yes, no) \
SC( 37, 14, 12, 2, yes, no) \
SC( 38, 14, 12, 3, yes, no) \
SC( 39, 14, 12, 4, yes, no) \
\
SC( 40, 15, 13, 1, yes, no) \
SC( 41, 15, 13, 2, yes, no) \
SC( 42, 15, 13, 3, yes, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
  SC(234, 63, 61, 3, no, no)
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 43
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
#endif
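/*
 * Hypothetical sketch (disabled, not part of the generated header): the same
 * NBINS constraint could also be written as a compile-time assertion; the
 * array type gets a negative size, and the build fails, whenever NBINS
 * exceeds 255.
 */
#if 0
typedef char nbins_fit_in_uint8[(NBINS <= 255) ? 1 : -1];
#endif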
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_namespace.h
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapelm_to_pageind JEMALLOC_N(arena_mapelm_to_pageind)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_runs_avail_tree_iter JEMALLOC_N(arena_runs_avail_tree_iter)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define pools JEMALLOC_N(pools)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_free_fn JEMALLOC_N(base_free_fn)
#define base_malloc_fn JEMALLOC_N(base_malloc_fn)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dalloc JEMALLOC_N(base_node_dalloc)
#define base_pool JEMALLOC_N(base_pool)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc_arena JEMALLOC_N(chunk_alloc_arena)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_default JEMALLOC_N(chunk_alloc_default)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_global_boot JEMALLOC_N(chunk_global_boot)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dalloc_default JEMALLOC_N(chunk_dalloc_default)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunk_record JEMALLOC_N(chunk_record)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define icalloc JEMALLOC_N(icalloc)
#define icalloct JEMALLOC_N(icalloct)
#define idalloc JEMALLOC_N(idalloc)
#define idalloct JEMALLOC_N(idalloct)
#define imalloc JEMALLOC_N(imalloc)
#define imalloct JEMALLOC_N(imalloct)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define lg_floor JEMALLOC_N(lg_floor)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_rwlock_init JEMALLOC_N(malloc_rwlock_init)
#define malloc_rwlock_postfork_child JEMALLOC_N(malloc_rwlock_postfork_child)
#define malloc_rwlock_postfork_parent JEMALLOC_N(malloc_rwlock_postfork_parent)
#define malloc_rwlock_prefork JEMALLOC_N(malloc_rwlock_prefork)
#define malloc_rwlock_rdlock JEMALLOC_N(malloc_rwlock_rdlock)
#define malloc_rwlock_wrlock JEMALLOC_N(malloc_rwlock_wrlock)
#define malloc_rwlock_unlock JEMALLOC_N(malloc_rwlock_unlock)
#define malloc_rwlock_destroy JEMALLOC_N(malloc_rwlock_destroy)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define npools JEMALLOC_N(npools)
#define npools_cnt JEMALLOC_N(npools_cnt)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pools_shared_data_initialized JEMALLOC_N(pools_shared_data_initialized)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_record_object JEMALLOC_N(prof_malloc_record_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_booted JEMALLOC_N(quarantine_booted)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define quarantine_init JEMALLOC_N(quarantine_init)
#define quarantine_tls JEMALLOC_N(quarantine_tls)
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define small_bin2size JEMALLOC_N(small_bin2size)
#define small_bin2size_compute JEMALLOC_N(small_bin2size_compute)
#define small_bin2size_lookup JEMALLOC_N(small_bin2size_lookup)
#define small_bin2size_tab JEMALLOC_N(small_bin2size_tab)
#define small_s2u JEMALLOC_N(small_s2u)
#define small_s2u_compute JEMALLOC_N(small_s2u_compute)
#define small_s2u_lookup JEMALLOC_N(small_s2u_lookup)
#define small_size2bin JEMALLOC_N(small_size2bin)
#define small_size2bin_compute JEMALLOC_N(small_size2bin_compute)
#define small_size2bin_lookup JEMALLOC_N(small_size2bin_lookup)
#define small_size2bin_tab JEMALLOC_N(small_size2bin_tab)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define pool_new JEMALLOC_N(pool_new)
#define pool_destroy JEMALLOC_N(pool_destroy)
#define pools_lock JEMALLOC_N(pools_lock)
#define pool_base_lock JEMALLOC_N(pool_base_lock)
#define pool_prefork JEMALLOC_N(pool_prefork)
#define pool_postfork_parent JEMALLOC_N(pool_postfork_parent)
#define pool_postfork_child JEMALLOC_N(pool_postfork_child)
#define pool_alloc JEMALLOC_N(pool_alloc)
#define vec_get JEMALLOC_N(vec_get)
#define vec_set JEMALLOC_N(vec_set)
#define vec_delete JEMALLOC_N(vec_delete)
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h
/* ./../windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#define JEMALLOC_PREFIX "je_vmem_"
#define JEMALLOC_CPREFIX "JE_VMEM_"
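/*
 * Illustration (an assumption based on the prefix above, not text from the
 * original header): with JEMALLOC_PREFIX "je_vmem_" the public entry points
 * are exported under prefixed names, so application code would call e.g.
 *
 *   void *buf = je_vmem_malloc(64);
 *   je_vmem_free(buf);
 *
 * which lets this allocator coexist with the libc malloc()/free().
 */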
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_vmem_je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
/* #undef JEMALLOC_ATOMIC9 */
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
/* #undef JEMALLOC_OSATOMIC */
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */
/*
* Defined if madvise(2) is available.
*/
/* #undef JEMALLOC_HAVE_MADVISE */
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
/* #undef JEMALLOC_THREADED_INIT */
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#define JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
/* #undef JEMALLOC_CODE_COVERAGE */
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#define JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
/* #undef JEMALLOC_DSS */
/* Support memory filling (junk/zero/quarantine/redzone). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support Valgrind. */
/* #undef JEMALLOC_VALGRIND */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#define STATIC_PAGE_SHIFT 12
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
/* #undef JEMALLOC_MUNMAP */
/* TLS is used to map arenas and magazine caches to threads. */
/* #undef JEMALLOC_TLS */
/*
* ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
* instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSL ffsl
#define JEMALLOC_INTERNAL_FFS ffs
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
/* #undef JEMALLOC_IVSALLOC */
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/* #undef JEMALLOC_ZONE_VERSION */
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
/*
* Define if operating system has alloca.h header.
*/
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
/* #undef JEMALLOC_HAS_RESTRICT */
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 2
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_unnamespace.h
#undef je_pool_create
#undef je_pool_delete
#undef je_pool_malloc
#undef je_pool_calloc
#undef je_pool_ralloc
#undef je_pool_aligned_alloc
#undef je_pool_free
#undef je_pool_malloc_usable_size
#undef je_pool_malloc_stats_print
#undef je_pool_extend
#undef je_pool_set_alloc_funcs
#undef je_pool_check
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_navsnprintf
#undef je_malloc_stats_print
#undef je_malloc_usable_size
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_namespace.h
#define je_pool_create JEMALLOC_N(pool_create)
#define je_pool_delete JEMALLOC_N(pool_delete)
#define je_pool_malloc JEMALLOC_N(pool_malloc)
#define je_pool_calloc JEMALLOC_N(pool_calloc)
#define je_pool_ralloc JEMALLOC_N(pool_ralloc)
#define je_pool_aligned_alloc JEMALLOC_N(pool_aligned_alloc)
#define je_pool_free JEMALLOC_N(pool_free)
#define je_pool_malloc_usable_size JEMALLOC_N(pool_malloc_usable_size)
#define je_pool_malloc_stats_print JEMALLOC_N(pool_malloc_stats_print)
#define je_pool_extend JEMALLOC_N(pool_extend)
#define je_pool_set_alloc_funcs JEMALLOC_N(pool_set_alloc_funcs)
#define je_pool_check JEMALLOC_N(pool_check)
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_navsnprintf JEMALLOC_N(navsnprintf)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
| 1,536 | 45.575758 | 70 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal.h
|
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "jemalloc/jemalloc.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) je_vmem_je_##n
# include "jemalloc/jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
 * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#include "jemalloc/internal/jemalloc_internal_macros.h"
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# ifdef __sparc64__
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
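/*
 * Worked example (illustrative, not part of the original header): with
 * LG_QUANTUM == 4, QUANTUM is 16 and QUANTUM_MASK is 15, so
 *
 *   QUANTUM_CEILING(13) == (13 + 15) & ~15 == 16
 *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 *   QUANTUM_CEILING(32) == (32 + 15) & ~15 == 32
 */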
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
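/*
 * Worked example (illustrative): with STATIC_PAGE_SHIFT == 12 the page size
 * is 4096 bytes, so PAGE_CEILING(5000) == (5000 + 4095) & ~4095 == 8192,
 * while PAGE_CEILING(4096) == 4096.
 */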
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & ((alignment) - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + ((alignment) - 1)) & (-(alignment)))
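/*
 * Worked example (illustrative, not part of the original header), using a
 * 64-byte alignment:
 *
 *   ALIGNMENT_ADDR2BASE((void *)0x1030, 64)   -> (void *)0x1000
 *   ALIGNMENT_ADDR2OFFSET((void *)0x1030, 64) -> 0x30
 *   ALIGNMENT_CEILING(100, 64)                -> (100 + 63) & ~63 == 128
 */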
/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
#ifndef alloca
# define alloca _alloca
#endif
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
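/*
 * Usage sketch (an illustration, not from the original source): inside a
 * function body, VARIABLE_ARRAY(int, counts, n) declares an array of n ints
 * on the stack, expanding either to alloca() (pre-C99 / MSVC) or to a C99
 * variable length array. Assuming <string.h> for memset():
 *
 *   static void
 *   zero_counts(size_t n)
 *   {
 *           VARIABLE_ARRAY(int, counts, n);
 *           memset(counts, 0, sizeof(int) * n);
 *   }
 */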
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
/*
* The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
extern unsigned npools;
extern unsigned npools_cnt;
extern pool_t base_pool;
extern pool_t **pools;
extern malloc_mutex_t pools_lock;
extern void *(*base_malloc_fn)(size_t);
extern void (*base_free_fn)(void *);
extern bool pools_shared_data_create(void);
arena_t *arenas_extend(pool_t *pool, unsigned ind);
bool arenas_tsd_extend(tsd_pool_t *tsd, unsigned len);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(pool_t *pool);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
/*
* Include arena.h the first time in order to provide inline functions for this
* header's inlines.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, tsd_pool_t)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(pool_t *pool);
arena_t *choose_arena(arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
malloc_tsd_externs(arenas, tsd_pool_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, tsd_pool_t, {0},
arenas_cleanup)
/*
* Check if the arena is dummy.
*/
JEMALLOC_ALWAYS_INLINE bool
is_arena_dummy(arena_t *arena) {
return (arena->ind == ARENA_DUMMY_IND);
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
if (size <= SMALL_MAXCLASS)
return (small_s2u(size));
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
}
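/*
 * Illustrative usage (mirrors the isalloc() comment further below): s2u()
 * computes, without allocating, the usable size a request would be rounded
 * up to: a small request maps to its bin size, a mid-size request to a page
 * multiple, and anything larger to a chunk multiple.
 *
 *   size_t usize = s2u(request_size);
 */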
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each small
* size class, every object is aligned at the smallest power of two
* that is non-zero in the base two representation of the size. For
* example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
}
if (usize <= arena_maxclass && alignment <= PAGE) {
if (usize <= SMALL_MAXCLASS)
return (small_s2u(usize));
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/*
* We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler.
*/
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/
run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
JEMALLOC_INLINE unsigned
narenas_total_get(pool_t *pool)
{
unsigned narenas;
malloc_rwlock_rdlock(&pool->arenas_lock);
narenas = pool->narenas_total;
malloc_rwlock_unlock(&pool->arenas_lock);
return (narenas);
}
/*
* Choose an arena based on a per-thread value.
* Arena pointer must be either a valid arena pointer or a dummy arena with
* pool field filled.
*/
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
arena_t *ret;
tsd_pool_t *tsd;
pool_t *pool;
if (!is_arena_dummy(arena))
return (arena);
pool = arena->pool;
tsd = arenas_tsd_get();
/* expand arenas array if necessary */
if ((tsd->npools <= pool->pool_id) &&
arenas_tsd_extend(tsd, pool->pool_id)) {
return (NULL);
}
if ( (tsd->seqno[pool->pool_id] != pool->seqno) ||
(ret = tsd->arenas[pool->pool_id]) == NULL) {
ret = choose_arena_hard(pool);
assert(ret != NULL);
}
return (ret);
}
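/*
 * Call pattern (illustration only, lifted from the imalloc()/ipalloc()
 * wrappers further below): a caller with no particular arena passes a dummy
 * arena that only carries the pool, and choose_arena() resolves it to the
 * calling thread's real arena for that pool:
 *
 *   arena_t dummy;
 *   DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
 *   arena_t *a = choose_arena(&dummy);
 */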
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
* Include arena.h the second and third times in order to resolve circular
* dependencies with tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *pool_imalloc(pool_t *pool, size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *pool_icalloc(pool_t *pool, size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
void *pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t pool_isalloc(pool_t *pool, const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void pool_idalloct(pool_t *pool, void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
void *pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra,
size_t alignment, bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
int msc_clz(unsigned int val);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
# ifdef _MSC_VER
JEMALLOC_ALWAYS_INLINE int
msc_clz(unsigned int val)
{
unsigned int res = 0;
# if LG_SIZEOF_INT == 2
if (_BitScanReverse(&res, val)) {
return 31 - res;
}
else {
return 32;
}
# elif LG_SIZEOF_INT == 3
if (_BitScanReverse64(&res, val)) {
return 63 - res;
}
else {
return 64;
}
# else
# error "Unsupported clz function for that size of int"
# endif
}
#endif
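/*
 * Illustrative values for msc_clz() (assuming LG_SIZEOF_INT == 2, i.e.
 * 32-bit int; not part of the original source):
 *
 *	msc_clz(1)           == 31
 *	msc_clz(0x80000000u) == 0
 *	msc_clz(0)           == 32   (no bit set)
 */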
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(arena, size, false));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
return (imalloct(size, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
pool_imalloc(pool_t *pool, size_t size)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
return (imalloct(size, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(arena, size, true));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
return (icalloct(size, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
pool_icalloc(pool_t *pool, size_t size)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
return (icalloct(size, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(arena, usize, zero);
else
ret = huge_palloc(arena, usize, alignment, zero);
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
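/*
 * Illustrative dispatch examples for ipalloct() (not part of the original
 * source; assumes 4 KiB pages and the default chunk size, so arena_maxclass
 * sits a little below the chunk size):
 *
 *	ipalloct(64, 64, ...)          -> arena_malloc()  (small, tcache path)
 *	ipalloct(8192, 8192, ...)      -> arena_palloc()  (alignment > PAGE)
 *	ipalloct(8 MB, 4096, ...)      -> huge_malloc()
 *	ipalloct(8 MB, 8 MB, ...)      -> huge_palloc()   (alignment > chunksize)
 *
 * In every case the caller must already have usize == sa2u(usize, alignment).
 */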
JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
return (ipalloct(usize, alignment, zero, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
return (ipalloct(usize, alignment, zero, true, &dummy));
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
}
/*
* Typical usage:
* void *ptr = [...]
 * size_t sz = pool_isalloc(pool, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
pool_isalloc(pool_t *pool, const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_pool_salloc(pool, ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
size_t i;
malloc_mutex_lock(&pools_lock);
unsigned n = npools;
for (i = 0; i < n; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(pool->chunks_rtree,
(uintptr_t)CHUNK_ADDR2BASE(ptr)) != 0)
break;
}
malloc_mutex_unlock(&pools_lock);
if (i == n)
return 0;
return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
size_t ret;
if (usize <= SMALL_MAXCLASS) {
size_t binind = small_size2bin(usize);
assert(binind < NBINS);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
return (ret);
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk, ptr, try_tcache);
else
huge_dalloc(&base_pool, ptr);
}
JEMALLOC_ALWAYS_INLINE void
pool_idalloct(pool_t *pool, void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk, ptr, try_tcache);
else
huge_dalloc(pool, ptr);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
idalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
idalloct(ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
pool_idalloct(pool, ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
iqalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena)
{
void *p;
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
return (p);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return (iralloct_realign(ptr, oldsize, size, extra, alignment,
zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
if (size + extra <= arena_maxclass) {
void *ret;
ret = arena_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_alloc,
try_tcache_dalloc);
if ((ret != NULL) || (size + extra > oldsize))
return (ret);
if (oldsize > chunksize) {
size_t old_usize JEMALLOC_CC_SILENCE_INIT(0);
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (config_valgrind && in_valgrind) {
old_usize = isalloc(ptr, config_prof);
old_rzsize = config_prof ?
p2rz(ptr) : u2rz(old_usize);
}
ret = huge_ralloc(arena, ptr, oldsize, chunksize, 0,
alignment, zero, try_tcache_dalloc);
JEMALLOC_VALGRIND_REALLOC(true, ret, s2u(chunksize),
true, ptr, old_usize, old_rzsize, true, false);
if (ret != NULL) {
/* Now, it should succeed... */
return arena_ralloc(arena, ret, chunksize, size,
extra, alignment, zero, try_tcache_alloc,
try_tcache_dalloc);
}
}
return NULL;
} else {
return (huge_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc));
}
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE void *
pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra,
size_t alignment, bool zero)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
if (size <= arena_maxclass)
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
else
return (huge_ralloc_no_move(&base_pool, ptr, oldsize, size, extra, zero));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
#ifdef _WIN32
#define __builtin_clz(x) msc_clz(x)
#endif
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
| 27,780 | 24.095754 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_unnamespace.h
|
#undef a0calloc
#undef a0free
#undef a0malloc
#undef arena_alloc_junk_small
#undef arena_bin_index
#undef arena_bin_info
#undef arena_boot
#undef arena_chunk_alloc_huge
#undef arena_chunk_dalloc_huge
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_locked
#undef arena_dalloc_small
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_malloc
#undef arena_malloc_large
#undef arena_malloc_small
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbits_unzeroed_set
#undef arena_mapbitsp_get
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_mapelm_to_pageind
#undef arena_mapp_get
#undef arena_maxclass
#undef arena_new
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_ctx_get
#undef arena_prof_ctx_set
#undef arena_prof_promoted
#undef arena_ptr_small_binind_get
#undef arena_purge_all
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_redzone_corruption
#undef arena_run_regind
#undef arena_runs_avail_tree_iter
#undef arena_salloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arenas
#undef pools
#undef arenas_booted
#undef arenas_cleanup
#undef arenas_extend
#undef arenas_initialized
#undef arenas_lock
#undef arenas_tls
#undef arenas_tsd
#undef arenas_tsd_boot
#undef arenas_tsd_cleanup_wrapper
#undef arenas_tsd_get
#undef arenas_tsd_get_wrapper
#undef arenas_tsd_init_head
#undef arenas_tsd_set
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef base_alloc
#undef base_boot
#undef base_calloc
#undef base_free_fn
#undef base_malloc_fn
#undef base_node_alloc
#undef base_node_dalloc
#undef base_pool
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_info_ngroups
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bt_init
#undef buferror
#undef choose_arena
#undef choose_arena_hard
#undef chunk_alloc_arena
#undef chunk_alloc_base
#undef chunk_alloc_default
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_global_boot
#undef chunk_boot
#undef chunk_dalloc_default
#undef chunk_dalloc_mmap
#undef chunk_dss_boot
#undef chunk_dss_postfork_child
#undef chunk_dss_postfork_parent
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_dss_prefork
#undef chunk_in_dss
#undef chunk_npages
#undef chunk_postfork_child
#undef chunk_postfork_parent
#undef chunk_prefork
#undef chunk_unmap
#undef chunk_record
#undef chunks_mtx
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_bucket_search
#undef ckh_count
#undef ckh_delete
#undef ckh_evict_reloc_insert
#undef ckh_insert
#undef ckh_isearch
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_rebuild
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ckh_try_bucket_insert
#undef ckh_try_insert
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef dss_prec_names
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szad_first
#undef extent_tree_szad_insert
#undef extent_tree_szad_iter
#undef extent_tree_szad_iter_recurse
#undef extent_tree_szad_iter_start
#undef extent_tree_szad_last
#undef extent_tree_szad_new
#undef extent_tree_szad_next
#undef extent_tree_szad_nsearch
#undef extent_tree_szad_prev
#undef extent_tree_szad_psearch
#undef extent_tree_szad_remove
#undef extent_tree_szad_reverse_iter
#undef extent_tree_szad_reverse_iter_recurse
#undef extent_tree_szad_reverse_iter_start
#undef extent_tree_szad_search
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_allocated
#undef huge_boot
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_ndalloc
#undef huge_nmalloc
#undef huge_palloc
#undef huge_postfork_child
#undef huge_postfork_parent
#undef huge_prefork
#undef huge_prof_ctx_get
#undef huge_prof_ctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef icalloc
#undef icalloct
#undef idalloc
#undef idalloct
#undef imalloc
#undef imalloct
#undef in_valgrind
#undef ipalloc
#undef ipalloct
#undef iqalloc
#undef iqalloct
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef lg_floor
#undef malloc_cprintf
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_rwlock_init
#undef malloc_rwlock_postfork_child
#undef malloc_rwlock_postfork_parent
#undef malloc_rwlock_prefork
#undef malloc_rwlock_rdlock
#undef malloc_rwlock_wrlock
#undef malloc_rwlock_unlock
#undef malloc_rwlock_destroy
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef mb_write
#undef mutex_boot
#undef narenas_auto
#undef narenas_total
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef npools
#undef npools_cnt
#undef opt_abort
#undef opt_dss
#undef opt_junk
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_purge
#undef pools_shared_data_initialized
#undef pow2_ceil
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_ctx_get
#undef prof_ctx_set
#undef prof_dump_open
#undef prof_free
#undef prof_gdump
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_record_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork
#undef prof_realloc
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tdata_booted
#undef prof_tdata_cleanup
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_initialized
#undef prof_tdata_tls
#undef prof_tdata_tsd
#undef prof_tdata_tsd_boot
#undef prof_tdata_tsd_cleanup_wrapper
#undef prof_tdata_tsd_get
#undef prof_tdata_tsd_get_wrapper
#undef prof_tdata_tsd_init_head
#undef prof_tdata_tsd_set
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_boot
#undef quarantine_booted
#undef quarantine_cleanup
#undef quarantine_init
#undef quarantine_tls
#undef quarantine_tsd
#undef quarantine_tsd_boot
#undef quarantine_tsd_cleanup_wrapper
#undef quarantine_tsd_get
#undef quarantine_tsd_get_wrapper
#undef quarantine_tsd_init_head
#undef quarantine_tsd_set
#undef register_zone
#undef rtree_delete
#undef rtree_get
#undef rtree_get_locked
#undef rtree_new
#undef rtree_postfork_child
#undef rtree_postfork_parent
#undef rtree_prefork
#undef rtree_set
#undef s2u
#undef sa2u
#undef set_errno
#undef small_bin2size
#undef small_bin2size_compute
#undef small_bin2size_lookup
#undef small_bin2size_tab
#undef small_s2u
#undef small_s2u_compute
#undef small_s2u_lookup
#undef small_size2bin
#undef small_size2bin_compute
#undef small_size2bin_lookup
#undef small_size2bin_tab
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_chunks
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_associate
#undef tcache_arena_dissociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot0
#undef tcache_boot1
#undef tcache_booted
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_destroy
#undef tcache_enabled_booted
#undef tcache_enabled_get
#undef tcache_enabled_initialized
#undef tcache_enabled_set
#undef tcache_enabled_tls
#undef tcache_enabled_tsd
#undef tcache_enabled_tsd_boot
#undef tcache_enabled_tsd_cleanup_wrapper
#undef tcache_enabled_tsd_get
#undef tcache_enabled_tsd_get_wrapper
#undef tcache_enabled_tsd_init_head
#undef tcache_enabled_tsd_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_initialized
#undef tcache_maxclass
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcache_thread_cleanup
#undef tcache_tls
#undef tcache_tsd
#undef tcache_tsd_boot
#undef tcache_tsd_cleanup_wrapper
#undef tcache_tsd_get
#undef tcache_tsd_get_wrapper
#undef tcache_tsd_init_head
#undef tcache_tsd_set
#undef thread_allocated_booted
#undef thread_allocated_initialized
#undef thread_allocated_tls
#undef thread_allocated_tsd
#undef thread_allocated_tsd_boot
#undef thread_allocated_tsd_cleanup_wrapper
#undef thread_allocated_tsd_get
#undef thread_allocated_tsd_get_wrapper
#undef thread_allocated_tsd_init_head
#undef thread_allocated_tsd_set
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef pool_new
#undef pool_destroy
#undef pools_lock
#undef pool_base_lock
#undef pool_prefork
#undef pool_postfork_parent
#undef pool_postfork_child
#undef pool_alloc
#undef vec_get
#undef vec_set
#undef vec_delete
| 11,246 | 23.396963 | 44 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/err.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* err.h - error and warning messages
*/
#ifndef ERR_H
#define ERR_H 1
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
/*
* err - windows implementation of unix err function
*/
__declspec(noreturn) static void
err(int eval, const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
vfprintf(stderr, fmt, vl);
va_end(vl);
exit(eval);
}
/*
* warn - windows implementation of unix warn function
*/
static void
warn(const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
fprintf(stderr, "Warning: ");
vfprintf(stderr, fmt, vl);
va_end(vl);
}
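/*
 * Example usage (illustrative only). Note that, unlike the BSD originals,
 * these implementations do not append strerror(errno) to the message:
 *
 *	if (fd < 0)
 *		err(1, "cannot open %s\n", path);
 *	if (len == 0)
 *		warn("file %s is empty\n", path);
 */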
#endif /* ERR_H */
| 2,190 | 29.859155 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sched.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sched.h
*/
| 1,620 | 44.027778 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/win_mmap.h
|
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
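/*
 * Examples (illustrative only):
 *	roundup(10, 4096)   == 4096
 *	rounddown(10, 4096) == 0
 *	roundup(8192, 4096) == 8192
 */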
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry;
HANDLE FileHandle;
HANDLE FileMappingHandle;
void *BaseAddress;
void *EndAddress;
DWORD Access;
os_off_t Offset;
size_t FileLen;
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
extern SRWLOCK FileMappingQLock;
extern SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
| 2,817 | 33.790123 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/platform.h
|
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
* This is included to each source file using "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non-Unicode characters */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
* The inline keyword is available only in VC++.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
* missing definitions
*/
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ);
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
__inline int
sigemptyset(sigset_t *set)
{
*set = 0;
return 0;
}
__inline int
sigfillset(sigset_t *set)
{
*set = ~0;
return 0;
}
__inline int
sigaddset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set |= (1ULL << (signum - 1));
return 0;
}
__inline int
sigdelset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set &= ~(1ULL << (signum - 1));
return 0;
}
__inline int
sigismember(const sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
return ((*set & (1ULL << (signum - 1))) ? 1 : 0);
}
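/*
 * Example usage of the sigset_t emulation above (illustrative only):
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	assert(sigismember(&set, SIGINT) == 1);
 *	sigdelset(&set, SIGINT);
 *	assert(sigismember(&set, SIGINT) == 0);
 */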
/* sched.h */
/*
* sched_yield -- yield the processor
*/
__inline int
sched_yield(void)
{
SwitchToThread();
return 0; /* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
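/*
 * Example (illustrative only; "mylib_init" and "mylib_fini" are
 * hypothetical names, not symbols used by this library):
 *
 *	MSVC_CONSTR(mylib_init)
 *	void mylib_init(void) { ... }	// runs before main()
 *
 *	MSVC_DESTR(mylib_fini)
 *	void mylib_fini(void) { ... }	// registered with atexit()
 */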
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
| 5,389 | 22.744493 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/libgen.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake libgen.h
*/
| 1,621 | 44.055556 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/endian.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
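/*
 * Examples (little-endian host assumed, as stated above):
 *	htole32(0x11223344) == 0x11223344   (identity)
 *	htobe32(0x11223344) == 0x44332211   (byte-swapped)
 */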
#endif /* ENDIAN_H */
| 2,211 | 34.677419 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/features.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake features.h
*/
| 1,623 | 44.111111 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/unistd.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* unistd.h -- compatibility layer for POSIX operating system API
*/
#ifndef UNISTD_H
#define UNISTD_H 1
#include <stdio.h>
#define _SC_PAGESIZE 0
#define _SC_NPROCESSORS_ONLN 1
#define R_OK 04
#define W_OK 02
#define X_OK 00 /* execute permission doesn't exist on Windows */
#define F_OK 00
/*
* sysconf -- get configuration information at run time
*/
static __inline long
sysconf(int p)
{
SYSTEM_INFO si;
int ret = 0;
switch (p) {
case _SC_PAGESIZE:
GetSystemInfo(&si);
return si.dwPageSize;
case _SC_NPROCESSORS_ONLN:
for (int i = 0; i < GetActiveProcessorGroupCount(); i++) {
ret += GetActiveProcessorCount(i);
}
return ret;
default:
return 0;
}
}
#define getpid _getpid
/*
* pread -- read from a file descriptor at given offset
*/
static ssize_t
pread(int fd, void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _read(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
/*
* pwrite -- write to a file descriptor at given offset
*/
static ssize_t
pwrite(int fd, const void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _write(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
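/*
 * Note: both emulations above save the current file position with
 * _lseeki64(), seek to the requested offset, perform the I/O, and restore
 * the previous position. Unlike POSIX pread/pwrite they are therefore not
 * atomic with respect to other users of the same file descriptor, and
 * 'count' is truncated to an unsigned int.
 */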
#define S_ISBLK(x) 0 /* BLK devices not exist on Windows */
/*
* basename -- parse pathname and return filename component
*/
static char *
basename(char *path)
{
char fname[_MAX_FNAME];
char ext[_MAX_EXT];
_splitpath(path, NULL, NULL, fname, ext);
sprintf(path, "%s%s", fname, ext);
return path;
}
/*
* dirname -- parse pathname and return directory component
*/
static char *
dirname(char *path)
{
if (path == NULL)
return ".";
size_t len = strlen(path);
if (len == 0)
return ".";
char *end = path + len;
	/* strip trailing forward slashes and backslashes */
while ((--end) > path) {
if (*end != '\\' && *end != '/') {
*(end + 1) = '\0';
break;
}
}
/* strip basename */
while ((--end) > path) {
if (*end == '\\' || *end == '/') {
*end = '\0';
break;
}
}
if (end != path) {
return path;
/* handle edge cases */
} else if (*end == '\\' || *end == '/') {
*(end + 1) = '\0';
} else {
*end++ = '.';
*end = '\0';
}
return path;
}
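/*
 * Examples (illustrative only; both functions modify their argument in
 * place, so pass a writable copy):
 *
 *	char p1[] = "C:\\temp\\pool.obj";
 *	basename(p1);	// p1 becomes "pool.obj"
 *
 *	char p2[] = "C:\\temp\\pool.obj";
 *	dirname(p2);	// p2 becomes "C:\\temp"
 */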
#endif /* UNISTD_H */
| 3,962 | 22.873494 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/strings.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake strings.h
*/
| 1,627 | 44.222222 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/dirent.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake dirent.h
*/
| 1,626 | 44.194444 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/uio.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/uio.h -- definition of iovec structure
*/
#ifndef SYS_UIO_H
#define SYS_UIO_H 1
#include <pmemcompat.h>
#ifdef __cplusplus
extern "C" {
#endif
ssize_t writev(int fd, const struct iovec *iov, int iovcnt);
#ifdef __cplusplus
}
#endif
#endif /* SYS_UIO_H */
| 1,874 | 34.377358 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/file.h
|
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
| 1,706 | 45.135135 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/statvfs.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake statvfs.h
*/
| 1,622 | 44.083333 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/param.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8 /* bits per byte */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
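/*
 * Examples (illustrative only):
 *	roundup(10, 8) == 16, howmany(10, 8) == 2
 *	setbit(b, 10) sets bit 2 of b[1]; isset(b, 10) is then non-zero
 *	MIN(3, 5) == 3, MAX(3, 5) == 5
 */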
#endif /* SYS_PARAM_H */
| 2,127 | 39.150943 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mount.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/mount.h
*/
| 1,629 | 44.277778 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mman.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/mman.h -- memory-mapped files for Windows
*/
#ifndef SYS_MMAN_H
#define SYS_MMAN_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define PROT_NONE 0x0
#define PROT_READ 0x1
#define PROT_WRITE 0x2
#define PROT_EXEC 0x4
#define MAP_SHARED 0x1
#define MAP_PRIVATE 0x2
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_NORESERVE 0x04000
#define MS_ASYNC 1
#define MS_SYNC 4
#define MS_INVALIDATE 2
#define MAP_FAILED ((void *)(-1))
void *mmap(void *addr, size_t len, int prot, int flags,
int fd, os_off_t offset);
int munmap(void *addr, size_t len);
int msync(void *addr, size_t len, int flags);
int mprotect(void *addr, size_t len, int prot);
#ifdef __cplusplus
}
#endif
#endif /* SYS_MMAN_H */
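/*
 * Editorial note (hypothetical usage sketch, not part of the original
 * header), assuming an open file descriptor `fd` whose backing file is at
 * least `len` bytes; error handling is reduced to an early return:
 *
 * void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 * if (addr == MAP_FAILED)
 * return;
 * ((char *)addr)[0] = 'x'; /+ modify the mapping +/
 * msync(addr, len, MS_SYNC); /+ flush changes to the file +/
 * munmap(addr, len);
 */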
| 2,357 | 30.026316 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/resource.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/resource.h
*/
| 1,627 | 44.222222 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/wait.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/wait.h
*/
| 1,628 | 44.25 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/linux/limits.h
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/limits.h -- fake header file
*/
/*
* XXX - The only purpose of this empty file is to avoid preprocessor
* errors when including a Linux-specific header file that has no equivalent
* on Windows. With this cheap trick, we don't need a lot of preprocessor
* conditionals in all the source code files.
*
* In the future, this will be addressed in some other way.
*/
| 1,986 | 43.155556 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/coverage.sh
|
#!/bin/sh
set -e
objdir=$1
suffix=$2
shift 2
objs=$@
gcov -b -p -f -o "${objdir}" ${objs}
# Move gcov outputs so that subsequent gcov invocations won't clobber results
# for the same sources with different compilation flags.
for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
mv "${f}" "${f}.${suffix}"
done
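# Editorial note (hypothetical invocation, not part of the original script):
# a typical call looks like
# ./coverage.sh src pic src/arena.pic.o src/chunk.pic.o
# after which every *.gcov file produced in the current directory is renamed
# by appending ".pic", so later runs with other flags do not overwrite it.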
| 321 | 17.941176 | 77 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/autogen.sh
|
#!/bin/sh
for i in autoconf; do
echo "$i"
$i
ret=$?
if [ $ret -ne 0 ]; then
echo "Error $ret in $i"
exit 1
fi
done
echo "./configure --enable-autogen $@"
./configure --enable-autogen $@
ret=$?
if [ $ret -ne 0 ]; then
echo "Error $ret in ./configure"
exit 1
fi
| 266 | 13.833333 | 38 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/win_autogen.sh
|
#!/bin/sh
# Copyright 2016, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
JEMALLOC_GEN=./../windows/jemalloc_gen
AC_PATH=./../../jemalloc
autoconf
ret=$?
if [ $ret -ne 0 ]; then
echo "Error $ret in autoconf"
exit 1
fi
if [ ! -d "$JEMALLOC_GEN" ]; then
echo Creating... $JEMALLOC_GEN
mkdir "$JEMALLOC_GEN"
fi
cd $JEMALLOC_GEN
echo "Run configure..."
$AC_PATH/configure \
--enable-autogen \
CC=cl \
--enable-lazy-lock=no \
--without-export \
--with-jemalloc-prefix=je_vmem_ \
--with-private-namespace=je_vmem_ \
--disable-xmalloc \
--disable-munmap \
EXTRA_CFLAGS="-DJEMALLOC_LIBVMEM"
ret=$?
if [ $ret -ne 0 ]; then
echo "Error $ret in $AC_PATH/configure"
exit 1
fi
| 2,161 | 32.261538 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk_dss.c
|
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
return (NULL);
#endif
}
dss_prec_t
chunk_dss_prec_get(void)
{
dss_prec_t ret;
if (have_dss == false)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
if (have_dss == false)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false);
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a huge allocation request as a negative increment.
*/
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
size_t gap_size, cpad_size;
void *cpad, *dss_next;
intptr_t incr;
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
do {
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
chunksize_mask;
/*
* Compute how much chunk-aligned pad space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
cpad = (void *)((uintptr_t)dss_max + gap_size);
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
alignment);
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(&base_pool, cpad, cpad_size);
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
return (ret);
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
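/*
 * Editorial illustration (hypothetical numbers, not part of the original
 * source) of the padding arithmetic in chunk_alloc_dss(): with a 4 MiB
 * chunksize, an 8 MiB alignment request and sbrk(0) == 0x1003000,
 * gap_size = (0x400000 - 0x3000) & 0x3fffff = 0x3fd000
 * cpad = 0x1003000 + 0x3fd000 = 0x1400000 (chunk-aligned)
 * ret = ALIGNMENT_CEILING(0x1003000, 0x800000) = 0x1800000
 * cpad_size = 0x1800000 - 0x1400000 = 0x400000
 * so sbrk() is asked to grow by gap_size + cpad_size + size, the 4 MiB pad
 * chunk at cpad is recycled via chunk_unmap(), and ret is returned.
 */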
bool
chunk_in_dss(void *chunk)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_boot(void)
{
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
}
void
chunk_dss_prefork(void)
{
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
| 4,272 | 20.365 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/util.c
|
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define JEMALLOC_UTIL_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
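/*
 * Editorial note (not part of the original source): the *_BUFSIZE values
 * account for the worst-case textual form -- U2S_BUFSIZE covers an intmax_t
 * printed in base 2 plus a terminating NUL (65 bytes on a platform with a
 * 64-bit intmax_t), D2S adds one byte for a sign, O2S one byte for the
 * alternate-form leading '0', and X2S two bytes for a "0x"/"0X" prefix.
 * u2s() and friends build the string backwards from the end of the caller's
 * buffer and return a pointer into it.
 */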
/******************************************************************************/
/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*/
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
* je_malloc_message(...) throughout the code.
*/
void
malloc_write(const char *s)
{
if (je_malloc_message != NULL)
je_malloc_message(NULL, s);
else
wrtmessage(NULL, s);
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(err, buf, buflen));
#endif
}
uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
unsigned b;
bool neg;
const char *p, *ns;
p = nptr;
if (base < 0 || base == 1 || base > 36) {
ns = p;
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
p++;
break;
case '-':
neg = true;
/* Fall through. */
case '+':
p++;
/* Fall through. */
default:
goto label_prefix;
}
}
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
* " -x").
*/
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
if (b == 0)
b = 8;
if (b == 8)
p++;
break;
case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
if (b == 0)
b = 16;
if (b == 16)
p += 2;
break;
default:
break;
}
break;
default:
p++;
ret = 0;
goto label_return;
}
}
if (b == 0)
b = 10;
/* Convert. */
ret = 0;
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
uintmax_t pret = ret;
ret *= b;
ret += digit;
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
ret = UINTMAX_MAX;
goto label_return;
}
p++;
}
if (neg)
ret = -ret;
if (p == ns) {
/* No conversion performed. */
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
} else
*endptr = (char *)p;
}
return (ret);
}
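/*
 * Editorial examples (not part of the original source), following the
 * conversion rules implemented above:
 * malloc_strtoumax("0x1f", NULL, 0) == 31 (base inferred from "0x")
 * malloc_strtoumax("0755", NULL, 0) == 493 (base inferred as octal)
 * malloc_strtoumax("z", NULL, 10) == UINTMAX_MAX, errno set to EINVAL
 */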
static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
{
unsigned i;
i = U2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
s[i] = digits[x & 0xf];
x >>= 4;
} while (x > 0);
break;
} default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
assert(base >= 2 && base <= 36);
do {
i--;
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return (&s[i]);
}
static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p)
{
bool neg;
if ((neg = (x < 0)))
x = -x;
s = u2s(x, 10, false, s, slen_p);
if (neg)
sign = '-';
switch (sign) {
case '-':
if (neg == false)
break;
/* Fall through. */
case ' ':
case '+':
s--;
(*slen_p)++;
*s = sign;
break;
default: not_reached();
}
return (s);
}
static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
{
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
return (s);
}
static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
{
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return (s);
}
int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) \
str[i] = (c); \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = ((slen) <= size - i) ? (slen) : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += (slen); \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = ((width) == -1) ? 0 : (((slen) < (size_t)(width)) ? \
(size_t)(width) - (slen) : 0); \
if ((left_justify) == false && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if ((left_justify) && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
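/*
 * Editorial note (not part of the original source): GET_ARG_NUMERIC encodes
 * the length modifier in a single char -- '?' for a plain int, and 'l', 'q',
 * 'j', 't', 'z' for the usual C modifiers -- and ORs in 0x80 when the
 * unsigned variant of the argument should be fetched, as the %o/%u/%x
 * handlers below do.
 */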
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
f++;
/* Flags. */
while (true) {
switch (*f) {
case '#':
assert(alt_form == false);
alt_form = true;
break;
case '-':
assert(left_justify == false);
left_justify = true;
break;
case ' ':
assert(plus_space == false);
plus_space = true;
break;
case '+':
assert(plus_plus == false);
plus_plus = true;
break;
default: goto label_width;
}
f++;
}
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
f++;
if (width < 0) {
left_justify = true;
width = -width;
}
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
break;
} default:
break;
}
/* Width/precision separator. */
if (*f == '.')
f++;
else
goto label_length;
/* Precision. */
switch (*f) {
case '*':
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
prec = (int)uprec;
break;
}
default: break;
}
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
if (*f == 'l') {
len = 'q';
f++;
} else
len = 'l';
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
f++;
break;
default: break;
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
unsigned char val;
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
val = va_arg(ap, int);
buf[0] = val;
buf[1] = '\0';
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
if (s) {
slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
} else {
APPEND_S("(null)", 6);
}
f++;
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} default: not_reached();
}
break;
} default: {
APPEND_C(*f);
f++;
break;
}}
}
label_out:
if (i < size)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
}
JEMALLOC_ATTR(format(printf, 3, 4))
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
va_start(ap, format);
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return (ret);
}
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
cbopaque = NULL;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
| 14,080 | 20.49771 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/ctl.c
|
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"
#include "jemalloc/internal/pool.h"
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
*/
static malloc_mutex_t ctl_mtx; /* XXX separate mutex for each pool? */
static uint64_t ctl_epoch;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
static inline const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
static inline const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL);
}
static inline const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{
return ((node->named == false) ? (const ctl_indexed_node_t *)node :
NULL);
}
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(const size_t *mib, \
size_t miblen, size_t i);
static bool ctl_arena_init(pool_t *pool, ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(pool_t *pool);
static void ctl_refresh_pool(pool_t *pool);
static void ctl_refresh(void);
static bool ctl_init_pool(pool_t *pool);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
INDEX_PROTO(thread_pool_i)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void arena_purge(pool_t *pool, unsigned arena_ind);
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_chunk_alloc)
CTL_PROTO(arena_i_chunk_dalloc)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
INDEX_PROTO(pool_i)
CTL_PROTO(pools_npools)
CTL_PROTO(pool_i_base)
CTL_PROTO(pool_i_size)
/******************************************************************************/
/* mallctl tree. */
/* Maximum tree depth. */
#define CTL_MAX_DEPTH 8
#define NAME(n) {true}, n
#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
#define INDEX(i) {false}, i##_index
static const ctl_named_node_t tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
static const ctl_named_node_t thread_pool_i_node[] = {
{NAME("arena"), CTL(thread_arena)},
};
static const ctl_named_node_t super_thread_pool_i_node[] = {
{NAME(""), CHILD(named, thread_pool_i)}
};
static const ctl_indexed_node_t thread_pool_node[] = {
{INDEX(thread_pool_i)}
};
static const ctl_named_node_t thread_node[] = {
{NAME("pool"), CHILD(indexed, thread_pool)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, tcache)}
};
static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
{NAME("tcache"), CTL(config_tcache)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
{NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("quarantine"), CTL(opt_quarantine)},
{NAME("redzone"), CTL(opt_redzone)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
static const ctl_named_node_t chunk_node[] = {
{NAME("alloc"), CTL(arena_i_chunk_alloc)},
{NAME("dalloc"), CTL(arena_i_chunk_dalloc)}
};
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("chunk"), CHILD(named, chunk)},
};
static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
{INDEX(arena_i)}
};
static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
{NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
{NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
static const ctl_named_node_t arenas_lrun_i_node[] = {
{NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
{NAME(""), CHILD(named, arenas_lrun_i)}
};
static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
{NAME("nbins"), CTL(arenas_nbins)},
{NAME("nhbins"), CTL(arenas_nhbins)},
{NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)},
{NAME("extend"), CTL(arenas_extend)}
};
static const ctl_named_node_t prof_node[] = {
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("interval"), CTL(prof_interval)}
};
static const ctl_named_node_t stats_chunks_node[] = {
{NAME("current"), CTL(stats_chunks_current)},
{NAME("total"), CTL(stats_chunks_total)},
{NAME("high"), CTL(stats_chunks_high)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_huge_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)},
};
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
{NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
{NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};
static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
static const ctl_named_node_t pool_stats_node[] = {
{NAME("chunks"), CHILD(named, stats_chunks)},
{NAME("arenas"), CHILD(indexed, stats_arenas)},
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)}
};
static const ctl_named_node_t pools_node[] = {
{NAME("npools"), CTL(pools_npools)},
};
static const ctl_named_node_t pool_i_node[] = {
{NAME("mem_base"), CTL(pool_i_base)},
{NAME("mem_size"), CTL(pool_i_size)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("stats"), CHILD(named, pool_stats)}
};
static const ctl_named_node_t super_pool_i_node[] = {
{NAME(""), CHILD(named, pool_i)}
};
static const ctl_indexed_node_t pool_node[] = {
{INDEX(pool_i)}
};
static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
{NAME("pool"), CHILD(indexed, pool)},
{NAME("pools"), CHILD(named, pools)},
{NAME("prof"), CHILD(named, prof)}
};
static const ctl_named_node_t super_root_node[] = {
{NAME(""), CHILD(named, root)}
};
#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
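/*
 * Editorial note (illustrative, not part of the original source): with the
 * tree above, a lookup such as ctl_byname("pool.0.arenas.narenas", ...)
 * walks root -> "pool" (indexed, element 0) -> "arenas" -> "narenas", a
 * terminal node, so the call should end up in arenas_narenas_ctl() with
 * mib = {5, 0, 3, 0} and depth 4.
 */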
/******************************************************************************/
static bool
ctl_arena_init(pool_t *pool, ctl_arena_stats_t *astats)
{
if (astats->lstats == NULL) {
astats->lstats = (malloc_large_stats_t *)base_alloc(pool, nlclasses *
sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
return (false);
}
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
astats->dss = dss_prec_names[dss_prec_limit];
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
memset(&astats->astats, 0, sizeof(arena_stats_t));
astats->allocated_small = 0;
astats->nmalloc_small = 0;
astats->ndalloc_small = 0;
astats->nrequests_small = 0;
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
}
}
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
&cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated;
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
}
static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
unsigned i;
sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty;
sstats->astats.mapped += astats->astats.mapped;
sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->astats.allocated_large += astats->astats.allocated_large;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < NBINS; i++) {
sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
}
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
}
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
pool_t *pool = arena->pool;
ctl_arena_stats_t *astats = &pool->ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &pool->ctl_stats.arenas[pool->ctl_stats.narenas];
ctl_arena_clear(astats);
sstats->nthreads += astats->nthreads;
if (config_stats) {
ctl_arena_stats_amerge(astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
} else {
astats->pactive += arena->nactive;
astats->pdirty += arena->ndirty;
/* Merge into sum stats as well. */
sstats->pactive += arena->nactive;
sstats->pdirty += arena->ndirty;
}
}
static bool
ctl_grow(pool_t *pool)
{
ctl_arena_stats_t *astats;
arena_t **tarenas;
/* Allocate extended arena stats and arenas arrays. */
astats = (ctl_arena_stats_t *)imalloc((pool->ctl_stats.narenas + 2) *
sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
tarenas = (arena_t **)imalloc((pool->ctl_stats.narenas + 1) *
sizeof(arena_t *));
if (tarenas == NULL) {
idalloc(astats);
return (true);
}
/* Initialize the new astats element. */
memcpy(astats, pool->ctl_stats.arenas, (pool->ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[pool->ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(pool, &astats[pool->ctl_stats.narenas + 1])) {
idalloc(tarenas);
idalloc(astats);
return (true);
}
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
memcpy(&tstats, &astats[pool->ctl_stats.narenas],
sizeof(ctl_arena_stats_t));
memcpy(&astats[pool->ctl_stats.narenas],
&astats[pool->ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
memcpy(&astats[pool->ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
/* Initialize the new arenas element. */
tarenas[pool->ctl_stats.narenas] = NULL;
{
arena_t **arenas_old = pool->arenas;
/*
* Swap extended arenas array into place. Although ctl_mtx
* protects this function from other threads extending the
* array, it does not protect from other threads mutating it
* (i.e. initializing arenas and setting array elements to
* point to them). Therefore, array copying must happen under
* the protection of arenas_lock.
*/
malloc_rwlock_wrlock(&pool->arenas_lock);
pool->arenas = tarenas;
memcpy(pool->arenas, arenas_old, pool->ctl_stats.narenas *
sizeof(arena_t *));
pool->narenas_total++;
arenas_extend(pool, pool->narenas_total - 1);
malloc_rwlock_unlock(&pool->arenas_lock);
/*
* Deallocate arenas_old only if it came from imalloc() (not
* base_alloc()).
*/
if (pool->ctl_stats.narenas != pool->narenas_auto)
idalloc(arenas_old);
}
pool->ctl_stats.arenas = astats;
pool->ctl_stats.narenas++;
return (false);
}
static void
ctl_refresh_pool(pool_t *pool)
{
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, pool->ctl_stats.narenas);
if (config_stats) {
malloc_mutex_lock(&pool->chunks_mtx);
pool->ctl_stats.chunks.current = pool->stats_chunks.curchunks;
pool->ctl_stats.chunks.total = pool->stats_chunks.nchunks;
pool->ctl_stats.chunks.high = pool->stats_chunks.highchunks;
malloc_mutex_unlock(&pool->chunks_mtx);
}
/*
* Clear sum stats, since ctl_arena_refresh() will merge the per-arena
* stats into them.
*/
pool->ctl_stats.arenas[pool->ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&pool->ctl_stats.arenas[pool->ctl_stats.narenas]);
malloc_rwlock_wrlock(&pool->arenas_lock);
memcpy(tarenas, pool->arenas, sizeof(arena_t *) * pool->ctl_stats.narenas);
for (i = 0; i < pool->ctl_stats.narenas; i++) {
if (pool->arenas[i] != NULL)
pool->ctl_stats.arenas[i].nthreads = pool->arenas[i]->nthreads;
else
pool->ctl_stats.arenas[i].nthreads = 0;
}
malloc_rwlock_unlock(&pool->arenas_lock);
for (i = 0; i < pool->ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
pool->ctl_stats.arenas[i].initialized = initialized;
if (initialized)
ctl_arena_refresh(tarenas[i], i);
}
if (config_stats) {
pool->ctl_stats_allocated =
pool->ctl_stats.arenas[pool->ctl_stats.narenas].allocated_small
+ pool->ctl_stats.arenas[pool->ctl_stats.narenas].astats.allocated_large
+ pool->ctl_stats.arenas[pool->ctl_stats.narenas].astats.allocated_huge;
pool->ctl_stats_active =
(pool->ctl_stats.arenas[pool->ctl_stats.narenas].pactive << LG_PAGE);
pool->ctl_stats_mapped = (pool->ctl_stats.chunks.current << opt_lg_chunk);
}
ctl_epoch++;
}
static void
ctl_refresh(void)
{
for (size_t i = 0; i < npools; ++i) {
if (pools[i] != NULL) {
ctl_refresh_pool(pools[i]);
}
}
}
static bool
ctl_init_pool(pool_t *pool)
{
bool ret;
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
assert(pool->narenas_auto == narenas_total_get(pool));
pool->ctl_stats.narenas = pool->narenas_auto;
pool->ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(pool,
(pool->ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (pool->ctl_stats.arenas == NULL) {
ret = true;
goto label_return;
}
memset(pool->ctl_stats.arenas, 0, (pool->ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
* Initialize all stats structures, regardless of whether they
* ever get used. Lazy initialization would allow errors to
* cause inconsistent state to be viewable by the application.
*/
if (config_stats) {
unsigned i;
for (i = 0; i <= pool->ctl_stats.narenas; i++) {
if (ctl_arena_init(pool, &pool->ctl_stats.arenas[i])) {
ret = true;
goto label_return;
}
}
}
pool->ctl_stats.arenas[pool->ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh_pool(pool);
pool->ctl_initialized = true;
ret = false;
label_return:
return (ret);
}
static bool
ctl_init(void)
{
bool ret;
malloc_mutex_lock(&ctl_mtx);
for (size_t i = 0; i < npools; ++i) {
if (pools[i] != NULL && pools[i]->ctl_initialized == false) {
if (ctl_init_pool(pools[i])) {
ret = true;
goto label_return;
}
}
}
/* false means that the function succeeded */
ret = false;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
const ctl_named_node_t *node;
elm = name;
/* Equivalent to strchrnul(). */
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
if (elen == 0) {
ret = ENOENT;
goto label_return;
}
node = super_root_node;
for (i = 0; i < *depthp; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
const ctl_named_node_t *pnode = node;
/* Children are named. */
for (j = 0; j < node->nchildren; j++) {
const ctl_named_node_t *child =
ctl_named_children(node, j);
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
if (nodesp != NULL)
nodesp[i] =
(const ctl_node_t *)node;
mibp[i] = j;
break;
}
}
if (node == pnode) {
ret = ENOENT;
goto label_return;
}
} else {
uintmax_t index;
const ctl_indexed_node_t *inode;
/* Children are indexed. */
index = malloc_strtoumax(elm, NULL, 10);
if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
ret = ENOENT;
goto label_return;
}
inode = ctl_indexed_node(node->children);
node = inode->index(mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
if (nodesp != NULL)
nodesp[i] = (const ctl_node_t *)node;
mibp[i] = (size_t)index;
}
if (node->ctl != NULL) {
/* Terminal node. */
if (*dot != '\0') {
/*
* The name contains more elements than are
* in this path through the tree.
*/
ret = ENOENT;
goto label_return;
}
/* Complete lookup successful. */
*depthp = i + 1;
break;
}
/* Update elm. */
if (*dot == '\0') {
/* No more elements. */
ret = ENOENT;
goto label_return;
}
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
ret = 0;
label_return:
return (ret);
}
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
if (ctl_init()) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl)
ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
label_return:
return(ret);
}
int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
if (ctl_init()) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
const ctl_named_node_t *node;
size_t i;
if (ctl_init()) {
ret = EAGAIN;
goto label_return;
}
/* Iterate down the tree. */
node = super_root_node;
for (i = 0; i < miblen; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
if (node->nchildren <= mib[i]) {
ret = ENOENT;
goto label_return;
}
node = ctl_named_children(node, mib[i]);
} else {
const ctl_indexed_node_t *inode;
/* Indexed element. */
inode = ctl_indexed_node(node->children);
node = inode->index(mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
}
}
/* Call the ctl function. */
if (node && node->ctl)
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
else {
/* Partial MIB. */
ret = ENOENT;
}
label_return:
return(ret);
}
bool
ctl_boot(void)
{
if (malloc_mutex_init(&ctl_mtx))
return (true);
return (false);
}
void
ctl_prefork(void)
{
malloc_mutex_prefork(&ctl_mtx);
}
void
ctl_postfork_parent(void)
{
malloc_mutex_postfork_parent(&ctl_mtx);
}
void
ctl_postfork_child(void)
{
malloc_mutex_postfork_child(&ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
} else \
*(t *)oldp = (v); \
} \
} while (0)
#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
goto label_return; \
} \
(v) = *(t *)newp; \
} \
} while (0)
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
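/*
 * Editorial note (not part of the original source): the generators below
 * differ only in what they guard -- CTL_RO_CLGEN takes both a config
 * condition and an optional ctl_mtx lock, CTL_RO_CGEN and CTL_RO_GEN always
 * lock ctl_mtx (with and without a config condition), the *_NL_* variants
 * skip the lock entirely, and CTL_RO_BOOL_CONFIG_GEN reads a compile-time
 * config flag.
 */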
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if ((c) == false) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
if (l) \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if ((c) == false) \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if ((c) == false) \
return (ENOENT); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return (ret); \
}
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return (ret); \
}
#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
bool oldval; \
\
READONLY(); \
oldval = n; \
READ(oldval, bool); \
\
ret = 0; \
label_return: \
return (ret); \
}
/******************************************************************************/
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
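/*
 * Editorial note (not part of the original source): writing any value to
 * the "epoch" mallctl refreshes the cached statistics for every pool via
 * ctl_refresh(), and reading it returns the current refresh counter.
 */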
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
ctl_refresh();
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
unsigned newind, oldind;
size_t pool_ind = mib[1];
pool_t *pool;
arena_t dummy;
if (pool_ind >= npools)
return (ENOENT);
pool = pools[pool_ind];
DUMMY_ARENA_INITIALIZE(dummy, pool);
tsd_tcache_t *tcache_tsd = tcache_tsd_get();
if (tcache_tsd->npools <= pool_ind) {
assert(pool_ind < POOLS_MAX);
size_t npools = 1ULL << (32 - __builtin_clz(pool_ind + 1));
if (npools < POOLS_MIN)
npools = POOLS_MIN;
unsigned *tseqno = base_malloc_fn(npools * sizeof (unsigned));
if (tseqno == NULL)
return (ENOMEM);
if (tcache_tsd->seqno != NULL)
memcpy(tseqno, tcache_tsd->seqno, tcache_tsd->npools * sizeof (unsigned));
memset(&tseqno[tcache_tsd->npools], 0, (npools - tcache_tsd->npools) * sizeof (unsigned));
tcache_t **tcaches = base_malloc_fn(npools * sizeof (tcache_t *));
if (tcaches == NULL) {
base_free_fn(tseqno);
return (ENOMEM);
}
if (tcache_tsd->tcaches != NULL)
memcpy(tcaches, tcache_tsd->tcaches, tcache_tsd->npools * sizeof (tcache_t *));
memset(&tcaches[tcache_tsd->npools], 0, (npools - tcache_tsd->npools) * sizeof (tcache_t *));
base_free_fn(tcache_tsd->seqno);
tcache_tsd->seqno = tseqno;
base_free_fn(tcache_tsd->tcaches);
tcache_tsd->tcaches = tcaches;
tcache_tsd->npools = npools;
}
malloc_mutex_lock(&ctl_mtx);
arena_t *arena = choose_arena(&dummy);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
newind = oldind = arena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
arena_t *arena;
tsd_pool_t *tsd;
if (newind >= pool->ctl_stats.narenas) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
}
/* Initialize arena if necessary. */
malloc_rwlock_wrlock(&pool->arenas_lock);
if ((arena = pool->arenas[newind]) == NULL && (arena =
arenas_extend(pool, newind)) == NULL) {
malloc_rwlock_unlock(&pool->arenas_lock);
ret = EAGAIN;
goto label_return;
}
assert(arena == pool->arenas[newind]);
pool->arenas[oldind]->nthreads--;
pool->arenas[newind]->nthreads++;
malloc_rwlock_unlock(&pool->arenas_lock);
/* Set new arena association. */
if (config_tcache) {
tcache_t *tcache = tcache_tsd->tcaches[pool->pool_id];
if ((uintptr_t)(tcache) > (uintptr_t)TCACHE_STATE_MAX) {
if(tcache_tsd->seqno[pool->pool_id] == pool->seqno)
tcache_arena_dissociate(tcache);
tcache_arena_associate(tcache, arena);
tcache_tsd->seqno[pool->pool_id] = pool->seqno;
}
}
tsd = arenas_tsd_get();
tsd->seqno[0] = pool->seqno;
tsd->arenas[0] = arena;
}
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_NL_CGEN(config_stats, thread_allocated,
thread_allocated_tsd_get()->allocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
&thread_allocated_tsd_get()->allocated, uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated,
thread_allocated_tsd_get()->deallocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
&thread_allocated_tsd_get()->deallocated, uint64_t *)
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
if (config_tcache == false)
return (ENOENT);
oldval = tcache_enabled_get();
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
tcache_enabled_set(*(bool *)newp);
}
READ(oldval, bool);
ret = 0;
label_return:
return (ret);
}
static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (config_tcache == false)
return (ENOENT);
READONLY();
WRITEONLY();
tcache_flush(pools[0]);
ret = 0;
label_return:
return (ret);
}
/******************************************************************************/
/* ctl_mtx must be held during execution of this function. */
static void
arena_purge(pool_t *pool, unsigned arena_ind)
{
VARIABLE_ARRAY(arena_t *, tarenas, pool->ctl_stats.narenas);
malloc_rwlock_wrlock(&pool->arenas_lock);
memcpy(tarenas, pool->arenas, sizeof(arena_t *) * pool->ctl_stats.narenas);
malloc_rwlock_unlock(&pool->arenas_lock);
if (arena_ind == pool->ctl_stats.narenas) {
unsigned i;
for (i = 0; i < pool->ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]);
}
} else {
assert(arena_ind < pool->ctl_stats.narenas);
if (tarenas[arena_ind] != NULL)
arena_purge_all(tarenas[arena_ind]);
}
}
static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
if (mib[1] >= npools)
return (ENOENT);
READONLY();
WRITEONLY();
malloc_mutex_lock(&ctl_mtx);
arena_purge(pools[mib[1]], mib[3]);
malloc_mutex_unlock(&ctl_mtx);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret, i;
bool match, err;
const char *dss = "";
size_t pool_ind = mib[1];
size_t arena_ind = mib[3];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
pool_t *pool;
if (pool_ind >= npools)
return (ENOENT);
malloc_mutex_lock(&ctl_mtx);
pool = pools[pool_ind];
WRITE(dss, const char *);
match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strcmp(dss_prec_names[i], dss) == 0) {
dss_prec = i;
match = true;
break;
}
}
if (match == false) {
ret = EINVAL;
goto label_return;
}
if (arena_ind < pool->ctl_stats.narenas) {
arena_t *arena = pool->arenas[arena_ind];
if (arena != NULL) {
dss_prec_old = arena_dss_prec_get(arena);
err = arena_dss_prec_set(arena, dss_prec);
} else
err = true;
} else {
dss_prec_old = chunk_dss_prec_get();
err = chunk_dss_prec_set(dss_prec);
}
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
if (err) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
size_t pool_ind = mib[1];
size_t arena_ind = mib[3];
arena_t *arena;
pool_t *pool;
if (pool_ind >= npools)
return (ENOENT);
malloc_mutex_lock(&ctl_mtx);
pool = pools[pool_ind];
if (arena_ind < pool->narenas_total && (arena = pool->arenas[arena_ind]) != NULL) {
malloc_mutex_lock(&arena->lock);
READ(arena->chunk_alloc, chunk_alloc_t *);
WRITE(arena->chunk_alloc, chunk_alloc_t *);
/*
		 * There could be a direct jump to label_return from inside the
		 * READ/WRITE macros, which is why the arena mutex must be
		 * unlocked there.
*/
} else {
ret = EFAULT;
goto label_outer_return;
}
ret = 0;
label_return:
malloc_mutex_unlock(&arena->lock);
label_outer_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
size_t pool_ind = mib[1];
size_t arena_ind = mib[3];
arena_t *arena;
pool_t *pool;
if (pool_ind >= npools)
return (ENOENT);
malloc_mutex_lock(&ctl_mtx);
pool = pools[pool_ind];
if (arena_ind < pool->narenas_total && (arena = pool->arenas[arena_ind]) != NULL) {
malloc_mutex_lock(&arena->lock);
READ(arena->chunk_dalloc, chunk_dalloc_t *);
WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
/*
		 * There could be a direct jump to label_return from inside the
		 * READ/WRITE macros, which is why the arena mutex must be
		 * unlocked there.
*/
} else {
ret = EFAULT;
goto label_outer_return;
}
ret = 0;
label_return:
malloc_mutex_unlock(&arena->lock);
label_outer_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
if (i > pools[mib[1]]->ctl_stats.narenas) {
ret = NULL;
goto label_return;
}
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
narenas = pools[mib[1]]->ctl_stats.narenas;
READ(narenas, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
pool_t *pool;
malloc_mutex_lock(&ctl_mtx);
READONLY();
pool = pools[mib[1]];
if (*oldlenp != pool->ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < pool->ctl_stats.narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : pool->ctl_stats.narenas;
} else {
ret = 0;
nread = pool->ctl_stats.narenas;
}
for (i = 0; i < nread; i++)
((bool *)oldp)[i] = pool->ctl_stats.arenas[i].initialized;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[4]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[4]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[4]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
return (NULL);
return (super_arenas_bin_i_node);
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[4]+1) << LG_PAGE), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
return (NULL);
return (super_arenas_lrun_i_node);
}
static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
unsigned narenas;
unsigned pool_ind = mib[1];
pool_t *pool;
if (pool_ind >= npools)
return (ENOENT);
pool = pools[pool_ind];
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (ctl_grow(pool)) {
ret = EAGAIN;
goto label_return;
}
narenas = pool->ctl_stats.narenas - 1;
READ(narenas, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/**
* @stub
*/
static int
pools_npools_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
unsigned _npools;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
_npools = npools_cnt;
READ(_npools, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/**
* @stub
*/
static int
pool_i_base_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
malloc_mutex_lock(&ctl_mtx);
//TODO
malloc_mutex_unlock(&ctl_mtx);
ret = 0;
label_return:
return (ret);
}
/**
* @stub
*/
static int
pool_i_size_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
malloc_mutex_lock(&ctl_mtx);
//TODO
malloc_mutex_unlock(&ctl_mtx);
ret = 0;
label_return:
return (ret);
}
/**
* @stub
*/
static const ctl_named_node_t *
pool_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
if (i > npools) {
ret = NULL;
goto label_return;
}
ret = super_pool_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
bool oldval;
if (config_prof == false)
return (ENOENT);
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
/*
* The memory barriers will tend to make opt_prof_active
* propagate faster on systems with weak memory ordering.
*/
mb_write();
WRITE(opt_prof_active, bool);
mb_write();
}
READ(oldval, bool);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
if (config_prof == false)
return (ENOENT);
WRITEONLY();
WRITE(filename, const char *);
if (prof_mdump(filename)) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
return (ret);
}
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/
/*
 * @TODO remember to split up stats into arena-related stats and the rest
*/
CTL_RO_CGEN(config_stats, stats_cactive, &(pools[mib[1]]->stats_cactive), size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, pools[mib[1]]->ctl_stats_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, pools[mib[1]]->ctl_stats_active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, pools[mib[1]]->ctl_stats_mapped, size_t)
CTL_RO_CGEN(config_stats, stats_chunks_current, pools[mib[1]]->ctl_stats.chunks.current,
size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, pools[mib[1]]->ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, pools[mib[1]]->ctl_stats.chunks.high, size_t)
CTL_RO_GEN(stats_arenas_i_dss, pools[mib[1]]->ctl_stats.arenas[mib[4]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_nthreads, pools[mib[1]]->ctl_stats.arenas[mib[4]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, pools[mib[1]]->ctl_stats.arenas[mib[4]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, pools[mib[1]]->ctl_stats.arenas[mib[4]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.purged, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
pools[mib[1]]->ctl_stats.arenas[mib[4]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
pools[mib[1]]->ctl_stats.arenas[mib[4]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.nrequests_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.nmalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.ndalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
pools[mib[1]]->ctl_stats.arenas[mib[4]].astats.nrequests_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
pools[mib[1]]->ctl_stats.arenas[mib[4]].bstats[mib[6]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > NBINS)
return (NULL);
return (super_stats_arenas_i_bins_j_node);
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].lstats[mib[6]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
pools[mib[1]]->ctl_stats.arenas[mib[4]].lstats[mib[6]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
pools[mib[1]]->ctl_stats.arenas[mib[4]].lstats[mib[6]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
pools[mib[1]]->ctl_stats.arenas[mib[4]].lstats[mib[6]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nlclasses)
return (NULL);
return (super_stats_arenas_i_lruns_j_node);
}
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t *ret;
malloc_mutex_lock(&ctl_mtx);
if (i > pools[mib[1]]->ctl_stats.narenas ||
pools[mib[1]]->ctl_stats.arenas[i].initialized == false) {
ret = NULL;
goto label_return;
}
ret = super_stats_arenas_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
thread_pool_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t *ret;
malloc_mutex_lock(&ctl_mtx);
if (i > npools) {
ret = NULL;
goto label_return;
}
ret = super_thread_pool_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
| 55,070 | 26.869939 | 95 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/stats.c
|
#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET_ARRAY(n, v, t, c) do { \
size_t mib[8]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t) * (c); \
xmallctlnametomib(n, mib, &miblen); \
mib[1] = p; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET(n, v, t) CTL_P_GET_ARRAY(n, v, t, 1)
#define CTL_PI_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PIJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
mib[6] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
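/*
 * Illustrative sketch only, kept out of the build: the manual equivalent of
 * CTL_PI_GET("pool.%u.stats.arenas.0.pactive", &v, size_t). The macro formats
 * the pool id into the ctl name, translates the name to a mib, then patches
 * the pool and arena indices before the final lookup.
 */
#if 0
static void
example_ctl_pi_get(unsigned p, unsigned i, size_t *v)
{
	size_t mib[8];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t sz = sizeof(*v);
	char buf[256];
	snprintf(buf, sizeof(buf), "pool.%u.stats.arenas.0.pactive", p);
	xmallctlnametomib(buf, mib, &miblen);
	mib[1] = p;
	mib[4] = i;
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);
}
#endif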
/******************************************************************************/
/* Data. */
bool opt_stats_print = false;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i, bool bins, bool large);
/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page;
bool config_tcache;
unsigned nbins, j, gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_P_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
" newruns reruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns curruns\n");
}
CTL_P_GET("pool.0.arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
uint64_t nruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
if (nruns == 0) {
if (gap_start == UINT_MAX)
gap_start = j;
} else {
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
size_t curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u..%u]\n", gap_start,
j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
}
CTL_PJ_GET("pool.%u.arenas.bin.0.size", ®_size, size_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.nregs", &nregs, uint32_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.run_size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.allocated",
&allocated, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nmalloc",
&nmalloc, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.ndalloc",
&ndalloc, uint64_t);
if (config_tcache) {
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nrequests",
&nrequests, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nfills",
&nfills, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nflushes",
&nflushes, uint64_t);
}
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nrequests,
nfills, nflushes, nruns, reruns, curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nruns, reruns,
curruns);
}
}
}
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
gap_start, j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
}
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page, nlruns, j;
ssize_t gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
" curruns\n");
CTL_P_GET("pool.0.arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.ndalloc", &ndalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nrequests", &nrequests,
uint64_t);
if (nrequests == 0) {
if (gap_start == -1)
gap_start = j;
} else {
CTL_PJ_GET("pool.%u.arenas.lrun.0.size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
j - gap_start);
gap_start = -1;
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
run_size, run_size / page, nmalloc, ndalloc,
nrequests, curruns);
}
}
if (gap_start != -1)
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i, bool bins, bool large)
{
unsigned nthreads;
const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_PI_GET("pool.%u.stats.arenas.0.dss", &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
CTL_PI_GET("pool.%u.stats.arenas.0.pactive", &pactive, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.pdirty", &pdirty, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.npurge", &npurge, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nmadvise", &nmadvise, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.purged", &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
" %"PRIu64" madvise%s, %"PRIu64" purged\n",
pactive, pdirty, npurge, npurge == 1 ? "" : "s",
nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc nrequests\n");
CTL_PI_GET("pool.%u.stats.arenas.0.small.allocated", &small_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.large.allocated", &large_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.allocated", &huge_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_PI_GET("pool.%u.stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, p, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, p, i);
}
void
stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
unsigned p = pool->pool_id;
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
	 */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
}
}
}
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_P_GET("pool.0.arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
CTL_P_GET("pool.0.arenas.quantum", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_P_GET("pool.0.arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
CTL_P_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
== 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"Average profile sample interval: %"PRIu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"PRIu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
(ZU(1) << sv), sv);
}
if (config_stats) {
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
CTL_P_GET("pool.0.stats.cactive", &cactive, size_t *);
CTL_P_GET("pool.0.stats.allocated", &allocated, size_t);
CTL_P_GET("pool.0.stats.active", &active, size_t);
CTL_P_GET("pool.0.stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, mapped: %zu\n",
allocated, active, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_P_GET("pool.0.stats.chunks.total", &chunks_total, uint64_t);
CTL_P_GET("pool.0.stats.chunks.high", &chunks_high, size_t);
CTL_P_GET("pool.0.stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
if (merged) {
unsigned narenas;
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i, ninitialized;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
if (ninitialized > 1 || unmerged == false) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
p, narenas, bins, large);
}
}
}
if (unmerged) {
unsigned narenas;
/* Print stats for each arena. */
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = 0; i < narenas; i++) {
if (initialized[i]) {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, p, i, bins, large);
}
}
}
}
}
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
| 18,137 | 31.216696 | 82 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/vector.c
|
#define JEMALLOC_VECTOR_C_
#include "jemalloc/internal/jemalloc_internal.h"
/* Round the value up to the next power of two (strictly greater than n). */
static inline unsigned
ceil_p2(unsigned n)
{
return 1 << (32 - __builtin_clz(n));
}
/* Calculate how big the vector list array should be. */
static inline unsigned
get_vec_part_len(unsigned n)
{
return MAX(ceil_p2(n), VECTOR_MIN_PART_SIZE);
}
/*
 * Find the vector list element in which the value for the given index should
 * be stored; if no such list exists, return a pointer to the place in memory
 * where it should be allocated.
*/
static vec_list_t **
find_vec_list(vector_t *vector, int *index)
{
vec_list_t **vec_list;
for (vec_list = &vector->list;
*vec_list != NULL; vec_list = &(*vec_list)->next) {
if (*index < (*vec_list)->length)
break;
*index -= (*vec_list)->length;
}
return vec_list;
}
/* Return a value from vector at index. */
void *
vec_get(vector_t *vector, int index)
{
vec_list_t *vec_list = *find_vec_list(vector, &index);
return (vec_list == NULL) ? NULL : vec_list->data[index];
}
/* Set a value to vector at index. */
void
vec_set(vector_t *vector, int index, void *val)
{
vec_list_t **vec_list = find_vec_list(vector, &index);
/*
* There's no array to put the value in,
* which means a new one has to be allocated.
*/
if (*vec_list == NULL) {
int vec_part_len = get_vec_part_len(index);
*vec_list = base_malloc_fn(sizeof(vec_list_t) +
sizeof(void *) * vec_part_len);
if (*vec_list == NULL)
return;
(*vec_list)->next = NULL;
(*vec_list)->length = vec_part_len;
}
(*vec_list)->data[index] = val;
}
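/*
 * Illustrative sketch only, kept out of the build: basic use of the vector
 * container, assuming a zero-initialized vector_t is an empty container
 * (only the list head is ever inspected). Writing a sparse index allocates
 * a part large enough to hold it; indices beyond any allocated part read
 * back as NULL.
 */
#if 0
static void
example_vector_usage(void)
{
	vector_t v = {0};
	int x = 7;
	vec_set(&v, 100, &x);			/* allocates a part spanning index 100 */
	assert(vec_get(&v, 100) == &x);
	assert(vec_get(&v, 100000) == NULL);	/* beyond any allocated part */
	vec_delete(&v);
}
#endif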
/* Free all the memory in the container. */
void
vec_delete(vector_t *vector)
{
vec_list_t *vec_list_next, *vec_list = vector->list;
while (vec_list != NULL) {
vec_list_next = vec_list->next;
base_free_fn(vec_list);
vec_list = vec_list_next;
}
}
| 1,845 | 21.512195 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/zone.c
|
#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/*
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr, config_prof));
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
return (je_malloc(size));
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
return (je_calloc(num, size));
}
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
return (ret);
}
static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
return (ret);
}
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
free(ptr);
}
#endif
static void *
zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
not_reached();
return (NULL);
}
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
if (size == 0)
size = 1;
return (s2u(size));
}
static void
zone_force_lock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_prefork();
}
static void
zone_force_unlock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_postfork_parent();
}
JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
zone.pressure_relief = NULL;
#endif
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
if (malloc_default_purgeable_zone != NULL)
purgeable_zone = malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
do {
default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
* at the location of the specified zone. Unregistering the
* default zone thus makes the last registered one the default.
* On OSX < 10.6, unregistering shifts all registered zones.
* The first registered zone then becomes the default.
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
* owns the default zone allocated pointers. We thus unregister/
* re-register it in order to ensure it's always after the
* default zone. On OSX < 10.6, there is no purgeable zone, so
* this does nothing. On OSX >= 10.6, unregistering replaces the
* purgeable zone with the last registered zone above, i.e the
* default zone. Registering it again then puts it at the end,
* obviously after the default zone.
*/
if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
} while (malloc_default_zone() != &zone);
}
| 7,677 | 26.92 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/atomic.c
|
#define JEMALLOC_ATOMIC_C_
#include "jemalloc/internal/jemalloc_internal.h"
| 76 | 24.666667 | 48 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/bitmap.c
|
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t bits2groups(size_t nbits);
/******************************************************************************/
static size_t
bits2groups(size_t nbits)
{
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
!!(nbits & BITMAP_GROUP_NBITS_MASK));
}
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
unsigned i;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
/*
* Compute the number of groups necessary to store nbits bits, and
* progressively work upward through the levels until reaching a level
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = bits2groups(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = bits2groups(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
binfo->nlevels = i;
binfo->nbits = nbits;
}
size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
}
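/*
 * Illustrative sketch only, kept out of the build: sizing and initializing a
 * small bitmap. With 64-bit groups, nbits = 100 needs two level-0 groups plus
 * one summary group, so bitmap_size(100) reports three groups' worth of bytes.
 */
#if 0
static void
example_bitmap_usage(void)
{
	bitmap_info_t binfo;
	bitmap_t bits[8];	/* comfortably larger than needed here */
	bitmap_info_init(&binfo, 100);
	assert(bitmap_size(100) <= sizeof(bits));
	bitmap_init(bits, &binfo);	/* externally, all 100 bits start out unset */
}
#endif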
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface, so the bitmap starts out with all 1 bits, except for
* trailing unused bits (if any). Note that each group uses bit 0 to
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
| 2,516 | 26.659341 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/ckh.c
|
/*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key))
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
}
return (SIZE_T_MAX);
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
ckh->hash(key, hashes);
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
}
}
return (true);
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
data = *argdata;
while (true) {
/*
* Choose a random item within the bucket to evict. This is
* critical to correct function, because without (eventually)
* evicting all items within a bucket during iteration, it
* would be possible to get stuck in an infinite loop if there
* were an item for which both hashes indicated the same
* bucket.
*/
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
* we are guaranteed to eventually escape this bucket
* during iteration, assuming pseudo-random item
* selection (true randomness would make infinite
* looping a remote possibility). The reason we can
* never get trapped forever is that there are two
* cases:
*
* 1) This bucket == argbucket, so we will quickly
* detect an eviction cycle and terminate.
* 2) An item was evicted to this bucket from another,
* which means that at least one item in this bucket
* has hashes that indicate distinct buckets.
*/
}
/* Check for a cycle. */
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
}
bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
}
}
JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
ckh->count = 0;
for (i = nins = 0; nins < count; i++) {
if (aTab[i].key != NULL) {
key = aTab[i].key;
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
}
nins++;
}
}
return (false);
}
static bool
ckh_grow(ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
#endif
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table will have to be doubled more than once in order to create a
* usable table.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
return (ret);
}
static void
ckh_shrink(ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table rebuild will fail.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
* prevent this or future operations from proceeding.
*/
return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
return;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
ckh->nshrinkfails++;
#endif
}
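/*
 * Initialize a cuckoo hash table sized so that at least minitems items fit
 * without growing.  Returns true on error (e.g. OOM).
 */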
bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
ckh->ngrows = 0;
ckh->nshrinks = 0;
ckh->nshrinkfails = 0;
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
 * Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
 * factor that will typically allow minitems entries to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++)
; /* Do nothing. */
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
return (ret);
}
void
ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
(unsigned long long)ckh->nrelocs);
#endif
idalloc(ckh->tab);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
return (ckh->count);
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL)
*key = (void *)ckh->tab[i].key;
if (data != NULL)
*data = (void *)ckh->tab[i].data;
*tabind = i + 1;
return (false);
}
}
return (true);
}
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
bool ret;
assert(ckh != NULL);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
ckh->ninserts++;
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
goto label_return;
}
}
ret = false;
label_return:
return (ret);
}
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
}
return (false);
}
return (true);
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
return (false);
}
return (true);
}
void
ckh_string_hash(const void *key, size_t r_hash[2])
{
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
ckh_string_keycomp(const void *k1, const void *k2)
{
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
union {
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));
u.v = key;
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{
return ((k1 == k2) ? true : false);
}
| 13,888 | 23.625887 | 80 | c |
| null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/extent.c |
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
size_t a_size = a->size;
size_t b_size = b->size;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
extent_ad_comp)
| 973 | 23.35 | 80 | c |
| null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/base.c |
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
static bool
base_pages_alloc(pool_t *pool, size_t minsize)
{
size_t csize;
void* base_pages;
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
base_pages = chunk_alloc_base(pool, csize);
if (base_pages == NULL)
return (true);
pool->base_next_addr = base_pages;
pool->base_past_addr = (void *)((uintptr_t)base_pages + csize);
return (false);
}
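/*
 * Carve a cacheline-aligned block out of the pool's base pages, obtaining
 * additional base pages when the current run is exhausted.  Base allocations
 * hold internal metadata and are never freed individually.
 */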
void *
base_alloc(pool_t *pool, size_t size)
{
void *ret;
size_t csize;
/* Round size up to nearest multiple of the cacheline size. */
csize = CACHELINE_CEILING(size);
malloc_mutex_lock(&pool->base_mtx);
/* Make sure there's enough space for the allocation. */
if ((uintptr_t)pool->base_next_addr + csize > (uintptr_t)pool->base_past_addr) {
if (base_pages_alloc(pool, csize)) {
malloc_mutex_unlock(&pool->base_mtx);
return (NULL);
}
}
/* Allocate. */
ret = pool->base_next_addr;
pool->base_next_addr = (void *)((uintptr_t)pool->base_next_addr + csize);
malloc_mutex_unlock(&pool->base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
void *
base_calloc(pool_t *pool, size_t number, size_t size)
{
void *ret = base_alloc(pool, number * size);
if (ret != NULL)
memset(ret, 0, number * size);
return (ret);
}
extent_node_t *
base_node_alloc(pool_t *pool)
{
extent_node_t *ret;
malloc_mutex_lock(&pool->base_node_mtx);
if (pool->base_nodes != NULL) {
ret = pool->base_nodes;
pool->base_nodes = *(extent_node_t **)ret;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
sizeof(extent_node_t));
} else {
/* preallocated nodes for pools other than 0 */
if (pool->pool_id == 0) {
ret = (extent_node_t *)base_alloc(pool, sizeof(extent_node_t));
} else {
ret = NULL;
}
}
malloc_mutex_unlock(&pool->base_node_mtx);
return (ret);
}
void
base_node_dalloc(pool_t *pool, extent_node_t *node)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&pool->base_node_mtx);
*(extent_node_t **)node = pool->base_nodes;
pool->base_nodes = node;
malloc_mutex_unlock(&pool->base_node_mtx);
}
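/*
 * Preallocate the requested number of extent nodes and stash them on the
 * pool's free-node list.  Returns the number of nodes that could not be
 * allocated (0 on full success).
 */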
size_t
base_node_prealloc(pool_t *pool, size_t number)
{
extent_node_t *node;
malloc_mutex_lock(&pool->base_node_mtx);
for (; number > 0; --number) {
node = (extent_node_t *)base_alloc(pool, sizeof(extent_node_t));
if (node == NULL)
break;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = pool->base_nodes;
pool->base_nodes = node;
}
malloc_mutex_unlock(&pool->base_node_mtx);
/* return number of nodes that couldn't be allocated */
return number;
}
/*
* Called at each pool opening.
*/
bool
base_boot(pool_t *pool)
{
if (malloc_mutex_init(&pool->base_mtx))
return (true);
if (malloc_mutex_init(&pool->base_node_mtx))
return (true);
return (false);
}
/*
* Called only at pool creation.
*/
bool
base_init(pool_t *pool)
{
if (base_boot(pool))
return (true);
pool->base_nodes = NULL;
return (false);
}
void
base_prefork(pool_t *pool)
{
malloc_mutex_prefork(&pool->base_mtx);
}
void
base_postfork_parent(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->base_mtx);
}
void
base_postfork_child(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->base_mtx);
}
| 3,278 | 19.36646 | 81 | c |
| null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/valgrind.c |
#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_VALGRIND
# error "This source file is for Valgrind integration."
#endif
#include <valgrind/memcheck.h>
void
valgrind_make_mem_noaccess(void *ptr, size_t usize)
{
(void)VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
}
void
valgrind_make_mem_undefined(void *ptr, size_t usize)
{
(void)VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
}
void
valgrind_make_mem_defined(void *ptr, size_t usize)
{
(void)VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
}
void
valgrind_freelike_block(void *ptr, size_t usize)
{
VALGRIND_FREELIKE_BLOCK(ptr, usize);
}
| 599 | 16.142857 | 56 | c |
| null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/jemalloc.c |
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
malloc_tsd_data(, arenas, tsd_pool_t, TSD_POOL_INITIALIZER)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER)
/* Runtime configuration options. */
const char *je_malloc_conf;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else
false
#endif
;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;
unsigned npools_cnt; /* actual number of pools */
unsigned npools; /* size of the pools[] array */
unsigned ncpus;
pool_t **pools;
pool_t base_pool;
unsigned pool_seqno = 0;
bool pools_shared_data_initialized;
/*
* Custom malloc() and free() for shared data and for data needed to
 * initialize a pool. If these functions are not defined, base_pool will be
 * created and used for allocations from RAM.
*/
void *(*base_malloc_fn)(size_t);
void (*base_free_fn)(void *);
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
static bool base_pool_initialized = false;
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;
JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{
malloc_mutex_init(&init_lock);
malloc_mutex_init(&pools_lock);
malloc_mutex_init(&pool_base_lock);
}
#ifdef _MSC_VER
# pragma comment(linker, "/include:__init_init_lock")
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
const void (WINAPI *__init_init_lock)(void) = _init_init_lock;
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
void *p; /* Input pointer (as in realloc(p, s)). */
size_t s; /* Request size. */
void *r; /* Result pointer. */
} malloc_utrace_t;
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
if (opt_utrace) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
utrace(&ut, sizeof(ut)); \
errno = utrace_serrno; \
} \
} while (0)
#else
# define UTRACE(a, b, c) do { (void)(a); (void)(b); (void)(c); } while (0)
#endif
/* data structures for callbacks used in je_pool_check() to browse trees */
typedef struct {
pool_memory_range_node_t *list;
size_t size;
int error;
} check_data_cb_t;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static bool malloc_init_hard(void);
static bool malloc_init_base_pool(void);
static void *base_malloc_default(size_t size);
static void base_free_default(void *ptr);
/******************************************************************************/
/*
* Begin miscellaneous support functions.
*/
/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(pool_t *pool, unsigned ind)
{
arena_t *ret;
ret = (arena_t *)base_alloc(pool, sizeof(arena_t));
if (ret != NULL && arena_new(pool, ret, ind) == false) {
pool->arenas[ind] = ret;
return (ret);
}
/* Only reached if there is an OOM error. */
/*
* OOM here is quite inconvenient to propagate, since dealing with it
* would require a check for failure in the fast path. Instead, punt
* by using arenas[0]. In practice, this is an extremely unlikely
* failure.
*/
malloc_write("<jemalloc>: Error initializing arena\n");
if (opt_abort)
abort();
return (pool->arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(pool_t *pool)
{
arena_t *ret;
tsd_pool_t *tsd;
if (pool->narenas_auto > 1) {
unsigned i, choose, first_null;
choose = 0;
first_null = pool->narenas_auto;
malloc_rwlock_wrlock(&pool->arenas_lock);
assert(pool->arenas[0] != NULL);
for (i = 1; i < pool->narenas_auto; i++) {
if (pool->arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
if (pool->arenas[i]->nthreads <
pool->arenas[choose]->nthreads)
choose = i;
} else if (first_null == pool->narenas_auto) {
/*
* Record the index of the first uninitialized
* arena, in case all extant arenas are in use.
*
* NB: It is possible for there to be
* discontinuities in terms of initialized
* versus uninitialized arenas, due to the
* "thread.arena" mallctl.
*/
first_null = i;
}
}
if (pool->arenas[choose]->nthreads == 0
|| first_null == pool->narenas_auto) {
/*
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
*/
ret = pool->arenas[choose];
} else {
/* Initialize a new arena. */
ret = arenas_extend(pool, first_null);
}
ret->nthreads++;
malloc_rwlock_unlock(&pool->arenas_lock);
} else {
ret = pool->arenas[0];
malloc_rwlock_wrlock(&pool->arenas_lock);
ret->nthreads++;
malloc_rwlock_unlock(&pool->arenas_lock);
}
tsd = arenas_tsd_get();
tsd->seqno[pool->pool_id] = pool->seqno;
tsd->arenas[pool->pool_id] = ret;
return (ret);
}
static void
stats_print_atexit(void)
{
if (config_tcache && config_stats) {
unsigned narenas, i, j;
pool_t *pool;
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
* events. As a consequence, the final stats may be slightly
* out of date by the time they are reported, if other threads
* continue to allocate.
*/
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; i++) {
pool = pools[i];
if (pool != NULL) {
for (j = 0, narenas = narenas_total_get(pool); j < narenas; j++) {
arena_t *arena = pool->arenas[j];
if (arena != NULL) {
tcache_t *tcache;
/*
* tcache_stats_merge() locks bins, so if any
* code is introduced that acquires both arena
* and bin locks in the opposite order,
* deadlocks may result.
*/
malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tcache, arena);
}
malloc_mutex_unlock(&arena->lock);
}
}
}
}
malloc_mutex_unlock(&pools_lock);
}
je_malloc_stats_print(NULL, NULL, NULL);
}
/*
* End miscellaneous support functions.
*/
/******************************************************************************/
/*
* Begin initialization functions.
*/
static unsigned
malloc_ncpus(void)
{
long result;
#ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
return ((result == -1) ? 1 : (unsigned)result);
}
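/*
 * Grow the thread-local pool bookkeeping arrays (seqno and arenas) so that
 * they can hold at least len + 1 entries.  The new capacity is rounded up to
 * a power of two, but never below POOLS_MIN.  Returns true on OOM.
 */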
bool
arenas_tsd_extend(tsd_pool_t *tsd, unsigned len)
{
assert(len < POOLS_MAX);
/* round up the new length to the nearest power of 2... */
size_t npools = 1ULL << (32 - __builtin_clz(len + 1));
	/* ... but not less than POOLS_MIN */
if (npools < POOLS_MIN)
npools = POOLS_MIN;
unsigned *tseqno = base_malloc_fn(npools * sizeof (unsigned));
if (tseqno == NULL)
return (true);
if (tsd->seqno != NULL)
memcpy(tseqno, tsd->seqno, tsd->npools * sizeof (unsigned));
memset(&tseqno[tsd->npools], 0, (npools - tsd->npools) * sizeof (unsigned));
arena_t **tarenas = base_malloc_fn(npools * sizeof (arena_t *));
if (tarenas == NULL) {
base_free_fn(tseqno);
return (true);
}
if (tsd->arenas != NULL)
memcpy(tarenas, tsd->arenas, tsd->npools * sizeof (arena_t *));
memset(&tarenas[tsd->npools], 0, (npools - tsd->npools) * sizeof (arena_t *));
base_free_fn(tsd->seqno);
tsd->seqno = tseqno;
base_free_fn(tsd->arenas);
tsd->arenas = tarenas;
tsd->npools = npools;
return (false);
}
void
arenas_cleanup(void *arg)
{
unsigned i;
pool_t *pool;
tsd_pool_t *tsd = arg;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < tsd->npools; i++) {
pool = pools[i];
if (pool != NULL) {
if (pool->seqno == tsd->seqno[i] && tsd->arenas[i] != NULL) {
malloc_rwlock_wrlock(&pool->arenas_lock);
tsd->arenas[i]->nthreads--;
malloc_rwlock_unlock(&pool->arenas_lock);
}
}
}
base_free_fn(tsd->seqno);
base_free_fn(tsd->arenas);
tsd->npools = 0;
malloc_mutex_unlock(&pools_lock);
}
JEMALLOC_ALWAYS_INLINE_C bool
malloc_thread_init(void)
{
if (config_fill && opt_quarantine && base_malloc_fn == base_malloc_default) {
/* create pool base and call quarantine_alloc_hook() inside */
return (malloc_init_base_pool());
}
return (false);
}
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{
if (malloc_initialized == false && malloc_init_hard())
return (true);
return (false);
}
static bool
malloc_init_base_pool(void)
{
malloc_mutex_lock(&pool_base_lock);
if (base_pool_initialized) {
/*
* Another thread initialized the base pool before this one
* acquired pools_lock.
*/
malloc_mutex_unlock(&pool_base_lock);
return (false);
}
if (malloc_init()) {
malloc_mutex_unlock(&pool_base_lock);
return (true);
}
if (pool_new(&base_pool, 0)) {
malloc_mutex_unlock(&pool_base_lock);
return (true);
}
pools = base_calloc(&base_pool, sizeof(pool_t *), POOLS_MIN);
if (pools == NULL) {
malloc_mutex_unlock(&pool_base_lock);
return (true);
}
pools[0] = &base_pool;
pools[0]->seqno = ++pool_seqno;
npools_cnt++;
npools = POOLS_MIN;
base_pool_initialized = true;
malloc_mutex_unlock(&pool_base_lock);
/*
* TSD initialization can't be safely done as a side effect of
* deallocation, because it is possible for a thread to do nothing but
* deallocate its TLS data via free(), in which case writing to TLS
* would cause write-after-free memory corruption. The quarantine
* facility *only* gets used as a side effect of deallocation, so make
* a best effort attempt at initializing its TSD by hooking all
* allocation events.
*/
if (config_fill && opt_quarantine)
quarantine_alloc_hook();
/*
* In the JEMALLOC_LAZY_LOCK case we had to defer initializing the
* arenas_lock until base pool initialization was complete. Deferral
* is safe because there are no other threads yet. We will actually
* recurse here, but since base_pool_initialized is set we will
* drop out of the recursion in the check at the top of this function.
*/
if (!isthreaded) {
if (malloc_rwlock_init(&base_pool.arenas_lock))
return (true);
}
return (false);
}
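/*
 * Extract the next <key>:<value> pair from the configuration string at
 * *opts_p.  On success, returns false and points *k_p/*klen_p and
 * *v_p/*vlen_p into the original string; returns true once the string is
 * exhausted or a malformed entry is encountered.
 */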
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p)
{
bool accept;
const char *opts = *opts_p;
*k_p = opts;
for (accept = false; accept == false;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
case 'Y': case 'Z':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
case 's': case 't': case 'u': case 'v': case 'w': case 'x':
case 'y': case 'z':
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7': case '8': case '9':
case '_':
opts++;
break;
case ':':
opts++;
*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
*v_p = opts;
accept = true;
break;
case '\0':
if (opts != *opts_p) {
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
}
return (true);
default:
malloc_write("<jemalloc>: Malformed conf string\n");
return (true);
}
}
for (accept = false; accept == false;) {
switch (*opts) {
case ',':
opts++;
/*
* Look ahead one character here, because the next time
* this function is called, it will assume that end of
* input has been cleanly reached if no input remains,
* but we have optimistically already consumed the
* comma if one exists.
*/
if (*opts == '\0') {
malloc_write("<jemalloc>: Conf string ends "
"with comma\n");
}
*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
accept = true;
break;
case '\0':
*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
accept = true;
break;
default:
opts++;
break;
}
}
*opts_p = opts;
return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
size_t vlen)
{
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
}
static void
malloc_conf_init(void)
{
unsigned i;
char buf[JE_PATH_MAX + 1];
const char *opts, *k, *v;
size_t klen, vlen;
/*
* Automatically configure valgrind before processing options. The
* valgrind option remains in jemalloc 3.x for compatibility reasons.
*/
if (config_valgrind) {
in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
if (config_fill && in_valgrind) {
opt_junk = false;
assert(opt_zero == false);
opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
opt_redzone = true;
}
if (config_tcache && in_valgrind)
opt_tcache = false;
}
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
* program.
*/
opts = je_malloc_conf;
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
case 1: {
int linklen = 0;
#ifndef _WIN32
int saved_errno = errno;
const char *linkname =
# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
"/etc/malloc.conf"
# endif
;
/*
* Try to use the contents of the "/etc/malloc.conf"
* symbolic link's name.
*/
linklen = readlink(linkname, buf, sizeof(buf) - 1);
if (linklen == -1) {
/* No configuration specified. */
linklen = 0;
/* restore errno */
set_errno(saved_errno);
}
#endif
buf[linklen] = '\0';
opts = buf;
break;
} case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
#else
"MALLOC_CONF"
#endif
;
if ((opts = getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
* variable.
*/
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
} default:
not_reached();
buf[0] = '\0';
opts = buf;
}
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
#define CONF_MATCH(n) \
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
if (CONF_MATCH(n)) { \
if (strncmp("true", v, vlen) == 0 && \
vlen == sizeof("true")-1) \
o = true; \
else if (strncmp("false", v, vlen) == \
0 && vlen == sizeof("false")-1) \
o = false; \
else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
if (cont) \
continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
set_errno(0); \
um = malloc_strtoumax(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
if ((min) != 0 && um < (min)) \
o = min; \
else if (um > (max)) \
o = max; \
else \
o = um; \
} else { \
if (((min) != 0 && um < (min)) || \
um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else \
o = um; \
} \
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
set_errno(0); \
l = strtol(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (l < (ssize_t)(min) || l > \
(ssize_t)(max)) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
} else \
o = l; \
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
strncpy(o, v, cpylen); \
o[cpylen] = '\0'; \
continue; \
}
CONF_HANDLE_BOOL(opt_abort, "abort", true)
/*
* Chunks always require at least one header page, plus
* one data page in the absence of redzones, or three
* pages in the presence of redzones. In order to
* simplify options processing, fix the limit based on
* config_fill.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
if (chunk_dss_prec_set(i)) {
malloc_conf_error(
"Error setting dss",
k, klen, v, vlen);
} else {
opt_dss =
dss_prec_names[i];
match = true;
break;
}
}
}
if (match == false) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, "junk", true)
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
CONF_HANDLE_BOOL(opt_tcache, "tcache",
!config_valgrind || !in_valgrind)
if (CONF_MATCH("tcache")) {
assert(config_valgrind && in_valgrind);
if (opt_tcache) {
opt_tcache = false;
malloc_conf_error(
"tcache cannot be enabled "
"while running inside Valgrind",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
true)
CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
true)
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
true)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
}
}
}
static bool
malloc_init_hard(void)
{
malloc_mutex_lock(&init_lock);
if (malloc_initialized || IS_INITIALIZER) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
malloc_mutex_unlock(&init_lock);
return (false);
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
/* Busy-wait until the initializing thread completes. */
do {
malloc_mutex_unlock(&init_lock);
CPU_SPINWAIT;
malloc_mutex_lock(&init_lock);
} while (malloc_initialized == false);
malloc_mutex_unlock(&init_lock);
return (false);
}
#endif
malloc_initializer = INITIALIZER;
malloc_tsd_boot();
if (config_prof)
prof_boot0();
malloc_conf_init();
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
}
pools_shared_data_initialized = false;
if (base_malloc_fn == NULL && base_free_fn == NULL) {
base_malloc_fn = base_malloc_default;
base_free_fn = base_free_default;
}
if (chunk_global_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (ctl_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (config_prof)
prof_boot1();
arena_params_boot();
/* Initialize allocation counters before any allocations can occur. */
if (config_stats && thread_allocated_tsd_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (arenas_tsd_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (config_tcache && tcache_boot1()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (config_fill && quarantine_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
malloc_mutex_unlock(&init_lock);
/**********************************************************************/
/* Recursive allocation may follow. */
ncpus = malloc_ncpus();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
&& !defined(_WIN32) && !defined(__native_client__))
/* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
}
#endif
/* Done recursively allocating. */
/**********************************************************************/
malloc_mutex_lock(&init_lock);
if (mutex_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (opt_narenas == 0) {
/*
* For SMP systems, create more than one arena per CPU by
* default.
*/
if (ncpus > 1)
opt_narenas = ncpus << 2;
else
opt_narenas = 1;
}
malloc_initialized = true;
malloc_mutex_unlock(&init_lock);
return (false);
}
/*
* End initialization functions.
*/
/******************************************************************************/
/*
* Begin malloc(3)-compatible functions.
*/
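/*
 * Allocate a sampled object for heap profiling.  Requests that fall into the
 * small size classes are promoted to SMALL_MAXCLASS+1 so the profiler can
 * track them like large allocations; arena_prof_promoted() then records the
 * originally requested usable size.
 */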
static void *
imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = imalloc(SMALL_MAXCLASS+1);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = imalloc(usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize)
{
void *p;
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(usize, cnt);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = imalloc_prof_sample(usize, cnt);
else
p = imalloc(usize);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, size_t *usize)
{
if (malloc_init_base_pool())
return (NULL);
if (config_prof && opt_prof) {
*usize = s2u(size);
return (imalloc_prof(*usize));
}
if (config_stats || (config_valgrind && in_valgrind))
*usize = s2u(size);
return (imalloc(size));
}
void *
je_malloc(size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
ret = imalloc_body(size, &usize);
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = ipalloc(usize, alignment, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = imemalign_prof_sample(alignment, usize, cnt);
else
p = ipalloc(usize, alignment, false);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
size_t usize;
void *result;
assert(min_alignment != 0);
if (malloc_init_base_pool()) {
result = NULL;
goto label_oom;
} else {
if (size == 0)
size = 1;
/* Make sure that alignment is a large enough power of 2. */
if (((alignment - 1) & alignment) != 0
|| (alignment < min_alignment)) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating "
"aligned memory: invalid alignment\n");
abort();
}
result = NULL;
ret = EINVAL;
goto label_return;
}
usize = sa2u(size, alignment);
if (usize == 0) {
result = NULL;
goto label_oom;
}
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(usize, cnt);
result = imemalign_prof(alignment, usize, cnt);
} else
result = ipalloc(usize, alignment, false);
if (result == NULL)
goto label_oom;
}
*memptr = result;
ret = 0;
label_return:
if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, result);
return (ret);
label_oom:
assert(result == NULL);
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating aligned memory: "
"out of memory\n");
abort();
}
ret = ENOMEM;
goto label_return;
}
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
int ret = imemalign(memptr, alignment, size, sizeof(void *));
JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
config_prof), false);
return (ret);
}
void *
je_aligned_alloc(size_t alignment, size_t size)
{
void *ret;
int err;
if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
ret = NULL;
set_errno(err);
}
JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
false);
return (ret);
}
static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = icalloc(SMALL_MAXCLASS+1);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = icalloc(usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = icalloc_prof_sample(usize, cnt);
else
p = icalloc(usize);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
void *
je_calloc(size_t num, size_t size)
{
void *ret;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (malloc_init_base_pool()) {
num_size = 0;
ret = NULL;
goto label_return;
}
num_size = num * size;
if (num_size == 0) {
if (num == 0 || size == 0)
num_size = 1;
else {
ret = NULL;
goto label_return;
}
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
&& (num_size / size != num)) {
/* size_t overflow. */
ret = NULL;
goto label_return;
}
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
usize = s2u(num_size);
PROF_ALLOC_PREP(usize, cnt);
ret = icalloc_prof(usize, cnt);
} else {
if (config_stats || (config_valgrind && in_valgrind))
usize = s2u(num_size);
ret = icalloc(num_size);
}
label_return:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = iralloc(oldptr, usize, 0, 0, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(oldptr);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = irealloc_prof_sample(oldptr, usize, cnt);
else
p = iralloc(oldptr, usize, 0, 0, false);
if (p == NULL)
return (NULL);
prof_realloc(p, usize, cnt, old_usize, old_ctx);
return (p);
}
JEMALLOC_INLINE_C void
ifree(void *ptr)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && in_valgrind)
rzsize = p2rz(ptr);
iqalloc(ptr);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
void *
je_realloc(void *ptr, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
ifree(ptr);
return (NULL);
}
size = 1;
}
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return (NULL);
if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && in_valgrind))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && in_valgrind)
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
usize = s2u(size);
PROF_ALLOC_PREP(usize, cnt);
ret = irealloc_prof(ptr, old_usize, usize, cnt);
} else {
if (config_stats || (config_valgrind && in_valgrind))
usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
ret = imalloc_body(size, &usize);
}
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_usize;
}
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
old_rzsize, true, false);
return (ret);
}
void
je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
if (ptr != NULL)
ifree(ptr);
}
/*
* End malloc(3)-compatible functions.
*/
/******************************************************************************/
/*
* Begin non-standard override functions.
*/
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, alignment, size, 1);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, PAGE, size, 1);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
/*
* is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
* #define je_malloc malloc
*/
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
* (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
*
* These definitions interpose hooks in glibc. The functions are actually
* passed an extra argument for the caller return address, which will be
* ignored.
*/
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
#endif
/*
* End non-standard override functions.
*/
/******************************************************************************/
/*
* Begin non-standard functions.
*/
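/*
 * Default hooks for base_malloc_fn/base_free_fn, used when the embedding
 * application does not install its own: allocations are served from the
 * RAM-backed base pool, and base_free_default is intentionally a no-op
 * because base allocations are never reclaimed.
 */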
static void *
base_malloc_default(size_t size)
{
return base_alloc(&base_pool, size);
}
static void
base_free_default(void *ptr)
{
}
static void
je_base_pool_destroy(void)
{
if (base_pool_initialized == false)
return;
#ifndef JEMALLOC_MUTEX_INIT_CB
pool_destroy(&base_pool);
malloc_mutex_destroy(&pool_base_lock);
malloc_mutex_destroy(&pools_lock);
#endif
}
bool
pools_shared_data_create(void)
{
if (malloc_init())
return (true);
if (pools_shared_data_initialized)
return (false);
if (config_tcache && tcache_boot0())
return (true);
pools_shared_data_initialized = true;
return (false);
}
void
pools_shared_data_destroy(void)
{
/* Only destroy when no pools exist */
if (npools == 0) {
pools_shared_data_initialized = false;
base_free_fn(tcache_bin_info);
tcache_bin_info = NULL;
}
}
#ifdef JEMALLOC_VALGRIND
/*
* Iterates through all the chunks/allocations on the heap and marks them
* as defined/undefined.
*/
static extent_node_t *
vg_tree_binary_iter_cb(extent_tree_t *tree, extent_node_t *node, void *arg)
{
assert(node->size != 0);
int noaccess = *(int *)arg;
if (noaccess) {
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(node->addr, node->size);
} else {
/* assume memory is defined */
JEMALLOC_VALGRIND_MALLOC(1, node->addr, node->size, 1);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(node->addr, node->size);
}
return (NULL);
}
/*
* Iterates through all the chunks/allocations on the heap and marks them
* as defined/undefined.
*/
static arena_chunk_map_t *
vg_tree_chunks_avail_iter_cb(arena_avail_tree_t *tree,
arena_chunk_map_t *map, void *arg)
{
int noaccess = *(int *)arg;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(map, sizeof(*map));
assert((map->bits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
assert((map->bits & ~PAGE_MASK) != 0);
size_t chunk_size = (map->bits & ~PAGE_MASK);
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(map);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(run_chunk, sizeof(*run_chunk));
size_t pageind = arena_mapelm_to_pageind(map);
void *chunk_addr = (void *)((uintptr_t)run_chunk + (pageind << LG_PAGE));
if (noaccess) {
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk_addr, chunk_size);
} else {
JEMALLOC_VALGRIND_MALLOC(1, chunk_addr, chunk_size, 1);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(chunk_addr, chunk_size);
}
return (NULL);
}
/*
* Reinitializes memcheck state if run under Valgrind.
* Iterates through all the chunks/allocations on the heap and marks them
* as defined/undefined.
*/
static int
vg_pool_init(pool_t *pool, size_t size)
{
/*
* There is no need to grab any locks here, as the pool is not
* being used yet.
*/
/* mark base_alloc used space as defined */
char *base_start = (char *)CACHELINE_CEILING((uintptr_t)pool +
sizeof(pool_t));
char *base_end = pool->base_next_addr;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(base_start, base_end - base_start);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(base_end,
(char *)pool->base_past_addr - base_end);
/* pointer to the address of chunks, align the address to chunksize */
void *usable_addr =
(void *)CHUNK_CEILING((uintptr_t)pool->base_next_addr);
/* usable chunks space, must be multiple of chunksize */
size_t usable_size =
(size - (uintptr_t)((char *)usable_addr - (char *)pool))
& ~chunksize_mask;
/* initially mark the entire heap as defined */
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(
usable_addr,
usable_size);
/* iterate through unused (available) chunks - mark as NOACCESS */
int noaccess = 1;
extent_tree_szad_iter(&pool->chunks_szad_mmap, NULL,
vg_tree_binary_iter_cb, &noaccess);
/* iterate through huge allocations - mark as MALLOCLIKE */
noaccess = 0;
extent_tree_ad_iter(&pool->huge, NULL,
vg_tree_binary_iter_cb, &noaccess);
/* iterate through arenas/runs */
for (unsigned i = 0; i < pool->narenas_total; ++i) {
arena_t *arena = pool->arenas[i];
if (arena != NULL) {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(arena,
sizeof(*arena));
/* bins */
for (unsigned b = 0; b < NBINS; b++) {
arena_bin_t *bin = &arena->bins[b];
if (bin->runcur != NULL)
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(
bin->runcur,
sizeof(*(bin->runcur)));
}
noaccess = 1; /* XXX */
arena_runs_avail_tree_iter(arena,
vg_tree_chunks_avail_iter_cb, &noaccess);
arena_chunk_t *spare = arena->spare;
if (spare != NULL) {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(
spare, sizeof(*spare));
}
}
}
return 1;
}
#endif /* JEMALLOC_VALGRIND */
/*
* Creates a new pool.
* Initializes the heap and all the allocator metadata.
*/
static pool_t *
pool_create_empty(pool_t *pool, size_t size, int zeroed, unsigned pool_id)
{
size_t result;
if (!zeroed)
memset(pool, 0, sizeof (pool_t));
/*
* preinit base allocator in unused space, align the address
* to the cache line
*/
pool->base_next_addr = (void *)CACHELINE_CEILING((uintptr_t)pool +
sizeof (pool_t));
pool->base_past_addr = (void *)((uintptr_t)pool + size);
/* prepare pool and internal structures */
if (pool_new(pool, pool_id)) {
assert(pools[pool_id] == NULL);
pools_shared_data_destroy();
return NULL;
}
/*
* preallocate the chunk tree nodes for the maximum possible
* number of chunks
*/
result = base_node_prealloc(pool, size/chunksize);
assert(result == 0);
assert(pools[pool_id] == NULL);
pool->seqno = pool_seqno++;
pools[pool_id] = pool;
npools_cnt++;
pool->memory_range_list =
base_alloc(pool, sizeof(*pool->memory_range_list));
/* pointer to the address of chunks, align the address to chunksize */
void *usable_addr =
(void *)CHUNK_CEILING((uintptr_t)pool->base_next_addr);
/* reduce end of base allocator up to chunks start */
pool->base_past_addr = usable_addr;
/* usable chunks space, must be multiple of chunksize */
size_t usable_size =
(size - (uintptr_t)((char *)usable_addr - (char *)pool))
& ~chunksize_mask;
assert(usable_size > 0);
malloc_mutex_lock(&pool->memory_range_mtx);
pool->memory_range_list->next = NULL;
pool->memory_range_list->addr = (uintptr_t)pool;
pool->memory_range_list->addr_end = (uintptr_t)pool + size;
pool->memory_range_list->usable_addr = (uintptr_t)usable_addr;
pool->memory_range_list->usable_addr_end =
(uintptr_t)usable_addr + usable_size;
malloc_mutex_unlock(&pool->memory_range_mtx);
/* register the usable pool space as a single big chunk */
chunk_record(pool,
&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
usable_addr, usable_size, zeroed);
pool->ctl_initialized = false;
return pool;
}
/*
* Opens an existing pool (i.e. pmemcto pool).
* Only the run-time state needs to be re-initialized.
*/
static pool_t *
pool_open(pool_t *pool, size_t size, unsigned pool_id)
{
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(pool, sizeof(pool_t));
/* prepare pool's runtime state */
if (pool_runtime_init(pool, pool_id)) {
malloc_mutex_unlock(&pools_lock);
return NULL;
}
assert(pools[pool_id] == NULL);
pool->seqno = pool_seqno++;
pools[pool_id] = pool;
npools_cnt++;
return pool;
}
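/*
 * Public entry point: creates a new pool in the given memory region
 * (empty != 0) or re-opens an existing one, assigning it the first unused
 * pool ID and growing the global pools[] array if necessary.
 */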
pool_t *
je_pool_create(void *addr, size_t size, int zeroed, int empty)
{
if (malloc_init())
return (NULL);
if (addr == NULL || size < POOL_MINIMAL_SIZE)
return (NULL);
pool_t *pool = (pool_t *)addr;
unsigned pool_id;
	/* Pre-initialize the base pool if it does not exist yet, before taking pools_lock */
if (malloc_init_base_pool())
return (NULL);
malloc_mutex_lock(&pools_lock);
assert(pools != NULL);
assert(npools > 0);
/*
* Find unused pool ID.
	 * Pool 0 is a special pool with a reserved ID. It is created during
	 * malloc_init_base_pool() and allocates memory from RAM.
*/
for (pool_id = 1; pool_id < npools; ++pool_id) {
if (pools[pool_id] == NULL)
break;
}
if (pool_id == npools && npools < POOLS_MAX) {
size_t npools_new = npools * 2;
pool_t **pools_new = base_alloc(&base_pool,
npools_new * sizeof (pool_t *));
if (pools_new == NULL)
goto err;
memcpy(pools_new, pools, npools * sizeof (pool_t *));
memset(&pools_new[npools], 0,
(npools_new - npools) * sizeof (pool_t *));
pools = pools_new;
npools = npools_new;
}
if (pool_id == POOLS_MAX) {
malloc_printf("<jemalloc>: Error in pool_create(): "
"exceeded max number of pools (%u)\n", POOLS_MAX);
goto err;
}
pool_t *ret;
if (empty) {
ret = pool_create_empty(pool, size, zeroed, pool_id);
} else {
ret = pool_open(pool, size, pool_id);
}
malloc_mutex_unlock(&pools_lock);
#ifdef JEMALLOC_VALGRIND
/* must be done with unlocked 'pools_lock' */
if (config_valgrind && !empty)
vg_pool_init(pool, size);
#endif
return ret;
err:
malloc_mutex_unlock(&pools_lock);
return (NULL);
}
int
je_pool_delete(pool_t *pool)
{
unsigned pool_id = pool->pool_id;
/* Remove pool from global array */
malloc_mutex_lock(&pools_lock);
if ((pool_id == 0) || (pool_id >= npools) || (pools[pool_id] != pool)) {
malloc_mutex_unlock(&pools_lock);
malloc_printf("<jemalloc>: Error in pool_delete(): "
"invalid pool_id (%u)\n", pool_id);
return -1;
}
pool_destroy(pool);
pools[pool_id] = NULL;
npools_cnt--;
pools_shared_data_destroy();
malloc_mutex_unlock(&pools_lock);
return 0;
}
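/*
 * Return 1 if any size_t-sized word in the given range is nonzero (trailing
 * bytes of a range that is not a multiple of sizeof(size_t) are ignored).
 */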
static int
check_is_unzeroed(void *ptr, size_t size)
{
size_t i;
size_t *p = (size_t *)ptr;
size /= sizeof(size_t);
for (i = 0; i < size; i++) {
if (p[i])
return 1;
}
return 0;
}
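/*
 * Tree-iteration callbacks used by je_pool_check().  Each one verifies that
 * the visited chunk has a nonzero size, really is zeroed when marked as such,
 * and lies entirely within one of the pool's registered memory ranges.
 * Returning a non-NULL value stops the tree iteration early.
 */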
static extent_node_t *
check_tree_binary_iter_cb(extent_tree_t *tree, extent_node_t *node, void *arg)
{
check_data_cb_t *arg_cb = arg;
if (node->size == 0) {
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"chunk 0x%p size is zero\n", node);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
arg_cb->size += node->size;
if (node->zeroed && check_is_unzeroed(node->addr, node->size)) {
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"chunk 0x%p, is marked as zeroed, but is dirty\n",
node->addr);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
	/* check that the chunk's address range is inside the pool memory */
pool_memory_range_node_t *list = arg_cb->list;
uintptr_t addr = (uintptr_t)node->addr;
uintptr_t addr_end = (uintptr_t)node->addr + node->size;
while (list != NULL) {
if ((list->usable_addr <= addr) &&
(addr < list->usable_addr_end) &&
(list->usable_addr < addr_end) &&
(addr_end <= list->usable_addr_end)) {
/* return NULL to continue iterations of tree */
return (NULL);
}
list = list->next;
}
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"incorrect address chunk 0x%p, out of memory pool\n",
node->addr);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
static arena_chunk_map_t *
check_tree_chunks_avail_iter_cb(arena_avail_tree_t *tree,
arena_chunk_map_t *map, void *arg)
{
check_data_cb_t *arg_cb = arg;
if ((map->bits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) != 0) {
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"flags in map->bits %zu are incorrect\n", map->bits);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
if ((map->bits & ~PAGE_MASK) == 0) {
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"chunk_map 0x%p size is zero\n", map);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
size_t chunk_size = (map->bits & ~PAGE_MASK);
arg_cb->size += chunk_size;
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(map);
size_t pageind = arena_mapelm_to_pageind(map);
void *chunk_addr = (void *)((uintptr_t)run_chunk + (pageind << LG_PAGE));
if (((map->bits & (CHUNK_MAP_UNZEROED | CHUNK_MAP_DIRTY)) == 0) &&
check_is_unzeroed(chunk_addr, chunk_size)) {
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"chunk_map 0x%p, is marked as zeroed, but is dirty\n",
map);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
	/* check that the chunk's address range is inside the pool memory */
pool_memory_range_node_t *list = arg_cb->list;
uintptr_t addr = (uintptr_t)chunk_addr;
uintptr_t addr_end = (uintptr_t)chunk_addr + chunk_size;
while (list != NULL) {
if ((list->usable_addr <= addr) &&
(addr < list->usable_addr_end) &&
(list->usable_addr < addr_end) &&
(addr_end <= list->usable_addr_end)) {
/* return NULL to continue iterations of tree */
return (NULL);
}
list = list->next;
}
arg_cb->error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"incorrect address chunk_map 0x%p, out of memory pool\n",
chunk_addr);
/* returns value other than NULL to break iteration */
return (void*)(UINTPTR_MAX);
}
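/*
 * Verify the internal consistency of a pool: its memory ranges, possible
 * collisions with other pools, and the placement and zeroed state of all
 * chunks.  Returns 1 if the pool looks consistent, 0 if corruption was
 * detected, and -1 if the pool handle itself is invalid.
 */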
int
je_pool_check(pool_t *pool)
{
size_t total_size = 0;
unsigned i;
pool_memory_range_node_t *node;
malloc_mutex_lock(&pools_lock);
if ((pool->pool_id == 0) || (pool->pool_id >= npools)) {
malloc_write("<jemalloc>: Error in pool_check(): "
"invalid pool id\n");
malloc_mutex_unlock(&pools_lock);
return -1;
}
if (pools[pool->pool_id] != pool) {
malloc_write("<jemalloc>: Error in pool_check(): "
"invalid pool handle, probably pool was deleted\n");
malloc_mutex_unlock(&pools_lock);
return -1;
}
malloc_mutex_lock(&pool->memory_range_mtx);
	/* check that the memory regions are defined correctly */
node = pool->memory_range_list;
while (node != NULL) {
size_t node_size = node->usable_addr_end - node->usable_addr;
total_size += node_size;
if ((node->addr > node->usable_addr) ||
(node->addr_end < node->usable_addr_end) ||
(node->usable_addr >= node->usable_addr_end)) {
malloc_write("<jemalloc>: Error in pool_check(): "
"corrupted pool memory\n");
malloc_mutex_unlock(&pool->memory_range_mtx);
malloc_mutex_unlock(&pools_lock);
return 0;
}
/* for the purpose of further checks we need to mark it as defined */
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)node->usable_addr,
node_size);
node = node->next;
}
	/* check for memory collisions with other pools */
for (i = 1; i < npools; i++) {
pool_t *pool_cmp = pools[i];
if (pool_cmp != NULL && i != pool->pool_id) {
node = pool->memory_range_list;
while (node != NULL) {
pool_memory_range_node_t *node2 = pool_cmp->memory_range_list;
while (node2 != NULL) {
if ((node->addr <= node2->addr &&
node2->addr < node->addr_end) ||
(node2->addr <= node->addr &&
node->addr < node2->addr_end)) {
malloc_write("<jemalloc>: Error in pool_check(): "
"pool uses the same as another pool\n");
malloc_mutex_unlock(&pool->memory_range_mtx);
malloc_mutex_unlock(&pools_lock);
return 0;
}
node2 = node2->next;
}
node = node->next;
}
}
}
	/* check that the addresses of the chunks are inside the registered memory ranges */
check_data_cb_t arg_cb;
arg_cb.list = pool->memory_range_list;
arg_cb.size = 0;
arg_cb.error = 0;
malloc_mutex_lock(&pool->chunks_mtx);
malloc_rwlock_wrlock(&pool->arenas_lock);
extent_tree_szad_iter(&pool->chunks_szad_mmap, NULL,
check_tree_binary_iter_cb, &arg_cb);
for (i = 0; i < pool->narenas_total && arg_cb.error == 0; ++i) {
arena_t *arena = pool->arenas[i];
if (arena != NULL) {
malloc_mutex_lock(&arena->lock);
arena_runs_avail_tree_iter(arena, check_tree_chunks_avail_iter_cb,
&arg_cb);
arena_chunk_t *spare = arena->spare;
if (spare != NULL) {
size_t spare_size = arena_mapbits_unallocated_size_get(spare,
map_bias);
arg_cb.size += spare_size;
/* check that spare is zeroed */
if ((arena_mapbits_unzeroed_get(spare, map_bias) == 0) &&
check_is_unzeroed(
(void *)((uintptr_t)spare + (map_bias << LG_PAGE)),
spare_size)) {
arg_cb.error += 1;
malloc_printf("<jemalloc>: Error in pool_check(): "
"spare 0x%p, is marked as zeroed, but is dirty\n",
spare);
}
}
malloc_mutex_unlock(&arena->lock);
}
}
malloc_rwlock_unlock(&pool->arenas_lock);
malloc_mutex_unlock(&pool->chunks_mtx);
malloc_mutex_unlock(&pool->memory_range_mtx);
malloc_mutex_unlock(&pools_lock);
if (arg_cb.error != 0) {
return 0;
}
if (total_size < arg_cb.size) {
malloc_printf("<jemalloc>: Error in pool_check(): total size of all "
"chunks: %zu is greater than associated memory range size: %zu\n",
arg_cb.size, total_size);
return 0;
}
return 1;
}
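/*
 * Illustrative sketch (not part of the original source): how a caller might
 * drive je_pool_check() defined above. The pool handle "p" is hypothetical;
 * the return convention follows the code above: -1 for an invalid pool
 * handle, 0 when the pool metadata is inconsistent, 1 when the pool is sane.
 */
#if 0
static const char *
example_pool_check(pool_t *p)
{
	switch (je_pool_check(p)) {
	case -1:
		return ("invalid pool handle");
	case 0:
		return ("pool metadata is corrupted");
	default:
		return ("pool is consistent");
	}
}
#endif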
/*
* add more memory to a pool
*/
size_t
je_pool_extend(pool_t *pool, void *addr, size_t size, int zeroed)
{
char *usable_addr = addr;
size_t nodes_number = size/chunksize;
if (size < POOL_MINIMAL_SIZE)
return 0;
/* preallocate the chunk tree nodes for the max possible number of chunks */
nodes_number = base_node_prealloc(pool, nodes_number);
pool_memory_range_node_t *node = base_alloc(pool,
sizeof (*pool->memory_range_list));
if (nodes_number > 0 || node == NULL) {
/*
* If base allocation using existing chunks fails, then use the new
* chunk as a source for further base allocations.
*/
malloc_mutex_lock(&pool->base_mtx);
/* preinit base allocator in unused space */
pool->base_next_addr = (void *)CACHELINE_CEILING((uintptr_t)addr);
pool->base_past_addr = (void *)((uintptr_t)addr + size);
malloc_mutex_unlock(&pool->base_mtx);
if (nodes_number > 0)
nodes_number = base_node_prealloc(pool, nodes_number);
assert(nodes_number == 0);
if (node == NULL)
node = base_alloc(pool, sizeof (*pool->memory_range_list));
assert(node != NULL);
/* pointer to the address of chunks, align the address to chunksize */
usable_addr = (void *)CHUNK_CEILING((uintptr_t)pool->base_next_addr);
/* reduce end of base allocator up to chunks */
pool->base_past_addr = usable_addr;
}
usable_addr = (void *)CHUNK_CEILING((uintptr_t)usable_addr);
size_t usable_size = (size - (uintptr_t)(usable_addr - (char *)addr))
& ~chunksize_mask;
assert(usable_size > 0);
node->addr = (uintptr_t)addr;
node->addr_end = (uintptr_t)addr + size;
node->usable_addr = (uintptr_t)usable_addr;
node->usable_addr_end = (uintptr_t)usable_addr + usable_size;
malloc_mutex_lock(&pool->memory_range_mtx);
node->next = pool->memory_range_list;
pool->memory_range_list = node;
chunk_record(pool,
&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
usable_addr, usable_size, zeroed);
malloc_mutex_unlock(&pool->memory_range_mtx);
return usable_size;
}
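/*
 * Illustrative sketch (not part of the original source): extending a pool
 * with an additional memory region. The region pointer and size are
 * hypothetical; as required by the code above, the region must be at least
 * POOL_MINIMAL_SIZE bytes, and the return value is the chunk-aligned usable
 * size carved out of it (0 on failure).
 */
#if 0
static size_t
example_pool_extend(pool_t *p, void *region, size_t region_size)
{
	/* The region is not known to be zero-filled, hence zeroed == 0. */
	return (je_pool_extend(p, region, region_size, 0));
}
#endif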
static void *
pool_ialloc_prof_sample(pool_t *pool, size_t usize, prof_thr_cnt_t *cnt,
void *(*ialloc)(pool_t *, size_t))
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = ialloc(pool, SMALL_MAXCLASS+1);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = ialloc(pool, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
pool_ialloc_prof(pool_t *pool, size_t usize,
void *(*ialloc)(pool_t *, size_t))
{
void *p;
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(usize, cnt);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = pool_ialloc_prof_sample(pool, usize, cnt, ialloc);
else
p = ialloc(pool, usize);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
pool_imalloc_body(pool_t *pool, size_t size, size_t *usize)
{
if (malloc_init())
return (NULL);
if (config_prof && opt_prof) {
*usize = s2u(size);
return (pool_ialloc_prof(pool, *usize, pool_imalloc));
}
if (config_stats || (config_valgrind && in_valgrind))
*usize = s2u(size);
return (pool_imalloc(pool, size));
}
void *
je_pool_malloc(pool_t *pool, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
ret = pool_imalloc_body(pool, size, &usize);
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in pool_malloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
void *
je_pool_calloc(pool_t *pool, size_t num, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t num_size;
num_size = num * size;
if (num_size == 0) {
if (num == 0 || size == 0)
num_size = 1;
else {
ret = NULL;
goto label_return;
}
} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
&& (num_size / size != num)) {
ret = NULL;
goto label_return;
}
if (config_prof && opt_prof) {
usize = s2u(num_size);
ret = pool_ialloc_prof(pool, usize, pool_icalloc);
} else {
if (config_stats || (config_valgrind && in_valgrind))
usize = s2u(num_size);
ret = pool_icalloc(pool, num_size);
}
label_return:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in pool_calloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
static void *
pool_irealloc_prof_sample(pool_t *pool, void *oldptr, size_t usize,
prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = pool_iralloc(pool, oldptr, SMALL_MAXCLASS+1, 0, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = pool_iralloc(pool, oldptr, usize, 0, 0, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
pool_irealloc_prof(pool_t *pool, void *oldptr, size_t old_usize,
size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(oldptr);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = pool_irealloc_prof_sample(pool, oldptr, usize, cnt);
else
p = pool_iralloc(pool, oldptr, usize, 0, 0, false);
if (p == NULL)
return (NULL);
prof_realloc(p, usize, cnt, old_usize, old_ctx);
return (p);
}
JEMALLOC_INLINE_C void
pool_ifree(pool_t *pool, void *ptr)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
arena_chunk_t *chunk;
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && in_valgrind)
rzsize = p2rz(ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk, ptr, true);
else
huge_dalloc(pool, ptr);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
void *
je_pool_ralloc(pool_t *pool, void *ptr, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
pool_ifree(pool, ptr);
return (NULL);
}
size = 1;
}
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
malloc_init();
if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && in_valgrind))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && in_valgrind)
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
usize = s2u(size);
PROF_ALLOC_PREP(usize, cnt);
ret = pool_irealloc_prof(pool, ptr, old_usize,
usize, cnt);
} else {
if (config_stats || (config_valgrind && in_valgrind))
usize = s2u(size);
ret = pool_iralloc(pool, ptr, size, 0, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
ret = pool_imalloc_body(pool, size, &usize);
}
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in pool_ralloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_usize;
}
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
old_rzsize, true, false);
return (ret);
}
static void *
pool_imemalign_prof_sample(pool_t *pool, size_t alignment, size_t usize,
prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
p = pool_ipalloc(pool, sa2u(SMALL_MAXCLASS+1, alignment),
alignment, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = pool_ipalloc(pool, usize, alignment, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
pool_imemalign_prof(pool_t *pool, size_t alignment, size_t usize,
prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = pool_imemalign_prof_sample(pool, alignment, usize, cnt);
else
p = pool_ipalloc(pool, usize, alignment, false);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
JEMALLOC_ATTR(nonnull(1))
static int
pool_imemalign(pool_t *pool, void **memptr, size_t alignment, size_t size,
size_t min_alignment)
{
int ret;
size_t usize;
void *result;
assert(min_alignment != 0);
if (malloc_init()) {
result = NULL;
goto label_oom;
} else {
if (size == 0)
size = 1;
/* Make sure that alignment is a large enough power of 2. */
if (((alignment - 1) & alignment) != 0
|| (alignment < min_alignment)) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating pool"
" aligned memory: invalid alignment\n");
abort();
}
result = NULL;
ret = EINVAL;
goto label_return;
}
usize = sa2u(size, alignment);
if (usize == 0) {
result = NULL;
goto label_oom;
}
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(usize, cnt);
result = pool_imemalign_prof(pool, alignment,
usize, cnt);
} else
result = pool_ipalloc(pool, usize, alignment, false);
if (result == NULL)
goto label_oom;
}
*memptr = result;
ret = 0;
label_return:
if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, result);
return (ret);
label_oom:
assert(result == NULL);
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating pool "
"aligned memory: out of memory\n");
abort();
}
ret = ENOMEM;
goto label_return;
}
void *
je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size)
{
void *ret;
int err;
if ((err = pool_imemalign(pool, &ret, alignment, size, 1)) != 0) {
ret = NULL;
set_errno(err);
}
JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
false);
return (ret);
}
void
je_pool_free(pool_t *pool, void *ptr)
{
UTRACE(ptr, 0, 0);
if (ptr != NULL)
pool_ifree(pool, ptr);
}
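/*
 * Illustrative sketch (not part of the original source): a minimal
 * allocate/reallocate/free cycle against a single pool, using the wrappers
 * defined above. Error handling is reduced to early returns; the pool
 * handle is assumed to have been created elsewhere.
 */
#if 0
static int
example_pool_lifecycle(pool_t *p)
{
	void *buf = je_pool_malloc(p, 128);
	if (buf == NULL)
		return (-1);

	/* Grow the buffer; like realloc(), the old pointer may move. */
	void *bigger = je_pool_ralloc(p, buf, 4096);
	if (bigger == NULL) {
		je_pool_free(p, buf);
		return (-1);
	}

	/* Cache-line aligned allocation from the same pool. */
	void *aligned = je_pool_aligned_alloc(p, 64, 256);

	je_pool_free(p, aligned);	/* je_pool_free(p, NULL) is a no-op */
	je_pool_free(p, bigger);
	return (0);
}
#endif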
void
je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts)
{
stats_print(pool, write_cb, cbopaque, opts);
}
void
je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *))
{
if (malloc_func != NULL && free_func != NULL) {
malloc_mutex_lock(&pool_base_lock);
if (pools == NULL) {
base_malloc_fn = malloc_func;
base_free_fn = free_func;
}
malloc_mutex_unlock(&pool_base_lock);
}
}
size_t
je_pool_malloc_usable_size(pool_t *pool, void *ptr)
{
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return 0;
if (config_ivsalloc) {
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(pool->chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return 0;
}
return (ptr != NULL) ? pool_isalloc(pool, ptr, config_prof) : 0;
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
alignment)));
if (alignment != 0)
return (ipalloct(usize, alignment, zero, try_tcache, arena));
else if (zero)
return (icalloct(usize, try_tcache, arena));
else
return (imalloct(usize, try_tcache, arena));
}
static void *
imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
size_t usize_promoted = (alignment == 0) ?
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
assert(usize_promoted != 0);
p = imallocx(usize_promoted, alignment, zero, try_tcache,
arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = imallocx(usize, alignment, zero, try_tcache, arena);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U) {
p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
arena, cnt);
} else
p = imallocx(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
void *
je_mallocx(size_t size, int flags)
{
void *p;
size_t usize;
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
pool_t *pool = &base_pool;
arena_t dummy_arena;
DUMMY_ARENA_INITIALIZE(dummy_arena, pool);
arena_t *arena;
bool try_tcache;
assert(size != 0);
if (malloc_init_base_pool())
goto label_oom;
if (arena_ind != UINT_MAX) {
malloc_rwlock_rdlock(&pool->arenas_lock);
arena = pool->arenas[arena_ind];
malloc_rwlock_unlock(&pool->arenas_lock);
try_tcache = false;
} else {
arena = &dummy_arena;
try_tcache = true;
}
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(usize, cnt);
p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
cnt);
} else
p = imallocx(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
UTRACE(0, size, 0);
return (NULL);
}
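/*
 * Illustrative sketch (not part of the original source): how the flags word
 * decoded above is typically built by callers. MALLOCX_ALIGN(), MALLOCX_ZERO
 * and MALLOCX_ARENA() are the helpers the public jemalloc header is expected
 * to provide for this purpose; they fill in, respectively, the low alignment
 * bits, the zero bit, and the (arena index + 1) stored in bits 8 and up,
 * matching the decoding in je_mallocx()/je_rallocx() above.
 */
#if 0
static void *
example_mallocx(void)
{
	/* 4 KiB, zero-filled, aligned to 64 bytes. */
	return (je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO));
}
#endif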
static void *
irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else {
p = iralloct(oldptr, size, 0, alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
}
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena, prof_thr_cnt_t *cnt)
{
void *p;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(oldptr);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
try_tcache_alloc, try_tcache_dalloc, arena, cnt);
else {
p = iralloct(oldptr, size, 0, alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
}
if (p == NULL)
return (NULL);
if (p == oldptr && alignment != 0) {
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
* alignment, and that the alignment constraint was
* serendipitously satisfied. Additionally, old_usize may not
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
*usize = isalloc(p, config_prof);
}
prof_realloc(p, *usize, cnt, old_usize, old_ctx);
return (p);
}
void *
je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
pool_t *pool = &base_pool;
arena_t dummy_arena;
DUMMY_ARENA_INITIALIZE(dummy_arena, pool);
bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return (NULL);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
try_tcache_alloc = false;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
try_tcache_dalloc = (chunk == ptr || chunk->arena !=
pool->arenas[arena_ind]);
arena = pool->arenas[arena_ind];
} else {
try_tcache_alloc = true;
try_tcache_dalloc = true;
arena = &dummy_arena;
}
if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && in_valgrind))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && in_valgrind)
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
PROF_ALLOC_PREP(usize, cnt);
p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
try_tcache_alloc, try_tcache_dalloc, arena, cnt);
if (p == NULL)
goto label_oom;
} else {
p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
try_tcache_dalloc, arena);
if (p == NULL)
goto label_oom;
if (config_stats || (config_valgrind && in_valgrind))
usize = isalloc(p, config_prof);
}
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_usize;
}
UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
old_rzsize, false, zero);
return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
UTRACE(ptr, size, 0);
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, bool zero, arena_t *arena)
{
size_t usize;
if (ixalloc(ptr, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
return (usize);
}
static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, size_t max_usize, bool zero, arena_t *arena,
prof_thr_cnt_t *cnt)
{
size_t usize;
if (cnt == NULL)
return (old_usize);
/* Use minimum usize to determine whether promotion may happen. */
if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
SMALL_MAXCLASS) {
if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
if (max_usize < PAGE)
arena_prof_promoted(ptr, usize);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero, arena);
}
return (usize);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, size_t max_usize, bool zero, arena_t *arena,
prof_thr_cnt_t *cnt)
{
size_t usize;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(ptr);
if ((uintptr_t)cnt != (uintptr_t)1U) {
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
 alignment, max_usize, zero, arena, cnt);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero, arena);
}
if (usize == old_usize)
return (usize);
prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
return (usize);
}
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
pool_t *pool = &base_pool;
arena_t dummy_arena;
DUMMY_ARENA_INITIALIZE(dummy_arena, pool);
arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return (0);
if (arena_ind != UINT_MAX)
arena = pool->arenas[arena_ind];
else
arena = &dummy_arena;
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && in_valgrind)
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
/*
* usize isn't knowable before ixalloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
* decide whether to sample.
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment);
PROF_ALLOC_PREP(max_usize, cnt);
usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
max_usize, zero, arena, cnt);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero, arena);
}
if (usize == old_usize)
goto label_not_resized;
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_usize;
}
JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
return (usize);
}
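/*
 * Illustrative sketch (not part of the original source): je_xallocx()
 * resizes in place only and never moves the allocation; the caller learns
 * whether the resize succeeded by comparing the returned usable size with
 * the requested size. The sizes below are arbitrary examples.
 */
#if 0
static int
example_xallocx(void *ptr)
{
	size_t grown = je_xallocx(ptr, 8192, 1024, 0);

	/* grown >= 8192: in-place expansion worked (possibly using extra). */
	/* Otherwise the allocation kept its previous usable size. */
	return (grown >= 8192);
}
#endif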
size_t
je_sallocx(const void *ptr, int flags)
{
size_t usize;
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return (0);
if (config_ivsalloc)
usize = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
usize = isalloc(ptr, config_prof);
}
return (usize);
}
void
je_dallocx(void *ptr, int flags)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
pool_t *pool = &base_pool;
bool try_tcache;
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
try_tcache = (chunk == ptr || chunk->arena !=
pool->arenas[arena_ind]);
} else
try_tcache = true;
UTRACE(ptr, 0, 0);
if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_prof && opt_prof) {
if (config_stats == false && config_valgrind == false)
usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
}
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && in_valgrind)
rzsize = p2rz(ptr);
iqalloct(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
size_t
je_nallocx(size_t size, int flags)
{
size_t usize;
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
assert(size != 0);
if (malloc_init_base_pool())
return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
return (usize);
}
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
return (ctl_nametomib(name, mibp, miblenp));
}
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
int
je_navsnprintf(char *str, size_t size, const char *format, va_list ap)
{
return malloc_vsnprintf(str, size, format, ap);
}
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
stats_print(&base_pool, write_cb, cbopaque, opts);
}
size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
assert(malloc_initialized || IS_INITIALIZER);
if (malloc_thread_init())
return (0);
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
else
ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
return (ret);
}
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
/*
* If an application creates a thread before doing any allocation in the main
* thread, then calls fork(2) in the main thread followed by memory allocation
* in the child process, a race can occur that results in deadlock within the
* child: the main thread may have forked while the created thread had
* partially initialized the allocator. Ordinarily jemalloc prevents
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
* constructor is a partial solution to this problem. It may still be possible to
* trigger the deadlock described above, but doing so would involve forking via
* a library constructor that runs before jemalloc's runs.
*/
JEMALLOC_ATTR(constructor(102))
void
jemalloc_constructor(void)
{
malloc_init();
}
JEMALLOC_ATTR(destructor(101))
void
jemalloc_destructor(void)
{
if (base_pool_initialized == false)
return;
tcache_thread_cleanup(tcache_tsd_get());
arenas_cleanup(arenas_tsd_get());
je_base_pool_destroy();
}
#define FOREACH_POOL(func) \
do { \
unsigned i; \
for (i = 0; i < npools; i++) { \
if (pools[i] != NULL) \
(func)(pools[i]); \
} \
} while(0)
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
unsigned i, j;
pool_t *pool;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (malloc_initialized == false)
return;
#endif
assert(malloc_initialized);
/* Acquire all mutexes in a safe order. */
ctl_prefork();
prof_prefork();
pool_prefork();
for (i = 0; i < npools; i++) {
pool = pools[i];
if (pool != NULL) {
malloc_rwlock_prefork(&pool->arenas_lock);
for (j = 0; j < pool->narenas_total; j++) {
if (pool->arenas[j] != NULL)
arena_prefork(pool->arenas[j]);
}
}
}
FOREACH_POOL(chunk_prefork0);
FOREACH_POOL(base_prefork);
FOREACH_POOL(chunk_prefork1);
chunk_dss_prefork();
FOREACH_POOL(huge_prefork);
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
unsigned i, j;
pool_t *pool;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (malloc_initialized == false)
return;
#endif
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
FOREACH_POOL(huge_postfork_parent);
chunk_dss_postfork_parent();
FOREACH_POOL(chunk_postfork_parent1);
FOREACH_POOL(base_postfork_parent);
FOREACH_POOL(chunk_postfork_parent0);
for (i = 0; i < npools; i++) {
pool = pools[i];
if (pool != NULL) {
for (j = 0; j < pool->narenas_total; j++) {
if (pool->arenas[j] != NULL)
arena_postfork_parent(pool->arenas[j]);
}
malloc_rwlock_postfork_parent(&pool->arenas_lock);
}
}
pool_postfork_parent();
prof_postfork_parent();
ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
unsigned i, j;
pool_t *pool;
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
FOREACH_POOL(huge_postfork_child);
chunk_dss_postfork_child();
FOREACH_POOL(chunk_postfork_child1);
FOREACH_POOL(base_postfork_child);
FOREACH_POOL(chunk_postfork_child0);
for (i = 0; i < npools; i++) {
pool = pools[i];
if (pool != NULL) {
for (j = 0; j < pool->narenas_total; j++) {
if (pool->arenas[j] != NULL)
arena_postfork_child(pool->arenas[j]);
}
malloc_rwlock_postfork_child(&pool->arenas_lock);
}
}
pool_postfork_child();
prof_postfork_child();
ctl_postfork_child();
}
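/*
 * Illustrative sketch (not part of the original source): the three handlers
 * above are meant to be registered together, so that every allocator mutex
 * is acquired before fork(2) and released (or reinitialized) in both the
 * parent and the child afterwards. The names below follow the
 * non-JEMALLOC_MUTEX_INIT_CB build; the real registration happens inside
 * jemalloc's initialization code and may use the _malloc_* aliases instead.
 */
#if 0
static void
example_register_fork_handlers(void)
{
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}
#endif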
/******************************************************************************/
/*
* The following functions are used for TLS allocation/deallocation in static
* binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
* is that these avoid accessing TLS variables.
*/
static void *
a0alloc(size_t size, bool zero)
{
if (malloc_init_base_pool())
return (NULL);
if (size == 0)
size = 1;
if (size <= arena_maxclass)
return (arena_malloc(base_pool.arenas[0], size, zero, false));
else
return (huge_malloc(NULL, size, zero));
}
void *
a0malloc(size_t size)
{
return (a0alloc(size, false));
}
void *
a0calloc(size_t num, size_t size)
{
return (a0alloc(num * size, true));
}
void
a0free(void *ptr)
{
arena_chunk_t *chunk;
if (ptr == NULL)
return;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk, ptr, false);
else
huge_dalloc(&base_pool, ptr);
}
/******************************************************************************/
| 80,508 | 23.227806 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/rtree.c
|
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc,
pool_t *pool)
{
rtree_t *ret;
unsigned bits_per_level, bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
if (bits > bits_in_leaf) {
height = 1 + (bits - bits_in_leaf) / bits_per_level;
if ((height-1) * bits_per_level + bits_in_leaf != bits)
height++;
} else {
height = 1;
}
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
ret = (rtree_t*)alloc(pool, offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
ret->alloc = alloc;
ret->dalloc = dalloc;
ret->pool = pool;
if (malloc_mutex_init(&ret->mutex)) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
ret->height = height;
if (height > 1) {
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
ret->level2bits[0] = (bits - bits_in_leaf) %
bits_per_level;
} else
ret->level2bits[0] = bits_per_level;
for (i = 1; i < height-1; i++)
ret->level2bits[i] = bits_per_level;
ret->level2bits[height-1] = bits_in_leaf;
} else
ret->level2bits[0] = bits;
ret->root = (void**)alloc(pool, sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
return (ret);
}
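/*
 * Worked example (not part of the original source), using hypothetical
 * values: if pow2_ceil(RTREE_NODESIZE / sizeof(void *)) were 1 << 11 and
 * pow2_ceil(RTREE_NODESIZE / sizeof(uint8_t)) were 1 << 14, then
 * bits_per_level = 11 and bits_in_leaf = 14. For a key width of bits = 48,
 * rtree_new() first computes height = 1 + (48 - 14) / 11 = 4; since
 * 3 * 11 + 14 = 47 != 48, height is bumped to 5, and level2bits is filled
 * as {1, 11, 11, 11, 14}, which sums back to the full 48 key bits.
 */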
static void
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
{
if (level < rtree->height - 1) {
size_t nchildren, i;
nchildren = ZU(1) << rtree->level2bits[level];
for (i = 0; i < nchildren; i++) {
void **child = (void **)node[i];
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
}
if (rtree->dalloc)
rtree->dalloc(rtree->pool, node);
}
void
rtree_delete(rtree_t *rtree)
{
rtree_delete_subtree(rtree, rtree->root, 0);
malloc_mutex_destroy(&rtree->mutex);
if (rtree->dalloc)
rtree->dalloc(rtree->pool, rtree);
}
void
rtree_prefork(rtree_t *rtree)
{
malloc_mutex_prefork(&rtree->mutex);
}
void
rtree_postfork_parent(rtree_t *rtree)
{
malloc_mutex_postfork_parent(&rtree->mutex);
}
void
rtree_postfork_child(rtree_t *rtree)
{
malloc_mutex_postfork_child(&rtree->mutex);
}
| 2,549 | 21.767857 | 81 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/huge.c
|
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
void *
huge_malloc(arena_t *arena, size_t size, bool zero)
{
return (huge_palloc(arena, size, chunksize, zero));
}
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
bool is_zeroed;
pool_t *pool;
/* Allocate one or more contiguous chunks for this request. */
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (NULL);
}
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = choose_arena(arena);
if (arena == NULL)
return (NULL);
pool = arena->pool;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc(pool);
if (node == NULL)
return (NULL);
ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
if (ret == NULL) {
base_node_dalloc(pool, node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
node->arena = arena;
malloc_mutex_lock(&pool->huge_mtx);
extent_tree_ad_insert(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
return (ret);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static bool
huge_ralloc_no_move_expand(pool_t *pool, char *ptr, size_t oldsize, size_t size, bool zero) {
size_t csize;
void *expand_addr;
size_t expand_size;
extent_node_t *node, key;
arena_t *arena;
bool is_zeroed;
void *ret;
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (true);
}
expand_addr = ptr + oldsize;
expand_size = csize - oldsize;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Find the current arena. */
arena = node->arena;
malloc_mutex_unlock(&pool->huge_mtx);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
&is_zeroed);
if (ret == NULL)
return (true);
assert(ret == expand_addr);
malloc_mutex_lock(&pool->huge_mtx);
/* Update the size of the huge allocation. */
node->size = csize;
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && !zero) {
if (unlikely(opt_junk))
memset(expand_addr, 0xa5, expand_size);
else if (unlikely(opt_zero) && !is_zeroed)
memset(expand_addr, 0, expand_size);
}
return (false);
}
bool
huge_ralloc_no_move(pool_t *pool, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
/* Both allocations must be huge to avoid a move. */
if (oldsize <= arena_maxclass)
return (true);
assert(CHUNK_CEILING(oldsize) == oldsize);
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
return (false);
}
/* Overflow. */
if (CHUNK_CEILING(size) == 0)
return (true);
/* Shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
extent_node_t *node, key;
void *excess_addr;
size_t excess_size;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Update the size of the huge allocation. */
node->size = CHUNK_CEILING(size);
malloc_mutex_unlock(&pool->huge_mtx);
excess_addr = (char *)node->addr + CHUNK_CEILING(size);
excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);
/* Zap the excess chunks. */
huge_dalloc_junk(excess_addr, excess_size);
arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);
return (false);
}
/* Attempt to expand the allocation in-place. */
if (huge_ralloc_no_move_expand(pool, ptr, oldsize, size + extra, zero)) {
if (extra == 0)
return (true);
/* Try again, this time without extra. */
return (huge_ralloc_no_move_expand(pool, ptr, oldsize, size, zero));
}
return (false);
}
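/*
 * Worked example (not part of the original source), assuming a 4 MiB chunk
 * size purely for illustration: with oldsize = 8 MiB, a request for
 * size = 7 MiB has CHUNK_CEILING(size) = 8 MiB, so the size class is
 * unchanged and huge_ralloc_no_move() returns false (the caller keeps the
 * allocation in place). A request for 9 MiB (ceiling 12 MiB) instead goes
 * through huge_ralloc_no_move_expand(); only when the adjacent address
 * space cannot be obtained does huge_ralloc() below fall back to
 * allocate, copy and free.
 */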
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
if (huge_ralloc_no_move(arena->pool, ptr, oldsize, size, extra, zero) == false)
return (ptr);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(arena, size + extra, alignment, zero);
else
ret = huge_malloc(arena, size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(arena, size, alignment, zero);
else
ret = huge_malloc(arena, size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
return (ret);
}
void
huge_dalloc(pool_t *pool, void *ptr)
{
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
huge_dalloc_junk(node->addr, node->size);
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
base_node_dalloc(pool, node);
}
size_t
huge_salloc(const void *ptr)
{
size_t ret = 0;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != 0)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
size_t
huge_pool_salloc(pool_t *pool, const void *ptr)
{
size_t ret = 0;
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
return (ret);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret = NULL;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->prof_ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;
size_t i;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
node->prof_ctx = ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (node != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
}
/*
* Called at each pool opening.
*/
bool
huge_boot(pool_t *pool)
{
if (malloc_mutex_init(&pool->huge_mtx))
return (true);
return (false);
}
/*
* Called only at pool creation.
*/
bool
huge_init(pool_t *pool)
{
if (huge_boot(pool))
return (true);
/* Initialize chunks data. */
extent_tree_ad_new(&pool->huge);
return (false);
}
void
huge_prefork(pool_t *pool)
{
malloc_mutex_prefork(&pool->huge_mtx);
}
void
huge_postfork_parent(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->huge_mtx);
}
void
huge_postfork_child(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->huge_mtx);
}
| 9,358 | 21.021176 | 93 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/tsd.c
|
#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
/******************************************************************************/
void *
malloc_tsd_malloc(size_t size)
{
/* Avoid choose_arena() in order to dodge bootstrapping issues. */
return (arena_malloc(base_pool.arenas[0], size, false, false));
}
void
malloc_tsd_dalloc(void *wrapper)
{
idalloct(wrapper, false);
}
void
malloc_tsd_no_cleanup(void *arg)
{
not_reached();
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_thread_cleanup(void)
{
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
for (i = 0; i < ncleanups; i++)
pending[i] = true;
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
if (pending[i])
again = true;
}
}
} while (again);
}
#endif
void
malloc_tsd_cleanup_register(bool (*f)(void))
{
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
void
malloc_tsd_boot(void)
{
ncleanups = 0;
}
#ifdef _WIN32
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
isthreaded = true;
break;
#endif
case DLL_THREAD_DETACH:
_malloc_thread_cleanup();
break;
default:
break;
}
return (true);
}
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
pthread_t self = pthread_self();
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(&head->lock);
return (iter->data);
}
}
/* Insert block into list. */
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
return (NULL);
}
void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
}
#endif
| 2,824 | 19.035461 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/mb.c
|
#define JEMALLOC_MB_C_
#include "jemalloc/internal/jemalloc_internal.h"
| 72 | 23.333333 | 48 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/hash.c
|
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_internal.h"
| 74 | 24 | 48 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/tcache.c
|
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#define ARR_INITIALIZER JEMALLOC_ARG_CONCAT({0})
malloc_tsd_data(, tcache, tsd_tcache_t, TSD_TCACHE_INITIALIZER)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
size_t tcache_maxclass;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
{
return (arena_salloc(ptr, false));
}
void
tcache_event_hard(tcache_t *tcache)
{
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
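/*
 * Worked example (not part of the original source): with ncached = 20 and
 * low_water = 8, the "rem" argument computed above is 20 - 8 + (8 >> 2) = 14,
 * so 6 objects -- three quarters of the 8 that sat unused below the low
 * water mark during the last GC interval -- are flushed back to the arena
 * and 14 stay cached.
 */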
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < NBINS);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
if (config_prof && arena == tcache->arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
arena_mapp_get(chunk, pageind);
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr,
&arena_bin_info[binind], true);
}
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
} else {
/*
* This object was allocated via a different
* arena bin than the one that is currently
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &tcache->arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
UNUSED bool idump;
if (config_prof)
idump = false;
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
tcache->arena = arena;
}
void
tcache_arena_dissociate(tcache_t *tcache)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
tcache_stats_merge(tcache, tcache->arena);
malloc_mutex_unlock(&tcache->arena->lock);
}
}
tcache_t *
tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
if (tcache == NULL) {
if (create == false) {
/*
* Creating a tcache here would cause
* allocation as a side effect of free().
* Ordinarily that would be okay since
* tcache_create() failure is a soft failure
* that doesn't propagate. However, if TLS
* data are freed via free() as in glibc,
* subtle corruption could result from setting
* a TLS variable after its backing memory is
* freed.
*/
return (NULL);
}
if (tcache_enabled_get() == false) {
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
return (tcache_create(choose_arena(&dummy)));
}
if (tcache == TCACHE_STATE_PURGATORY) {
/*
* Make a note that an allocator function was called
* after tcache_thread_cleanup() was called.
*/
tsd_tcache_t *tsd = tcache_tsd_get();
tcache = TCACHE_STATE_REINCARNATED;
tsd->seqno[pool->pool_id] = pool->seqno;
tsd->tcaches[pool->pool_id] = tcache;
return (NULL);
}
if (tcache == TCACHE_STATE_REINCARNATED)
return (NULL);
not_reached();
return (NULL);
}
tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
unsigned i;
tsd_tcache_t *tsd = tcache_tsd_get();
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
*
* That this works relies on the same logic as in ipalloc(), but we
* cannot directly call ipalloc() here due to tcache bootstrapping
* issues.
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
if (size <= SMALL_MAXCLASS)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
tsd->seqno[arena->pool->pool_id] = arena->pool->seqno;
tsd->tcaches[arena->pool->pool_id] = tcache;
return (tcache);
}
void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
size_t tcache_size;
tcache_arena_dissociate(tcache);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
arena_dalloc_large(arena, chunk, tcache);
} else
idalloct(tcache, false);
}
bool
tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len)
{
if (len == UINT_MAX)
return (true);
assert(len < POOLS_MAX);
/* round up the new length to the nearest power of 2... */
size_t npools = 1ULL << (32 - __builtin_clz(len + 1));
/* ... but not less than POOLS_MIN */
if (npools < POOLS_MIN)
npools = POOLS_MIN;
unsigned *tseqno = base_malloc_fn(npools * sizeof (unsigned));
if (tseqno == NULL)
return (true);
if (tsd->seqno != NULL)
memcpy(tseqno, tsd->seqno, tsd->npools * sizeof (unsigned));
memset(&tseqno[tsd->npools], 0, (npools - tsd->npools) * sizeof (unsigned));
tcache_t **tcaches = base_malloc_fn(npools * sizeof (tcache_t *));
if (tcaches == NULL) {
base_free_fn(tseqno);
return (true);
}
if (tsd->tcaches != NULL)
memcpy(tcaches, tsd->tcaches, tsd->npools * sizeof (tcache_t *));
memset(&tcaches[tsd->npools], 0, (npools - tsd->npools) * sizeof (tcache_t *));
base_free_fn(tsd->seqno);
tsd->seqno = tseqno;
base_free_fn(tsd->tcaches);
tsd->tcaches = tcaches;
tsd->npools = npools;
return (false);
}
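/*
 * Worked example (not part of the original source): a request to cover
 * pool id len = 5 yields npools = 1 << (32 - __builtin_clz(6)) =
 * 1 << 3 = 8; the per-thread arrays grow in power-of-two steps and are
 * never sized below POOLS_MIN.
 */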
void
tcache_thread_cleanup(void *arg)
{
int i;
tsd_tcache_t *tsd_array = arg;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < tsd_array->npools; ++i) {
tcache_t *tcache = tsd_array->tcaches[i];
if (tcache != NULL) {
if (tcache == TCACHE_STATE_DISABLED) {
/* Do nothing. */
} else if (tcache == TCACHE_STATE_REINCARNATED) {
/*
* Another destructor called an allocator function after this
* destructor was called. Reset tcache to
* TCACHE_STATE_PURGATORY in order to receive another callback.
*/
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
} else if (tcache == TCACHE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to TCACHE_STATE_PURGATORY so that other destructors wouldn't
* cause re-creation of the tcache. This time, do nothing, so
* that the destructor will not be called again.
*/
} else if (tcache != NULL) {
assert(tcache != TCACHE_STATE_PURGATORY);
if (pools[i] != NULL && tsd_array->seqno[i] == pools[i]->seqno)
tcache_destroy(tcache);
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
}
}
}
base_free_fn(tsd_array->seqno);
base_free_fn(tsd_array->tcaches);
tsd_array->npools = 0;
malloc_mutex_unlock(&pools_lock);
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
bool
tcache_boot0(void)
{
unsigned i;
/* Array still initialized */
if (tcache_bin_info != NULL)
return (false);
/*
* If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1ULL << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1ULL << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1ULL << opt_lg_tcache_max);
nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(&base_pool,
nhbins * sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
return (false);
}
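/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * opt_lg_tcache_max = 15 and SMALL_MAXCLASS <= 32 KiB <= arena_maxclass,
 * tcache_maxclass becomes 32 KiB and nhbins = NBINS + (32768 >> 12) =
 * NBINS + 8, i.e. eight large size classes are cached in addition to the
 * small bins.
 */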
bool
tcache_boot1(void)
{
if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
return (true);
return (false);
}
| 15,882 | 26.057922 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk.c
|
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void chunk_dalloc_core(pool_t *pool, void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
bool zeroed;
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning NULL.
*/
return (NULL);
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
key.addr = new_addr;
key.size = alloc_size;
malloc_mutex_lock(&pool->chunks_mtx);
node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL || (new_addr && node->addr != new_addr)) {
malloc_mutex_unlock(&pool->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
zeroed = node->zeroed;
if (zeroed)
*zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&pool->chunks_mtx);
node = base_node_alloc(pool);
if (node == NULL) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
malloc_mutex_lock(&pool->chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&pool->chunks_mtx);
if (node != NULL)
base_node_dalloc(pool, node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
return (ret);
}
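/*
 * Illustrative example of the split arithmetic above (not from the original
 * source): recycling from a cached 8-chunk extent at address A to satisfy a
 * 2-chunk request with 4-chunk alignment yields leadsize =
 * ALIGNMENT_CEILING(A, 4 * chunksize) - A, ret = A + leadsize, and
 * trailsize = 8 * chunksize - leadsize - 2 * chunksize; the lead and trail
 * remainders are reinserted into the trees as smaller extents.
 */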
/*
* If the caller specifies (*zero == false), it is still possible to receive
* zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
static void *
chunk_alloc_core(pool_t *pool, void *new_addr, size_t size, size_t alignment,
bool base, bool *zero, dss_prec_t dss_prec)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL
&& (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* All strategies for allocation failed. */
return (NULL);
}
static bool
chunk_register(pool_t *pool, void *chunk, size_t size, bool base)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
if (config_ivsalloc && base == false) {
if (rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 1))
return (true);
}
if (config_stats || config_prof) {
bool gdump;
malloc_mutex_lock(&pool->chunks_mtx);
if (config_stats)
pool->stats_chunks.nchunks += (size / chunksize);
pool->stats_chunks.curchunks += (size / chunksize);
if (pool->stats_chunks.curchunks > pool->stats_chunks.highchunks) {
pool->stats_chunks.highchunks =
pool->stats_chunks.curchunks;
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
malloc_mutex_unlock(&pool->chunks_mtx);
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
return (false);
}
void *
chunk_alloc_base(pool_t *pool, size_t size)
{
void *ret;
bool zero;
zero = false;
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
ret = chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, NULL, size,
chunksize, false, &zero);
} else {
ret = chunk_alloc_core(pool, NULL, size, chunksize, true, &zero,
chunk_dss_prec_get());
}
if (ret == NULL)
return (NULL);
if (chunk_register(pool, ret, size, true)) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
return (ret);
}
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero)
{
void *ret;
ret = chunk_alloc(new_addr, size, alignment, zero,
arena->ind, arena->pool);
if (ret != NULL && chunk_register(arena->pool, ret, size, false)) {
chunk_dalloc(ret, size, arena->ind, arena->pool);
ret = NULL;
}
return (ret);
}
/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind, pool_t *pool)
{
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
return (chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, new_addr, size,
alignment, false, zero));
} else {
malloc_rwlock_rdlock(&pool->arenas_lock);
dss_prec_t dss_prec = pool->arenas[arena_ind]->dss_prec;
malloc_rwlock_unlock(&pool->arenas_lock);
return (chunk_alloc_core(pool, new_addr, size, alignment,
false, zero, dss_prec));
}
}
void
chunk_record(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size, bool zeroed)
{
bool unzeroed, file_mapped;
extent_node_t *xnode, *node, *prev, *xprev, key;
file_mapped = pool_is_file_mapped(pool);
unzeroed = pages_purge(chunk, size, file_mapped);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
 * If pages_purge() reported that the pages were zeroed as a side
 * effect of purging, this assignment is safe.
*/
if (zeroed == false && unzeroed == false) {
zeroed = true;
}
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc(pool);
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&pool->chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
node->zeroed = (node->zeroed && zeroed);
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = zeroed;
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
label_return:
malloc_mutex_unlock(&pool->chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dalloc(pool, xnode);
if (xprev != NULL)
base_node_dalloc(pool, xprev);
}
void
chunk_unmap(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (have_dss && chunk_in_dss(chunk))
chunk_record(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss, chunk, size, false);
else if (chunk_dalloc_mmap(chunk, size))
chunk_record(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap, chunk, size, false);
}
static void
chunk_dalloc_core(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&pool->chunks_mtx);
assert(pool->stats_chunks.curchunks >= (size / chunksize));
pool->stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&pool->chunks_mtx);
}
chunk_unmap(pool, chunk, size);
}
/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
chunk_dalloc_core(pool, chunk, size);
return (false);
}
bool
chunk_global_boot()
{
if (have_dss && chunk_dss_boot())
return (true);
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
return (false);
}
/*
* Called at each pool opening.
*/
bool
chunk_boot(pool_t *pool)
{
if (config_stats || config_prof) {
if (malloc_mutex_init(&pool->chunks_mtx))
return (true);
}
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
if (malloc_mutex_init(&rtree->mutex))
return (true);
}
return (false);
}
/*
* Called only at pool creation.
*/
bool
chunk_init(pool_t *pool)
{
if (chunk_boot(pool))
return (true);
if (config_stats || config_prof)
memset(&pool->stats_chunks, 0, sizeof(chunk_stats_t));
extent_tree_szad_new(&pool->chunks_szad_mmap);
extent_tree_ad_new(&pool->chunks_ad_mmap);
extent_tree_szad_new(&pool->chunks_szad_dss);
extent_tree_ad_new(&pool->chunks_ad_dss);
if (config_ivsalloc) {
pool->chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL, pool);
if (pool->chunks_rtree == NULL)
return (true);
}
return (false);
}
void
chunk_prefork0(pool_t *pool)
{
if (config_ivsalloc)
rtree_prefork(pool->chunks_rtree);
}
void
chunk_prefork1(pool_t *pool)
{
malloc_mutex_prefork(&pool->chunks_mtx);
}
void
chunk_postfork_parent0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_parent(pool->chunks_rtree);
}
void
chunk_postfork_parent1(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->chunks_mtx);
}
void
chunk_postfork_child0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_child(pool->chunks_rtree);
}
void
chunk_postfork_child1(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->chunks_mtx);
}
| 13,794 | 26.371032 | 93 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk_mmap.c
|
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
bool *zero);
/******************************************************************************/
static void *
pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
abort();
}
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}
static void
pages_unmap(void *addr, size_t size)
{
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}
bool
pages_purge(void *addr, size_t length, bool file_mapped)
{
bool unzeroed;
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || file_mapped || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#else
/* Last resort no-op. */
unzeroed = true;
#endif
return (unzeroed);
}
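/*
 * Reading of the flags above (informational note, not in the original
 * source): MADV_DONTNEED on Linux makes subsequent reads of anonymous
 * private pages return zero-filled pages, hence JEMALLOC_MADV_ZEROS is
 * true for it, while MADV_FREE gives no such guarantee; file-backed
 * mappings are never reported as zeroed here regardless of the flag.
 */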
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
void *ret, *pages;
size_t alloc_size, leadsize;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
return (ret);
}
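/*
 * Worked example of the slow path (illustrative): for a 4 MiB request with
 * 4 MiB alignment and 4 KiB pages, alloc_size = 4 MiB + 4 MiB - 4 KiB.
 * After mapping that much, leadsize rounds the returned address up to the
 * next 4 MiB boundary and pages_trim() unmaps the lead and trail excess,
 * leaving an aligned 4 MiB mapping.
 */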
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in one or two calls to
* pages_unmap().
*
* Optimistically try mapping precisely the right amount before falling
* back to the slow method, with the expectation that the optimistic
* approach works most of the time.
*/
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
}
assert(ret != NULL);
*zero = true;
return (ret);
}
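/*
 * Example of the optimistic path (illustrative): with 4 MiB chunk
 * alignment, a mapping whose start address is already a multiple of
 * 0x400000 has offset 0 and is returned directly; any other offset causes
 * the mapping to be discarded in favor of chunk_alloc_mmap_slow().
 */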
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
pages_unmap(chunk, size);
return (config_munmap == false);
}
| 5,078 | 22.733645 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/quarantine.c
|
#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
* quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
/* Data. */
malloc_tsd_data(, quarantine, quarantine_t *, NULL)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
quarantine->curobjs = 0;
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
quarantine_tsd_set(&quarantine);
return (quarantine);
}
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(quarantine);
return (quarantine);
}
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
quarantine->lg_maxobjs)) {
/* objs ring buffer data are contiguous. */
memcpy(ret->objs, &quarantine->objs[quarantine->first],
quarantine->curobjs * sizeof(quarantine_obj_t));
} else {
/* objs ring buffer data wrap around. */
size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
quarantine->first;
size_t ncopy_b = quarantine->curobjs - ncopy_a;
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
* sizeof(quarantine_obj_t));
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloc(quarantine);
return (ret);
}
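/*
 * Ring-buffer copy example (illustrative): with lg_maxobjs = 2 (capacity
 * 4), first = 3 and curobjs = 3, the live objects occupy slots 3, 0 and 1,
 * so the wrap-around branch copies ncopy_a = 4 - 3 = 1 object and then
 * ncopy_b = 3 - 1 = 2 objects into the start of the doubled buffer.
 */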
static void
quarantine_drain_one(quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(quarantine);
}
void
quarantine(void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* Make a note that quarantine() was called after
* quarantine_cleanup() was called.
*/
quarantine = QUARANTINE_STATE_REINCARNATED;
quarantine_tsd_set(&quarantine);
}
idalloc(ptr);
return;
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
*/
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
quarantine = quarantine_grow(quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
if (quarantine->curbytes + usize <= opt_quarantine) {
size_t offset = (quarantine->first + quarantine->curobjs) &
((ZU(1) << quarantine->lg_maxobjs) - 1);
quarantine_obj_t *obj = &quarantine->objs[offset];
obj->ptr = ptr;
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
if (config_fill && opt_junk) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
if ((config_valgrind == false || in_valgrind == false)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
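/*
 * Drain-threshold example (illustrative): with opt_quarantine = 16384,
 * curbytes = 15360 and an incoming usize = 4096, upper_bound becomes
 * 16384 - 4096 = 12288, so objects are drained oldest-first until curbytes
 * <= 12288 before the new pointer is appended.
 */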
void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
/*
* Another destructor deallocated memory after this destructor
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
* in order to receive another callback.
*/
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to QUARANTINE_STATE_PURGATORY so that other destructors
* wouldn't cause re-creation of the quarantine. This time, do
* nothing, so that the destructor will not be called again.
*/
} else if (quarantine != NULL) {
quarantine_drain(quarantine, 0);
idalloc(quarantine);
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
}
}
bool
quarantine_boot(void)
{
cassert(config_fill);
if (quarantine_tsd_boot())
return (true);
return (false);
}
| 5,792 | 27.965 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/mutex.c
|
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_once(void)
{
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
isthreaded = true;
}
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
{
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
pthread_once(&once_control, pthread_create_once);
return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
static void *
base_calloc_wrapper(size_t number, size_t size)
{
return base_calloc(&base_pool, number, size);
}
/* XXX We need somewhere to allocate mutexes from during early initialization */
#define BOOTSTRAP_POOL_SIZE 4096
#define BP_MASK 0xfffffffffffffff0UL
static char bootstrap_pool[BOOTSTRAP_POOL_SIZE] __attribute__((aligned (16)));
static char *bpp = bootstrap_pool;
static void *
bootstrap_calloc(size_t number, size_t size)
{
size_t my_size = ((number * size) + 0xf) & BP_MASK;
bpp += my_size;
if ((bpp - bootstrap_pool) > BOOTSTRAP_POOL_SIZE) {
return NULL;
}
return (void *)(bpp - my_size);
}
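/*
 * Rounding example (illustrative): BP_MASK clears the low four bits, so a
 * request of number * size = 40 bytes is rounded up to
 * (40 + 0xf) & BP_MASK = 48 bytes, keeping every bootstrap allocation
 * 16-byte aligned within the static pool.
 */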
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
base_calloc_wrapper) != 0)
return (true);
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
malloc_mutex_prefork(malloc_mutex_t *mutex)
{
malloc_mutex_lock(mutex);
}
void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
malloc_mutex_unlock(mutex);
}
bool
mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return (false);
}
void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_mutex_unlock(mutex);
#else
if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
void
malloc_rwlock_prefork(malloc_rwlock_t *rwlock)
{
malloc_rwlock_wrlock(rwlock);
}
void
malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock)
{
malloc_rwlock_unlock(rwlock);
}
void
malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_rwlock_unlock(rwlock);
#else
if (malloc_rwlock_init(rwlock)) {
malloc_printf("<jemalloc>: Error re-initializing rwlock in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
| 4,488 | 21.445 | 83 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/arena.c
|
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];
JEMALLOC_ALIGNED(CACHELINE)
const uint32_t small_bin2size_tab[NBINS] = {
#define B2S_bin_yes(size) \
size,
#define B2S_bin_no(size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
B2S_bin_##bin((ZU(1)<<(lg_grp)) + (ZU(ndelta)<<(lg_delta)))
SIZE_CLASSES
#undef B2S_bin_yes
#undef B2S_bin_no
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin_tab[] = {
#define S2B_3(i) i,
#define S2B_4(i) S2B_3(i) S2B_3(i)
#define S2B_5(i) S2B_4(i) S2B_4(i)
#define S2B_6(i) S2B_5(i) S2B_5(i)
#define S2B_7(i) S2B_6(i) S2B_6(i)
#define S2B_8(i) S2B_7(i) S2B_7(i)
#define S2B_9(i) S2B_8(i) S2B_8(i)
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_no
#undef SC
};
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
/******************************************************************************/
JEMALLOC_INLINE_C size_t
arena_mapelm_to_bits(arena_chunk_map_t *mapelm)
{
return (mapelm->bits);
}
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
uintptr_t a_mapelm = (uintptr_t)a;
uintptr_t b_mapelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
u.rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
int ret;
size_t a_size;
size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK;
uintptr_t a_mapelm = (uintptr_t)a;
uintptr_t b_mapelm = (uintptr_t)b;
if (a_mapelm & CHUNK_MAP_KEY)
a_size = a_mapelm & ~PAGE_MASK;
else
a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0 && (!(a_mapelm & CHUNK_MAP_KEY)))
ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
u.rb_link, arena_avail_comp)
arena_chunk_map_t *
arena_runs_avail_tree_iter(arena_t *arena, arena_chunk_map_t *(*cb)
(arena_avail_tree_t *, arena_chunk_map_t *, void *), void *arg)
{
return arena_avail_tree_iter(&arena->runs_avail, NULL, cb, arg);
}
static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{
assert(a != NULL);
assert(b != NULL);
/*
* Short-circuit for self comparison. The following comparison code
* would come to the same result, but at the cost of executing the slow
* path.
*/
if (a == b)
return (0);
/*
* Order such that chunks with higher fragmentation are "less than"
* those with lower fragmentation -- purging order is from "least" to
* "greatest". Fragmentation is measured as:
*
	 *   fragmentation = (mean current avail run size)
	 *                   / (mean defragmented avail run size)
	 *                 = (navail / nruns_avail)
	 *                   / (navail / (nruns_avail - nruns_adjac))
	 *                 = (nruns_avail - nruns_adjac) / nruns_avail
*
* The following code multiplies away the denominator prior to
* comparison, in order to avoid division.
*
*/
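	/*
	 * Numeric example (illustrative): for a with nruns_avail = 4,
	 * nruns_adjac = 1 and b with nruns_avail = 5, nruns_adjac = 4, the
	 * cross-multiplication below gives a_val = (4 - 1) * 5 = 15 and
	 * b_val = (5 - 4) * 4 = 4, so a_val > b_val and the function
	 * returns -1 without performing any division.
	 */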
{
size_t a_val = (a->nruns_avail - a->nruns_adjac) *
b->nruns_avail;
size_t b_val = (b->nruns_avail - b->nruns_adjac) *
a->nruns_avail;
if (a_val < b_val)
return (1);
if (a_val > b_val)
return (-1);
}
/*
* Break ties by chunk address. For fragmented chunks, report lower
* addresses as "lower", so that fragmentation reduction happens first
* at lower addresses. However, use the opposite ordering for
* unfragmented chunks, in order to increase the chances of
* re-allocating dirty runs.
*/
{
uintptr_t a_chunk = (uintptr_t)a;
uintptr_t b_chunk = (uintptr_t)b;
int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
if (a->nruns_adjac == 0) {
assert(b->nruns_adjac == 0);
ret = -ret;
}
return (ret);
}
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
dirty_link, arena_chunk_dirty_comp)
static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
bool ret;
if (pageind-1 < map_bias)
ret = false;
else {
ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
assert(ret == false || arena_mapbits_dirty_get(chunk,
pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
}
return (ret);
}
static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
bool ret;
if (pageind+npages == chunk_npages)
ret = false;
else {
assert(pageind+npages < chunk_npages);
ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
!= arena_mapbits_dirty_get(chunk, pageind+npages));
}
return (ret);
}
static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
return (arena_avail_adjac_pred(chunk, pageind) ||
arena_avail_adjac_succ(chunk, pageind, npages));
}
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
/*
* chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
* removed and reinserted even if the run to be inserted is clean.
*/
if (chunk->ndirty != 0)
arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
chunk->nruns_adjac++;
if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
chunk->nruns_adjac++;
chunk->nruns_avail++;
assert(chunk->nruns_avail > chunk->nruns_adjac);
if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
arena->ndirty += npages;
chunk->ndirty += npages;
}
if (chunk->ndirty != 0)
arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
pageind));
}
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
/*
* chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
* removed and reinserted even if the run to be removed is clean.
*/
if (chunk->ndirty != 0)
arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
chunk->nruns_adjac--;
if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
chunk->nruns_adjac--;
chunk->nruns_avail--;
assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
== 0 && chunk->nruns_adjac == 0));
if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
arena->ndirty -= npages;
chunk->ndirty -= npages;
}
if (chunk->ndirty != 0)
arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
pageind));
}
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
unsigned regind;
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
assert(run->nfree > 0);
assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
(uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
if (regind == run->nextind)
run->nextind++;
assert(regind < run->nextind);
return (ret);
}
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) %
(uintptr_t)bin_info->reg_interval == 0);
assert((uintptr_t)ptr >= (uintptr_t)run +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
}
static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
<< LG_PAGE)), PAGE);
}
static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
if (config_stats) {
ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
sub_pages) << LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(arena->pool, cactive_diff);
}
}
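/*
 * Worked example (illustrative, assuming 4 KiB pages and 4 MiB chunks):
 * growing nactive from 1020 to 1028 pages crosses a chunk boundary, so
 * cactive_diff = CHUNK_CEILING(1028 pages) - CHUNK_CEILING(1020 pages) =
 * 8 MiB - 4 MiB = +4 MiB; growth that stays within the same chunk-rounded
 * footprint reports a diff of 0 and is not counted.
 */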
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
size_t flag_dirty, size_t need_pages)
{
size_t total_pages, rem_pages;
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
flag_dirty);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
arena_mapbits_unallocated_set(chunk,
run_ind+need_pages, (rem_pages << LG_PAGE),
flag_dirty);
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
flag_dirty);
} else {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE),
arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages));
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
arena_mapbits_unzeroed_get(chunk,
run_ind+total_pages-1));
}
arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
false, true);
}
}
static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
bool remove, bool zero)
{
arena_chunk_t *chunk;
size_t flag_dirty, run_ind, need_pages, i;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
if (remove) {
arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
need_pages);
}
if (zero) {
if (flag_dirty == 0) {
/*
* The run is clean, so some pages may be zeroed (i.e.
* never before touched).
*/
for (i = 0; i < need_pages; i++) {
if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
!= 0)
arena_run_zero(chunk, run_ind+i, 1);
else if (config_debug) {
arena_run_page_validate_zeroed(chunk,
run_ind+i);
} else {
arena_run_page_mark_zeroed(chunk,
run_ind+i);
}
}
} else {
/* The run is dirty, so all pages must be zeroed. */
arena_run_zero(chunk, run_ind, need_pages);
}
} else {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
/*
* Set the last element first, in case the run only contains one page
* (i.e. both statements set the same element).
*/
arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}
static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
arena_run_split_large_helper(arena, run, size, true, zero);
}
static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
arena_run_split_large_helper(arena, run, size, false, zero);
}
static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
size_t binind)
{
arena_chunk_t *chunk;
size_t flag_dirty, run_ind, need_pages, i;
assert(binind != BININD_INVALID);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
/*
* Propagate the dirty and unzeroed flags to the allocated small run,
* so that arena_dalloc_bin_run() has the ability to conditionally trim
* clean pages.
*/
arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not actually
* cause an observable failure.
*/
if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
run_ind) == 0)
arena_run_page_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) {
arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
if (config_debug && flag_dirty == 0 &&
arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
arena_run_page_validate_zeroed(chunk, run_ind+i);
}
arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
binind, flag_dirty);
if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages-1) == 0)
arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
arena_chunk_t *chunk;
assert(arena->spare != NULL);
chunk = arena->spare;
arena->spare = NULL;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
arena_maxclass);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
bool *zero)
{
arena_chunk_t *chunk;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
arena, NULL, size, alignment, zero);
malloc_mutex_lock(&arena->lock);
if (config_stats && chunk != NULL)
arena->stats.mapped += chunksize;
return (chunk);
}
void *
arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero)
{
void *ret;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
malloc_mutex_lock(&arena->lock);
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
/* Optimistically update stats prior to unlocking. */
arena->stats.mapped += size;
arena->stats.allocated_huge += size;
arena->stats.nmalloc_huge++;
arena->stats.nrequests_huge++;
}
arena->nactive += (size >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena, new_addr, size,
alignment, zero);
if (config_stats) {
if (ret != NULL)
stats_cactive_add(arena->pool, size);
else {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
arena->stats.mapped -= size;
arena->stats.allocated_huge -= size;
arena->stats.nmalloc_huge--;
malloc_mutex_unlock(&arena->lock);
}
}
return (ret);
}
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
arena_chunk_t *chunk;
bool zero;
size_t unzeroed, i;
assert(arena->spare == NULL);
zero = false;
chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
if (chunk == NULL)
return (NULL);
chunk->arena = arena;
/*
* Claim that no pages are in use, since the header is merely overhead.
*/
chunk->ndirty = 0;
chunk->nruns_avail = 0;
chunk->nruns_adjac = 0;
/*
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
*/
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
unzeroed);
/*
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/
if (zero == false) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_mapp_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
(uintptr_t)arena_mapp_get(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
}
}
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
unzeroed);
return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
chunk = arena_chunk_init_hard(arena);
if (chunk == NULL)
return (NULL);
}
/* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
false, false);
return (chunk);
}
static void
arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
{
chunk_dalloc_t *chunk_dalloc;
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk_dalloc((void *)chunk, chunksize, arena->ind, arena->pool);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.mapped -= chunksize;
}
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
{
chunk_dalloc_t *chunk_dalloc;
malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
arena->stats.mapped -= size;
arena->stats.allocated_huge -= size;
arena->stats.ndalloc_huge++;
stats_cactive_sub(arena->pool, size);
}
arena->nactive -= (size >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
chunk_dalloc(chunk, size, arena->ind, arena->pool);
}
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
arena_maxclass);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
/*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
false, false);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
arena_chunk_dalloc_internal(arena, spare);
} else
arena->spare = chunk;
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run;
arena_chunk_map_t *mapelm;
arena_chunk_map_t *key;
key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split_large(arena, run, size, zero);
return (run);
}
return (NULL);
}
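/*
 * Note on the search key above (explanatory, not in the original source):
 * the "key" is not a real map element.  The requested size (a multiple of
 * PAGE) is or'd with CHUNK_MAP_KEY into a fake pointer so that
 * arena_avail_comp() takes its CHUNK_MAP_KEY branch and compares by size
 * only; nsearch then returns the smallest available run of at least that
 * size.
 */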
static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_large_helper(arena, size, zero);
if (run != NULL)
return (run);
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split_large(arena, run, size, zero);
return (run);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return (arena_run_alloc_large_helper(arena, size, zero));
}
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
arena_run_t *run;
arena_chunk_map_t *mapelm;
arena_chunk_map_t *key;
key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split_small(arena, run, size, binind);
return (run);
}
return (NULL);
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_small_helper(arena, size, binind);
if (run != NULL)
return (run);
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split_small(arena, run, size, binind);
return (run);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return (arena_run_alloc_small_helper(arena, size, binind));
}
static inline void
arena_maybe_purge(arena_t *arena)
{
size_t npurgeable, threshold;
/* Don't purge if the option is disabled. */
if (opt_lg_dirty_mult < 0)
return;
/* Don't purge if all dirty pages are already being purged. */
if (arena->ndirty <= arena->npurgatory)
return;
npurgeable = arena->ndirty - arena->npurgatory;
threshold = (arena->nactive >> opt_lg_dirty_mult);
/*
* Don't purge unless the number of purgeable pages exceeds the
* threshold.
*/
if (npurgeable <= threshold)
return;
arena_purge(arena, false);
}
static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
size_t *ndirty = (size_t *)arg;
assert(chunk->ndirty != 0);
*ndirty += chunk->ndirty;
return (NULL);
}
static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
size_t npurgatory, npurgeable;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
npurgeable = arena->ndirty - arena->npurgatory;
if (all == false) {
size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
npurgatory = npurgeable - threshold;
} else
npurgatory = npurgeable;
return (npurgatory);
}
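/*
 * Worked example (illustrative): with ndirty = 1000, npurgatory = 0,
 * nactive = 4096 and opt_lg_dirty_mult = 3, the threshold is 4096 >> 3 =
 * 512 pages, so a non-"all" purge aims to purge 1000 - 512 = 488 pages,
 * just enough to bring the dirty count back under the active/dirty ratio.
 */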
static void
arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
arena_chunk_mapelms_t *mapelms)
{
size_t pageind, npages;
/*
* Temporarily allocate free dirty runs within chunk. If all is false,
* only operate on dirty runs that are fragments; otherwise operate on
* all dirty runs.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
(all || arena_avail_adjac(chunk, pageind,
npages))) {
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE));
arena_run_split_large(arena, run, run_size,
false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(mapelms, mapelm, u.ql_link);
}
} else {
/* Skip run. */
if (arena_mapbits_large_get(chunk, pageind) != 0) {
npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
} else {
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE));
assert(arena_mapbits_small_runind_get(chunk,
pageind) == 0);
binind = arena_bin_index(arena, run->bin);
bin_info = &arena_bin_info[binind];
npages = bin_info->run_size >> LG_PAGE;
}
}
}
assert(pageind == chunk_npages);
assert(chunk->ndirty == 0 || all == false);
assert(chunk->nruns_adjac == 0);
}
static size_t
arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_mapelms_t *mapelms)
{
size_t npurged, pageind, npages, nmadvise;
arena_chunk_map_t *mapelm;
malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
npurged = 0;
ql_foreach(mapelm, mapelms, u.ql_link) {
bool unzeroed, file_mapped;
size_t flag_unzeroed, i;
pageind = arena_mapelm_to_pageind(mapelm);
npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;
assert(pageind + npages <= chunk_npages);
file_mapped = pool_is_file_mapped(arena->pool);
unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
LG_PAGE)), (npages << LG_PAGE), file_mapped);
flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
/*
* Set the unzeroed flag for all pages, now that pages_purge()
* has returned whether the pages were zeroed as a side effect
* of purging. This chunk map modification is safe even though
* the arena mutex isn't currently owned by this thread,
* because the run is marked as allocated, thus protecting it
* from being modified by any other thread. As long as these
* writes don't perturb the first and last elements'
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
*/
for (i = 0; i < npages; i++) {
arena_mapbits_unzeroed_set(chunk, pageind+i,
flag_unzeroed);
}
npurged += npages;
if (config_stats)
nmadvise++;
}
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.nmadvise += nmadvise;
return (npurged);
}
static void
arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_mapelms_t *mapelms)
{
arena_chunk_map_t *mapelm;
size_t pageind;
/* Deallocate runs. */
for (mapelm = ql_first(mapelms); mapelm != NULL;
mapelm = ql_first(mapelms)) {
arena_run_t *run;
pageind = arena_mapelm_to_pageind(mapelm);
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
LG_PAGE));
ql_remove(mapelms, mapelm, u.ql_link);
arena_run_dalloc(arena, run, false, true);
}
}
static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
size_t npurged;
arena_chunk_mapelms_t mapelms;
ql_new(&mapelms);
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
* run is reinserted into runs_avail, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
* in this function to deallocate the chunk.
*
* Note that once a chunk contains dirty pages, it cannot again contain
* a single run unless 1) it is a dirty run, or 2) this function purges
* dirty pages and causes the transition to a single clean run. Thus
* (chunk == arena->spare) is possible, but it is not possible for
* this function to be called on the spare unless it contains a dirty
* run.
*/
if (chunk == arena->spare) {
assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
arena_chunk_alloc(arena);
}
if (config_stats)
arena->stats.purged += chunk->ndirty;
/*
* Operate on all dirty runs if there is no clean/dirty run
* fragmentation.
*/
if (chunk->nruns_adjac == 0)
all = true;
arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
arena_chunk_unstash_purged(arena, chunk, &mapelms);
return (npurged);
}
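/*
 * Purge dirty pages until the number of dirty pages drops below the threshold
 * implied by opt_lg_dirty_mult, or purge everything if all is true.
 * arena->npurgatory tracks pages that some thread has already committed to
 * purging, so that concurrent callers do not race to purge the same pages.
 */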
static void
arena_purge(arena_t *arena, bool all)
{
arena_chunk_t *chunk;
size_t npurgatory;
if (config_debug) {
size_t ndirty = 0;
arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
chunks_dirty_iter_cb, (void *)&ndirty);
assert(ndirty == arena->ndirty);
}
assert(arena->ndirty > arena->npurgatory || all);
assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory) || all);
if (config_stats)
arena->stats.npurge++;
/*
* Add the minimum number of pages this thread should try to purge to
* arena->npurgatory. This will keep multiple threads from racing to
* reduce ndirty below the threshold.
*/
npurgatory = arena_compute_npurgatory(arena, all);
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
size_t npurgeable, npurged, nunpurged;
/* Get next chunk with dirty pages. */
chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
if (chunk == NULL) {
/*
* This thread was unable to purge as many pages as
* originally intended, due to races with other threads
* that either did some of the purging work, or re-used
* dirty pages.
*/
arena->npurgatory -= npurgatory;
return;
}
npurgeable = chunk->ndirty;
assert(npurgeable != 0);
if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
/*
* This thread will purge all the dirty pages in chunk,
* so set npurgatory to reflect this thread's intent to
* purge the pages. This tends to reduce the chances
* of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
* threshold.
* 2) This thread drops arena->lock.
* 3) Another thread causes one or more pages to be
* dirtied, and immediately determines that it must
* purge dirty pages.
*
* If this scenario *does* play out, that's okay,
* because all of the purging work being done really
* needs to happen.
*/
arena->npurgatory += npurgeable - npurgatory;
npurgatory = npurgeable;
}
/*
* Keep track of how many pages are purgeable, versus how many
* actually get purged, and adjust counters accordingly.
*/
arena->npurgatory -= npurgeable;
npurgatory -= npurgeable;
npurged = arena_chunk_purge(arena, chunk, all);
nunpurged = npurgeable - npurged;
arena->npurgatory += nunpurged;
npurgatory += nunpurged;
}
}
void
arena_purge_all(arena_t *arena)
{
malloc_mutex_lock(&arena->lock);
arena_purge(arena, true);
malloc_mutex_unlock(&arena->lock);
}
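/*
 * Coalesce the run described by {*p_size, *p_run_ind, *p_run_pages} with any
 * adjacent unallocated runs that have a matching dirty flag, updating the
 * outputs in place.
 */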
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
size_t size = *p_size;
size_t run_ind = *p_run_ind;
size_t run_pages = *p_run_pages;
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages);
size_t nrun_pages = nrun_size >> LG_PAGE;
/*
* Remove successor from runs_avail; the coalesced run is
* inserted later.
*/
assert(arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
false, true);
size += nrun_size;
run_pages += nrun_pages;
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
size);
}
/* Try to coalesce backward. */
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
flag_dirty) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
run_ind -= prun_pages;
/*
* Remove predecessor from runs_avail; the coalesced run is
* inserted later.
*/
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
false);
size += prun_size;
run_pages += prun_pages;
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
size);
}
*p_size = size;
*p_run_ind = run_ind;
*p_run_pages = run_pages;
}
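/*
 * Return a run to the arena: mark its pages unallocated in the chunk map,
 * coalesce with neighboring free runs, reinsert into runs_avail, and
 * deallocate the chunk if it becomes completely unused. Purging may be
 * triggered if the run is dirty.
 */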
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
assert(size == PAGE ||
arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
size_t binind = arena_bin_index(arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
size = bin_info->run_size;
}
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated and the caller
* doesn't claim to have cleaned it.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
arena_mapbits_unallocated_set(chunk, run_ind, size,
CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
}
arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
flag_dirty);
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
/* Deallocate chunk if it is now completely unused. */
if (size == arena_maxclass) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxclass >> LG_PAGE));
arena_chunk_dalloc(arena, chunk);
}
/*
* It is okay to do dirty page processing here even if the chunk was
* deallocated above, since in that case it is the spare. Waiting
* until after possible chunk deallocation to do dirty processing
* allows for an old spare to be fully deallocated, thus decreasing the
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
arena_maybe_purge(arena);
}
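/*
 * Split off the leading (oldsize - newsize) bytes of a large run and release
 * them via arena_run_dalloc(), leaving the caller with a newsize-byte run.
 */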
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
size_t head_npages = (oldsize - newsize) >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
assert(oldsize > newsize);
/*
* Update the chunk map so that arena_run_dalloc() can treat the
* leading run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
if (config_debug) {
UNUSED size_t tail_npages = newsize >> LG_PAGE;
assert(arena_mapbits_large_size_get(chunk,
pageind+head_npages+tail_npages-1) == 0);
assert(arena_mapbits_dirty_get(chunk,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
flag_dirty);
arena_run_dalloc(arena, run, false, false);
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
size_t head_npages = newsize >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
assert(oldsize > newsize);
/*
* Update the chunk map so that arena_run_dalloc() can treat the
* trailing run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
if (config_debug) {
UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
assert(arena_mapbits_large_size_get(chunk,
pageind+head_npages+tail_npages-1) == 0);
assert(arena_mapbits_dirty_get(chunk,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
flag_dirty);
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
dirty, false);
}
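/*
 * The helpers below maintain bin->runs, the tree of non-full runs from which
 * arena_bin_nonfull_run_tryget() draws replacement runs.
 */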
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
arena_run_t *run;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = arena_mapelm_to_pageind(mapelm);
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) <<
LG_PAGE));
return (run);
}
return (NULL);
}
static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
arena_run_tree_insert(&bin->runs, mapelm);
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
arena_run_tree_remove(&bin->runs, mapelm);
}
static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
arena_run_t *run = arena_bin_runs_first(bin);
if (run != NULL) {
arena_bin_runs_remove(bin, run);
if (config_stats)
bin->stats.reruns++;
}
return (run);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
size_t binind;
arena_bin_info_t *bin_info;
/* Look for a usable run. */
run = arena_bin_nonfull_run_tryget(bin);
if (run != NULL)
return (run);
/* No existing runs have any space available. */
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
/* Initialize run internals. */
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
bitmap_init(bitmap, &bin_info->bitmap_info);
}
malloc_mutex_unlock(&arena->lock);
/********************************/
malloc_mutex_lock(&bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
bin->stats.curruns++;
}
return (run);
}
/*
* arena_run_alloc_small() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
run = arena_bin_nonfull_run_tryget(bin);
if (run != NULL)
return (run);
return (NULL);
}
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
void *ret;
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run;
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
run = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
assert(bin->runcur->nfree > 0);
ret = arena_run_reg_alloc(bin->runcur, bin_info);
if (run != NULL) {
arena_chunk_t *chunk;
/*
* arena_run_alloc_small() may have allocated run, or
* it may have pulled run from the bin's run tree.
* Therefore it is unsafe to make any assumptions about
* how run has previously been used, and
* arena_bin_lower_run() must be called, as if a region
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
arena_dalloc_bin_run(arena, chunk, run, bin);
else
arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
}
if (run == NULL)
return (NULL);
bin->runcur = run;
assert(bin->runcur->nfree > 0);
return (arena_run_reg_alloc(bin->runcur, bin_info));
}
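/*
 * Fill a thread cache bin with regions from the arena, allocating at most
 * ncached_max >> lg_fill_div regions. The fill stops early if the arena
 * cannot supply more memory, leaving a partially filled cache.
 */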
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
arena_run_t *run;
void *ptr;
assert(tbin->ncached == 0);
if (config_prof && arena_prof_accum(arena, prof_accumbytes))
prof_idump();
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL)
break;
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr, &arena_bin_info[binind],
true);
}
tbin->avail[i] = ptr;
}
if (config_stats) {
bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
}
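/*
 * Redzone/junk support for small regions: the redzones surrounding each
 * region are filled with 0xa5 at allocation time and validated (then
 * overwritten with 0x5a) at deallocation time.
 */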
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
if (zero) {
size_t redzone_size = bin_info->redzone_size;
memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
redzone_size);
memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
redzone_size);
} else {
memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
bin_info->reg_interval);
}
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
"(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
size_t size = bin_info->reg_size;
size_t redzone_size = bin_info->redzone_size;
size_t i;
bool error = false;
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, false, i, *byte);
if (reset)
*byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, true, i, *byte);
if (reset)
*byte = 0xa5;
}
}
if (opt_abort && error)
abort();
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
size_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
assert(opt_junk);
assert(opt_quarantine);
assert(usize <= SMALL_MAXCLASS);
binind = small_size2bin(usize);
assert(binind < NBINS);
bin_info = &arena_bin_info[binind];
arena_redzones_validate(ptr, bin_info, true);
}
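/*
 * Allocate a small region from the bin that corresponds to size, using the
 * bin's current run when possible and falling back to
 * arena_bin_malloc_hard() otherwise.
 */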
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
void *ret;
arena_bin_t *bin;
arena_run_t *run;
size_t binind;
if (arena == NULL)
		return (NULL);
binind = small_size2bin(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
size = small_bin2size(binind);
malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ret = arena_bin_malloc_hard(arena, bin);
if (ret == NULL) {
malloc_mutex_unlock(&bin->lock);
return (NULL);
}
if (config_stats) {
bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
prof_idump();
if (zero == false) {
if (config_fill) {
if (opt_junk) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (opt_zero)
memset(ret, 0, size);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
UNUSED bool idump;
if (arena == NULL)
		return (NULL);
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc_large(arena, size, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
if (config_stats) {
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
idump = arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
if (zero == false) {
if (config_fill) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
}
return (ret);
}
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
size_t alloc_size, leadsize, trailsize;
arena_run_t *run;
arena_chunk_t *chunk;
assert((size & PAGE_MASK) == 0);
alignment = PAGE_CEILING(alignment);
alloc_size = size + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
(uintptr_t)run;
assert(alloc_size >= leadsize + size);
trailsize = alloc_size - leadsize - size;
ret = (void *)((uintptr_t)run + leadsize);
if (leadsize != 0) {
arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
leadsize);
}
if (trailsize != 0) {
arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
false);
}
arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
if (config_stats) {
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
malloc_mutex_unlock(&arena->lock);
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
return (ret);
}
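/*
 * Record the true (small) size class of a profiler-sampled allocation that
 * was promoted to a page-sized large run, so that isalloc(ptr, true) reports
 * the originally requested size class.
 */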
void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind, binind;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr, false) == PAGE);
assert(isalloc(ptr, true) == PAGE);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
binind = small_size2bin(size);
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
assert(isalloc(ptr, false) == PAGE);
assert(isalloc(ptr, true) == size);
}
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/* Dissociate run from bin. */
if (run == bin->runcur)
bin->runcur = NULL;
else {
size_t binind = arena_bin_index(chunk->arena, bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
arena_bin_runs_remove(bin, run);
}
}
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
size_t binind;
arena_bin_info_t *bin_info;
size_t npages, run_ind, past;
assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs,
arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
== NULL);
binind = arena_bin_index(chunk->arena, run->bin);
bin_info = &arena_bin_info[binind];
malloc_mutex_unlock(&bin->lock);
/******************************/
npages = bin_info->run_size >> LG_PAGE;
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
past = (size_t)(PAGE_CEILING((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
bin_info->reg_interval - bin_info->redzone_size) -
(uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
/*
* If the run was originally clean, and some pages were never touched,
* trim the clean pages before deallocating the dirty portion of the
* run.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+npages-1));
if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
npages) {
/* Trim clean pages. Convert to large run beforehand. */
assert(npages > 0);
arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
((past - run_ind) << LG_PAGE), false);
/* npages = past - run_ind; */
}
arena_run_dalloc(arena, run, true, false);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
if (config_stats)
bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/*
* Make sure that if bin->runcur is non-NULL, it refers to the lowest
* non-full run. It is okay to NULL runcur out rather than proactively
* keeping it pointing at the lowest non-full run.
*/
if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
bin->runcur = run;
if (config_stats)
bin->stats.reruns++;
} else
arena_bin_runs_insert(bin, run);
}
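/*
 * Deallocate a small region with bin->lock already held. If the run becomes
 * completely free it is returned to the arena; if it was previously full it
 * becomes eligible for reuse again.
 */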
void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
{
size_t pageind;
arena_run_t *run;
arena_bin_t *bin;
arena_bin_info_t *bin_info;
size_t size, binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind));
bin_info = &arena_bin_info[binind];
if (config_fill || config_stats)
size = bin_info->reg_size;
if (config_fill && opt_junk)
arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) {
bin->stats.allocated -= size;
bin->stats.ndalloc++;
}
}
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm)
{
arena_run_t *run;
arena_bin_t *bin;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
malloc_mutex_unlock(&bin->lock);
}
void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
arena_chunk_map_t *mapelm;
if (config_debug) {
/* arena_ptr_small_binind_get() does extra sanity checking. */
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
static void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && opt_junk)
memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t usize = arena_mapbits_large_size_get(chunk, pageind);
arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= usize;
arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
}
}
arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
malloc_mutex_lock(&arena->lock);
arena_dalloc_large_locked(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
{
assert(size < oldsize);
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
true);
if (config_stats) {
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
malloc_mutex_unlock(&arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = oldsize >> LG_PAGE;
size_t followsize;
assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
/* Try to extend the run. */
assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock);
if (pageind + npages < chunk_npages &&
arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
(followsize = arena_mapbits_unallocated_size_get(chunk,
pageind+npages)) >= size - oldsize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
size_t flag_dirty;
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << LG_PAGE)), splitsize, zero);
size = oldsize + splitsize;
npages = size >> LG_PAGE;
/*
* Mark the extended run as dirty if either portion of the run
* was dirty before allocation. This is rather pedantic,
* because there's not actually any sequence of events that
* could cause the resulting run to be passed to
* arena_run_dalloc() with the dirty argument set to false
* (which is when dirty flag consistency would really matter).
*/
flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
arena_mapbits_dirty_get(chunk, pageind+npages-1);
arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
if (config_stats) {
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
malloc_mutex_unlock(&arena->lock);
return (false);
}
malloc_mutex_unlock(&arena->lock);
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && opt_junk) {
memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
*/
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
size_t psize;
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
return (false);
} else {
arena_chunk_t *chunk;
arena_t *arena;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, psize);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
} else {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
if (config_fill && ret == false && zero == false) {
if (opt_junk) {
memset((void *)((uintptr_t)ptr +
oldsize), 0xa5, isalloc(ptr,
config_prof) - oldsize);
} else if (opt_zero) {
memset((void *)((uintptr_t)ptr +
oldsize), 0, isalloc(ptr,
config_prof) - oldsize);
}
}
return (ret);
}
}
}
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize <= arena_maxclass) {
if (oldsize <= SMALL_MAXCLASS) {
assert(small_size2bin(oldsize) < NBINS);
assert(arena_bin_info[small_size2bin(oldsize)].reg_size
== oldsize);
if ((size + extra <= SMALL_MAXCLASS &&
small_size2bin(size + extra) ==
small_size2bin(oldsize)) || (size <= oldsize &&
size + extra >= oldsize))
return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
return (false);
}
}
}
/* Reallocation would require a move. */
return (true);
}
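/*
 * Reallocate, moving the object only when arena_ralloc_no_move() cannot
 * satisfy the request in place.
 */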
void *
arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
return (ptr);
/*
* size and oldsize are different enough that we need to move the
* object. In that case, fall back to allocating new space and
* copying.
*/
if (alignment != 0) {
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
} else
ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment != 0) {
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
} else
ret = arena_malloc(arena, size, zero, try_tcache_alloc);
if (ret == NULL)
return (NULL);
}
/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
return (ret);
}
dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
dss_prec_t ret;
malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec;
malloc_mutex_unlock(&arena->lock);
return (ret);
}
bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
if (have_dss == false)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
return (false);
}
void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
*nactive += arena->nactive;
*ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
astats->allocated_huge += arena->stats.allocated_huge;
astats->nmalloc_huge += arena->stats.nmalloc_huge;
astats->ndalloc_huge += arena->stats.ndalloc_huge;
astats->nrequests_huge += arena->stats.nrequests_huge;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
}
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
}
}
/*
* Called at each pool opening.
*/
bool
arena_boot(arena_t *arena)
{
unsigned i;
arena_bin_t *bin;
if (malloc_mutex_init(&arena->lock))
return (true);
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
}
arena->nthreads = 0;
return (false);
}
/*
* Called only at pool/arena creation.
*/
bool
arena_new(pool_t *pool, arena_t *arena, unsigned ind)
{
unsigned i;
arena_bin_t *bin;
arena->ind = ind;
arena->nthreads = 0;
arena->chunk_alloc = chunk_alloc_default;
arena->chunk_dalloc = chunk_dalloc_default;
arena->pool = pool;
if (malloc_mutex_init(&arena->lock))
return (true);
if (config_stats) {
memset(&arena->stats, 0, sizeof(arena_stats_t));
arena->stats.lstats =
(malloc_large_stats_t *)base_alloc(pool, nlclasses *
sizeof(malloc_large_stats_t));
if (arena->stats.lstats == NULL)
return (true);
memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
if (config_tcache)
ql_new(&arena->tcache_ql);
}
if (config_prof)
arena->prof_accumbytes = 0;
arena->dss_prec = chunk_dss_prec_get();
/* Initialize chunks. */
arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->nactive = 0;
arena->ndirty = 0;
arena->npurgatory = 0;
arena_avail_tree_new(&arena->runs_avail);
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
return (false);
}
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
* *) bin_info->run_size >= min_run_size
* *) bin_info->run_size <= arena_maxclass
* *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
* *) bin_info->nregs <= RUN_MAXREGS
*
* bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
* calculated here, since these settings are all interdependent.
*/
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
size_t pad_size;
size_t try_run_size, good_run_size;
uint32_t try_nregs, good_nregs;
uint32_t try_hdr_size, good_hdr_size;
uint32_t try_bitmap_offset, good_bitmap_offset;
uint32_t try_redzone0_offset, good_redzone0_offset;
assert(min_run_size >= PAGE);
assert(min_run_size <= arena_maxclass);
/*
* Determine redzone size based on minimum alignment and minimum
* redzone size. Add padding to the end of the run if it is needed to
* align the regions. The padding allows each redzone to be half the
* minimum alignment; without the padding, each redzone would have to
* be twice as large in order to maintain alignment.
*/
if (config_fill && opt_redzone) {
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
} else {
bin_info->redzone_size = align_min >> 1;
pad_size = bin_info->redzone_size;
}
} else {
bin_info->redzone_size = 0;
pad_size = 0;
}
bin_info->reg_interval = bin_info->reg_size +
(bin_info->redzone_size << 1);
/*
* Calculate known-valid settings before entering the run_size
* expansion loop, so that the first part of the loop always copies
* valid settings.
*
* The do..while loop iteratively reduces the number of regions until
* the run header and the regions no longer overlap. A closed formula
* would be quite messy, since there is an interdependency between the
* header's mask length and the number of regions.
*/
try_run_size = min_run_size;
try_nregs = ((try_run_size - sizeof(arena_run_t)) /
bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
+ 1; /* Counter-act try_nregs-- in loop. */
}
do {
try_nregs--;
try_hdr_size = sizeof(arena_run_t);
/* Pad to a long boundary. */
try_hdr_size = LONG_CEILING(try_hdr_size);
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
try_redzone0_offset = try_run_size - (try_nregs *
bin_info->reg_interval) - pad_size;
} while (try_hdr_size > try_redzone0_offset);
/* run_size expansion loop. */
do {
/*
* Copy valid settings before trying more aggressive settings.
*/
good_run_size = try_run_size;
good_nregs = try_nregs;
good_hdr_size = try_hdr_size;
good_bitmap_offset = try_bitmap_offset;
good_redzone0_offset = try_redzone0_offset;
/* Try more aggressive settings. */
try_run_size += PAGE;
try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
+ 1; /* Counter-act try_nregs-- in loop. */
}
do {
try_nregs--;
try_hdr_size = sizeof(arena_run_t);
/* Pad to a long boundary. */
try_hdr_size = LONG_CEILING(try_hdr_size);
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
try_redzone0_offset = try_run_size - (try_nregs *
bin_info->reg_interval) - pad_size;
} while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
RUN_MAX_OVRHD_RELAX
&& (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
&& try_nregs < RUN_MAXREGS);
assert(good_hdr_size <= good_redzone0_offset);
/* Copy final settings. */
bin_info->run_size = good_run_size;
bin_info->nregs = good_nregs;
bin_info->bitmap_offset = good_bitmap_offset;
bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
return (good_run_size);
}
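/*
 * Initialize arena_bin_info for every small size class defined by
 * SIZE_CLASSES, computing run sizes incrementally so that each bin's run is
 * at least as large as the previous bin's.
 */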
static void
bin_info_init(void)
{
arena_bin_info_t *bin_info;
size_t prev_run_size = PAGE;
#define BIN_INFO_INIT_bin_yes(index, size) \
bin_info = &arena_bin_info[index]; \
bin_info->reg_size = size; \
prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<(lg_grp)) + (ZU(ndelta)<<(lg_delta)))
SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}
void
arena_params_boot(void)
{
size_t header_size;
unsigned i;
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
* itself, so some iteration is necessary to compute the map bias.
*
* 1) Compute safe header_size and map_bias values that include enough
* space for an unbiased page map.
* 2) Refine map_bias based on (1) to omit the header pages in the page
* map. The resulting map_bias may be one too small.
* 3) Refine map_bias based on (2). The result will be >= the result
* from (2), and will always be correct.
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
header_size = offsetof(arena_chunk_t, map) +
(sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
!= 0);
}
assert(map_bias > 0);
arena_maxclass = chunksize - (map_bias << LG_PAGE);
bin_info_init();
}
void
arena_prefork(arena_t *arena)
{
unsigned i;
malloc_mutex_prefork(&arena->lock);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
}
void
arena_postfork_parent(arena_t *arena)
{
unsigned i;
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
malloc_mutex_postfork_parent(&arena->lock);
}
void
arena_postfork_child(arena_t *arena)
{
unsigned i;
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
malloc_mutex_postfork_child(&arena->lock);
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/pool.c
#define JEMALLOC_POOL_C_
#include "jemalloc/internal/jemalloc_internal.h"
malloc_mutex_t pool_base_lock;
malloc_mutex_t pools_lock;
/*
* Initialize runtime state of the pool.
* Called both at pool creation and each pool opening.
*/
bool
pool_boot(pool_t *pool, unsigned pool_id)
{
pool->pool_id = pool_id;
if (malloc_mutex_init(&pool->memory_range_mtx))
return (true);
/*
* Rwlock initialization must be deferred if we are
* creating the base pool in the JEMALLOC_LAZY_LOCK case.
* This is safe because the lock won't be used until
* isthreaded has been set.
*/
if ((isthreaded || (pool != &base_pool))
&& malloc_rwlock_init(&pool->arenas_lock))
return (true);
return (false);
}
/*
* Initialize runtime state of the pool.
* Called at each pool opening.
*/
bool
pool_runtime_init(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_boot(pool))
return (true);
if (chunk_boot(pool))
return (true);
if (huge_boot(pool))
return (true);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(pool->arenas,
sizeof(arena_t) * pool->narenas_total);
for (size_t i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
if (arena_boot(arena))
return (true);
}
}
return (false);
}
/*
* Initialize pool and create its base arena.
* Called only at pool creation.
*/
bool
pool_new(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_init(pool))
return (true);
if (chunk_init(pool))
return (true);
if (huge_init(pool))
return (true);
if (pools_shared_data_create())
return (true);
pool->stats_cactive = 0;
pool->ctl_stats_active = 0;
pool->ctl_stats_allocated = 0;
pool->ctl_stats_mapped = 0;
pool->narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (pool->narenas_auto > chunksize / sizeof(arena_t *)) {
pool->narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
pool->narenas_auto);
}
pool->narenas_total = pool->narenas_auto;
/* Allocate and initialize arenas. */
pool->arenas = (arena_t **)base_calloc(pool, sizeof(arena_t *),
pool->narenas_total);
if (pool->arenas == NULL)
return (true);
arenas_extend(pool, 0);
	return (false);
}
/* Release the arenas associated with a pool. */
void
pool_destroy(pool_t *pool)
{
size_t i, j;
for (i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
//arena_purge_all(arena); /* XXX */
for (j = 0; j < NBINS; j++)
malloc_mutex_destroy(&arena->bins[j].lock);
malloc_mutex_destroy(&arena->lock);
}
}
/*
* Set 'pool_id' to an incorrect value so that the pool cannot be used
* after being deleted.
*/
pool->pool_id = UINT_MAX;
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
malloc_mutex_destroy(&rtree->mutex);
}
malloc_mutex_destroy(&pool->memory_range_mtx);
malloc_mutex_destroy(&pool->base_mtx);
malloc_mutex_destroy(&pool->base_node_mtx);
malloc_mutex_destroy(&pool->chunks_mtx);
malloc_mutex_destroy(&pool->huge_mtx);
malloc_rwlock_destroy(&pool->arenas_lock);
}
void pool_prefork()
{
malloc_mutex_prefork(&pools_lock);
malloc_mutex_prefork(&pool_base_lock);
}
void pool_postfork_parent()
{
malloc_mutex_postfork_parent(&pools_lock);
malloc_mutex_postfork_parent(&pool_base_lock);
}
void pool_postfork_child()
{
malloc_mutex_postfork_child(&pools_lock);
malloc_mutex_postfork_child(&pool_base_lock);
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/prof.c
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif
/******************************************************************************/
/* Data. */
malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
bool opt_prof = false;
bool opt_prof_active = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
JE_PATH_MAX +
#endif
1];
uint64_t prof_interval = 0;
/*
* Table of mutexes that are shared among ctx's. These are leaf locks, so
* there is no problem with using them for more than one ctx at the same time.
* The primary motivation for this sharing though is that ctx's are ephemeral,
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
static malloc_mutex_t *ctx_locks;
static unsigned cum_ctxs; /* Atomic counter. */
/*
* Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
static ckh_t bt2ctx;
static malloc_mutex_t bt2ctx_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;
/*
* This buffer is rather large for stack allocation, so use a single buffer for
* all profile dumps.
*/
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PROF_DUMP_BUFSIZE
#else
1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
/******************************************************************************/
void
bt_init(prof_bt_t *bt, void **vec)
{
cassert(config_prof);
bt->vec = vec;
bt->len = 0;
}
static void
bt_destroy(prof_bt_t *bt)
{
cassert(config_prof);
idalloc(bt);
}
static prof_bt_t *
bt_dup(prof_bt_t *bt)
{
prof_bt_t *ret;
cassert(config_prof);
/*
* Create a single allocation that has space for vec immediately
* following the prof_bt_t structure. The backtraces that get
* stored in the backtrace caches are copied from stack-allocated
* temporary variables, so size is known at creation time. Making this
* a contiguous object improves cache locality.
*/
ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
(bt->len * sizeof(void *)));
if (ret == NULL)
return (NULL);
ret->vec = (void **)((uintptr_t)ret +
QUANTUM_CEILING(sizeof(prof_bt_t)));
memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
ret->len = bt->len;
return (ret);
}
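/*
 * prof_enter()/prof_leave() bracket accesses to the global bt2ctx table.
 * Dumps requested while the lock is held are deferred via the enq_idump and
 * enq_gdump flags, and prof_leave() issues any deferred dumps.
 */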
static inline void
prof_enter(prof_tdata_t *prof_tdata)
{
cassert(config_prof);
assert(prof_tdata->enq == false);
prof_tdata->enq = true;
malloc_mutex_lock(&bt2ctx_mtx);
}
static inline void
prof_leave(prof_tdata_t *prof_tdata)
{
bool idump, gdump;
cassert(config_prof);
malloc_mutex_unlock(&bt2ctx_mtx);
assert(prof_tdata->enq);
prof_tdata->enq = false;
idump = prof_tdata->enq_idump;
prof_tdata->enq_idump = false;
gdump = prof_tdata->enq_gdump;
prof_tdata->enq_gdump = false;
if (idump)
prof_idump();
if (gdump)
prof_gdump();
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
int nframes;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
if (nframes <= 0)
return;
bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{
cassert(config_prof);
return (_URC_NO_REASON);
}
static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
void *ip;
cassert(config_prof);
ip = (void *)_Unwind_GetIP(context);
if (ip == NULL)
return (_URC_END_OF_STACK);
data->bt->vec[data->bt->len] = ip;
data->bt->len++;
if (data->bt->len == data->max)
return (_URC_END_OF_STACK);
return (_URC_NO_REASON);
}
void
prof_backtrace(prof_bt_t *bt)
{
prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i) \
if ((i) < PROF_BT_MAX) { \
void *p; \
if (__builtin_frame_address(i) == 0) \
return; \
p = __builtin_return_address(i); \
if (p == NULL) \
return; \
bt->vec[(i)] = p; \
bt->len = (i) + 1; \
} else \
return;
cassert(config_prof);
BT_FRAME(0)
BT_FRAME(1)
BT_FRAME(2)
BT_FRAME(3)
BT_FRAME(4)
BT_FRAME(5)
BT_FRAME(6)
BT_FRAME(7)
BT_FRAME(8)
BT_FRAME(9)
BT_FRAME(10)
BT_FRAME(11)
BT_FRAME(12)
BT_FRAME(13)
BT_FRAME(14)
BT_FRAME(15)
BT_FRAME(16)
BT_FRAME(17)
BT_FRAME(18)
BT_FRAME(19)
BT_FRAME(20)
BT_FRAME(21)
BT_FRAME(22)
BT_FRAME(23)
BT_FRAME(24)
BT_FRAME(25)
BT_FRAME(26)
BT_FRAME(27)
BT_FRAME(28)
BT_FRAME(29)
BT_FRAME(30)
BT_FRAME(31)
BT_FRAME(32)
BT_FRAME(33)
BT_FRAME(34)
BT_FRAME(35)
BT_FRAME(36)
BT_FRAME(37)
BT_FRAME(38)
BT_FRAME(39)
BT_FRAME(40)
BT_FRAME(41)
BT_FRAME(42)
BT_FRAME(43)
BT_FRAME(44)
BT_FRAME(45)
BT_FRAME(46)
BT_FRAME(47)
BT_FRAME(48)
BT_FRAME(49)
BT_FRAME(50)
BT_FRAME(51)
BT_FRAME(52)
BT_FRAME(53)
BT_FRAME(54)
BT_FRAME(55)
BT_FRAME(56)
BT_FRAME(57)
BT_FRAME(58)
BT_FRAME(59)
BT_FRAME(60)
BT_FRAME(61)
BT_FRAME(62)
BT_FRAME(63)
BT_FRAME(64)
BT_FRAME(65)
BT_FRAME(66)
BT_FRAME(67)
BT_FRAME(68)
BT_FRAME(69)
BT_FRAME(70)
BT_FRAME(71)
BT_FRAME(72)
BT_FRAME(73)
BT_FRAME(74)
BT_FRAME(75)
BT_FRAME(76)
BT_FRAME(77)
BT_FRAME(78)
BT_FRAME(79)
BT_FRAME(80)
BT_FRAME(81)
BT_FRAME(82)
BT_FRAME(83)
BT_FRAME(84)
BT_FRAME(85)
BT_FRAME(86)
BT_FRAME(87)
BT_FRAME(88)
BT_FRAME(89)
BT_FRAME(90)
BT_FRAME(91)
BT_FRAME(92)
BT_FRAME(93)
BT_FRAME(94)
BT_FRAME(95)
BT_FRAME(96)
BT_FRAME(97)
BT_FRAME(98)
BT_FRAME(99)
BT_FRAME(100)
BT_FRAME(101)
BT_FRAME(102)
BT_FRAME(103)
BT_FRAME(104)
BT_FRAME(105)
BT_FRAME(106)
BT_FRAME(107)
BT_FRAME(108)
BT_FRAME(109)
BT_FRAME(110)
BT_FRAME(111)
BT_FRAME(112)
BT_FRAME(113)
BT_FRAME(114)
BT_FRAME(115)
BT_FRAME(116)
BT_FRAME(117)
BT_FRAME(118)
BT_FRAME(119)
BT_FRAME(120)
BT_FRAME(121)
BT_FRAME(122)
BT_FRAME(123)
BT_FRAME(124)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{
cassert(config_prof);
not_reached();
}
#endif
static malloc_mutex_t *
prof_ctx_mutex_choose(void)
{
unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
}
static void
prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
{
ctx->bt = bt;
ctx->lock = prof_ctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
* prof_ctx_merge()/prof_ctx_destroy().
*/
ctx->nlimbo = 1;
ql_elm_new(ctx, dump_link);
memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
ql_new(&ctx->cnts_ql);
}
static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() increments ctx->nlimbo in order to avoid a race
* condition with this function, as does prof_ctx_merge() in order to
* avoid a race between the main body of prof_ctx_merge() and entry
* into this function.
*/
prof_tdata = prof_tdata_get(false);
assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
prof_enter(prof_tdata);
malloc_mutex_lock(ctx->lock);
if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
ctx->nlimbo == 1) {
assert(ctx->cnt_merged.curbytes == 0);
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
not_reached();
prof_leave(prof_tdata);
/* Destroy ctx. */
malloc_mutex_unlock(ctx->lock);
bt_destroy(ctx->bt);
idalloc(ctx);
} else {
/*
* Compensate for increment in prof_ctx_merge() or
* prof_lookup().
*/
ctx->nlimbo--;
malloc_mutex_unlock(ctx->lock);
prof_leave(prof_tdata);
}
}
static void
prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
bool destroy;
cassert(config_prof);
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
ql_remove(&ctx->cnts_ql, cnt, cnts_link);
if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
/*
* Increment ctx->nlimbo in order to keep another thread from
* winning the race to destroy ctx while this one has ctx->lock
* dropped. Without this, it would be possible for another
* thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx->nlimbo++;
destroy = true;
} else
destroy = false;
malloc_mutex_unlock(ctx->lock);
if (destroy)
prof_ctx_destroy(ctx);
}
static bool
prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
prof_ctx_t **p_ctx, bool *p_new_ctx)
{
union {
prof_ctx_t *p;
void *v;
} ctx;
union {
prof_bt_t *p;
void *v;
} btkey;
bool new_ctx;
prof_enter(prof_tdata);
if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
/* bt has never been seen before. Insert it. */
ctx.v = imalloc(sizeof(prof_ctx_t));
if (ctx.v == NULL) {
prof_leave(prof_tdata);
return (true);
}
btkey.p = bt_dup(bt);
if (btkey.v == NULL) {
prof_leave(prof_tdata);
idalloc(ctx.v);
return (true);
}
prof_ctx_init(ctx.p, btkey.p);
if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
/* OOM. */
prof_leave(prof_tdata);
idalloc(btkey.v);
idalloc(ctx.v);
return (true);
}
new_ctx = true;
} else {
/*
* Increment nlimbo, in order to avoid a race condition with
* prof_ctx_merge()/prof_ctx_destroy().
*/
malloc_mutex_lock(ctx.p->lock);
ctx.p->nlimbo++;
malloc_mutex_unlock(ctx.p->lock);
new_ctx = false;
}
prof_leave(prof_tdata);
*p_btkey = btkey.v;
*p_ctx = ctx.p;
*p_new_ctx = new_ctx;
return (false);
}
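/*
 * Editorial note (not part of the upstream source): prof_lookup() below
 * consults the per-thread bt2cnt cache first and only falls back to
 * prof_lookup_global(), which searches (and, on a miss, populates) the global
 * bt2ctx table. The nlimbo increment performed above keeps the ctx alive
 * between the global lookup and the moment the per-thread counter is linked
 * into ctx->cnts_ql.
 */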
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
union {
prof_thr_cnt_t *p;
void *v;
} ret;
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (NULL);
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
void *btkey;
prof_ctx_t *ctx;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
return (NULL);
		/* Link a prof_thr_cnt_t into ctx for this thread. */
if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
assert(ckh_count(&prof_tdata->bt2cnt) > 0);
/*
* Flush the least recently used cnt in order to keep
* bt2cnt from becoming too large.
*/
ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
assert(ret.v != NULL);
if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
NULL, NULL))
not_reached();
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
} else {
assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
if (new_ctx)
prof_ctx_destroy(ctx);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
ql_elm_new(ret.p, lru_link);
}
/* Finish initializing ret. */
ret.p->ctx = ctx;
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
if (new_ctx)
prof_ctx_destroy(ctx);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
malloc_mutex_lock(ctx->lock);
ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
ctx->nlimbo--;
malloc_mutex_unlock(ctx->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
}
return (ret.p);
}
void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
if (!config_prof)
return;
if (prof_tdata == NULL)
prof_tdata = prof_tdata_get(false);
if (opt_lg_prof_sample == 0) {
prof_tdata->bytes_until_sample = 0;
return;
}
/*
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
*
	 *   prof_tdata->bytes_until_sample = ceil(log(u) / log(1 - p)),
	 *
	 *   where p = 1 / 2^opt_lg_prof_sample and u is drawn uniformly
	 *   from (0, 1).
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
#endif
}
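/*
 * Editorial worked example (not part of the original source): with
 * opt_lg_prof_sample == 19 (the usual default), the mean sample interval is
 * 2^19 == 524288 bytes, so p == 1/524288.  For a uniform draw u == 0.5 the
 * computation above yields
 *
 *   bytes_until_sample = floor(log(0.5) / log(1 - 1/524288)) + 1 ~= 363409
 *
 * i.e. roughly 0.69 times the mean, as expected for a geometric distribution.
 */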
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
size_t bt_count;
prof_tdata_t *prof_tdata;
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (0);
prof_enter(prof_tdata);
bt_count = ckh_count(&bt2ctx);
prof_leave(prof_tdata);
return (bt_count);
}
#endif
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
int fd;
fd = creat(filename, 0644);
if (fd == -1 && propagate_err == false) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
if (opt_abort)
abort();
}
return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
static bool
prof_dump_flush(bool propagate_err)
{
bool ret = false;
ssize_t err;
cassert(config_prof);
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (propagate_err == false) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
if (opt_abort)
abort();
}
ret = true;
}
prof_dump_buf_end = 0;
return (ret);
}
static bool
prof_dump_close(bool propagate_err)
{
bool ret;
assert(prof_dump_fd != -1);
ret = prof_dump_flush(propagate_err);
close(prof_dump_fd);
prof_dump_fd = -1;
return (ret);
}
static bool
prof_dump_write(bool propagate_err, const char *s)
{
unsigned i, slen, n;
cassert(config_prof);
i = 0;
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == sizeof(prof_dump_buf))
if (prof_dump_flush(propagate_err) && propagate_err)
return (true);
if (prof_dump_buf_end + slen <= sizeof(prof_dump_buf)) {
/* Finish writing. */
n = slen - i;
} else {
/* Write as much of s as will fit. */
n = sizeof(prof_dump_buf) - prof_dump_buf_end;
}
memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
prof_dump_buf_end += n;
i += n;
}
return (false);
}
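/*
 * Editorial note (not part of the original source): all dump output is
 * funneled through the static prof_dump_buf buffer and flushed with plain
 * write() calls, so the output path avoids allocating from jemalloc while a
 * heap profile is being written.
 */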
JEMALLOC_ATTR(format(printf, 2, 3))
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
bool ret;
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
ret = prof_dump_write(propagate_err, buf);
return (ret);
}
static void
prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
prof_ctx_list_t *ctx_ql)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
cassert(config_prof);
malloc_mutex_lock(ctx->lock);
/*
* Increment nlimbo so that ctx won't go away before dump.
* Additionally, link ctx into the dump list so that it is included in
* prof_dump()'s second pass.
*/
ctx->nlimbo++;
ql_tail_insert(ctx_ql, ctx, dump_link);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
volatile unsigned *epoch = &thr_cnt->epoch;
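		/*
		 * Editorial note (not part of the original source): the
		 * updater bumps epoch to an odd value before modifying cnts
		 * and back to an even value afterwards (a seqlock-style
		 * protocol).  Reading the same even epoch before and after
		 * the memcpy() below therefore brackets a consistent snapshot
		 * without taking any per-thread lock; otherwise the copy is
		 * retried.
		 */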
while (true) {
unsigned epoch0 = *epoch;
/* Make sure epoch is even. */
if (epoch0 & 1U)
continue;
memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
/* Terminate if epoch didn't change while reading. */
if (*epoch == epoch0)
break;
}
ctx->cnt_summed.curobjs += tcnt.curobjs;
ctx->cnt_summed.curbytes += tcnt.curbytes;
if (opt_prof_accum) {
ctx->cnt_summed.accumobjs += tcnt.accumobjs;
ctx->cnt_summed.accumbytes += tcnt.accumbytes;
}
}
if (ctx->cnt_summed.curobjs != 0)
(*leak_nctx)++;
/* Add to cnt_all. */
cnt_all->curobjs += ctx->cnt_summed.curobjs;
cnt_all->curbytes += ctx->cnt_summed.curbytes;
if (opt_prof_accum) {
cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
}
malloc_mutex_unlock(ctx->lock);
}
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
if (opt_lg_prof_sample == 0) {
if (prof_dump_printf(propagate_err,
"heap profile: %"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @ heapprofile\n",
cnt_all->curobjs, cnt_all->curbytes,
cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
} else {
if (prof_dump_printf(propagate_err,
"heap profile: %"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
cnt_all->curobjs, cnt_all->curbytes,
cnt_all->accumobjs, cnt_all->accumbytes,
((uint64_t)1U << opt_lg_prof_sample)))
return (true);
}
return (false);
}
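/*
 * Editorial note (not part of the original source): when sampling is active
 * (opt_lg_prof_sample != 0) the header advertises the "heap_v2/<period>"
 * format, which carries the average sample period so that downstream tools
 * such as pprof can scale the sampled counts back up; otherwise the plain
 * "heapprofile" header is emitted.
 */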
static void
prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
ctx->nlimbo--;
ql_remove(ctx_ql, ctx, dump_link);
}
static void
prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
malloc_mutex_lock(ctx->lock);
prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
malloc_mutex_unlock(ctx->lock);
}
static bool
prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
prof_ctx_list_t *ctx_ql)
{
bool ret;
unsigned i;
cassert(config_prof);
/*
* Current statistics can sum to 0 as a result of unmerged per thread
* statistics. Additionally, interval- and growth-triggered dumps can
* occur between the time a ctx is created and when its statistics are
* filled in. Avoid dumping any ctx that is an artifact of either
* implementation detail.
*/
malloc_mutex_lock(ctx->lock);
if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
(opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
assert(ctx->cnt_summed.curobjs == 0);
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
ret = false;
goto label_return;
}
if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @",
ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
ret = true;
goto label_return;
}
for (i = 0; i < bt->len; i++) {
if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
}
}
if (prof_dump_write(propagate_err, "\n")) {
ret = true;
goto label_return;
}
ret = false;
label_return:
prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
malloc_mutex_unlock(ctx->lock);
return (ret);
}
static int
prof_getpid(void)
{
#ifdef _WIN32
return (GetCurrentProcessId());
#else
return (getpid());
#endif
}
static bool
prof_dump_maps(bool propagate_err)
{
bool ret;
int mfd;
char filename[JE_PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
#else
malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
(int)prof_getpid());
#endif
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
propagate_err) {
ret = true;
goto label_return;
}
nread = 0;
do {
prof_dump_buf_end += nread;
if (prof_dump_buf_end == sizeof(prof_dump_buf)) {
/* Make space in prof_dump_buf before read(). */
if (prof_dump_flush(propagate_err) &&
propagate_err) {
ret = true;
goto label_return;
}
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
sizeof(prof_dump_buf) - prof_dump_buf_end);
} while (nread > 0);
} else {
ret = true;
goto label_return;
}
ret = false;
label_return:
if (mfd != -1)
close(mfd);
return (ret);
}
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
const char *filename)
{
if (cnt_all->curbytes != 0) {
malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
PRId64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
leak_nctx, (leak_nctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run pprof on \"%s\" for leak detail\n",
filename);
}
}
static bool
prof_dump(bool propagate_err, const char *filename, bool leakcheck)
{
prof_tdata_t *prof_tdata;
prof_cnt_t cnt_all;
size_t tabind;
union {
prof_ctx_t *p;
void *v;
} ctx;
size_t leak_nctx;
prof_ctx_list_t ctx_ql;
cassert(config_prof);
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
malloc_mutex_lock(&prof_dump_mtx);
/* Merge per thread profile stats, and sum them in cnt_all. */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
leak_nctx = 0;
ql_new(&ctx_ql);
prof_enter(prof_tdata);
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
prof_leave(prof_tdata);
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
goto label_open_close_error;
/* Dump profile header. */
if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
/* Dump per ctx profile stats. */
while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
goto label_write_error;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
goto label_write_error;
if (prof_dump_close(propagate_err))
goto label_open_close_error;
malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
prof_leakcheck(&cnt_all, leak_nctx, filename);
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
while ((ctx.p = ql_first(&ctx_ql)) != NULL)
prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
#define DUMP_FILENAME_BUFSIZE (JE_PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq)
{
cassert(config_prof);
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"PRIu64".%c%"PRIu64".heap",
opt_prof_prefix, (int)prof_getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"PRIu64".%c.heap",
opt_prof_prefix, (int)prof_getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
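/*
 * Editorial example (not part of the original source): with the default
 * prefix "jeprof", pid 12345 and prof_dump_seq == 7, an interval dump with
 * vseq == 3 would be written to "jeprof.12345.7.i3.heap", while the final
 * dump (v == 'f', vseq == VSEQ_INVALID) would use "jeprof.12345.7.f.heap".
 */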
static void
prof_fdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (prof_booted == false)
return;
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, opt_prof_leak);
}
}
void
prof_idump(void)
{
prof_tdata_t *prof_tdata;
char filename[JE_PATH_MAX + 1];
cassert(config_prof);
if (prof_booted == false)
return;
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
prof_tdata->enq_idump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, false);
}
}
bool
prof_mdump(const char *filename)
{
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (opt_prof == false || prof_booted == false)
return (true);
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(true, filename, false));
}
void
prof_gdump(void)
{
prof_tdata_t *prof_tdata;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (prof_booted == false)
return;
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
prof_tdata->enq_gdump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, false);
}
}
static void
prof_bt_hash(const void *key, size_t r_hash[2])
{
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}
static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
if (bt1->len != bt2->len)
return (false);
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
prof_tdata_t *
prof_tdata_init(void)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
if (prof_tdata == NULL)
return (NULL);
if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
idalloc(prof_tdata);
return (NULL);
}
ql_new(&prof_tdata->lru_ql);
prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
if (prof_tdata->vec == NULL) {
ckh_delete(&prof_tdata->bt2cnt);
idalloc(prof_tdata);
return (NULL);
}
prof_tdata->prng_state = (uint64_t)(uintptr_t)prof_tdata;
prof_sample_threshold_update(prof_tdata);
prof_tdata->enq = false;
prof_tdata->enq_idump = false;
prof_tdata->enq_gdump = false;
prof_tdata_tsd_set(&prof_tdata);
return (prof_tdata);
}
void
prof_tdata_cleanup(void *arg)
{
prof_thr_cnt_t *cnt;
prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
cassert(config_prof);
if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
/*
* Another destructor deallocated memory after this destructor
* was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
* in order to receive another callback.
*/
prof_tdata = PROF_TDATA_STATE_PURGATORY;
prof_tdata_tsd_set(&prof_tdata);
} else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to PROF_TDATA_STATE_PURGATORY so that other destructors
* wouldn't cause re-creation of the prof_tdata. This time, do
* nothing, so that the destructor will not be called again.
*/
} else if (prof_tdata != NULL) {
/*
* Delete the hash table. All of its contents can still be
* iterated over via the LRU.
*/
ckh_delete(&prof_tdata->bt2cnt);
/*
* Iteratively merge cnt's into the global stats and delete
* them.
*/
while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
prof_ctx_merge(cnt->ctx, cnt);
idalloc(cnt);
}
idalloc(prof_tdata->vec);
idalloc(prof_tdata);
prof_tdata = PROF_TDATA_STATE_PURGATORY;
prof_tdata_tsd_set(&prof_tdata);
}
}
void
prof_boot0(void)
{
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
void
prof_boot1(void)
{
cassert(config_prof);
/*
* opt_prof must be in its final state before any arenas are
* initialized, so this function must be executed early.
*/
if (opt_prof_leak && opt_prof == false) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
*/
opt_prof = true;
opt_prof_gdump = false;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
}
}
}
bool
prof_boot2(void)
{
cassert(config_prof);
if (opt_prof) {
unsigned i;
if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
if (malloc_mutex_init(&bt2ctx_mtx))
return (true);
if (prof_tdata_tsd_boot()) {
malloc_write(
"<jemalloc>: Error in pthread_key_create()\n");
abort();
}
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
ctx_locks = (malloc_mutex_t *)base_malloc_fn(PROF_NCTX_LOCKS *
sizeof(malloc_mutex_t));
if (ctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&ctx_locks[i]))
return (true);
}
}
#ifdef JEMALLOC_PROF_LIBGCC
/*
* Cause the backtracing machinery to allocate its internal state
* before enabling profiling.
*/
_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
prof_booted = true;
return (false);
}
void
prof_prefork(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_prefork(&bt2ctx_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(&ctx_locks[i]);
}
}
void
prof_postfork_parent(void)
{
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(&ctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&bt2ctx_mtx);
}
}
void
prof_postfork_child(void)
{
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(&ctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&bt2ctx_mtx);
}
}
/******************************************************************************/
| 32,664 | 21.419355 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/SFMT.c
|
/*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test/jemalloc_test.h"
#define BLOCK_SIZE 10000
#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
#define COUNT_1 1000
#define COUNT_2 700
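/*
 * Editorial note (not part of the original source): the arrays below are
 * golden test vectors -- the expected pseudo-random output against which this
 * SFMT port is verified.  The test functions that consume them appear later
 * in the file, beyond this excerpt.
 */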
static const uint32_t init_gen_rand_32_expected[] = {
3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
};
static const uint32_t init_by_array_32_expected[] = {
2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
};
static const uint64_t init_gen_rand_64_expected[] = {
KQU(16924766246869039260), KQU( 8201438687333352714),
KQU( 2265290287015001750), KQU(18397264611805473832),
KQU( 3375255223302384358), KQU( 6345559975416828796),
KQU(18229739242790328073), KQU( 7596792742098800905),
KQU( 255338647169685981), KQU( 2052747240048610300),
KQU(18328151576097299343), KQU(12472905421133796567),
KQU(11315245349717600863), KQU(16594110197775871209),
KQU(15708751964632456450), KQU(10452031272054632535),
KQU(11097646720811454386), KQU( 4556090668445745441),
KQU(17116187693090663106), KQU(14931526836144510645),
KQU( 9190752218020552591), KQU( 9625800285771901401),
KQU(13995141077659972832), KQU( 5194209094927829625),
KQU( 4156788379151063303), KQU( 8523452593770139494),
KQU(14082382103049296727), KQU( 2462601863986088483),
KQU( 3030583461592840678), KQU( 5221622077872827681),
KQU( 3084210671228981236), KQU(13956758381389953823),
KQU(13503889856213423831), KQU(15696904024189836170),
KQU( 4612584152877036206), KQU( 6231135538447867881),
KQU(10172457294158869468), KQU( 6452258628466708150),
KQU(14044432824917330221), KQU( 370168364480044279),
KQU(10102144686427193359), KQU( 667870489994776076),
KQU( 2732271956925885858), KQU(18027788905977284151),
KQU(15009842788582923859), KQU( 7136357960180199542),
KQU(15901736243475578127), KQU(16951293785352615701),
KQU(10551492125243691632), KQU(17668869969146434804),
KQU(13646002971174390445), KQU( 9804471050759613248),
KQU( 5511670439655935493), KQU(18103342091070400926),
KQU(17224512747665137533), KQU(15534627482992618168),
KQU( 1423813266186582647), KQU(15821176807932930024),
KQU( 30323369733607156), KQU(11599382494723479403),
KQU( 653856076586810062), KQU( 3176437395144899659),
KQU(14028076268147963917), KQU(16156398271809666195),
KQU( 3166955484848201676), KQU( 5746805620136919390),
KQU(17297845208891256593), KQU(11691653183226428483),
KQU(17900026146506981577), KQU(15387382115755971042),
KQU(16923567681040845943), KQU( 8039057517199388606),
KQU(11748409241468629263), KQU( 794358245539076095),
KQU(13438501964693401242), KQU(14036803236515618962),
KQU( 5252311215205424721), KQU(17806589612915509081),
KQU( 6802767092397596006), KQU(14212120431184557140),
KQU( 1072951366761385712), KQU(13098491780722836296),
KQU( 9466676828710797353), KQU(12673056849042830081),
KQU(12763726623645357580), KQU(16468961652999309493),
KQU(15305979875636438926), KQU(17444713151223449734),
KQU( 5692214267627883674), KQU(13049589139196151505),
KQU( 880115207831670745), KQU( 1776529075789695498),
KQU(16695225897801466485), KQU(10666901778795346845),
KQU( 6164389346722833869), KQU( 2863817793264300475),
KQU( 9464049921886304754), KQU( 3993566636740015468),
KQU( 9983749692528514136), KQU(16375286075057755211),
KQU(16042643417005440820), KQU(11445419662923489877),
KQU( 7999038846885158836), KQU( 6721913661721511535),
KQU( 5363052654139357320), KQU( 1817788761173584205),
KQU(13290974386445856444), KQU( 4650350818937984680),
KQU( 8219183528102484836), KQU( 1569862923500819899),
KQU( 4189359732136641860), KQU(14202822961683148583),
KQU( 4457498315309429058), KQU(13089067387019074834),
KQU(11075517153328927293), KQU(10277016248336668389),
KQU( 7070509725324401122), KQU(17808892017780289380),
KQU(13143367339909287349), KQU( 1377743745360085151),
KQU( 5749341807421286485), KQU(14832814616770931325),
KQU( 7688820635324359492), KQU(10960474011539770045),
KQU( 81970066653179790), KQU(12619476072607878022),
KQU( 4419566616271201744), KQU(15147917311750568503),
KQU( 5549739182852706345), KQU( 7308198397975204770),
KQU(13580425496671289278), KQU(17070764785210130301),
KQU( 8202832846285604405), KQU( 6873046287640887249),
KQU( 6927424434308206114), KQU( 6139014645937224874),
KQU(10290373645978487639), KQU(15904261291701523804),
KQU( 9628743442057826883), KQU(18383429096255546714),
KQU( 4977413265753686967), KQU( 7714317492425012869),
KQU( 9025232586309926193), KQU(14627338359776709107),
KQU(14759849896467790763), KQU(10931129435864423252),
KQU( 4588456988775014359), KQU(10699388531797056724),
KQU( 468652268869238792), KQU( 5755943035328078086),
KQU( 2102437379988580216), KQU( 9986312786506674028),
KQU( 2654207180040945604), KQU( 8726634790559960062),
KQU( 100497234871808137), KQU( 2800137176951425819),
KQU( 6076627612918553487), KQU( 5780186919186152796),
KQU( 8179183595769929098), KQU( 6009426283716221169),
KQU( 2796662551397449358), KQU( 1756961367041986764),
KQU( 6972897917355606205), KQU(14524774345368968243),
KQU( 2773529684745706940), KQU( 4853632376213075959),
KQU( 4198177923731358102), KQU( 8271224913084139776),
KQU( 2741753121611092226), KQU(16782366145996731181),
KQU(15426125238972640790), KQU(13595497100671260342),
KQU( 3173531022836259898), KQU( 6573264560319511662),
KQU(18041111951511157441), KQU( 2351433581833135952),
KQU( 3113255578908173487), KQU( 1739371330877858784),
KQU(16046126562789165480), KQU( 8072101652214192925),
KQU(15267091584090664910), KQU( 9309579200403648940),
KQU( 5218892439752408722), KQU(14492477246004337115),
KQU(17431037586679770619), KQU( 7385248135963250480),
KQU( 9580144956565560660), KQU( 4919546228040008720),
KQU(15261542469145035584), KQU(18233297270822253102),
KQU( 5453248417992302857), KQU( 9309519155931460285),
KQU(10342813012345291756), KQU(15676085186784762381),
KQU(15912092950691300645), KQU( 9371053121499003195),
KQU( 9897186478226866746), KQU(14061858287188196327),
KQU( 122575971620788119), KQU(12146750969116317754),
KQU( 4438317272813245201), KQU( 8332576791009527119),
KQU(13907785691786542057), KQU(10374194887283287467),
KQU( 2098798755649059566), KQU( 3416235197748288894),
KQU( 8688269957320773484), KQU( 7503964602397371571),
KQU(16724977015147478236), KQU( 9461512855439858184),
KQU(13259049744534534727), KQU( 3583094952542899294),
KQU( 8764245731305528292), KQU(13240823595462088985),
KQU(13716141617617910448), KQU(18114969519935960955),
KQU( 2297553615798302206), KQU( 4585521442944663362),
KQU(17776858680630198686), KQU( 4685873229192163363),
KQU( 152558080671135627), KQU(15424900540842670088),
KQU(13229630297130024108), KQU(17530268788245718717),
KQU(16675633913065714144), KQU( 3158912717897568068),
KQU(15399132185380087288), KQU( 7401418744515677872),
KQU(13135412922344398535), KQU( 6385314346100509511),
KQU(13962867001134161139), KQU(10272780155442671999),
KQU(12894856086597769142), KQU(13340877795287554994),
KQU(12913630602094607396), KQU(12543167911119793857),
KQU(17343570372251873096), KQU(10959487764494150545),
KQU( 6966737953093821128), KQU(13780699135496988601),
KQU( 4405070719380142046), KQU(14923788365607284982),
KQU( 2869487678905148380), KQU( 6416272754197188403),
KQU(15017380475943612591), KQU( 1995636220918429487),
KQU( 3402016804620122716), KQU(15800188663407057080),
KQU(11362369990390932882), KQU(15262183501637986147),
KQU(10239175385387371494), KQU( 9352042420365748334),
KQU( 1682457034285119875), KQU( 1724710651376289644),
KQU( 2038157098893817966), KQU( 9897825558324608773),
KQU( 1477666236519164736), KQU(16835397314511233640),
KQU(10370866327005346508), KQU(10157504370660621982),
KQU(12113904045335882069), KQU(13326444439742783008),
KQU(11302769043000765804), KQU(13594979923955228484),
KQU(11779351762613475968), KQU( 3786101619539298383),
KQU( 8021122969180846063), KQU(15745904401162500495),
KQU(10762168465993897267), KQU(13552058957896319026),
KQU(11200228655252462013), KQU( 5035370357337441226),
KQU( 7593918984545500013), KQU( 5418554918361528700),
KQU( 4858270799405446371), KQU( 9974659566876282544),
KQU(18227595922273957859), KQU( 2772778443635656220),
KQU(14285143053182085385), KQU( 9939700992429600469),
KQU(12756185904545598068), KQU( 2020783375367345262),
KQU( 57026775058331227), KQU( 950827867930065454),
KQU( 6602279670145371217), KQU( 2291171535443566929),
KQU( 5832380724425010313), KQU( 1220343904715982285),
KQU(17045542598598037633), KQU(15460481779702820971),
KQU(13948388779949365130), KQU(13975040175430829518),
KQU(17477538238425541763), KQU(11104663041851745725),
KQU(15860992957141157587), KQU(14529434633012950138),
KQU( 2504838019075394203), KQU( 7512113882611121886),
KQU( 4859973559980886617), KQU( 1258601555703250219),
KQU(15594548157514316394), KQU( 4516730171963773048),
KQU(11380103193905031983), KQU( 6809282239982353344),
KQU(18045256930420065002), KQU( 2453702683108791859),
KQU( 977214582986981460), KQU( 2006410402232713466),
KQU( 6192236267216378358), KQU( 3429468402195675253),
KQU(18146933153017348921), KQU(17369978576367231139),
KQU( 1246940717230386603), KQU(11335758870083327110),
KQU(14166488801730353682), KQU( 9008573127269635732),
KQU(10776025389820643815), KQU(15087605441903942962),
KQU( 1359542462712147922), KQU(13898874411226454206),
KQU(17911176066536804411), KQU( 9435590428600085274),
KQU( 294488509967864007), KQU( 8890111397567922046),
KQU( 7987823476034328778), KQU(13263827582440967651),
KQU( 7503774813106751573), KQU(14974747296185646837),
KQU( 8504765037032103375), KQU(17340303357444536213),
KQU( 7704610912964485743), KQU( 8107533670327205061),
KQU( 9062969835083315985), KQU(16968963142126734184),
KQU(12958041214190810180), KQU( 2720170147759570200),
KQU( 2986358963942189566), KQU(14884226322219356580),
KQU( 286224325144368520), KQU(11313800433154279797),
KQU(18366849528439673248), KQU(17899725929482368789),
KQU( 3730004284609106799), KQU( 1654474302052767205),
KQU( 5006698007047077032), KQU( 8196893913601182838),
KQU(15214541774425211640), KQU(17391346045606626073),
KQU( 8369003584076969089), KQU( 3939046733368550293),
KQU(10178639720308707785), KQU( 2180248669304388697),
KQU( 62894391300126322), KQU( 9205708961736223191),
KQU( 6837431058165360438), KQU( 3150743890848308214),
KQU(17849330658111464583), KQU(12214815643135450865),
KQU(13410713840519603402), KQU( 3200778126692046802),
KQU(13354780043041779313), KQU( 800850022756886036),
KQU(15660052933953067433), KQU( 6572823544154375676),
KQU(11030281857015819266), KQU(12682241941471433835),
KQU(11654136407300274693), KQU( 4517795492388641109),
KQU( 9757017371504524244), KQU(17833043400781889277),
KQU(12685085201747792227), KQU(10408057728835019573),
KQU( 98370418513455221), KQU( 6732663555696848598),
KQU(13248530959948529780), KQU( 3530441401230622826),
KQU(18188251992895660615), KQU( 1847918354186383756),
KQU( 1127392190402660921), KQU(11293734643143819463),
KQU( 3015506344578682982), KQU(13852645444071153329),
KQU( 2121359659091349142), KQU( 1294604376116677694),
KQU( 5616576231286352318), KQU( 7112502442954235625),
KQU(11676228199551561689), KQU(12925182803007305359),
KQU( 7852375518160493082), KQU( 1136513130539296154),
KQU( 5636923900916593195), KQU( 3221077517612607747),
KQU(17784790465798152513), KQU( 3554210049056995938),
KQU(17476839685878225874), KQU( 3206836372585575732),
KQU( 2765333945644823430), KQU(10080070903718799528),
KQU( 5412370818878286353), KQU( 9689685887726257728),
KQU( 8236117509123533998), KQU( 1951139137165040214),
KQU( 4492205209227980349), KQU(16541291230861602967),
KQU( 1424371548301437940), KQU( 9117562079669206794),
KQU(14374681563251691625), KQU(13873164030199921303),
KQU( 6680317946770936731), KQU(15586334026918276214),
KQU(10896213950976109802), KQU( 9506261949596413689),
KQU( 9903949574308040616), KQU( 6038397344557204470),
KQU( 174601465422373648), KQU(15946141191338238030),
KQU(17142225620992044937), KQU( 7552030283784477064),
KQU( 2947372384532947997), KQU( 510797021688197711),
KQU( 4962499439249363461), KQU( 23770320158385357),
KQU( 959774499105138124), KQU( 1468396011518788276),
KQU( 2015698006852312308), KQU( 4149400718489980136),
KQU( 5992916099522371188), KQU(10819182935265531076),
KQU(16189787999192351131), KQU( 342833961790261950),
KQU(12470830319550495336), KQU(18128495041912812501),
KQU( 1193600899723524337), KQU( 9056793666590079770),
KQU( 2154021227041669041), KQU( 4963570213951235735),
KQU( 4865075960209211409), KQU( 2097724599039942963),
KQU( 2024080278583179845), KQU(11527054549196576736),
KQU(10650256084182390252), KQU( 4808408648695766755),
KQU( 1642839215013788844), KQU(10607187948250398390),
KQU( 7076868166085913508), KQU( 730522571106887032),
KQU(12500579240208524895), KQU( 4484390097311355324),
KQU(15145801330700623870), KQU( 8055827661392944028),
KQU( 5865092976832712268), KQU(15159212508053625143),
KQU( 3560964582876483341), KQU( 4070052741344438280),
KQU( 6032585709886855634), KQU(15643262320904604873),
KQU( 2565119772293371111), KQU( 318314293065348260),
KQU(15047458749141511872), KQU( 7772788389811528730),
KQU( 7081187494343801976), KQU( 6465136009467253947),
KQU(10425940692543362069), KQU( 554608190318339115),
KQU(14796699860302125214), KQU( 1638153134431111443),
KQU(10336967447052276248), KQU( 8412308070396592958),
KQU( 4004557277152051226), KQU( 8143598997278774834),
KQU(16413323996508783221), KQU(13139418758033994949),
KQU( 9772709138335006667), KQU( 2818167159287157659),
KQU(17091740573832523669), KQU(14629199013130751608),
KQU(18268322711500338185), KQU( 8290963415675493063),
KQU( 8830864907452542588), KQU( 1614839084637494849),
KQU(14855358500870422231), KQU( 3472996748392519937),
KQU(15317151166268877716), KQU( 5825895018698400362),
KQU(16730208429367544129), KQU(10481156578141202800),
KQU( 4746166512382823750), KQU(12720876014472464998),
KQU( 8825177124486735972), KQU(13733447296837467838),
KQU( 6412293741681359625), KQU( 8313213138756135033),
KQU(11421481194803712517), KQU( 7997007691544174032),
KQU( 6812963847917605930), KQU( 9683091901227558641),
KQU(14703594165860324713), KQU( 1775476144519618309),
KQU( 2724283288516469519), KQU( 717642555185856868),
KQU( 8736402192215092346), KQU(11878800336431381021),
KQU( 4348816066017061293), KQU( 6115112756583631307),
KQU( 9176597239667142976), KQU(12615622714894259204),
KQU(10283406711301385987), KQU( 5111762509485379420),
KQU( 3118290051198688449), KQU( 7345123071632232145),
KQU( 9176423451688682359), KQU( 4843865456157868971),
KQU(12008036363752566088), KQU(12058837181919397720),
KQU( 2145073958457347366), KQU( 1526504881672818067),
KQU( 3488830105567134848), KQU(13208362960674805143),
KQU( 4077549672899572192), KQU( 7770995684693818365),
KQU( 1398532341546313593), KQU(12711859908703927840),
KQU( 1417561172594446813), KQU(17045191024194170604),
KQU( 4101933177604931713), KQU(14708428834203480320),
KQU(17447509264469407724), KQU(14314821973983434255),
KQU(17990472271061617265), KQU( 5087756685841673942),
KQU(12797820586893859939), KQU( 1778128952671092879),
KQU( 3535918530508665898), KQU( 9035729701042481301),
KQU(14808661568277079962), KQU(14587345077537747914),
KQU(11920080002323122708), KQU( 6426515805197278753),
KQU( 3295612216725984831), KQU(11040722532100876120),
KQU(12305952936387598754), KQU(16097391899742004253),
KQU( 4908537335606182208), KQU(12446674552196795504),
KQU(16010497855816895177), KQU( 9194378874788615551),
KQU( 3382957529567613384), KQU( 5154647600754974077),
KQU( 9801822865328396141), KQU( 9023662173919288143),
KQU(17623115353825147868), KQU( 8238115767443015816),
KQU(15811444159859002560), KQU( 9085612528904059661),
KQU( 6888601089398614254), KQU( 258252992894160189),
KQU( 6704363880792428622), KQU( 6114966032147235763),
KQU(11075393882690261875), KQU( 8797664238933620407),
KQU( 5901892006476726920), KQU( 5309780159285518958),
KQU(14940808387240817367), KQU(14642032021449656698),
KQU( 9808256672068504139), KQU( 3670135111380607658),
KQU(11211211097845960152), KQU( 1474304506716695808),
KQU(15843166204506876239), KQU( 7661051252471780561),
KQU(10170905502249418476), KQU( 7801416045582028589),
KQU( 2763981484737053050), KQU( 9491377905499253054),
KQU(16201395896336915095), KQU( 9256513756442782198),
KQU( 5411283157972456034), KQU( 5059433122288321676),
KQU( 4327408006721123357), KQU( 9278544078834433377),
KQU( 7601527110882281612), KQU(11848295896975505251),
KQU(12096998801094735560), KQU(14773480339823506413),
KQU(15586227433895802149), KQU(12786541257830242872),
KQU( 6904692985140503067), KQU( 5309011515263103959),
KQU(12105257191179371066), KQU(14654380212442225037),
KQU( 2556774974190695009), KQU( 4461297399927600261),
KQU(14888225660915118646), KQU(14915459341148291824),
KQU( 2738802166252327631), KQU( 6047155789239131512),
KQU(12920545353217010338), KQU(10697617257007840205),
KQU( 2751585253158203504), KQU(13252729159780047496),
KQU(14700326134672815469), KQU(14082527904374600529),
KQU(16852962273496542070), KQU(17446675504235853907),
KQU(15019600398527572311), KQU(12312781346344081551),
KQU(14524667935039810450), KQU( 5634005663377195738),
KQU(11375574739525000569), KQU( 2423665396433260040),
KQU( 5222836914796015410), KQU( 4397666386492647387),
KQU( 4619294441691707638), KQU( 665088602354770716),
KQU(13246495665281593610), KQU( 6564144270549729409),
KQU(10223216188145661688), KQU( 3961556907299230585),
KQU(11543262515492439914), KQU(16118031437285993790),
KQU( 7143417964520166465), KQU(13295053515909486772),
KQU( 40434666004899675), KQU(17127804194038347164),
KQU( 8599165966560586269), KQU( 8214016749011284903),
KQU(13725130352140465239), KQU( 5467254474431726291),
KQU( 7748584297438219877), KQU(16933551114829772472),
KQU( 2169618439506799400), KQU( 2169787627665113463),
KQU(17314493571267943764), KQU(18053575102911354912),
KQU(11928303275378476973), KQU(11593850925061715550),
KQU(17782269923473589362), KQU( 3280235307704747039),
KQU( 6145343578598685149), KQU(17080117031114086090),
KQU(18066839902983594755), KQU( 6517508430331020706),
KQU( 8092908893950411541), KQU(12558378233386153732),
KQU( 4476532167973132976), KQU(16081642430367025016),
KQU( 4233154094369139361), KQU( 8693630486693161027),
KQU(11244959343027742285), KQU(12273503967768513508),
KQU(14108978636385284876), KQU( 7242414665378826984),
KQU( 6561316938846562432), KQU( 8601038474994665795),
KQU(17532942353612365904), KQU(17940076637020912186),
KQU( 7340260368823171304), KQU( 7061807613916067905),
KQU(10561734935039519326), KQU(17990796503724650862),
KQU( 6208732943911827159), KQU( 359077562804090617),
KQU(14177751537784403113), KQU(10659599444915362902),
KQU(15081727220615085833), KQU(13417573895659757486),
KQU(15513842342017811524), KQU(11814141516204288231),
KQU( 1827312513875101814), KQU( 2804611699894603103),
KQU(17116500469975602763), KQU(12270191815211952087),
KQU(12256358467786024988), KQU(18435021722453971267),
KQU( 671330264390865618), KQU( 476504300460286050),
KQU(16465470901027093441), KQU( 4047724406247136402),
KQU( 1322305451411883346), KQU( 1388308688834322280),
KQU( 7303989085269758176), KQU( 9323792664765233642),
KQU( 4542762575316368936), KQU(17342696132794337618),
KQU( 4588025054768498379), KQU(13415475057390330804),
KQU(17880279491733405570), KQU(10610553400618620353),
KQU( 3180842072658960139), KQU(13002966655454270120),
KQU( 1665301181064982826), KQU( 7083673946791258979),
KQU( 190522247122496820), KQU(17388280237250677740),
KQU( 8430770379923642945), KQU(12987180971921668584),
KQU( 2311086108365390642), KQU( 2870984383579822345),
KQU(14014682609164653318), KQU(14467187293062251484),
KQU( 192186361147413298), KQU(15171951713531796524),
KQU( 9900305495015948728), KQU(17958004775615466344),
KQU(14346380954498606514), KQU(18040047357617407096),
KQU( 5035237584833424532), KQU(15089555460613972287),
KQU( 4131411873749729831), KQU( 1329013581168250330),
KQU(10095353333051193949), KQU(10749518561022462716),
KQU( 9050611429810755847), KQU(15022028840236655649),
KQU( 8775554279239748298), KQU(13105754025489230502),
KQU(15471300118574167585), KQU( 89864764002355628),
KQU( 8776416323420466637), KQU( 5280258630612040891),
KQU( 2719174488591862912), KQU( 7599309137399661994),
KQU(15012887256778039979), KQU(14062981725630928925),
KQU(12038536286991689603), KQU( 7089756544681775245),
KQU(10376661532744718039), KQU( 1265198725901533130),
KQU(13807996727081142408), KQU( 2935019626765036403),
KQU( 7651672460680700141), KQU( 3644093016200370795),
KQU( 2840982578090080674), KQU(17956262740157449201),
KQU(18267979450492880548), KQU(11799503659796848070),
KQU( 9942537025669672388), KQU(11886606816406990297),
KQU( 5488594946437447576), KQU( 7226714353282744302),
KQU( 3784851653123877043), KQU( 878018453244803041),
KQU(12110022586268616085), KQU( 734072179404675123),
KQU(11869573627998248542), KQU( 469150421297783998),
KQU( 260151124912803804), KQU(11639179410120968649),
KQU( 9318165193840846253), KQU(12795671722734758075),
KQU(15318410297267253933), KQU( 691524703570062620),
KQU( 5837129010576994601), KQU(15045963859726941052),
KQU( 5850056944932238169), KQU(12017434144750943807),
KQU( 7447139064928956574), KQU( 3101711812658245019),
KQU(16052940704474982954), KQU(18195745945986994042),
KQU( 8932252132785575659), KQU(13390817488106794834),
KQU(11582771836502517453), KQU( 4964411326683611686),
KQU( 2195093981702694011), KQU(14145229538389675669),
KQU(16459605532062271798), KQU( 866316924816482864),
KQU( 4593041209937286377), KQU( 8415491391910972138),
KQU( 4171236715600528969), KQU(16637569303336782889),
KQU( 2002011073439212680), KQU(17695124661097601411),
KQU( 4627687053598611702), KQU( 7895831936020190403),
KQU( 8455951300917267802), KQU( 2923861649108534854),
KQU( 8344557563927786255), KQU( 6408671940373352556),
KQU(12210227354536675772), KQU(14294804157294222295),
KQU(10103022425071085127), KQU(10092959489504123771),
KQU( 6554774405376736268), KQU(12629917718410641774),
KQU( 6260933257596067126), KQU( 2460827021439369673),
KQU( 2541962996717103668), KQU( 597377203127351475),
KQU( 5316984203117315309), KQU( 4811211393563241961),
KQU(13119698597255811641), KQU( 8048691512862388981),
KQU(10216818971194073842), KQU( 4612229970165291764),
KQU(10000980798419974770), KQU( 6877640812402540687),
KQU( 1488727563290436992), KQU( 2227774069895697318),
KQU(11237754507523316593), KQU(13478948605382290972),
KQU( 1963583846976858124), KQU( 5512309205269276457),
KQU( 3972770164717652347), KQU( 3841751276198975037),
KQU(10283343042181903117), KQU( 8564001259792872199),
KQU(16472187244722489221), KQU( 8953493499268945921),
KQU( 3518747340357279580), KQU( 4003157546223963073),
KQU( 3270305958289814590), KQU( 3966704458129482496),
KQU( 8122141865926661939), KQU(14627734748099506653),
KQU(13064426990862560568), KQU( 2414079187889870829),
KQU( 5378461209354225306), KQU(10841985740128255566),
KQU( 538582442885401738), KQU( 7535089183482905946),
KQU(16117559957598879095), KQU( 8477890721414539741),
KQU( 1459127491209533386), KQU(17035126360733620462),
KQU( 8517668552872379126), KQU(10292151468337355014),
KQU(17081267732745344157), KQU(13751455337946087178),
KQU(14026945459523832966), KQU( 6653278775061723516),
KQU(10619085543856390441), KQU( 2196343631481122885),
KQU(10045966074702826136), KQU(10082317330452718282),
KQU( 5920859259504831242), KQU( 9951879073426540617),
KQU( 7074696649151414158), KQU(15808193543879464318),
KQU( 7385247772746953374), KQU( 3192003544283864292),
KQU(18153684490917593847), KQU(12423498260668568905),
KQU(10957758099756378169), KQU(11488762179911016040),
KQU( 2099931186465333782), KQU(11180979581250294432),
KQU( 8098916250668367933), KQU( 3529200436790763465),
KQU(12988418908674681745), KQU( 6147567275954808580),
KQU( 3207503344604030989), KQU(10761592604898615360),
KQU( 229854861031893504), KQU( 8809853962667144291),
KQU(13957364469005693860), KQU( 7634287665224495886),
KQU(12353487366976556874), KQU( 1134423796317152034),
KQU( 2088992471334107068), KQU( 7393372127190799698),
KQU( 1845367839871058391), KQU( 207922563987322884),
KQU(11960870813159944976), KQU(12182120053317317363),
KQU(17307358132571709283), KQU(13871081155552824936),
KQU(18304446751741566262), KQU( 7178705220184302849),
KQU(10929605677758824425), KQU(16446976977835806844),
KQU(13723874412159769044), KQU( 6942854352100915216),
KQU( 1726308474365729390), KQU( 2150078766445323155),
KQU(15345558947919656626), KQU(12145453828874527201),
KQU( 2054448620739726849), KQU( 2740102003352628137),
KQU(11294462163577610655), KQU( 756164283387413743),
KQU(17841144758438810880), KQU(10802406021185415861),
KQU( 8716455530476737846), KQU( 6321788834517649606),
KQU(14681322910577468426), KQU(17330043563884336387),
KQU(12701802180050071614), KQU(14695105111079727151),
KQU( 5112098511654172830), KQU( 4957505496794139973),
KQU( 8270979451952045982), KQU(12307685939199120969),
KQU(12425799408953443032), KQU( 8376410143634796588),
KQU(16621778679680060464), KQU( 3580497854566660073),
KQU( 1122515747803382416), KQU( 857664980960597599),
KQU( 6343640119895925918), KQU(12878473260854462891),
KQU(10036813920765722626), KQU(14451335468363173812),
KQU( 5476809692401102807), KQU(16442255173514366342),
KQU(13060203194757167104), KQU(14354124071243177715),
KQU(15961249405696125227), KQU(13703893649690872584),
KQU( 363907326340340064), KQU( 6247455540491754842),
KQU(12242249332757832361), KQU( 156065475679796717),
KQU( 9351116235749732355), KQU( 4590350628677701405),
KQU( 1671195940982350389), KQU(13501398458898451905),
KQU( 6526341991225002255), KQU( 1689782913778157592),
KQU( 7439222350869010334), KQU(13975150263226478308),
KQU(11411961169932682710), KQU(17204271834833847277),
KQU( 541534742544435367), KQU( 6591191931218949684),
KQU( 2645454775478232486), KQU( 4322857481256485321),
KQU( 8477416487553065110), KQU(12902505428548435048),
KQU( 971445777981341415), KQU(14995104682744976712),
KQU( 4243341648807158063), KQU( 8695061252721927661),
KQU( 5028202003270177222), KQU( 2289257340915567840),
KQU(13870416345121866007), KQU(13994481698072092233),
KQU( 6912785400753196481), KQU( 2278309315841980139),
KQU( 4329765449648304839), KQU( 5963108095785485298),
KQU( 4880024847478722478), KQU(16015608779890240947),
KQU( 1866679034261393544), KQU( 914821179919731519),
KQU( 9643404035648760131), KQU( 2418114953615593915),
KQU( 944756836073702374), KQU(15186388048737296834),
KQU( 7723355336128442206), KQU( 7500747479679599691),
KQU(18013961306453293634), KQU( 2315274808095756456),
KQU(13655308255424029566), KQU(17203800273561677098),
KQU( 1382158694422087756), KQU( 5090390250309588976),
KQU( 517170818384213989), KQU( 1612709252627729621),
KQU( 1330118955572449606), KQU( 300922478056709885),
KQU(18115693291289091987), KQU(13491407109725238321),
KQU(15293714633593827320), KQU( 5151539373053314504),
KQU( 5951523243743139207), KQU(14459112015249527975),
KQU( 5456113959000700739), KQU( 3877918438464873016),
KQU(12534071654260163555), KQU(15871678376893555041),
KQU(11005484805712025549), KQU(16353066973143374252),
KQU( 4358331472063256685), KQU( 8268349332210859288),
KQU(12485161590939658075), KQU(13955993592854471343),
KQU( 5911446886848367039), KQU(14925834086813706974),
KQU( 6590362597857994805), KQU( 1280544923533661875),
KQU( 1637756018947988164), KQU( 4734090064512686329),
KQU(16693705263131485912), KQU( 6834882340494360958),
KQU( 8120732176159658505), KQU( 2244371958905329346),
KQU(10447499707729734021), KQU( 7318742361446942194),
KQU( 8032857516355555296), KQU(14023605983059313116),
KQU( 1032336061815461376), KQU( 9840995337876562612),
KQU( 9869256223029203587), KQU(12227975697177267636),
KQU(12728115115844186033), KQU( 7752058479783205470),
KQU( 729733219713393087), KQU(12954017801239007622)
};
static const uint64_t init_by_array_64_expected[] = {
KQU( 2100341266307895239), KQU( 8344256300489757943),
KQU(15687933285484243894), KQU( 8268620370277076319),
KQU(12371852309826545459), KQU( 8800491541730110238),
KQU(18113268950100835773), KQU( 2886823658884438119),
KQU( 3293667307248180724), KQU( 9307928143300172731),
KQU( 7688082017574293629), KQU( 900986224735166665),
KQU( 9977972710722265039), KQU( 6008205004994830552),
KQU( 546909104521689292), KQU( 7428471521869107594),
KQU(14777563419314721179), KQU(16116143076567350053),
KQU( 5322685342003142329), KQU( 4200427048445863473),
KQU( 4693092150132559146), KQU(13671425863759338582),
KQU( 6747117460737639916), KQU( 4732666080236551150),
KQU( 5912839950611941263), KQU( 3903717554504704909),
KQU( 2615667650256786818), KQU(10844129913887006352),
KQU(13786467861810997820), KQU(14267853002994021570),
KQU(13767807302847237439), KQU(16407963253707224617),
KQU( 4802498363698583497), KQU( 2523802839317209764),
KQU( 3822579397797475589), KQU( 8950320572212130610),
KQU( 3745623504978342534), KQU(16092609066068482806),
KQU( 9817016950274642398), KQU(10591660660323829098),
KQU(11751606650792815920), KQU( 5122873818577122211),
KQU(17209553764913936624), KQU( 6249057709284380343),
KQU(15088791264695071830), KQU(15344673071709851930),
KQU( 4345751415293646084), KQU( 2542865750703067928),
KQU(13520525127852368784), KQU(18294188662880997241),
KQU( 3871781938044881523), KQU( 2873487268122812184),
KQU(15099676759482679005), KQU(15442599127239350490),
KQU( 6311893274367710888), KQU( 3286118760484672933),
KQU( 4146067961333542189), KQU(13303942567897208770),
KQU( 8196013722255630418), KQU( 4437815439340979989),
KQU(15433791533450605135), KQU( 4254828956815687049),
KQU( 1310903207708286015), KQU(10529182764462398549),
KQU(14900231311660638810), KQU( 9727017277104609793),
KQU( 1821308310948199033), KQU(11628861435066772084),
KQU( 9469019138491546924), KQU( 3145812670532604988),
KQU( 9938468915045491919), KQU( 1562447430672662142),
KQU(13963995266697989134), KQU( 3356884357625028695),
KQU( 4499850304584309747), KQU( 8456825817023658122),
KQU(10859039922814285279), KQU( 8099512337972526555),
KQU( 348006375109672149), KQU(11919893998241688603),
KQU( 1104199577402948826), KQU(16689191854356060289),
KQU(10992552041730168078), KQU( 7243733172705465836),
KQU( 5668075606180319560), KQU(18182847037333286970),
KQU( 4290215357664631322), KQU( 4061414220791828613),
KQU(13006291061652989604), KQU( 7140491178917128798),
KQU(12703446217663283481), KQU( 5500220597564558267),
KQU(10330551509971296358), KQU(15958554768648714492),
KQU( 5174555954515360045), KQU( 1731318837687577735),
KQU( 3557700801048354857), KQU(13764012341928616198),
KQU(13115166194379119043), KQU( 7989321021560255519),
KQU( 2103584280905877040), KQU( 9230788662155228488),
KQU(16396629323325547654), KQU( 657926409811318051),
KQU(15046700264391400727), KQU( 5120132858771880830),
KQU( 7934160097989028561), KQU( 6963121488531976245),
KQU(17412329602621742089), KQU(15144843053931774092),
KQU(17204176651763054532), KQU(13166595387554065870),
KQU( 8590377810513960213), KQU( 5834365135373991938),
KQU( 7640913007182226243), KQU( 3479394703859418425),
KQU(16402784452644521040), KQU( 4993979809687083980),
KQU(13254522168097688865), KQU(15643659095244365219),
KQU( 5881437660538424982), KQU(11174892200618987379),
KQU( 254409966159711077), KQU(17158413043140549909),
KQU( 3638048789290376272), KQU( 1376816930299489190),
KQU( 4622462095217761923), KQU(15086407973010263515),
KQU(13253971772784692238), KQU( 5270549043541649236),
KQU(11182714186805411604), KQU(12283846437495577140),
KQU( 5297647149908953219), KQU(10047451738316836654),
KQU( 4938228100367874746), KQU(12328523025304077923),
KQU( 3601049438595312361), KQU( 9313624118352733770),
KQU(13322966086117661798), KQU(16660005705644029394),
KQU(11337677526988872373), KQU(13869299102574417795),
KQU(15642043183045645437), KQU( 3021755569085880019),
KQU( 4979741767761188161), KQU(13679979092079279587),
KQU( 3344685842861071743), KQU(13947960059899588104),
KQU( 305806934293368007), KQU( 5749173929201650029),
KQU(11123724852118844098), KQU(15128987688788879802),
KQU(15251651211024665009), KQU( 7689925933816577776),
KQU(16732804392695859449), KQU(17087345401014078468),
KQU(14315108589159048871), KQU( 4820700266619778917),
KQU(16709637539357958441), KQU( 4936227875177351374),
KQU( 2137907697912987247), KQU(11628565601408395420),
KQU( 2333250549241556786), KQU( 5711200379577778637),
KQU( 5170680131529031729), KQU(12620392043061335164),
KQU( 95363390101096078), KQU( 5487981914081709462),
KQU( 1763109823981838620), KQU( 3395861271473224396),
KQU( 1300496844282213595), KQU( 6894316212820232902),
KQU(10673859651135576674), KQU( 5911839658857903252),
KQU(17407110743387299102), KQU( 8257427154623140385),
KQU(11389003026741800267), KQU( 4070043211095013717),
KQU(11663806997145259025), KQU(15265598950648798210),
KQU( 630585789434030934), KQU( 3524446529213587334),
KQU( 7186424168495184211), KQU(10806585451386379021),
KQU(11120017753500499273), KQU( 1586837651387701301),
KQU(17530454400954415544), KQU( 9991670045077880430),
KQU( 7550997268990730180), KQU( 8640249196597379304),
KQU( 3522203892786893823), KQU(10401116549878854788),
KQU(13690285544733124852), KQU( 8295785675455774586),
KQU(15535716172155117603), KQU( 3112108583723722511),
KQU(17633179955339271113), KQU(18154208056063759375),
KQU( 1866409236285815666), KQU(13326075895396412882),
KQU( 8756261842948020025), KQU( 6281852999868439131),
KQU(15087653361275292858), KQU(10333923911152949397),
KQU( 5265567645757408500), KQU(12728041843210352184),
KQU( 6347959327507828759), KQU( 154112802625564758),
KQU(18235228308679780218), KQU( 3253805274673352418),
KQU( 4849171610689031197), KQU(17948529398340432518),
KQU(13803510475637409167), KQU(13506570190409883095),
KQU(15870801273282960805), KQU( 8451286481299170773),
KQU( 9562190620034457541), KQU( 8518905387449138364),
KQU(12681306401363385655), KQU( 3788073690559762558),
KQU( 5256820289573487769), KQU( 2752021372314875467),
KQU( 6354035166862520716), KQU( 4328956378309739069),
KQU( 449087441228269600), KQU( 5533508742653090868),
KQU( 1260389420404746988), KQU(18175394473289055097),
KQU( 1535467109660399420), KQU( 8818894282874061442),
KQU(12140873243824811213), KQU(15031386653823014946),
KQU( 1286028221456149232), KQU( 6329608889367858784),
KQU( 9419654354945132725), KQU( 6094576547061672379),
KQU(17706217251847450255), KQU( 1733495073065878126),
KQU(16918923754607552663), KQU( 8881949849954945044),
KQU(12938977706896313891), KQU(14043628638299793407),
KQU(18393874581723718233), KQU( 6886318534846892044),
KQU(14577870878038334081), KQU(13541558383439414119),
KQU(13570472158807588273), KQU(18300760537910283361),
KQU( 818368572800609205), KQU( 1417000585112573219),
KQU(12337533143867683655), KQU(12433180994702314480),
KQU( 778190005829189083), KQU(13667356216206524711),
KQU( 9866149895295225230), KQU(11043240490417111999),
KQU( 1123933826541378598), KQU( 6469631933605123610),
KQU(14508554074431980040), KQU(13918931242962026714),
KQU( 2870785929342348285), KQU(14786362626740736974),
KQU(13176680060902695786), KQU( 9591778613541679456),
KQU( 9097662885117436706), KQU( 749262234240924947),
KQU( 1944844067793307093), KQU( 4339214904577487742),
KQU( 8009584152961946551), KQU(16073159501225501777),
KQU( 3335870590499306217), KQU(17088312653151202847),
KQU( 3108893142681931848), KQU(16636841767202792021),
KQU(10423316431118400637), KQU( 8008357368674443506),
KQU(11340015231914677875), KQU(17687896501594936090),
KQU(15173627921763199958), KQU( 542569482243721959),
KQU(15071714982769812975), KQU( 4466624872151386956),
KQU( 1901780715602332461), KQU( 9822227742154351098),
KQU( 1479332892928648780), KQU( 6981611948382474400),
KQU( 7620824924456077376), KQU(14095973329429406782),
KQU( 7902744005696185404), KQU(15830577219375036920),
KQU(10287076667317764416), KQU(12334872764071724025),
KQU( 4419302088133544331), KQU(14455842851266090520),
KQU(12488077416504654222), KQU( 7953892017701886766),
KQU( 6331484925529519007), KQU( 4902145853785030022),
KQU(17010159216096443073), KQU(11945354668653886087),
KQU(15112022728645230829), KQU(17363484484522986742),
KQU( 4423497825896692887), KQU( 8155489510809067471),
KQU( 258966605622576285), KQU( 5462958075742020534),
KQU( 6763710214913276228), KQU( 2368935183451109054),
KQU(14209506165246453811), KQU( 2646257040978514881),
KQU( 3776001911922207672), KQU( 1419304601390147631),
KQU(14987366598022458284), KQU( 3977770701065815721),
KQU( 730820417451838898), KQU( 3982991703612885327),
KQU( 2803544519671388477), KQU(17067667221114424649),
KQU( 2922555119737867166), KQU( 1989477584121460932),
KQU(15020387605892337354), KQU( 9293277796427533547),
KQU(10722181424063557247), KQU(16704542332047511651),
KQU( 5008286236142089514), KQU(16174732308747382540),
KQU(17597019485798338402), KQU(13081745199110622093),
KQU( 8850305883842258115), KQU(12723629125624589005),
KQU( 8140566453402805978), KQU(15356684607680935061),
KQU(14222190387342648650), KQU(11134610460665975178),
KQU( 1259799058620984266), KQU(13281656268025610041),
KQU( 298262561068153992), KQU(12277871700239212922),
KQU(13911297774719779438), KQU(16556727962761474934),
KQU(17903010316654728010), KQU( 9682617699648434744),
KQU(14757681836838592850), KQU( 1327242446558524473),
KQU(11126645098780572792), KQU( 1883602329313221774),
KQU( 2543897783922776873), KQU(15029168513767772842),
KQU(12710270651039129878), KQU(16118202956069604504),
KQU(15010759372168680524), KQU( 2296827082251923948),
KQU(10793729742623518101), KQU(13829764151845413046),
KQU(17769301223184451213), KQU( 3118268169210783372),
KQU(17626204544105123127), KQU( 7416718488974352644),
KQU(10450751996212925994), KQU( 9352529519128770586),
KQU( 259347569641110140), KQU( 8048588892269692697),
KQU( 1774414152306494058), KQU(10669548347214355622),
KQU(13061992253816795081), KQU(18432677803063861659),
KQU( 8879191055593984333), KQU(12433753195199268041),
KQU(14919392415439730602), KQU( 6612848378595332963),
KQU( 6320986812036143628), KQU(10465592420226092859),
KQU( 4196009278962570808), KQU( 3747816564473572224),
KQU(17941203486133732898), KQU( 2350310037040505198),
KQU( 5811779859134370113), KQU(10492109599506195126),
KQU( 7699650690179541274), KQU( 1954338494306022961),
KQU(14095816969027231152), KQU( 5841346919964852061),
KQU(14945969510148214735), KQU( 3680200305887550992),
KQU( 6218047466131695792), KQU( 8242165745175775096),
KQU(11021371934053307357), KQU( 1265099502753169797),
KQU( 4644347436111321718), KQU( 3609296916782832859),
KQU( 8109807992218521571), KQU(18387884215648662020),
KQU(14656324896296392902), KQU(17386819091238216751),
KQU(17788300878582317152), KQU( 7919446259742399591),
KQU( 4466613134576358004), KQU(12928181023667938509),
KQU(13147446154454932030), KQU(16552129038252734620),
KQU( 8395299403738822450), KQU(11313817655275361164),
KQU( 434258809499511718), KQU( 2074882104954788676),
KQU( 7929892178759395518), KQU( 9006461629105745388),
KQU( 5176475650000323086), KQU(11128357033468341069),
KQU(12026158851559118955), KQU(14699716249471156500),
KQU( 448982497120206757), KQU( 4156475356685519900),
KQU( 6063816103417215727), KQU(10073289387954971479),
KQU( 8174466846138590962), KQU( 2675777452363449006),
KQU( 9090685420572474281), KQU( 6659652652765562060),
KQU(12923120304018106621), KQU(11117480560334526775),
KQU( 937910473424587511), KQU( 1838692113502346645),
KQU(11133914074648726180), KQU( 7922600945143884053),
KQU(13435287702700959550), KQU( 5287964921251123332),
KQU(11354875374575318947), KQU(17955724760748238133),
KQU(13728617396297106512), KQU( 4107449660118101255),
KQU( 1210269794886589623), KQU(11408687205733456282),
KQU( 4538354710392677887), KQU(13566803319341319267),
KQU(17870798107734050771), KQU( 3354318982568089135),
KQU( 9034450839405133651), KQU(13087431795753424314),
KQU( 950333102820688239), KQU( 1968360654535604116),
KQU(16840551645563314995), KQU( 8867501803892924995),
KQU(11395388644490626845), KQU( 1529815836300732204),
KQU(13330848522996608842), KQU( 1813432878817504265),
KQU( 2336867432693429560), KQU(15192805445973385902),
KQU( 2528593071076407877), KQU( 128459777936689248),
KQU( 9976345382867214866), KQU( 6208885766767996043),
KQU(14982349522273141706), KQU( 3099654362410737822),
KQU(13776700761947297661), KQU( 8806185470684925550),
KQU( 8151717890410585321), KQU( 640860591588072925),
KQU(14592096303937307465), KQU( 9056472419613564846),
KQU(14861544647742266352), KQU(12703771500398470216),
KQU( 3142372800384138465), KQU( 6201105606917248196),
KQU(18337516409359270184), KQU(15042268695665115339),
KQU(15188246541383283846), KQU(12800028693090114519),
KQU( 5992859621101493472), KQU(18278043971816803521),
KQU( 9002773075219424560), KQU( 7325707116943598353),
KQU( 7930571931248040822), KQU( 5645275869617023448),
KQU( 7266107455295958487), KQU( 4363664528273524411),
KQU(14313875763787479809), KQU(17059695613553486802),
KQU( 9247761425889940932), KQU(13704726459237593128),
KQU( 2701312427328909832), KQU(17235532008287243115),
KQU(14093147761491729538), KQU( 6247352273768386516),
KQU( 8268710048153268415), KQU( 7985295214477182083),
KQU(15624495190888896807), KQU( 3772753430045262788),
KQU( 9133991620474991698), KQU( 5665791943316256028),
KQU( 7551996832462193473), KQU(13163729206798953877),
KQU( 9263532074153846374), KQU( 1015460703698618353),
KQU(17929874696989519390), KQU(18257884721466153847),
KQU(16271867543011222991), KQU( 3905971519021791941),
KQU(16814488397137052085), KQU( 1321197685504621613),
KQU( 2870359191894002181), KQU(14317282970323395450),
KQU(13663920845511074366), KQU( 2052463995796539594),
KQU(14126345686431444337), KQU( 1727572121947022534),
KQU(17793552254485594241), KQU( 6738857418849205750),
KQU( 1282987123157442952), KQU(16655480021581159251),
KQU( 6784587032080183866), KQU(14726758805359965162),
KQU( 7577995933961987349), KQU(12539609320311114036),
KQU(10789773033385439494), KQU( 8517001497411158227),
KQU(10075543932136339710), KQU(14838152340938811081),
KQU( 9560840631794044194), KQU(17445736541454117475),
KQU(10633026464336393186), KQU(15705729708242246293),
KQU( 1117517596891411098), KQU( 4305657943415886942),
KQU( 4948856840533979263), KQU(16071681989041789593),
KQU(13723031429272486527), KQU( 7639567622306509462),
KQU(12670424537483090390), KQU( 9715223453097197134),
KQU( 5457173389992686394), KQU( 289857129276135145),
KQU(17048610270521972512), KQU( 692768013309835485),
KQU(14823232360546632057), KQU(18218002361317895936),
KQU( 3281724260212650204), KQU(16453957266549513795),
KQU( 8592711109774511881), KQU( 929825123473369579),
KQU(15966784769764367791), KQU( 9627344291450607588),
KQU(10849555504977813287), KQU( 9234566913936339275),
KQU( 6413807690366911210), KQU(10862389016184219267),
KQU(13842504799335374048), KQU( 1531994113376881174),
KQU( 2081314867544364459), KQU(16430628791616959932),
KQU( 8314714038654394368), KQU( 9155473892098431813),
KQU(12577843786670475704), KQU( 4399161106452401017),
KQU( 1668083091682623186), KQU( 1741383777203714216),
KQU( 2162597285417794374), KQU(15841980159165218736),
KQU( 1971354603551467079), KQU( 1206714764913205968),
KQU( 4790860439591272330), KQU(14699375615594055799),
KQU( 8374423871657449988), KQU(10950685736472937738),
KQU( 697344331343267176), KQU(10084998763118059810),
KQU(12897369539795983124), KQU(12351260292144383605),
KQU( 1268810970176811234), KQU( 7406287800414582768),
KQU( 516169557043807831), KQU( 5077568278710520380),
KQU( 3828791738309039304), KQU( 7721974069946943610),
KQU( 3534670260981096460), KQU( 4865792189600584891),
KQU(16892578493734337298), KQU( 9161499464278042590),
KQU(11976149624067055931), KQU(13219479887277343990),
KQU(14161556738111500680), KQU(14670715255011223056),
KQU( 4671205678403576558), KQU(12633022931454259781),
KQU(14821376219869187646), KQU( 751181776484317028),
KQU( 2192211308839047070), KQU(11787306362361245189),
KQU(10672375120744095707), KQU( 4601972328345244467),
KQU(15457217788831125879), KQU( 8464345256775460809),
KQU(10191938789487159478), KQU( 6184348739615197613),
KQU(11425436778806882100), KQU( 2739227089124319793),
KQU( 461464518456000551), KQU( 4689850170029177442),
KQU( 6120307814374078625), KQU(11153579230681708671),
KQU( 7891721473905347926), KQU(10281646937824872400),
KQU( 3026099648191332248), KQU( 8666750296953273818),
KQU(14978499698844363232), KQU(13303395102890132065),
KQU( 8182358205292864080), KQU(10560547713972971291),
KQU(11981635489418959093), KQU( 3134621354935288409),
KQU(11580681977404383968), KQU(14205530317404088650),
KQU( 5997789011854923157), KQU(13659151593432238041),
KQU(11664332114338865086), KQU( 7490351383220929386),
KQU( 7189290499881530378), KQU(15039262734271020220),
KQU( 2057217285976980055), KQU( 555570804905355739),
KQU(11235311968348555110), KQU(13824557146269603217),
KQU(16906788840653099693), KQU( 7222878245455661677),
KQU( 5245139444332423756), KQU( 4723748462805674292),
KQU(12216509815698568612), KQU(17402362976648951187),
KQU(17389614836810366768), KQU( 4880936484146667711),
KQU( 9085007839292639880), KQU(13837353458498535449),
KQU(11914419854360366677), KQU(16595890135313864103),
KQU( 6313969847197627222), KQU(18296909792163910431),
KQU(10041780113382084042), KQU( 2499478551172884794),
KQU(11057894246241189489), KQU( 9742243032389068555),
KQU(12838934582673196228), KQU(13437023235248490367),
KQU(13372420669446163240), KQU( 6752564244716909224),
KQU( 7157333073400313737), KQU(12230281516370654308),
KQU( 1182884552219419117), KQU( 2955125381312499218),
KQU(10308827097079443249), KQU( 1337648572986534958),
KQU(16378788590020343939), KQU( 108619126514420935),
KQU( 3990981009621629188), KQU( 5460953070230946410),
KQU( 9703328329366531883), KQU(13166631489188077236),
KQU( 1104768831213675170), KQU( 3447930458553877908),
KQU( 8067172487769945676), KQU( 5445802098190775347),
KQU( 3244840981648973873), KQU(17314668322981950060),
KQU( 5006812527827763807), KQU(18158695070225526260),
KQU( 2824536478852417853), KQU(13974775809127519886),
KQU( 9814362769074067392), KQU(17276205156374862128),
KQU(11361680725379306967), KQU( 3422581970382012542),
KQU(11003189603753241266), KQU(11194292945277862261),
KQU( 6839623313908521348), KQU(11935326462707324634),
KQU( 1611456788685878444), KQU(13112620989475558907),
KQU( 517659108904450427), KQU(13558114318574407624),
KQU(15699089742731633077), KQU( 4988979278862685458),
KQU( 8111373583056521297), KQU( 3891258746615399627),
KQU( 8137298251469718086), KQU(12748663295624701649),
KQU( 4389835683495292062), KQU( 5775217872128831729),
KQU( 9462091896405534927), KQU( 8498124108820263989),
KQU( 8059131278842839525), KQU(10503167994254090892),
KQU(11613153541070396656), KQU(18069248738504647790),
KQU( 570657419109768508), KQU( 3950574167771159665),
KQU( 5514655599604313077), KQU( 2908460854428484165),
KQU(10777722615935663114), KQU(12007363304839279486),
KQU( 9800646187569484767), KQU( 8795423564889864287),
KQU(14257396680131028419), KQU( 6405465117315096498),
KQU( 7939411072208774878), KQU(17577572378528990006),
KQU(14785873806715994850), KQU(16770572680854747390),
KQU(18127549474419396481), KQU(11637013449455757750),
KQU(14371851933996761086), KQU( 3601181063650110280),
KQU( 4126442845019316144), KQU(10198287239244320669),
KQU(18000169628555379659), KQU(18392482400739978269),
KQU( 6219919037686919957), KQU( 3610085377719446052),
KQU( 2513925039981776336), KQU(16679413537926716955),
KQU(12903302131714909434), KQU( 5581145789762985009),
KQU(12325955044293303233), KQU(17216111180742141204),
KQU( 6321919595276545740), KQU( 3507521147216174501),
KQU( 9659194593319481840), KQU(11473976005975358326),
KQU(14742730101435987026), KQU( 492845897709954780),
KQU(16976371186162599676), KQU(17712703422837648655),
KQU( 9881254778587061697), KQU( 8413223156302299551),
KQU( 1563841828254089168), KQU( 9996032758786671975),
KQU( 138877700583772667), KQU(13003043368574995989),
KQU( 4390573668650456587), KQU( 8610287390568126755),
KQU(15126904974266642199), KQU( 6703637238986057662),
KQU( 2873075592956810157), KQU( 6035080933946049418),
KQU(13382846581202353014), KQU( 7303971031814642463),
KQU(18418024405307444267), KQU( 5847096731675404647),
KQU( 4035880699639842500), KQU(11525348625112218478),
KQU( 3041162365459574102), KQU( 2604734487727986558),
KQU(15526341771636983145), KQU(14556052310697370254),
KQU(12997787077930808155), KQU( 9601806501755554499),
KQU(11349677952521423389), KQU(14956777807644899350),
KQU(16559736957742852721), KQU(12360828274778140726),
KQU( 6685373272009662513), KQU(16932258748055324130),
KQU(15918051131954158508), KQU( 1692312913140790144),
KQU( 546653826801637367), KQU( 5341587076045986652),
KQU(14975057236342585662), KQU(12374976357340622412),
KQU(10328833995181940552), KQU(12831807101710443149),
KQU(10548514914382545716), KQU( 2217806727199715993),
KQU(12627067369242845138), KQU( 4598965364035438158),
KQU( 150923352751318171), KQU(14274109544442257283),
KQU( 4696661475093863031), KQU( 1505764114384654516),
KQU(10699185831891495147), KQU( 2392353847713620519),
KQU( 3652870166711788383), KQU( 8640653276221911108),
KQU( 3894077592275889704), KQU( 4918592872135964845),
KQU(16379121273281400789), KQU(12058465483591683656),
KQU(11250106829302924945), KQU( 1147537556296983005),
KQU( 6376342756004613268), KQU(14967128191709280506),
KQU(18007449949790627628), KQU( 9497178279316537841),
KQU( 7920174844809394893), KQU(10037752595255719907),
KQU(15875342784985217697), KQU(15311615921712850696),
KQU( 9552902652110992950), KQU(14054979450099721140),
KQU( 5998709773566417349), KQU(18027910339276320187),
KQU( 8223099053868585554), KQU( 7842270354824999767),
KQU( 4896315688770080292), KQU(12969320296569787895),
KQU( 2674321489185759961), KQU( 4053615936864718439),
KQU(11349775270588617578), KQU( 4743019256284553975),
KQU( 5602100217469723769), KQU(14398995691411527813),
KQU( 7412170493796825470), KQU( 836262406131744846),
KQU( 8231086633845153022), KQU( 5161377920438552287),
KQU( 8828731196169924949), KQU(16211142246465502680),
KQU( 3307990879253687818), KQU( 5193405406899782022),
KQU( 8510842117467566693), KQU( 6070955181022405365),
KQU(14482950231361409799), KQU(12585159371331138077),
KQU( 3511537678933588148), KQU( 2041849474531116417),
KQU(10944936685095345792), KQU(18303116923079107729),
KQU( 2720566371239725320), KQU( 4958672473562397622),
KQU( 3032326668253243412), KQU(13689418691726908338),
KQU( 1895205511728843996), KQU( 8146303515271990527),
KQU(16507343500056113480), KQU( 473996939105902919),
KQU( 9897686885246881481), KQU(14606433762712790575),
KQU( 6732796251605566368), KQU( 1399778120855368916),
KQU( 935023885182833777), KQU(16066282816186753477),
KQU( 7291270991820612055), KQU(17530230393129853844),
KQU(10223493623477451366), KQU(15841725630495676683),
KQU(17379567246435515824), KQU( 8588251429375561971),
KQU(18339511210887206423), KQU(17349587430725976100),
KQU(12244876521394838088), KQU( 6382187714147161259),
KQU(12335807181848950831), KQU(16948885622305460665),
KQU(13755097796371520506), KQU(14806740373324947801),
KQU( 4828699633859287703), KQU( 8209879281452301604),
KQU(12435716669553736437), KQU(13970976859588452131),
KQU( 6233960842566773148), KQU(12507096267900505759),
KQU( 1198713114381279421), KQU(14989862731124149015),
KQU(15932189508707978949), KQU( 2526406641432708722),
KQU( 29187427817271982), KQU( 1499802773054556353),
KQU(10816638187021897173), KQU( 5436139270839738132),
KQU( 6659882287036010082), KQU( 2154048955317173697),
KQU(10887317019333757642), KQU(16281091802634424955),
KQU(10754549879915384901), KQU(10760611745769249815),
KQU( 2161505946972504002), KQU( 5243132808986265107),
KQU(10129852179873415416), KQU( 710339480008649081),
KQU( 7802129453068808528), KQU(17967213567178907213),
KQU(15730859124668605599), KQU(13058356168962376502),
KQU( 3701224985413645909), KQU(14464065869149109264),
KQU( 9959272418844311646), KQU(10157426099515958752),
KQU(14013736814538268528), KQU(17797456992065653951),
KQU(17418878140257344806), KQU(15457429073540561521),
KQU( 2184426881360949378), KQU( 2062193041154712416),
KQU( 8553463347406931661), KQU( 4913057625202871854),
KQU( 2668943682126618425), KQU(17064444737891172288),
KQU( 4997115903913298637), KQU(12019402608892327416),
KQU(17603584559765897352), KQU(11367529582073647975),
KQU( 8211476043518436050), KQU( 8676849804070323674),
KQU(18431829230394475730), KQU(10490177861361247904),
KQU( 9508720602025651349), KQU( 7409627448555722700),
KQU( 5804047018862729008), KQU(11943858176893142594),
KQU(11908095418933847092), KQU( 5415449345715887652),
KQU( 1554022699166156407), KQU( 9073322106406017161),
KQU( 7080630967969047082), KQU(18049736940860732943),
KQU(12748714242594196794), KQU( 1226992415735156741),
KQU(17900981019609531193), KQU(11720739744008710999),
KQU( 3006400683394775434), KQU(11347974011751996028),
KQU( 3316999628257954608), KQU( 8384484563557639101),
KQU(18117794685961729767), KQU( 1900145025596618194),
KQU(17459527840632892676), KQU( 5634784101865710994),
KQU( 7918619300292897158), KQU( 3146577625026301350),
KQU( 9955212856499068767), KQU( 1873995843681746975),
KQU( 1561487759967972194), KQU( 8322718804375878474),
KQU(11300284215327028366), KQU( 4667391032508998982),
KQU( 9820104494306625580), KQU(17922397968599970610),
KQU( 1784690461886786712), KQU(14940365084341346821),
KQU( 5348719575594186181), KQU(10720419084507855261),
KQU(14210394354145143274), KQU( 2426468692164000131),
KQU(16271062114607059202), KQU(14851904092357070247),
KQU( 6524493015693121897), KQU( 9825473835127138531),
KQU(14222500616268569578), KQU(15521484052007487468),
KQU(14462579404124614699), KQU(11012375590820665520),
KQU(11625327350536084927), KQU(14452017765243785417),
KQU( 9989342263518766305), KQU( 3640105471101803790),
KQU( 4749866455897513242), KQU(13963064946736312044),
KQU(10007416591973223791), KQU(18314132234717431115),
KQU( 3286596588617483450), KQU( 7726163455370818765),
KQU( 7575454721115379328), KQU( 5308331576437663422),
KQU(18288821894903530934), KQU( 8028405805410554106),
KQU(15744019832103296628), KQU( 149765559630932100),
KQU( 6137705557200071977), KQU(14513416315434803615),
KQU(11665702820128984473), KQU( 218926670505601386),
KQU( 6868675028717769519), KQU(15282016569441512302),
KQU( 5707000497782960236), KQU( 6671120586555079567),
KQU( 2194098052618985448), KQU(16849577895477330978),
KQU(12957148471017466283), KQU( 1997805535404859393),
KQU( 1180721060263860490), KQU(13206391310193756958),
KQU(12980208674461861797), KQU( 3825967775058875366),
KQU(17543433670782042631), KQU( 1518339070120322730),
KQU(16344584340890991669), KQU( 2611327165318529819),
KQU(11265022723283422529), KQU( 4001552800373196817),
KQU(14509595890079346161), KQU( 3528717165416234562),
KQU(18153222571501914072), KQU( 9387182977209744425),
KQU(10064342315985580021), KQU(11373678413215253977),
KQU( 2308457853228798099), KQU( 9729042942839545302),
KQU( 7833785471140127746), KQU( 6351049900319844436),
KQU(14454610627133496067), KQU(12533175683634819111),
KQU(15570163926716513029), KQU(13356980519185762498)
};
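/*
 * Each test below seeds two identical SFMT contexts: the first fills two
 * blocks via fill_array32()/fill_array64(), the second generates values one
 * at a time via gen_rand32()/gen_rand64(). The two streams must match, and
 * the first COUNT_1 values must also match the hard-coded expected vectors
 * above.
 */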
TEST_BEGIN(test_gen_rand_32)
{
uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
int i;
uint32_t r32;
sfmt_t *ctx;
assert_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_gen_rand(1234);
fill_array32(ctx, array32, BLOCK_SIZE);
fill_array32(ctx, array32_2, BLOCK_SIZE);
fini_gen_rand(ctx);
ctx = init_gen_rand(1234);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
fini_gen_rand(ctx);
}
TEST_END
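/* As above, but the generator is seeded with init_by_array() rather than a scalar seed. */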
TEST_BEGIN(test_by_array_32)
{
uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
int i;
uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
uint32_t r32;
sfmt_t *ctx;
assert_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_by_array(ini, 4);
fill_array32(ctx, array32, BLOCK_SIZE);
fill_array32(ctx, array32_2, BLOCK_SIZE);
fini_gen_rand(ctx);
ctx = init_by_array(ini, 4);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
assert_u32_eq(array32[i], init_by_array_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
fini_gen_rand(ctx);
}
TEST_END
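/* 64-bit output variant of the scalar-seed test, checked against init_gen_rand_64_expected. */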
TEST_BEGIN(test_gen_rand_64)
{
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
int i;
uint64_t r;
sfmt_t *ctx;
assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_gen_rand(4321);
fill_array64(ctx, array64, BLOCK_SIZE64);
fill_array64(ctx, array64_2, BLOCK_SIZE64);
fini_gen_rand(ctx);
ctx = init_gen_rand(4321);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64"", i,
array64_2[i], r);
}
fini_gen_rand(ctx);
}
TEST_END
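/* 64-bit output variant of the array-seed test, checked against init_by_array_64_expected. */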
TEST_BEGIN(test_by_array_64)
{
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
int i;
uint64_t r;
uint32_t ini[] = {5, 4, 3, 2, 1};
sfmt_t *ctx;
assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_by_array(ini, 5);
fill_array64(ctx, array64, BLOCK_SIZE64);
fill_array64(ctx, array64_2, BLOCK_SIZE64);
fini_gen_rand(ctx);
ctx = init_by_array(ini, 5);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
assert_u64_eq(array64[i], init_by_array_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"PRIx64" gen=%"PRIx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64, i,
array64_2[i], r);
}
fini_gen_rand(ctx);
}
TEST_END
int
main(void)
{
return (test(
test_gen_rand_32,
test_by_array_32,
test_gen_rand_64,
test_by_array_64));
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/rb.c
#include "test/jemalloc_test.h"
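/*
 * Walk the leftmost path from the root, counting black nodes; in a valid
 * red-black tree this equals the black height of every root-to-nil path.
 */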
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
a_type *rbp_bh_t; \
for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
rbp_bh_t != &(a_rbt)->rbt_nil; \
rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
(r_height)++; \
} \
} \
} while (0)
typedef struct node_s node_t;
struct node_s {
#define NODE_MAGIC 0x9823af7e
uint32_t magic;
rb_node(node_t) link;
uint64_t key;
};
static int
node_cmp(node_t *a, node_t *b) {
int ret;
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
ret = (a->key > b->key) - (a->key < b->key);
if (ret == 0) {
/*
* Duplicates are not allowed in the tree, so force an
* arbitrary ordering for non-identical items with equal keys.
*/
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
}
return (ret);
}
typedef rb_tree(node_t) tree_t;
rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
TEST_BEGIN(test_rb_empty)
{
tree_t tree;
node_t key;
tree_new(&tree);
assert_ptr_null(tree_first(&tree), "Unexpected node");
assert_ptr_null(tree_last(&tree), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
}
TEST_END
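/*
 * Recursively check red-black invariants below node: red nodes must have
 * black children (asserted), and every path to nil must cross black_height
 * black nodes. The return value counts paths whose black depth is wrong.
 */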
static unsigned
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
node_t *nil)
{
unsigned ret = 0;
node_t *left_node = rbtn_left_get(node_t, link, node);
node_t *right_node = rbtn_right_get(node_t, link, node);
if (rbtn_red_get(node_t, link, node) == false)
black_depth++;
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
assert_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
assert_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
}
if (node == nil)
return (ret);
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
if (left_node != nil)
ret += tree_recurse(left_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
/* Right subtree. */
if (right_node != nil)
ret += tree_recurse(right_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
return (ret);
}
static node_t *
tree_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *i = (unsigned *)data;
node_t *search_node;
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Test rb_search(). */
search_node = tree_search(tree, node);
assert_ptr_eq(search_node, node,
"tree_search() returned unexpected node");
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_nsearch() returned unexpected node");
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_psearch() returned unexpected node");
(*i)++;
return (NULL);
}
static unsigned
tree_iterate(tree_t *tree)
{
unsigned i;
i = 0;
tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
static unsigned
tree_iterate_reverse(tree_t *tree)
{
unsigned i;
i = 0;
tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
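/*
 * Remove node from tree, then verify neighbor lookups, red-black balance, and
 * that forward and reverse iteration still visit exactly nnodes-1 nodes.
 */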
static void
node_remove(tree_t *tree, node_t *node, unsigned nnodes)
{
node_t *search_node;
unsigned black_height, imbalances;
tree_remove(tree, node);
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
if (search_node != NULL) {
assert_u64_ge(search_node->key, node->key,
"Key ordering error");
}
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
if (search_node != NULL) {
assert_u64_le(search_node->key, node->key,
"Key ordering error");
}
node->magic = 0;
rbtn_black_height(node_t, link, tree, black_height);
imbalances = tree_recurse(tree->rbt_root, black_height, 0,
&(tree->rbt_nil));
assert_u_eq(imbalances, 0, "Tree is unbalanced");
assert_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
"Unexpected node iteration count");
}
static node_t *
remove_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_next(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
static node_t *
remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_prev(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
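/*
 * Stress test: for each of NBAGS key bags (in-order, reverse-order, then
 * random), build trees of every size up to NNODES, validating the invariants
 * after each insertion, and tear them down using four different removal
 * orders (forward, reverse, iterator-driven, reverse-iterator-driven).
 */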
TEST_BEGIN(test_rb_random)
{
#define NNODES 25
#define NBAGS 250
#define SEED 42
sfmt_t *sfmt;
uint64_t bag[NNODES];
tree_t tree;
node_t nodes[NNODES];
unsigned i, j, k, black_height, imbalances;
sfmt = init_gen_rand(SEED);
for (i = 0; i < NBAGS; i++) {
switch (i) {
case 0:
/* Insert in order. */
for (j = 0; j < NNODES; j++)
bag[j] = j;
break;
case 1:
/* Insert in reverse order. */
for (j = 0; j < NNODES; j++)
bag[j] = NNODES - j - 1;
break;
default:
for (j = 0; j < NNODES; j++)
bag[j] = gen_rand64_range(sfmt, NNODES);
}
for (j = 1; j <= NNODES; j++) {
/* Initialize tree and nodes. */
tree_new(&tree);
tree.rbt_nil.magic = 0;
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
nodes[k].key = bag[k];
}
/* Insert nodes. */
for (k = 0; k < j; k++) {
tree_insert(&tree, &nodes[k]);
rbtn_black_height(node_t, link, &tree,
black_height);
imbalances = tree_recurse(tree.rbt_root,
black_height, 0, &(tree.rbt_nil));
assert_u_eq(imbalances, 0,
"Tree is unbalanced");
assert_u_eq(tree_iterate(&tree), k+1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
assert_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_last(&tree),
"Tree should not be empty");
tree_next(&tree, &nodes[k]);
tree_prev(&tree, &nodes[k]);
}
/* Remove nodes. */
switch (i % 4) {
case 0:
for (k = 0; k < j; k++)
node_remove(&tree, &nodes[k], j - k);
break;
case 1:
for (k = j; k > 0; k--)
node_remove(&tree, &nodes[k-1], k);
break;
case 2: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_iter(&tree, start,
remove_iterate_cb, (void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 3: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_reverse_iter(&tree, start,
remove_reverse_iterate_cb,
(void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} default:
not_reached();
}
}
}
fini_gen_rand(sfmt);
#undef NNODES
#undef NBAGS
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rb_empty,
test_rb_random));
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/util.c
#include "test/jemalloc_test.h"
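/*
 * pow2_ceil() must map 0 to 0, leave exact powers of two unchanged, and round
 * everything else up to the next power of two.
 */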
TEST_BEGIN(test_pow2_ceil)
{
unsigned i, pow2;
size_t x;
assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
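/* Powers of two should be returned unchanged. */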
for (i = 0; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
"Unexpected result");
}
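/* One less than a power of two should round up to that power. */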
for (i = 2; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
"Unexpected result");
}
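/* One more than a power of two should round up to the next power. */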
for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
"Unexpected result");
}
for (pow2 = 1; pow2 < 25; pow2++) {
for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
"Unexpected result, x=%zu", x);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax_no_endptr)
{
int err;
set_errno(0);
assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
err = get_errno();
assert_d_eq(err, 0, "Unexpected failure");
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax)
{
struct test_s {
const char *input;
const char *expected_remainder;
int base;
int expected_errno;
const char *expected_errno_name;
uintmax_t expected_x;
};
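/*
 * ERR() supplies an errno value along with its stringified name; KUMAX()
 * builds a uintmax_t constant from an unsigned long long literal.
 */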
#define ERR(e) e, #e
#define KUMAX(x) ((uintmax_t)x##ULL)
struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
{"", "", 0, ERR(EINVAL), UINTMAX_MAX},
{"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
{"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
{"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
{"42", "", 0, ERR(0), KUMAX(42)},
{"+42", "", 0, ERR(0), KUMAX(42)},
{"-42", "", 0, ERR(0), KUMAX(-42)},
{"042", "", 0, ERR(0), KUMAX(042)},
{"+042", "", 0, ERR(0), KUMAX(042)},
{"-042", "", 0, ERR(0), KUMAX(-042)},
{"0x42", "", 0, ERR(0), KUMAX(0x42)},
{"+0x42", "", 0, ERR(0), KUMAX(0x42)},
{"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
{"0", "", 0, ERR(0), KUMAX(0)},
{"1", "", 0, ERR(0), KUMAX(1)},
{"42", "", 0, ERR(0), KUMAX(42)},
{" 42", "", 0, ERR(0), KUMAX(42)},
{"42 ", " ", 0, ERR(0), KUMAX(42)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"42x", "x", 0, ERR(0), KUMAX(42)},
{"07", "", 0, ERR(0), KUMAX(7)},
{"010", "", 0, ERR(0), KUMAX(8)},
{"08", "8", 0, ERR(0), KUMAX(0)},
{"0_", "_", 0, ERR(0), KUMAX(0)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"0X", "X", 0, ERR(0), KUMAX(0)},
{"0xg", "xg", 0, ERR(0), KUMAX(0)},
{"0XA", "", 0, ERR(0), KUMAX(10)},
{"010", "", 10, ERR(0), KUMAX(10)},
{"0x3", "x3", 10, ERR(0), KUMAX(0)},
{"12", "2", 2, ERR(0), KUMAX(1)},
{"78", "8", 8, ERR(0), KUMAX(7)},
{"9a", "a", 10, ERR(0), KUMAX(9)},
{"9A", "A", 10, ERR(0), KUMAX(9)},
{"fg", "g", 16, ERR(0), KUMAX(15)},
{"FG", "G", 16, ERR(0), KUMAX(15)},
{"0xfg", "g", 16, ERR(0), KUMAX(15)},
{"0XFG", "G", 16, ERR(0), KUMAX(15)},
{"z_", "_", 36, ERR(0), KUMAX(35)},
{"Z_", "_", 36, ERR(0), KUMAX(35)}
};
#undef ERR
#undef KUMAX
unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
struct test_s *test = &tests[i];
int err;
uintmax_t result;
char *remainder;
set_errno(0);
result = malloc_strtoumax(test->input, &remainder, test->base);
err = get_errno();
assert_d_eq(err, test->expected_errno,
"Expected errno %s for \"%s\", base %d",
test->expected_errno_name, test->input, test->base);
assert_str_eq(remainder, test->expected_remainder,
"Unexpected remainder for \"%s\", base %d",
test->input, test->base);
if (err == 0) {
assert_ju_eq(result, test->expected_x,
"Unexpected result for \"%s\", base %d",
test->input, test->base);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_snprintf_truncated)
{
#define BUFLEN 15
char buf[BUFLEN];
int result;
size_t len;
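/*
 * Format into a deliberately short buffer and verify both the truncated
 * contents and that the return value reports the length the untruncated
 * output would have had.
 */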
#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
assert_d_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
for (len = 1; len < BUFLEN; len++) {
TEST("012346789", "012346789");
TEST("a0123b", "a%sb", "0123");
TEST("a01234567", "a%s%s", "0123", "4567");
TEST("a0123 ", "a%-6s", "0123");
TEST("a 0123", "a%6s", "0123");
TEST("a 012", "a%6.3s", "0123");
TEST("a 012", "a%*.*s", 6, 3, "0123");
TEST("a 123b", "a% db", 123);
TEST("a123b", "a%-db", 123);
TEST("a-123b", "a%-db", -123);
TEST("a+123b", "a%+db", 123);
}
#undef BUFLEN
#undef TEST
}
TEST_END
TEST_BEGIN(test_malloc_snprintf)
{
#define BUFLEN 128
char buf[BUFLEN];
int result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0)
TEST("hello", "hello");
TEST("50%, 100%", "50%%, %d%%", 100);
TEST("a0123b", "a%sb", "0123");
TEST("a 0123b", "a%5sb", "0123");
TEST("a 0123b", "a%*sb", 5, "0123");
TEST("a0123 b", "a%-5sb", "0123");
TEST("a0123b", "a%*sb", -1, "0123");
TEST("a0123 b", "a%*sb", -5, "0123");
TEST("a0123 b", "a%-*sb", -5, "0123");
TEST("a012b", "a%.3sb", "0123");
TEST("a012b", "a%.*sb", 3, "0123");
TEST("a0123b", "a%.*sb", -3, "0123");
TEST("a 012b", "a%5.3sb", "0123");
TEST("a 012b", "a%5.*sb", 3, "0123");
TEST("a 012b", "a%*.3sb", 5, "0123");
TEST("a 012b", "a%*.*sb", 5, 3, "0123");
TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
TEST("_abcd_", "_%x_", 0xabcd);
TEST("_0xabcd_", "_%#x_", 0xabcd);
TEST("_1234_", "_%o_", 01234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_1234_", "_%d_", 1234);
TEST("_ 1234_", "_% d_", 1234);
TEST("_+1234_", "_%+d_", 1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_-1234_", "_% d_", -1234);
TEST("_-1234_", "_%+d_", -1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_1234_", "_%d_", 1234);
TEST("_-1234_", "_%i_", -1234);
TEST("_1234_", "_%i_", 1234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_0x1234abc_", "_%#x_", 0x1234abc);
TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
TEST("_c_", "_%c_", 'c');
TEST("_string_", "_%s_", "string");
TEST("_0x42_", "_%p_", ((void *)0x42));
TEST("_-1234_", "_%ld_", ((long)-1234));
TEST("_1234_", "_%ld_", ((long)1234));
TEST("_-1234_", "_%li_", ((long)-1234));
TEST("_1234_", "_%li_", ((long)1234));
TEST("_01234_", "_%#lo_", ((long)01234));
TEST("_1234_", "_%lu_", ((long)1234));
TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
TEST("_-1234_", "_%lld_", ((long long)-1234));
TEST("_1234_", "_%lld_", ((long long)1234));
TEST("_-1234_", "_%lli_", ((long long)-1234));
TEST("_1234_", "_%lli_", ((long long)1234));
TEST("_01234_", "_%#llo_", ((long long)01234));
TEST("_1234_", "_%llu_", ((long long)1234));
TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
/* turn off ICC warnings on invalid format string conversion */
#pragma warning (push)
#pragma warning (disable: 269)
#endif
TEST("_-1234_", "_%qd_", ((long long)-1234));
TEST("_1234_", "_%qd_", ((long long)1234));
TEST("_-1234_", "_%qi_", ((long long)-1234));
TEST("_1234_", "_%qi_", ((long long)1234));
TEST("_01234_", "_%#qo_", ((long long)01234));
TEST("_1234_", "_%qu_", ((long long)1234));
TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
TEST("_1234_", "_%jd_", ((intmax_t)1234));
TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
TEST("_1234_", "_%ji_", ((intmax_t)1234));
TEST("_01234_", "_%#jo_", ((intmax_t)01234));
TEST("_1234_", "_%ju_", ((intmax_t)1234));
TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
TEST("_1234_", "_%zd_", ((ssize_t)1234));
TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
TEST("_1234_", "_%zi_", ((ssize_t)1234));
TEST("_01234_", "_%#zo_", ((ssize_t)01234));
TEST("_1234_", "_%zu_", ((ssize_t)1234));
TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
#undef BUFLEN
}
TEST_END
int
main(void)
{
return (test(
test_pow2_ceil,
test_malloc_strtoumax_no_endptr,
test_malloc_strtoumax,
test_malloc_snprintf_truncated,
test_malloc_snprintf));
}
| 8,905 | 28.2 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum.c
|
#include "prof_accum.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
#endif
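/* Redirect heap profile dumps to /dev/null so the test leaves no files behind. */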
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
int fd;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
}
static void *
alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
{
return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration));
}
static void *
thd_start(void *varg)
{
unsigned thd_ind = *(unsigned *)varg;
size_t bt_count_prev, bt_count;
unsigned i_prev, i;
i_prev = 0;
bt_count_prev = 0;
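/*
 * Each iteration allocates via a distinct permuted backtrace, so the
 * recorded backtrace count should keep growing even though every
 * allocation is freed immediately.
 */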
for (i = 0; i < NALLOCS_PER_THREAD; i++) {
void *p = alloc_from_permuted_backtrace(thd_ind, i);
dallocx(p, 0);
if (i % DUMP_INTERVAL == 0) {
assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
}
if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
i+1 == NALLOCS_PER_THREAD) {
bt_count = prof_bt_count();
assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
"Expected larger backtrace count increase");
i_prev = i;
bt_count_prev = bt_count;
}
}
return (NULL);
}
TEST_BEGIN(test_idump)
{
bool active;
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
unsigned i;
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
for (i = 0; i < NTHREADS; i++) {
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
}
for (i = 0; i < NTHREADS; i++)
thd_join(thds[i], NULL);
}
TEST_END
int
main(void)
{
return (test(
test_idump));
}
| 1,782 | 19.494253 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/stats.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_stats_summary)
{
size_t *cactive;
size_t sz, allocated, active, mapped;
int expected = config_stats ? 0 : ENOENT;
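/* Stats mallctls return ENOENT unless jemalloc was built with statistics support. */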
sz = sizeof(cactive);
assert_d_eq(mallctl("pool.0.stats.cactive", &cactive, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.allocated", &allocated, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.active", &active, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.mapped", &mapped, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(active, *cactive,
"active should be no larger than cactive");
assert_zu_le(allocated, active,
"allocated should be no larger than active");
assert_zu_le(active, mapped,
"active should be no larger than mapped");
}
}
TEST_END
TEST_BEGIN(test_stats_chunks)
{
size_t current, high;
uint64_t total;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.current", ¤t, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.chunks.total", &total, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.high", &high, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(current, high,
"current should be no larger than high");
assert_u64_le((uint64_t)high, total,
"high should be no larger than total");
}
}
TEST_END
TEST_BEGIN(test_stats_huge)
{
void *p;
uint64_t epoch;
size_t allocated;
uint64_t nmalloc, ndalloc, nrequests;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
p = mallocx(arena_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_le(nmalloc, nrequests,
"nmalloc should be no larger than nrequests");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_summary)
{
unsigned arena;
void *little, *large;
uint64_t epoch;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
size_t mapped;
uint64_t npurge, nmadvise, purged;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx(arena_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure");
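/* Force a manual purge so the purge-related counters have something to report. */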
assert_d_eq(mallctl("pool.0.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.purged", &purged, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(npurge, 0,
"At least one purge should have occurred");
assert_u64_le(nmadvise, purged,
"nmadvise should be no greater than purged");
}
dallocx(little, 0);
dallocx(large, 0);
}
TEST_END
void *
thd_start(void *arg)
{
return (NULL);
}
static void
no_lazy_lock(void)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_BEGIN(test_stats_arenas_small)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_large)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_bins)
{
unsigned arena;
void *p;
size_t sz, allocated, curruns;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nruns, nreruns;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
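/* Allocate a single region from the smallest bin so its counters become nonzero. */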
p = mallocx(arena_bin_info[0].reg_size, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nfills", &nfills, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nruns", &nruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
if (config_tcache) {
assert_u64_gt(nfills, 0,
"At least one fill should have occurred");
assert_u64_gt(nflushes, 0,
"At least one flush should have occurred");
}
assert_u64_gt(nruns, 0,
"At least one run should have been allocated");
assert_zu_gt(curruns, 0,
"At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_lruns)
{
unsigned arena;
void *p;
uint64_t epoch, nmalloc, ndalloc, nrequests;
size_t curruns, sz;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
assert_zu_gt(curruns, 0,
"At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
int
main(void)
{
return (test(
test_stats_summary,
test_stats_chunks,
test_stats_huge,
test_stats_arenas_summary,
test_stats_arenas_small,
test_stats_arenas_large,
test_stats_arenas_bins,
test_stats_arenas_lruns));
}
| 12,318 | 30.997403 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/qr.c
|
#include "test/jemalloc_test.h"
/* Number of ring entries, in [2..26]. */
#define NENTRIES 9
/* Split index, in [1..NENTRIES). */
#define SPLIT_INDEX 5
typedef struct ring_s ring_t;
struct ring_s {
qr(ring_t) link;
char id;
};
static void
init_entries(ring_t *entries)
{
unsigned i;
for (i = 0; i < NENTRIES; i++) {
qr_new(&entries[i], link);
entries[i].id = 'a' + i;
}
}
static void
test_independent_entries(ring_t *entries)
{
ring_t *t;
unsigned i, j;
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
j++;
}
assert_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
j++;
}
assert_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_ptr_eq(t, &entries[i],
"Next element in single-element ring should be same as "
"current element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_ptr_eq(t, &entries[i],
"Previous element in single-element ring should be same as "
"current element");
}
}
TEST_BEGIN(test_qr_one)
{
ring_t entries[NENTRIES];
init_entries(entries);
test_independent_entries(entries);
}
TEST_END
static void
test_entries_ring(ring_t *entries)
{
ring_t *t;
unsigned i, j;
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
"Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
NENTRIES].id, "Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
}
TEST_BEGIN(test_qr_after_insert)
{
ring_t entries[NENTRIES];
unsigned i;
init_entries(entries);
for (i = 1; i < NENTRIES; i++)
qr_after_insert(&entries[i - 1], &entries[i], link);
test_entries_ring(entries);
}
TEST_END
TEST_BEGIN(test_qr_remove)
{
ring_t entries[NENTRIES];
ring_t *t;
unsigned i, j;
init_entries(entries);
for (i = 1; i < NENTRIES; i++)
qr_after_insert(&entries[i - 1], &entries[i], link);
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[i+j].id,
"Element id mismatch");
j++;
}
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
"Element id mismatch");
j++;
}
qr_remove(&entries[i], link);
}
test_independent_entries(entries);
}
TEST_END
TEST_BEGIN(test_qr_before_insert)
{
ring_t entries[NENTRIES];
ring_t *t;
unsigned i, j;
init_entries(entries);
for (i = 1; i < NENTRIES; i++)
qr_before_insert(&entries[i - 1], &entries[i], link);
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(NENTRIES+i-j) %
NENTRIES].id, "Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
"Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
}
TEST_END
static void
test_split_entries(ring_t *entries)
{
ring_t *t;
unsigned i, j;
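/*
 * After qr_split(), entries [0..SPLIT_INDEX) form one ring and entries
 * [SPLIT_INDEX..NENTRIES) form another; verify membership of each.
 */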
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
if (i < SPLIT_INDEX) {
assert_c_eq(t->id,
entries[(i+j) % SPLIT_INDEX].id,
"Element id mismatch");
} else {
assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
(NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
"Element id mismatch");
}
j++;
}
}
}
TEST_BEGIN(test_qr_meld_split)
{
ring_t entries[NENTRIES];
unsigned i;
init_entries(entries);
for (i = 1; i < NENTRIES; i++)
qr_after_insert(&entries[i - 1], &entries[i], link);
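/*
 * qr_meld() and qr_split() are functionally equivalent, so either call
 * toggles between the melded and split states.
 */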
qr_split(&entries[0], &entries[SPLIT_INDEX], link);
test_split_entries(entries);
qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
test_entries_ring(entries);
qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
test_split_entries(entries);
qr_split(&entries[0], &entries[SPLIT_INDEX], link);
test_entries_ring(entries);
qr_split(&entries[0], &entries[0], link);
test_entries_ring(entries);
qr_meld(&entries[0], &entries[0], link);
test_entries_ring(entries);
}
TEST_END
int
main(void)
{
return (test(
test_qr_one,
test_qr_after_insert,
test_qr_remove,
test_qr_before_insert,
test_qr_meld_split));
}
| 5,172 | 19.7751 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/bitmap.c
|
#include "test/jemalloc_test.h"
#if (LG_BITMAP_MAXBITS > 12)
# define MAXBITS 4500
#else
# define MAXBITS (1U << LG_BITMAP_MAXBITS)
#endif
TEST_BEGIN(test_bitmap_size)
{
size_t i, prev_size;
prev_size = 0;
for (i = 1; i <= MAXBITS; i++) {
size_t size = bitmap_size(i);
assert_true(size >= prev_size,
"Bitmap size is smaller than expected");
prev_size = size;
}
}
TEST_END
TEST_BEGIN(test_bitmap_init)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) {
assert_false(bitmap_get(bitmap, &binfo, j),
"Bit should be unset");
}
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_set)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_unset)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
for (j = 0; j < i; j++)
bitmap_unset(bitmap, &binfo, j);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_sfu)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
ssize_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
for (j = 0; j < i; j++) {
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after "
"previous first unset bit");
}
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
/*
* Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits.
*/
for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be the bit previously "
"unset");
bitmap_unset(bitmap, &binfo, j);
}
assert_false(bitmap_get(bitmap, &binfo, 0),
"Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and
* verify that bitmap_sfu() looks past them.
*/
for (j = 1; j < i; j++) {
bitmap_set(bitmap, &binfo, j - 1);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after the "
"bit previously set");
bitmap_unset(bitmap, &binfo, j);
}
assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
"First unset bit should be the last bit");
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
int
main(void)
{
return (test(
test_bitmap_size,
test_bitmap_init,
test_bitmap_set,
test_bitmap_unset,
test_bitmap_sfu));
}
| 3,614 | 20.777108 | 57 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/junk.c
|
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf =
"abort:false,junk:true,zero:false,redzone:true,quarantine:0";
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;
static void *most_recently_junked;
static void
arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
{
size_t i;
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
most_recently_junked = ptr;
}
static void
arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
{
size_t i;
arena_dalloc_junk_large_orig(ptr, usize);
for (i = 0; i < usize; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
most_recently_junked = ptr;
}
static void
huge_dalloc_junk_intercept(void *ptr, size_t usize)
{
huge_dalloc_junk_orig(ptr, usize);
/*
* The conditions under which junk filling actually occurs are nuanced
* enough that it doesn't make sense to duplicate the decision logic in
* test code, so don't actually check that the region is junk-filled.
*/
most_recently_junked = ptr;
}
static void
test_junk(size_t sz_min, size_t sz_max)
{
char *s;
size_t sz_prev, sz, i;
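/*
 * Install interceptors for the junk-fill hooks so that the most recently
 * junk-filled region can be recorded and checked.
 */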
arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
huge_dalloc_junk_orig = huge_dalloc_junk;
huge_dalloc_junk = huge_dalloc_junk_intercept;
sz_prev = 0;
s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
assert_c_eq(s[i], 0xa5,
"Newly allocated byte %zu/%zu isn't junk-filled",
i, sz);
s[i] = 'a';
}
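/*
 * If xallocx() cannot grow the allocation in place, the subsequent
 * rallocx() is expected to move it, so the old region should have been
 * junk-filled on deallocation.
 */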
if (xallocx(s, sz+1, 0, 0) == sz) {
void *junked = (void *)s;
s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
assert_ptr_eq(most_recently_junked, junked,
"Expected region of size %zu to be junk-filled",
sz);
}
}
dallocx(s, 0);
assert_ptr_eq(most_recently_junked, (void *)s,
"Expected region of size %zu to be junk-filled", sz);
arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
huge_dalloc_junk = huge_dalloc_junk_orig;
}
TEST_BEGIN(test_junk_small)
{
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
}
TEST_END
TEST_BEGIN(test_junk_large)
{
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, arena_maxclass);
}
TEST_END
TEST_BEGIN(test_junk_huge)
{
test_skip_if(!config_fill);
test_junk(arena_maxclass+1, chunksize*2);
}
TEST_END
arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
static void *most_recently_trimmed;
static void
arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
{
arena_ralloc_junk_large_orig(ptr, old_usize, usize);
assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
most_recently_trimmed = ptr;
}
TEST_BEGIN(test_junk_large_ralloc_shrink)
{
void *p1, *p2;
p1 = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
p2 = rallocx(p1, arena_maxclass-PAGE, 0);
assert_ptr_eq(p1, p2, "Unexpected move during shrink");
arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
assert_ptr_eq(most_recently_trimmed, p1,
"Expected trimmed portion of region to be junk-filled");
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_junk_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_junk_small,
test_junk_large,
test_junk_huge,
test_junk_large_ralloc_shrink,
test_junk_redzone));
}
| 5,541 | 24.190909 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/ckh.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_new_delete)
{
ckh_t ckh;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_count_insert_search_remove)
{
ckh_t ckh;
const char *strs[] = {
"a string",
"A string",
"a string.",
"A string."
};
const char *missing = "A string not in the hash table.";
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
ckh_insert(&ckh, strs[i], strs[i]);
assert_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
}
/* Search. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
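/* Alternate NULL and non-NULL key/value out-pointers to cover every combination. */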
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_search(&ckh, strs[i], kp, vp),
"Unexpected ckh_search() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
}
assert_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
/* Remove. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_remove(&ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
assert_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
sizeof(strs)/sizeof(const char *) - i - 1,
ckh_count(&ckh));
}
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_insert_iter_remove)
{
#define NITEMS ZU(1000)
ckh_t ckh;
void **p[NITEMS];
void *q, *r;
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
}
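/*
 * On pass i, insert items i..NITEMS-1, remove all of those except p[i],
 * then iterate to confirm that exactly items 0..i remain in the table.
 */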
for (i = 0; i < NITEMS; i++) {
size_t j;
for (j = i; j < NITEMS; j++) {
assert_false(ckh_insert(&ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
assert_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
}
assert_zu_eq(ckh_count(&ckh), NITEMS,
"ckh_count() should return %zu, but it returned %zu",
NITEMS, ckh_count(&ckh));
for (j = i + 1; j < NITEMS; j++) {
assert_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
{
bool seen[NITEMS];
size_t tabind;
memset(seen, 0, sizeof(seen));
for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
false;) {
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
for (k = 0; k < NITEMS; k++) {
if (p[k] == q) {
assert_false(seen[k],
"Item %zu already seen", k);
seen[k] = true;
break;
}
}
}
for (j = 0; j < i + 1; j++)
assert_true(seen[j], "Item %zu not seen", j);
for (; j < NITEMS; j++)
assert_false(seen[j], "Item %zu seen", j);
}
}
for (i = 0; i < NITEMS; i++) {
assert_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[i], q, "Key pointer mismatch");
assert_ptr_eq(p[i], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
ckh_delete(&ckh);
#undef NITEMS
}
TEST_END
int
main(void)
{
return (test(
test_new_delete,
test_count_insert_search_remove,
test_insert_iter_remove));
}
| 5,301 | 24.613527 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/zero.c
|
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf =
"abort:false,junk:false,zero:true,redzone:false,quarantine:0";
#endif
static void
test_zero(size_t sz_min, size_t sz_max)
{
char *s;
size_t sz_prev, sz, i;
sz_prev = 0;
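/*
 * Repeatedly grow the allocation and verify that newly exposed bytes come
 * back zero-filled while previously written bytes survive.
 */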
s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
assert_c_eq(s[i], 0x0,
"Newly allocated byte %zu/%zu isn't zero-filled",
i, sz);
s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
}
}
dallocx(s, 0);
}
TEST_BEGIN(test_zero_small)
{
test_skip_if(!config_fill);
test_zero(1, SMALL_MAXCLASS-1);
}
TEST_END
TEST_BEGIN(test_zero_large)
{
test_skip_if(!config_fill);
test_zero(SMALL_MAXCLASS+1, arena_maxclass);
}
TEST_END
TEST_BEGIN(test_zero_huge)
{
test_skip_if(!config_fill);
test_zero(arena_maxclass+1, chunksize*2);
}
TEST_END
int
main(void)
{
return (test(
test_zero_small,
test_zero_large,
test_zero_huge));
}
| 1,457 | 17.455696 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/math.c
|
#include "test/jemalloc_test.h"
#define MAX_REL_ERR 1.0e-9
#define MAX_ABS_ERR 1.0e-9
#include <float.h>
#ifndef INFINITY
#define INFINITY (DBL_MAX + DBL_MAX)
#endif
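/*
 * Approximate floating point comparison: accept either a small absolute
 * error or a small relative error.
 */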
static bool
double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
{
double rel_err;
if (fabs(a - b) < max_abs_err)
return (true);
rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
return (rel_err < max_rel_err);
}
static uint64_t
factorial(unsigned x)
{
uint64_t ret = 1;
unsigned i;
for (i = 2; i <= x; i++)
ret *= (uint64_t)i;
return (ret);
}
TEST_BEGIN(test_ln_gamma_factorial)
{
unsigned x;
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
for (x = 1; x <= 21; x++) {
assert_true(double_eq_rel(exp(ln_gamma(x)),
(double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect factorial result for x=%u", x);
}
}
TEST_END
/* Expected ln_gamma([0.0..100.0] increment=0.25). */
static const double ln_gamma_misc_expected[] = {
INFINITY,
1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
-0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
359.13420536957539753
};
TEST_BEGIN(test_ln_gamma_misc)
{
unsigned i;
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
double x = (double)i * 0.25;
assert_true(double_eq_rel(ln_gamma(x),
ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect ln_gamma result for i=%u", i);
}
}
TEST_END
/* Expected pt_norm([0.01..0.99] increment=0.01). */
static const double pt_norm_expected[] = {
-INFINITY,
-2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
-1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
-1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
-1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
-1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
-0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
-0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
-0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
-0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
-0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
-0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
-0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
-0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
-0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
-0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
-0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
-0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
1.88079360815125041, 2.05374891063182208, 2.32634787404084076
};
TEST_BEGIN(test_pt_norm)
{
unsigned i;
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
double p = (double)i * 0.01;
assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_norm result for i=%u", i);
}
}
TEST_END
/*
* Expected pt_chi2(p=[0.01..0.99] increment=0.07,
* df={0.1, 1.1, 10.1, 100.1, 1000.1}).
*/
static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
static const double pt_chi2_expected[] = {
1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
2.606673548632508, 4.602913725294877, 5.646152813924212,
6.488971315540869, 7.249823275816285, 7.977314231410841,
8.700354939944047, 9.441728024225892, 10.224338321374127,
11.076435368801061, 12.039320937038386, 13.183878752697167,
14.657791935084575, 16.885728216339373, 23.361991680031817,
70.14844087392152, 80.92379498849355, 85.53325420085891,
88.94433120715347, 91.83732712857017, 94.46719943606301,
96.96896479994635, 99.43412843510363, 101.94074719829733,
104.57228644307247, 107.43900093448734, 110.71844673417287,
114.76616819871325, 120.57422505959563, 135.92318818757556,
899.0072447849649, 937.9271278858220, 953.8117189560207,
965.3079371501154, 974.8974061207954, 983.4936235182347,
991.5691170518946, 999.4334123954690, 1007.3391826856553,
1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
1046.4872561869577, 1063.5717461999654, 1107.0741966053859
};
TEST_BEGIN(test_pt_chi2)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
double df = pt_chi2_df[i];
double ln_gamma_df = ln_gamma(df * 0.5);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_chi2 result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
/*
* Expected pt_gamma(p=[0.1..0.99] increment=0.07,
* shape=[0.5..3.0] increment=0.5).
*/
static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
static const double pt_gamma_expected[] = {
7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
4.7230515633946677, 5.6417477865306020, 8.4059469148854635
};
TEST_BEGIN(test_pt_gamma_shape)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
double shape = pt_gamma_shape[i];
double ln_gamma_shape = ln_gamma(shape);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
MAX_ABS_ERR),
"Incorrect pt_gamma result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
TEST_BEGIN(test_pt_gamma_scale)
{
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
assert_true(double_eq_rel(
pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
MAX_ABS_ERR),
"Scale should be trivially equivalent to external multiplication");
}
TEST_END
int
main(void)
{
return (test(
test_ln_gamma_factorial,
test_ln_gamma_misc,
test_pt_norm,
test_pt_chi2,
test_pt_gamma_shape,
test_pt_gamma_scale));
}
| 18,448 | 45.706329 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/ql.c
|
#include "test/jemalloc_test.h"
/* Number of ring entries, in [2..26]. */
#define NENTRIES 9
typedef struct list_s list_t;
typedef ql_head(list_t) list_head_t;
struct list_s {
ql_elm(list_t) link;
char id;
};
static void
test_empty_list(list_head_t *head)
{
list_t *t;
unsigned i;
assert_ptr_null(ql_first(head), "Unexpected element for empty list");
assert_ptr_null(ql_last(head, link),
"Unexpected element for empty list");
i = 0;
ql_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
i = 0;
ql_reverse_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
}
TEST_BEGIN(test_ql_empty)
{
list_head_t head;
ql_new(&head);
test_empty_list(&head);
}
TEST_END
static void
init_entries(list_t *entries, unsigned nentries)
{
unsigned i;
for (i = 0; i < nentries; i++) {
entries[i].id = 'a' + i;
ql_elm_new(&entries[i], link);
}
}
static void
test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
{
list_t *t;
unsigned i;
assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
"Element id mismatch");
i = 0;
ql_foreach(t, head, link) {
assert_c_eq(t->id, entries[i].id, "Element id mismatch");
i++;
}
i = 0;
ql_reverse_foreach(t, head, link) {
assert_c_eq(t->id, entries[nentries-i-1].id,
"Element id mismatch");
i++;
}
for (i = 0; i < nentries-1; i++) {
t = ql_next(head, &entries[i], link);
assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
}
assert_ptr_null(ql_next(head, &entries[nentries-1], link),
"Unexpected element");
assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
for (i = 1; i < nentries; i++) {
t = ql_prev(head, &entries[i], link);
assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
}
}
TEST_BEGIN(test_ql_tail_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_tail_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, entries, NENTRIES-i);
ql_tail_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_head_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_head_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, &entries[i], NENTRIES-i);
ql_head_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_insert)
{
list_head_t head;
list_t entries[8];
list_t *a, *b, *c, *d, *e, *f, *g, *h;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
a = &entries[0];
b = &entries[1];
c = &entries[2];
d = &entries[3];
e = &entries[4];
f = &entries[5];
g = &entries[6];
h = &entries[7];
/*
* ql_remove(), ql_before_insert(), and ql_after_insert() are used
* internally by other macros that are already tested, so there's no
* need to test them completely. However, insertion/deletion from the
* middle of lists is not otherwise tested; do so here.
*/
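/*
 * The out-of-order inserts below are chosen so that the final list reads
 * a, b, c, d, e, f, g, h, which is exactly the order test_entries_list()
 * verifies against entries[].
 */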
ql_tail_insert(&head, f, link);
ql_before_insert(&head, f, b, link);
ql_before_insert(&head, f, c, link);
ql_after_insert(f, h, link);
ql_after_insert(f, g, link);
ql_before_insert(&head, b, a, link);
ql_after_insert(c, d, link);
ql_before_insert(&head, f, e, link);
test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
}
TEST_END
int
main(void)
{
return (test(
test_ql_empty,
test_ql_tail_insert,
test_ql_tail_remove,
test_ql_head_insert,
test_ql_head_remove,
test_ql_insert));
}
| 4,483 | 20.352381 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool_custom_alloc_internal.c
|
#include "pool.h"
void *
malloc_test(size_t size) {
custom_allocs++;
return malloc(size);
}
void
free_test(void *ptr) {
custom_allocs--;
free(ptr);
}
int
main(void)
{
/*
* Initialize the custom allocator, which calls malloc provided by jemalloc.
*/
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
return (test_status_fail);
}
je_pool_set_alloc_funcs(malloc_test, free_test);
return test_not_init(POOL_TEST_CASES);
}
| 440 | 13.7 | 62 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/mallctl.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_mallctl_errors)
{
uint64_t epoch;
size_t sz;
assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
"mallctl() should return ENOENT for non-existent names");
assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_errors)
{
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
"mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END
TEST_BEGIN(test_mallctlbymib_errors)
{
uint64_t epoch;
size_t sz;
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
"attempt to write read-only value");
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctl_read_write)
{
uint64_t old_epoch, new_epoch;
size_t sz = sizeof(old_epoch);
/* Blind. */
assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_short_mib)
{
size_t mib[6];
size_t miblen;
void *mem;
pool_t *pool;
unsigned npools;
size_t sz = sizeof(npools);
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne((void*)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
miblen = 5;
mib[5] = 42;
assert_d_eq(mallctlnametomib("pool.1.arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_zu_eq(miblen, 5, "Unexpected mib output length");
assert_zu_eq(mib[5], 42,
"mallctlnametomib() wrote past the end of the input mib");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_mallctl_config)
{
#define TEST_MALLCTL_CONFIG(config) do { \
bool oldval; \
size_t sz = sizeof(oldval); \
assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_CONFIG(debug);
TEST_MALLCTL_CONFIG(fill);
TEST_MALLCTL_CONFIG(lazy_lock);
TEST_MALLCTL_CONFIG(munmap);
TEST_MALLCTL_CONFIG(prof);
TEST_MALLCTL_CONFIG(prof_libgcc);
TEST_MALLCTL_CONFIG(prof_libunwind);
TEST_MALLCTL_CONFIG(stats);
TEST_MALLCTL_CONFIG(tcache);
TEST_MALLCTL_CONFIG(tls);
TEST_MALLCTL_CONFIG(utrace);
TEST_MALLCTL_CONFIG(valgrind);
TEST_MALLCTL_CONFIG(xmalloc);
#undef TEST_MALLCTL_CONFIG
}
TEST_END
TEST_BEGIN(test_mallctl_opt)
{
bool config_always = true;
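/*
 * config_always exists so that the config_##config token pasted by
 * TEST_MALLCTL_OPT below evaluates to true for options that are always
 * compiled in.
 */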
#define TEST_MALLCTL_OPT(t, opt, config) do { \
t oldval; \
size_t sz = sizeof(oldval); \
int expected = config_##config ? 0 : ENOENT; \
int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
assert_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(size_t, lg_chunk, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(size_t, narenas, always);
TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
TEST_MALLCTL_OPT(bool, junk, fill);
TEST_MALLCTL_OPT(size_t, quarantine, fill);
TEST_MALLCTL_OPT(bool, redzone, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
TEST_MALLCTL_OPT(bool, utrace, utrace);
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, tcache);
TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
TEST_MALLCTL_OPT(bool, prof, prof);
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
TEST_MALLCTL_OPT(bool, prof_active, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
TEST_MALLCTL_OPT(bool, prof_accum, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
TEST_MALLCTL_OPT(bool, prof_gdump, prof);
TEST_MALLCTL_OPT(bool, prof_final, prof);
TEST_MALLCTL_OPT(bool, prof_leak, prof);
#undef TEST_MALLCTL_OPT
}
TEST_END
/*
* create a couple of pools and check their size
* using mib feature
*/
TEST_BEGIN(test_mallctl_with_multiple_pools)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools;
int i;
size_t sz = sizeof(npools);
size_t mib[4], miblen;
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
pools[i] = je_pool_create( mem + (i*POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne( (void*)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
miblen = 4;
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
/*
* This loop does not use local variable pools.
* Moreover we omit pool[0].
*/
for (i = 1; i <= NPOOLS; ++i) {
unsigned narenas;
mib[1] = i;
sz = sizeof(narenas);
assert_d_eq(mallctlbymib(mib, miblen, &narenas, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
}
for (i = 0; i < NPOOLS; ++i) {
je_pool_delete( pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_manpage_example)
{
unsigned nbins, i;
size_t mib[6];
size_t len, miblen;
len = sizeof(nbins);
assert_d_eq(mallctl("pool.0.arenas.nbins", &nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 6;
assert_d_eq(mallctlnametomib("pool.0.arenas.bin.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[4] = i;
len = sizeof(bin_size);
assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
}
TEST_END
TEST_BEGIN(test_thread_arena)
{
unsigned arena_old, arena_new, narenas;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
arena_new = narenas - 1;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
arena_new = 0;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
}
TEST_END
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
unsigned npools;
size_t sz = sizeof(unsigned);
size_t mib[5];
size_t miblen = 5;
void *mem;
pool_t *pool;
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne( (void*)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
assert_d_eq(mallctl("pool.1.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("pool.1.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("pool.1.arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[3] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_arena_i_dss)
{
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
size_t mib[5];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("pool.0.arena.0.dss", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
mib[3] = narenas_total_get(pools[0]);
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
}
TEST_END
TEST_BEGIN(test_arenas_initialized)
{
unsigned narenas;
size_t sz = sizeof(narenas);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
{
VARIABLE_ARRAY(bool, initialized, narenas);
sz = narenas * sizeof(bool);
assert_d_eq(mallctl("pool.0.arenas.initialized", initialized, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
}
}
TEST_END
TEST_BEGIN(test_arenas_constants)
{
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas."#name, &(name), &sz, NULL, 0), 0, \
"Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
#undef TEST_ARENAS_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_bin_constants)
{
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.bin.0."#name, &(name), &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
#undef TEST_ARENAS_BIN_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_lrun_constants)
{
#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.lrun.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
#undef TEST_ARENAS_LRUN_CONSTANT
}
TEST_END
/*
* create a couple of pools and extend their arenas
*/
TEST_BEGIN(test_arenas_extend)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools, narenas_before, arena, narenas_after;
int i;
size_t mib_narenas[4],
mib_extend[4],
miblen = sizeof(mib_narenas),
sz = sizeof(unsigned);
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
pools[i] = je_pool_create(mem + (i*POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 0, 1);
assert_ptr_ne((void *)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib_narenas, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlnametomib("pool.0.arenas.extend", mib_extend, &miblen), 0,
"Unexpected mallctlnametomib() failure");
/*
* This loop does not use local variable pools.
* Moreover we omit pool[0].
*/
for (i = 1; i <= NPOOLS; ++i) {
mib_narenas[1] = i;
mib_extend[1] = i;
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_before, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_extend, miblen, &arena, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_after, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
for (i = 0; i < NPOOLS; ++i) {
je_pool_delete( pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_stats_arenas)
{
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.stats.arenas.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
} while (0)
TEST_STATS_ARENAS(const char *, dss);
TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(size_t, pactive);
TEST_STATS_ARENAS(size_t, pdirty);
#undef TEST_STATS_ARENAS
}
TEST_END
/*
* Each arena allocates 32 kilobytes of CTL metadata, and since we only
* have 12 megabytes, we have to hard-limit it to a known value, otherwise
* on systems with high CPU count, the tests might run out of memory.
*/
#define NARENAS_IN_POOL 64
int
main(void)
{
opt_narenas = NARENAS_IN_POOL;
return (test(
test_mallctl_errors,
test_mallctlnametomib_errors,
test_mallctlbymib_errors,
test_mallctl_read_write,
test_mallctlnametomib_short_mib,
test_mallctl_config,
test_mallctl_opt,
test_mallctl_with_multiple_pools,
test_manpage_example,
test_thread_arena,
test_arena_i_purge,
test_arena_i_dss,
test_arenas_initialized,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,
test_arenas_extend,
test_stats_arenas));
}
| 16,249 | 28.98155 | 83 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/rtree.c
|
#include "test/jemalloc_test.h"
void *
rtree_malloc(pool_t *pool, size_t size)
{
return imalloc(size);
}
void
rtree_free(pool_t *pool, void *ptr)
{
return idalloc(ptr);
}
TEST_BEGIN(test_rtree_get_empty)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
assert_u_eq(rtree_get(rtree, 0), 0,
"rtree_get() should return NULL for empty tree");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
rtree_set(rtree, 0, 1);
assert_u_eq(rtree_get(rtree, 0), 1,
"rtree_get() should return previously set value");
rtree_set(rtree, ~((uintptr_t)0), 1);
assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
"rtree_get() should return previously set value");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_bits)
{
unsigned i, j, k;
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
rtree_set(rtree, keys[j], 1);
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
assert_u_eq(rtree_get(rtree, keys[k]), 1,
"rtree_get() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_u_eq(rtree_get(rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
rtree_set(rtree, keys[j], 0);
}
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_random)
{
unsigned i;
sfmt_t *sfmt;
#define NSET 100
#define SEED 42
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
uintptr_t keys[NSET];
unsigned j;
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
rtree_set(rtree, keys[j], 1);
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
rtree_set(rtree, keys[j], 0);
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
rtree_delete(rtree);
}
fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rtree_get_empty,
test_rtree_extrema,
test_rtree_bits,
test_rtree_random));
}
| 3,032 | 22.152672 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/mq.c
|
#include "test/jemalloc_test.h"
#define NSENDERS 3
#define NMSGS 100000
typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
mq_msg(mq_msg_t) link;
};
mq_gen(static, mq_, mq_t, mq_msg_t, link)
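/*
 * mq_gen() instantiates the message queue type mq_t and the mq_* operations
 * (mq_init, mq_fini, mq_count, mq_tryget, mq_get, mq_put) exercised by the
 * tests below.
 */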
TEST_BEGIN(test_mq_basic)
{
mq_t mq;
mq_msg_t msg;
assert_false(mq_init(&mq), "Unexpected mq_init() failure");
assert_u_eq(mq_count(&mq), 0, "mq should be empty");
assert_ptr_null(mq_tryget(&mq),
"mq_tryget() should fail when the queue is empty");
mq_put(&mq, &msg);
assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
mq_put(&mq, &msg);
assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
mq_fini(&mq);
}
TEST_END
static void *
thd_receiver_start(void *arg)
{
mq_t *mq = (mq_t *)arg;
unsigned i;
for (i = 0; i < (NSENDERS * NMSGS); i++) {
mq_msg_t *msg = mq_get(mq);
assert_ptr_not_null(msg, "mq_get() should never return NULL");
dallocx(msg, 0);
}
return (NULL);
}
static void *
thd_sender_start(void *arg)
{
mq_t *mq = (mq_t *)arg;
unsigned i;
for (i = 0; i < NMSGS; i++) {
mq_msg_t *msg;
void *p;
p = mallocx(sizeof(mq_msg_t), 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}
return (NULL);
}
TEST_BEGIN(test_mq_threaded)
{
mq_t mq;
thd_t receiver;
thd_t senders[NSENDERS];
unsigned i;
assert_false(mq_init(&mq), "Unexpected mq_init() failure");
thd_create(&receiver, thd_receiver_start, (void *)&mq);
for (i = 0; i < NSENDERS; i++)
thd_create(&senders[i], thd_sender_start, (void *)&mq);
thd_join(receiver, NULL);
for (i = 0; i < NSENDERS; i++)
thd_join(senders[i], NULL);
mq_fini(&mq);
}
TEST_END
int
main(void)
{
return (test(
test_mq_basic,
test_mq_threaded));
}
| 1,797 | 18.333333 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/tsd.c
|
#include "test/jemalloc_test.h"
#define THREAD_DATA 0x72b65c10
typedef unsigned int data_t;
static bool data_cleanup_executed;
void
data_cleanup(void *arg)
{
data_t *data = (data_t *)arg;
assert_x_eq(*data, THREAD_DATA,
"Argument passed into cleanup function should match tsd value");
data_cleanup_executed = true;
}
malloc_tsd_protos(, data, data_t)
malloc_tsd_externs(data, data_t)
#define DATA_INIT 0x12345678
malloc_tsd_data(, data, data_t, DATA_INIT)
malloc_tsd_funcs(, data, data_t, DATA_INIT, data_cleanup)
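/*
 * The malloc_tsd_* macros above generate the thread-specific-data
 * boilerplate used below: data_tsd_boot(), data_tsd_get()/data_tsd_set(),
 * DATA_INIT as the initial value, and data_cleanup() run at thread exit.
 */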
static void *
thd_start(void *arg)
{
data_t d = (data_t)(uintptr_t)arg;
assert_x_eq(*data_tsd_get(), DATA_INIT,
"Initial tsd get should return initialization value");
data_tsd_set(&d);
assert_x_eq(*data_tsd_get(), d,
"After tsd set, tsd get should return value that was set");
d = 0;
assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
return (NULL);
}
TEST_BEGIN(test_tsd_main_thread)
{
thd_start((void *) 0xa5f3e329);
}
TEST_END
TEST_BEGIN(test_tsd_sub_thread)
{
thd_t thd;
data_cleanup_executed = false;
thd_create(&thd, thd_start, (void *)THREAD_DATA);
thd_join(thd, NULL);
assert_true(data_cleanup_executed,
"Cleanup function should have executed");
}
TEST_END
int
main(void)
{
data_tsd_boot();
return (test(
test_tsd_main_thread,
test_tsd_sub_thread));
}
| 1,400 | 18.458333 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/hash.c
|
/*
* This file is based on code that is part of SMHasher
* (https://code.google.com/p/smhasher/), and is subject to the MIT license
* (http://www.opensource.org/licenses/mit-license.php). Both email addresses
* associated with the source code's revision history belong to Austin Appleby,
* and the revision history ranges from 2010 to 2012. Therefore the copyright
* and license are here taken to be:
*
* Copyright (c) 2010-2012 Austin Appleby
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test/jemalloc_test.h"
typedef enum {
hash_variant_x86_32,
hash_variant_x86_128,
hash_variant_x64_128
} hash_variant_t;
static size_t
hash_variant_bits(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return (32);
case hash_variant_x86_128: return (128);
case hash_variant_x64_128: return (128);
default: not_reached();
}
}
static const char *
hash_variant_string(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return ("hash_x86_32");
case hash_variant_x86_128: return ("hash_x86_128");
case hash_variant_x64_128: return ("hash_x64_128");
default: not_reached();
}
}
static void
hash_variant_verify(hash_variant_t variant)
{
const size_t hashbytes = hash_variant_bits(variant) / 8;
uint8_t key[256];
VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
memset(key, 0, sizeof(key));
memset(hashes, 0, sizeof(hashes));
memset(final, 0, sizeof(final));
/*
* Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255}, using
* 256-i as the seed.
*/
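/*
 * Together with the final hashing step below, this reproduces the SMHasher
 * verification procedure: the 256 digests are concatenated, hashed once more
 * with seed 0, and the first 32 bits of the result are compared against a
 * known constant.
 */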
for (i = 0; i < 256; i++) {
key[i] = (uint8_t)i;
switch (variant) {
case hash_variant_x86_32: {
uint32_t out;
out = hash_x86_32(key, i, 256-i);
memcpy(&hashes[i*hashbytes], &out, hashbytes);
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} default: not_reached();
}
}
/* Hash the result array. */
switch (variant) {
case hash_variant_x86_32: {
uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
memcpy(final, &out, sizeof(out));
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} default: not_reached();
}
computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
(final[3] << 24);
switch (variant) {
#ifdef JEMALLOC_BIG_ENDIAN
case hash_variant_x86_32: expected = 0x6213303eU; break;
case hash_variant_x86_128: expected = 0x266820caU; break;
case hash_variant_x64_128: expected = 0xcc622b6fU; break;
#else
case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
case hash_variant_x86_128: expected = 0xb3ece62aU; break;
case hash_variant_x64_128: expected = 0x6384ba69U; break;
#endif
default: not_reached();
}
assert_u32_eq(computed, expected,
"Hash mismatch for %s(): expected %#x but got %#x",
hash_variant_string(variant), expected, computed);
}
TEST_BEGIN(test_hash_x86_32)
{
hash_variant_verify(hash_variant_x86_32);
}
TEST_END
TEST_BEGIN(test_hash_x86_128)
{
hash_variant_verify(hash_variant_x86_128);
}
TEST_END
TEST_BEGIN(test_hash_x64_128)
{
hash_variant_verify(hash_variant_x64_128);
}
TEST_END
int
main(void)
{
return (test(
test_hash_x86_32,
test_hash_x86_128,
test_hash_x64_128));
}
| 4,746 | 26.598837 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum_a.c
|
#include "prof_accum.h"
alloc_n_gen(0)
| 40 | 9.25 | 23 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_gdump.c
|
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
#endif
static bool did_prof_dump_open;
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
int fd;
did_prof_dump_open = true;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
}
TEST_BEGIN(test_gdump)
{
bool active;
void *p, *q;
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
did_prof_dump_open = false;
p = mallocx(chunksize, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
did_prof_dump_open = false;
q = mallocx(chunksize, 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
dallocx(p, 0);
dallocx(q, 0);
}
TEST_END
int
main(void)
{
return (test(
test_gdump));
}
| 1,127 | 18.789474 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum.h
|
#include "test/jemalloc_test.h"
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
#define alloc_n_proto(n) \
void *alloc_##n(unsigned bits);
alloc_n_proto(0)
alloc_n_proto(1)
#define alloc_n_gen(n) \
void * \
alloc_##n(unsigned bits) \
{ \
void *p; \
\
if (bits == 0) \
p = mallocx(1, 0); \
else { \
switch (bits & 0x1U) { \
case 0: \
p = (alloc_0(bits >> 1)); \
break; \
case 1: \
p = (alloc_1(bits >> 1)); \
break; \
default: not_reached(); \
} \
} \
/* Intentionally sabotage tail call optimization. */ \
assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
return (p); \
}
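/*
 * alloc_0() and alloc_1() (instantiated in prof_accum_a.c and prof_accum_b.c)
 * recurse on the bits of their argument, so different bit patterns walk
 * different alloc_0/alloc_1 call chains and therefore produce distinct
 * allocation backtraces. For example, alloc_0(5) calls alloc_1(2), which
 * calls alloc_0(1), which calls alloc_1(0), which finally does mallocx(1, 0).
 */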
| 794 | 21.083333 | 59 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum_b.c
|
#include "prof_accum.h"
alloc_n_gen(1)
| 40 | 9.25 | 23 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/mtx.c
|
#include "test/jemalloc_test.h"
#define NTHREADS 2
#define NINCRS 2000000
TEST_BEGIN(test_mtx_basic)
{
mtx_t mtx;
assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
mtx_lock(&mtx);
mtx_unlock(&mtx);
mtx_fini(&mtx);
}
TEST_END
typedef struct {
mtx_t mtx;
unsigned x;
} thd_start_arg_t;
static void *
thd_start(void *varg)
{
thd_start_arg_t *arg = (thd_start_arg_t *)varg;
unsigned i;
for (i = 0; i < NINCRS; i++) {
mtx_lock(&arg->mtx);
arg->x++;
mtx_unlock(&arg->mtx);
}
return (NULL);
}
TEST_BEGIN(test_mtx_race)
{
thd_start_arg_t arg;
thd_t thds[NTHREADS];
unsigned i;
assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
arg.x = 0;
for (i = 0; i < NTHREADS; i++)
thd_create(&thds[i], thd_start, (void *)&arg);
for (i = 0; i < NTHREADS; i++)
thd_join(thds[i], NULL);
assert_u_eq(arg.x, NTHREADS * NINCRS,
"Race-related counter corruption");
}
TEST_END
int
main(void)
{
return (test(
test_mtx_basic,
test_mtx_race));
}
| 1,003 | 15.459016 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/quarantine.c
|
#include "test/jemalloc_test.h"
#define QUARANTINE_SIZE 8192
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
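/*
 * Two macro levels are needed so that QUARANTINE_SIZE is expanded to 8192
 * before being stringified; malloc_conf below therefore ends with
 * "quarantine:8192".
 */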
#ifdef JEMALLOC_FILL
const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
STRINGIFY(QUARANTINE_SIZE);
#endif
void
quarantine_clear(void)
{
void *p;
p = mallocx(QUARANTINE_SIZE*2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
}
TEST_BEGIN(test_quarantine)
{
#define SZ ZU(256)
#define NQUARANTINED (QUARANTINE_SIZE/SZ)
void *quarantined[NQUARANTINED+1];
size_t i, j;
test_skip_if(!config_fill);
assert_zu_eq(nallocx(SZ, 0), SZ,
"SZ=%zu does not precisely equal a size class", SZ);
quarantine_clear();
/*
* Allocate enough regions to completely fill the quarantine, plus one
* more. The last iteration occurs with a completely full quarantine,
* but no regions should be drained from the quarantine until the last
* deallocation occurs. Therefore no region recycling should occur
* until after this loop completes.
*/
for (i = 0; i < NQUARANTINED+1; i++) {
void *p = mallocx(SZ, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
quarantined[i] = p;
dallocx(p, 0);
for (j = 0; j < i; j++) {
assert_ptr_ne(p, quarantined[j],
"Quarantined region recycled too early; "
"i=%zu, j=%zu", i, j);
}
}
#undef NQUARANTINED
#undef SZ
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_quarantine_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_quarantine,
test_quarantine_redzone));
}
| 2,583 | 22.706422 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool_base_alloc.c
|
#include "pool.h"
int
main(void)
{
return test_not_init(POOL_TEST_CASES);
}
| 79 | 7.888889 | 39 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_idump.c
|
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
"lg_prof_interval:0";
#endif
static bool did_prof_dump_open;
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
int fd;
did_prof_dump_open = true;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
}
TEST_BEGIN(test_idump)
{
bool active;
void *p;
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
did_prof_dump_open = false;
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
assert_true(did_prof_dump_open, "Expected a profile dump");
}
TEST_END
int
main(void)
{
return (test(
test_idump));
}
| 969 | 17.653846 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool_custom_alloc.c
|
#include "pool.h"
static char buff_alloc[4*1024];
static char *buff_ptr = buff_alloc;
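/*
 * Trivial bump allocator backing the hooks registered via
 * je_pool_set_alloc_funcs() below; the buffer is reset only once every
 * outstanding allocation has been freed.
 */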
void *
malloc_test(size_t size) {
custom_allocs++;
void *ret = buff_ptr;
buff_ptr = buff_ptr + size;
return ret;
}
void
free_test(void *ptr) {
custom_allocs--;
if(custom_allocs == 0) {
buff_ptr = buff_alloc;
}
}
int
main(void)
{
je_pool_set_alloc_funcs(malloc_test, free_test);
return test_not_init(POOL_TEST_CASES);
}
| 421 | 13.551724 | 49 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool.h
|
#include "test/jemalloc_test.h"
#define TEST_POOL_SIZE (16L * 1024L * 1024L)
#define TEST_TOO_SMALL_POOL_SIZE (2L * 1024L * 1024L)
#define TEST_VALUE 123456
#define TEST_MALLOC_FREE_LOOPS 2
#define TEST_MALLOC_SIZE 1024
#define TEST_ALLOCS_SIZE (TEST_POOL_SIZE / 8)
#define TEST_BUFFOR_CMP_SIZE (4L * 1024L * 1024L)
static char mem_pool[TEST_POOL_SIZE];
static char mem_extend_ok[TEST_POOL_SIZE];
static void* allocs[TEST_ALLOCS_SIZE];
static int custom_allocs;
TEST_BEGIN(test_pool_create_errors) {
pool_t *pool;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, 0, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for size 0");
pool = pool_create(NULL, TEST_POOL_SIZE, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for input addr NULL");
}
TEST_END
TEST_BEGIN(test_pool_create) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
assert_ptr_eq(pool, mem_pool, "pool_create() should return addr with valid input");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_malloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_malloc(pool, sizeof(int));
assert_ptr_not_null(test, "pool_malloc should return valid ptr");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_free) {
pool_t *pool;
int i, j, s = 0, prev_s = 0;
int allocs = TEST_POOL_SIZE/TEST_MALLOC_SIZE;
void *arr[allocs];
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
for (i = 0; i < TEST_MALLOC_FREE_LOOPS; ++i) {
for (j = 0; j < allocs; ++j) {
arr[j] = pool_malloc(pool, TEST_MALLOC_SIZE);
if (arr[j] != NULL) {
s++;
}
}
for (j = 0; j < allocs; ++j) {
if (arr[j] != NULL) {
pool_free(pool, arr[j]);
}
}
if (prev_s != 0) {
assert_x_eq(s, prev_s,
"pool_free() should give back used chunks");
}
prev_s = s;
s = 0;
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_calloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
int *test = pool_calloc(pool, 1, sizeof(int));
assert_ptr_not_null(test, "pool_calloc should return valid ptr");
assert_x_eq(*test, 0, "pool_calloc should return zeroed memory");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_realloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_ralloc(pool, NULL, sizeof(int));
assert_ptr_not_null(test, "pool_ralloc with NULL addr should return valid ptr");
int *test2 = pool_ralloc(pool, test, sizeof(int)*2);
assert_ptr_not_null(test2, "pool_ralloc should return valid ptr");
test2[0] = TEST_VALUE;
test2[1] = TEST_VALUE;
assert_x_eq(test2[1], TEST_VALUE, "ptr should be usable");
pool_free(pool, test2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_aligned_alloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_aligned_alloc(pool, 1024, 1024);
assert_ptr_not_null(test, "pool_aligned_alloc should return valid ptr");
assert_x_eq(((uintptr_t)(test) & 1023), 0, "ptr should be aligned");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_aligned_alloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_aligned_alloc() should return pointer to memory from pool");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_reuse_pool) {
pool_t *pool;
size_t pool_num = 0;
custom_allocs = 0;
/* create and destroy pool multiple times */
for (; pool_num<100; ++pool_num) {
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
if (pool == NULL) {
break;
}
void *prev = NULL;
size_t i = 0;
/* allocate memory from pool */
for (; i<100; ++i) {
void **next = pool_malloc(pool, sizeof (void *));
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
prev = next;
}
/* free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
}
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_check_memory) {
pool_t *pool;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
size_t object_size;
size_t size_allocated;
size_t i;
size_t j;
for (object_size = 8; object_size <= TEST_BUFFOR_CMP_SIZE ; object_size *= 2) {
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
size_allocated = 0;
memset(allocs, 0, TEST_ALLOCS_SIZE * sizeof(void *));
for (i = 0; i < TEST_ALLOCS_SIZE;++i) {
allocs[i] = pool_malloc(pool, object_size);
if (allocs[i] == NULL) {
/* out of memory in pool */
break;
}
assert_lu_gt((uintptr_t)allocs[i], (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)allocs[i], (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
size_allocated += object_size;
/* fill each allocation with a unique value */
memset(allocs[i], (char)i, object_size);
}
assert_ptr_not_null(allocs[0], "pool_malloc should return valid ptr");
assert_lu_lt(i + 1, TEST_ALLOCS_SIZE, "All memory should be used");
/* check for unexpected modifications of the prepared data */
for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) {
char *buffer = allocs[i];
for (j = 0; j < object_size; ++j)
if (buffer[j] != (char)i) {
assert_true(0, "Content of data object was modified unexpectedly"
" for object size: %zu, id: %zu", object_size, j);
break;
}
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
}
TEST_END
TEST_BEGIN(test_pool_use_all_memory) {
pool_t *pool;
size_t size = 0;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
void *prev = NULL;
for (;;) {
void **next = pool_malloc(pool, sizeof (void *));
if (next == NULL) {
/* Out of memory in pool, test end */
break;
}
size += sizeof (void *);
assert_ptr_not_null(next, "pool_malloc should return valid ptr");
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
assert_x_eq((uintptr_t)(*next), (uintptr_t)(prev), "ptr should be usable");
prev = next;
}
assert_lu_gt(size, 0, "Can not alloc any memory from pool");
/* Free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_errors) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_TOO_SMALL_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_TOO_SMALL_POOL_SIZE, 0);
assert_zu_eq(usable_size, 0, "pool_extend() should return 0"
" when provided with memory size smaller than chunksize");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_after_out_of_memory) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
/* use up all memory from the pool, then mark the base allocator as exhausted */
while (pool_malloc(pool, sizeof (void *)));
pool->base_next_addr = pool->base_past_addr;
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
* print_jemalloc_messages -- custom print function for jemalloc
*/
static void
print_jemalloc_messages(void* ignore, const char *s)
{
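/*
 * Intentionally left empty: jemalloc diagnostics triggered by the expected
 * je_pool_check() failures in the tests below are discarded rather than
 * printed.
 */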
}
TEST_BEGIN(test_pool_check_extend) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
pool_malloc(pool, 100);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
pool_malloc(pool, 100);
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_out_of_range) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
void *usable_addr = (void *)CHUNK_CEILING((uintptr_t)mem_extend_ok);
size_t usable_size = (TEST_POOL_SIZE - (uintptr_t)(usable_addr -
(void *)mem_extend_ok)) & ~chunksize_mask;
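/*
 * Record a chunk that lies in mem_extend_ok, which was never handed to this
 * pool, so je_pool_check() should flag the out-of-range memory.
 */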
chunk_record(pool,
&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
usable_addr, usable_size, 0);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() return error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_overlap) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
pool_t *pool2;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
/* create another pool in the same memory region */
pool2 = pool_create(mem_extend_ok, TEST_POOL_SIZE, 0, 1);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
assert_d_ne(je_pool_check(pool2), 1, "je_pool_check() not return error");
pool_delete(pool2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
#define POOL_TEST_CASES\
test_pool_create_errors, \
test_pool_create, \
test_pool_malloc, \
test_pool_free, \
test_pool_calloc, \
test_pool_realloc, \
test_pool_aligned_alloc, \
test_pool_reuse_pool, \
test_pool_check_memory, \
test_pool_use_all_memory, \
test_pool_extend_errors, \
test_pool_extend, \
test_pool_extend_after_out_of_memory, \
test_pool_check_extend, \
test_pool_check_memory_out_of_range, \
test_pool_check_memory_overlap
| 13,511 | 27.267782 | 84 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/SFMT.c
|
/*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT.c
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software, see LICENSE.txt
*/
#define SFMT_C_
#include "test/jemalloc_test.h"
#include "test/SFMT-params.h"
#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(ONLY64) && !defined(BIG_ENDIAN64)
#if defined(__GNUC__)
#error "-DONLY64 must be specified with -DBIG_ENDIAN64"
#endif
#undef ONLY64
#endif
/*------------------------------------------------------
128-bit SIMD data type for Altivec, SSE2 or standard C
------------------------------------------------------*/
#if defined(HAVE_ALTIVEC)
/** 128-bit data structure */
union W128_T {
vector unsigned int s;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#elif defined(HAVE_SSE2)
/** 128-bit data structure */
union W128_T {
__m128i si;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#else
/** 128-bit data structure */
struct W128_T {
uint32_t u[4];
};
/** 128-bit data type */
typedef struct W128_T w128_t;
#endif
struct sfmt_s {
/** the 128-bit internal state array */
w128_t sfmt[N];
/** index counter to the 32-bit internal state array */
int idx;
/** a flag: it is 0 if and only if the internal state is not yet
* initialized. */
int initialized;
};
/*--------------------------------------
FILE GLOBAL VARIABLES
internal state, index counter and flag
--------------------------------------*/
/** a parity check vector which certifies the period of 2^{MEXP} */
static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
/*----------------
STATIC FUNCTIONS
----------------*/
JEMALLOC_INLINE_C int idxof(int i);
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
#endif
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
static void period_certification(sfmt_t *ctx);
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
JEMALLOC_INLINE_C void swap(w128_t *array, int size);
#endif
#if defined(HAVE_ALTIVEC)
#include "test/SFMT-alti.h"
#elif defined(HAVE_SSE2)
#include "test/SFMT-sse2.h"
#endif
/**
 * This function simulates a 64-bit index of LITTLE ENDIAN
 * in a BIG ENDIAN machine.
*/
#ifdef ONLY64
JEMALLOC_INLINE_C int idxof(int i) {
return i ^ 1;
}
#else
JEMALLOC_INLINE_C int idxof(int i) {
return i;
}
#endif
/**
* This function simulates SIMD 128-bit right shift by the standard C.
* The 128-bit integer given in in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
/**
* This function simulates SIMD 128-bit left shift by the standard C.
* The 128-bit integer given in in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
#ifdef ONLY64
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
#endif
/**
* This function represents the recursion formula.
* @param r output
* @param a a 128-bit part of the internal state array
* @param b a 128-bit part of the internal state array
* @param c a 128-bit part of the internal state array
* @param d a 128-bit part of the internal state array
*/
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
^ (d->u[3] << SL1);
}
#else
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
^ (d->u[3] << SL1);
}
#endif
#endif
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
int i;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
for (; i < N; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
* @param array an 128-bit array to be filled by pseudorandom numbers.
* @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < N; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < size - N; i++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (j = 0; j < 2 * N - size; j++) {
ctx->sfmt[j] = array[j + size - N];
}
for (; i < size; i++, j++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
ctx->sfmt[j] = array[i];
}
}
#endif
#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
int i;
uint32_t x, y;
for (i = 0; i < size; i++) {
x = array[i].u[0];
y = array[i].u[2];
array[i].u[0] = array[i].u[1];
array[i].u[2] = array[i].u[3];
array[i].u[1] = x;
array[i].u[3] = y;
}
}
#endif
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func1(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1664525UL;
}
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func2(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
}
/**
 * This function certifies the period of 2^{MEXP}
*/
static void period_certification(sfmt_t *ctx) {
int inner = 0;
int i, j;
uint32_t work;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
for (i = 0; i < 4; i++)
inner ^= psfmt32[idxof(i)] & parity[i];
for (i = 16; i > 0; i >>= 1)
inner ^= inner >> i;
inner &= 1;
/* check OK */
if (inner == 1) {
return;
}
/* check NG, and modification */
for (i = 0; i < 4; i++) {
work = 1;
for (j = 0; j < 32; j++) {
if ((work & parity[i]) != 0) {
psfmt32[idxof(i)] ^= work;
return;
}
work = work << 1;
}
}
}
/*----------------
PUBLIC FUNCTIONS
----------------*/
/**
* This function returns the identification string.
* The string shows the word size, the Mersenne exponent,
* and all parameters of this generator.
*/
const char *get_idstring(void) {
return IDSTR;
}
/**
* This function returns the minimum size of array used for \b
* fill_array32() function.
* @return minimum size of array used for fill_array32() function.
*/
int get_min_array_size32(void) {
return N32;
}
/**
* This function returns the minimum size of array used for \b
* fill_array64() function.
* @return minimum size of array used for fill_array64() function.
*/
int get_min_array_size64(void) {
return N64;
}
#ifndef ONLY64
/**
* This function generates and returns 32-bit pseudorandom number.
* init_gen_rand or init_by_array must be called before this function.
* @return 32-bit pseudorandom number
*/
uint32_t gen_rand32(sfmt_t *ctx) {
uint32_t r;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
assert(ctx->initialized);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
r = psfmt32[ctx->idx++];
return r;
}
/* Generate a random integer in [0..limit). */
uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
uint32_t ret, above;
above = 0xffffffffU - (0xffffffffU % limit);
while (1) {
ret = gen_rand32(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
#endif
/**
* This function generates and returns 64-bit pseudorandom number.
* init_gen_rand or init_by_array must be called before this function.
* The function gen_rand64 should not be called after gen_rand32,
* unless an initialization is again executed.
* @return 64-bit pseudorandom number
*/
uint64_t gen_rand64(sfmt_t *ctx) {
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
uint32_t r1, r2;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
#else
uint64_t r;
uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
#endif
assert(ctx->initialized);
assert(ctx->idx % 2 == 0);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
r1 = psfmt32[ctx->idx];
r2 = psfmt32[ctx->idx + 1];
ctx->idx += 2;
return ((uint64_t)r2 << 32) | r1;
#else
r = psfmt64[ctx->idx / 2];
ctx->idx += 2;
return r;
#endif
}
/* Generate a random integer in [0..limit). */
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
uint64_t ret, above;
above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
while (1) {
ret = gen_rand64(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
#ifndef ONLY64
/**
* This function generates pseudorandom 32-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 624 and a
* multiple of four. The generation by this function is much faster
* than the following gen_rand function.
*
* For initialization, init_gen_rand or init_by_array must be called
* before the first call of this function. This function can not be
* used after calling gen_rand function, without initialization.
*
* @param array an array where pseudorandom 32-bit integers are filled
* by this function. The pointer to the array must be \b "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 32-bit pseudorandom integers to be
* generated. size must be a multiple of 4, and greater than or equal
* to (MEXP / 128 + 1) * 4.
*
* @note \b memalign or \b posix_memalign is available to get aligned
* memory. Mac OSX doesn't have these functions, but \b malloc of OSX
* returns the pointer to the aligned memory block.
*/
void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 4 == 0);
assert(size >= N32);
gen_rand_array(ctx, (w128_t *)array, size / 4);
ctx->idx = N32;
}
#endif
/**
* This function generates pseudorandom 64-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 312 and a
* multiple of two. The generation by this function is much faster
* than the following gen_rand function.
*
* For initialization, init_gen_rand or init_by_array must be called
* before the first call of this function. This function can not be
* used after calling gen_rand function, without initialization.
*
* @param array an array where pseudorandom 64-bit integers are filled
* by this function. The pointer to the array must be "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 64-bit pseudorandom integers to be
* generated. size must be a multiple of 2, and greater than or equal
* to (MEXP / 128 + 1) * 2
*
* @note \b memalign or \b posix_memalign is available to get aligned
* memory. Mac OSX doesn't have these functions, but \b malloc of OSX
* returns the pointer to the aligned memory block.
*/
void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 2 == 0);
assert(size >= N64);
gen_rand_array(ctx, (w128_t *)array, size / 2);
ctx->idx = N32;
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
swap((w128_t *)array, size /2);
#endif
}
/**
* This function initializes the internal state array with a 32-bit
* integer seed.
*
* @param seed a 32-bit integer used as the seed.
*/
sfmt_t *init_gen_rand(uint32_t seed) {
void *p;
sfmt_t *ctx;
int i;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
psfmt32[idxof(0)] = seed;
for (i = 1; i < N32; i++) {
psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
^ (psfmt32[idxof(i - 1)] >> 30))
+ i;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
/**
* This function initializes the internal state array,
* with an array of 32-bit integers used as the seeds
* @param init_key the array of 32-bit integers, used as a seed.
* @param key_length the length of init_key.
*/
sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
void *p;
sfmt_t *ctx;
int i, j, count;
uint32_t r;
int lag;
int mid;
int size = N * 4;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
if (size >= 623) {
lag = 11;
} else if (size >= 68) {
lag = 7;
} else if (size >= 39) {
lag = 5;
} else {
lag = 3;
}
mid = (size - lag) / 2;
memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
if (key_length + 1 > N32) {
count = key_length + 1;
} else {
count = N32;
}
r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
^ psfmt32[idxof(N32 - 1)]);
psfmt32[idxof(mid)] += r;
r += key_length;
psfmt32[idxof(mid + lag)] += r;
psfmt32[idxof(0)] = r;
count--;
for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += init_key[j] + i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (; j < count; j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (j = 0; j < N32; j++) {
r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] ^= r;
r -= i;
psfmt32[idxof((i + mid + lag) % N32)] ^= r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
void fini_gen_rand(sfmt_t *ctx) {
assert(ctx != NULL);
ctx->initialized = 0;
free(ctx);
}
| 20,765 | 27.841667 | 79 |
c
|
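A brief usage sketch for the SFMT API defined above: seed a generator, draw a few 32-bit values, and release the state. It assumes the declarations from the test headers (pulled in via test/jemalloc_test.h) are in scope; the seed and iteration count are arbitrary illustrative choices.
#include "test/jemalloc_test.h"

static uint32_t
sfmt_example(void)
{
    sfmt_t *ctx;
    uint32_t acc = 0;
    int i;

    ctx = init_gen_rand(12345);     /* seed with a 32-bit integer */
    if (ctx == NULL)
        return (0);                 /* aligned allocation of the state failed */
    for (i = 0; i < 16; i++)
        acc ^= gen_rand32(ctx);     /* one 32-bit pseudorandom number per call */
    fini_gen_rand(ctx);             /* free the aligned state */
    return (acc);
}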
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/test.c
|
#include "test/jemalloc_test.h"
static unsigned test_count = 0;
static test_status_t test_counts[test_status_count] = {0, 0, 0};
static test_status_t test_status = test_status_pass;
static const char * test_name = "";
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_skip(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_skip;
}
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_fail(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_fail;
}
static const char *
test_status_string(test_status_t test_status)
{
switch (test_status) {
case test_status_pass: return "pass";
case test_status_skip: return "skip";
case test_status_fail: return "fail";
default: not_reached();
}
}
void
p_test_init(const char *name)
{
test_count++;
test_status = test_status_pass;
test_name = name;
}
void
p_test_fini(void)
{
test_counts[test_status]++;
malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
test_status_t
p_test(test_t *t, ...)
{
test_status_t ret;
va_list ap;
/*
* Make sure initialization occurs prior to running tests. Tests are
* special because they may use internal facilities prior to triggering
* initialization as a side effect of calling into the public API. This
* is a final safety that works even if jemalloc_constructor() doesn't
* run, as for MSVC builds.
*/
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
return (test_status_fail);
}
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
test_status_t
p_test_not_init(test_t *t, ...)
{
test_status_t ret;
va_list ap;
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
void
p_test_fail(const char *prefix, const char *message)
{
malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
test_status = test_status_fail;
}
| 2,920 | 20.798507 | 73 |
c
|
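For context, a minimal sketch of how a test file drives the harness above. TEST_BEGIN, TEST_END, test() and assert_d_eq() are macros from the test headers (not shown in this excerpt); they wrap the p_test_init(), p_test_fini() and p_test() functions defined in test.c, as the integration tests further down also demonstrate.
#include "test/jemalloc_test.h"

TEST_BEGIN(test_example)
{

    assert_d_eq(1 + 1, 2, "Unexpected arithmetic result");
}
TEST_END

int
main(void)
{

    return (test(
        test_example));
}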
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/thd.c
|
#include "test/jemalloc_test.h"
#ifdef _WIN32
void
thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
{
LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
*thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
if (*thd == NULL)
test_fail("Error in CreateThread()\n");
}
void
thd_join(thd_t thd, void **ret)
{
if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
DWORD exit_code;
GetExitCodeThread(thd, (LPDWORD) &exit_code);
*ret = (void *)(uintptr_t)exit_code;
}
}
#else
void
thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
{
if (pthread_create(thd, NULL, proc, arg) != 0)
test_fail("Error in pthread_create()\n");
}
void
thd_join(thd_t thd, void **ret)
{
pthread_join(thd, ret);
}
#endif
| 752 | 17.825 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/math.c
|
#define MATH_C_
#include "test/jemalloc_test.h"
| 48 | 15.333333 | 31 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/mtx.c
|
#include "test/jemalloc_test.h"
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
bool
mtx_init(mtx_t *mtx)
{
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
return (true);
#elif (defined(JEMALLOC_OSSPIN))
mtx->lock = 0;
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
mtx_fini(mtx_t *mtx)
{
#ifdef _WIN32
#elif (defined(JEMALLOC_OSSPIN))
#else
pthread_mutex_destroy(&mtx->lock);
#endif
}
void
mtx_lock(mtx_t *mtx)
{
#ifdef _WIN32
EnterCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mtx->lock);
#else
pthread_mutex_lock(&mtx->lock);
#endif
}
void
mtx_unlock(mtx_t *mtx)
{
#ifdef _WIN32
LeaveCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mtx->lock);
#else
pthread_mutex_unlock(&mtx->lock);
#endif
}
| 1,104 | 15.492537 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/allocm.c
|
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
int r;
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = 0;
r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) |
ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"nallocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
rsz = 0;
r = allocm(&ps[i], &rsz, sz,
ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"allocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocm()/allocm() rsize mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
sallocm(ps[i], &rsz, 0);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocm(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,719 | 24.185185 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/allocated.c
|
#include "test/jemalloc_test.h"
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
void *p;
uint64_t a0, a1, d0, d1;
uint64_t *ap0, *ap1, *dp0, *dp1;
size_t sz, usize;
sz = sizeof(a0);
if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(ap0);
if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*ap0, a0,
"\"thread.allocatedp\" should provide a pointer to internal "
"storage");
sz = sizeof(d0);
if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(dp0);
if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*dp0, d0,
"\"thread.deallocatedp\" should provide a pointer to internal "
"storage");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1);
mallctl("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1);
mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value");
assert_ptr_eq(ap0, ap1,
"Pointer returned by \"thread.allocatedp\" should not change");
usize = malloc_usable_size(p);
assert_u64_le(a0 + usize, a1,
"Allocated memory counter should increase by at least the amount "
"explicitly allocated");
free(p);
sz = sizeof(d1);
mallctl("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1);
mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value");
assert_ptr_eq(dp0, dp1,
"Pointer returned by \"thread.deallocatedp\" should not change");
assert_u64_le(d0 + usize, d1,
"Deallocated memory counter should increase by at least the amount "
"explicitly deallocated");
return (NULL);
label_ENOENT:
assert_false(config_stats,
"ENOENT should only be returned if stats are disabled");
test_skip("\"thread.allocated\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,989 | 22.730159 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/xallocx.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_same_size)
{
void *p;
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz, 0, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_extra_no_move)
{
void *p;
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz, sz-42, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_no_move_fail)
{
void *p;
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz + 5, 0, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
TEST_END
int
main(void)
{
return (test(
test_same_size,
test_extra_no_move,
test_no_move_fail));
}
| 1,010 | 15.85 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/mallocx.c
|
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0);
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_ptr_not_null(ps[i],
"mallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocx(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,387 | 23.367347 | 62 |
c
|
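A sketch of the flag combination the mallocx() test above exercises: a zeroed allocation with explicit alignment, sized up front with nallocx(). The helper name and the 64-byte alignment are illustrative choices.
#include "test/jemalloc_test.h"

static void *
alloc_aligned64(size_t size)
{
    int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO;
    void *p;

    if (nallocx(size, flags) == 0)
        return (NULL);    /* the request itself is invalid */
    p = mallocx(size, flags);
    /* On success, sallocx(p, 0) == nallocx(size, flags). */
    return (p);
}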
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/rallocm.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_same_size)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS,
"Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_extra_no_move)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE),
ALLOCM_SUCCESS, "Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_no_move_fail)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE),
ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = p;
assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = p;
assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j], szs[j-1]);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
int
main(void)
{
return (test(
test_same_size,
test_extra_no_move,
test_no_move_fail,
test_grow_and_shrink));
}
| 2,637 | 22.553571 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/rallocx.c
|
#include "test/jemalloc_test.h"
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
szs[0] = sallocx(p, 0);
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = rallocx(p, szs[j-1]+1, 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
szs[j] = sallocx(q, 0);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = rallocx(p, szs[j-1], 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j], szs[j-1]);
tsz = sallocx(q, 0);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
dallocx(p, 0);
#undef MAXSZ
#undef NSZS
#undef NCYCLES
}
TEST_END
static bool
validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
{
bool ret = false;
const uint8_t *buf = (const uint8_t *)p;
size_t i;
for (i = 0; i < len; i++) {
uint8_t b = buf[offset+i];
if (b != c) {
test_fail("Allocation at %p contains %#x rather than "
"%#x at offset %zu", p, b, c, offset+i);
ret = true;
}
}
return (ret);
}
TEST_BEGIN(test_zero)
{
void *p, *q;
size_t psz, qsz, i, j;
size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
#define FILL_BYTE 0xaaU
#define RANGE 2048
for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
size_t start_size = start_sizes[i];
p = mallocx(start_size, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
psz = sallocx(p, 0);
assert_false(validate_fill(p, 0, 0, psz),
"Expected zeroed memory");
memset(p, FILL_BYTE, psz);
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
for (j = 1; j < RANGE; j++) {
q = rallocx(p, start_size+j, MALLOCX_ZERO);
assert_ptr_not_null(q, "Unexpected rallocx() error");
qsz = sallocx(q, 0);
if (q != p || qsz != psz) {
assert_false(validate_fill(q, FILL_BYTE, 0,
psz), "Expected filled memory");
assert_false(validate_fill(q, 0, psz, qsz-psz),
"Expected zeroed memory");
}
if (psz != qsz) {
memset((void *)((uintptr_t)q+psz), FILL_BYTE,
qsz-psz);
psz = qsz;
}
p = q;
}
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
dallocx(p, 0);
}
#undef FILL_BYTE
}
TEST_END
TEST_BEGIN(test_align)
{
void *p, *q;
size_t align;
#define MAX_ALIGN (ZU(1) << 25)
align = ZU(1);
p = mallocx(1, MALLOCX_ALIGN(align));
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
q = rallocx(p, 1, MALLOCX_ALIGN(align));
assert_ptr_not_null(q,
"Unexpected rallocx() error for align=%zu", align);
assert_ptr_null(
(void *)((uintptr_t)q & (align-1)),
"%p inadequately aligned for align=%zu",
q, align);
p = q;
}
dallocx(p, 0);
#undef MAX_ALIGN
}
TEST_END
TEST_BEGIN(test_lg_align_and_zero)
{
void *p, *q;
size_t lg_align, sz;
#define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22)
lg_align = ZU(0);
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q,
"Unexpected rallocx() error for lg_align=%zu", lg_align);
assert_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
"%p inadequately aligned for lg_align=%zu",
q, lg_align);
sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz),
"Expected zeroed memory");
} else {
assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
"Expected zeroed memory");
assert_false(validate_fill(
(void *)((uintptr_t)q+sz-MAX_VALIDATE),
0, 0, MAX_VALIDATE), "Expected zeroed memory");
}
p = q;
}
dallocx(p, 0);
#undef MAX_VALIDATE
#undef MAX_LG_ALIGN
}
TEST_END
int
main(void)
{
return (test(
test_grow_and_shrink,
test_zero,
test_align,
test_lg_align_and_zero));
}
| 4,365 | 22.6 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/thread_tcache_enabled.c
|
#include "test/jemalloc_test.h"
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
size_t sz;
bool e0, e1;
sz = sizeof(bool);
if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
if (err == ENOENT) {
assert_false(config_tcache,
"ENOENT should only be returned if tcache is "
"disabled");
}
goto label_ENOENT;
}
if (e0) {
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
}
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
return (NULL);
label_ENOENT:
test_skip("\"thread.tcache.enabled\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,535 | 21.245614 | 68 |
c
|
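The test above toggles the per-thread cache through a single mallctl knob; the read-modify pattern it relies on is sketched below. The helper name is made up, and ENOENT simply means jemalloc was built without tcache support.
#include "test/jemalloc_test.h"

static bool
tcache_set(bool enable)
{
    bool was;
    size_t sz = sizeof(bool);

    /* Writes the new setting and reads back the previous one. */
    if (mallctl("thread.tcache.enabled", &was, &sz, &enable, sz) != 0)
        return (false);    /* e.g. ENOENT without tcache support */
    return (was);
}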