Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/obj.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj.c -- transactional object store implementation
*/
#include <inttypes.h>
#include <limits.h>
#include <wchar.h>
#include <stdbool.h>
#include "valgrind_internal.h"
#include "libpmem.h"
#include "memblock.h"
#include "ravl.h"
#include "cuckoo.h"
#include "list.h"
#include "mmap.h"
#include "obj.h"
#include "ctl_global.h"
#include "heap_layout.h"
#include "os.h"
#include "os_thread.h"
#include "pmemops.h"
#include "set.h"
#include "sync.h"
#include "tx.h"
#include "sys_util.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous white characters.
*/
#define OBJ_CONFIG_ENV_VARIABLE "PMEMOBJ_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define OBJ_CONFIG_FILE_ENV_VARIABLE "PMEMOBJ_CONF_FILE"
/*
* The variable which overwrites a number of lanes available at runtime.
*/
#define OBJ_NLANES_ENV_VARIABLE "PMEMOBJ_NLANES"
#define OBJ_X_VALID_FLAGS PMEMOBJ_F_RELAXED
static const struct pool_attr Obj_create_attr = {
OBJ_HDR_SIG,
OBJ_FORMAT_MAJOR,
OBJ_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Obj_open_attr = {
OBJ_HDR_SIG,
OBJ_FORMAT_MAJOR,
OBJ_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
static struct cuckoo *pools_ht; /* hash table used for searching by UUID */
static struct ravl *pools_tree; /* tree used for searching by address */
int _pobj_cache_invalidate;
#ifndef _WIN32
__thread struct _pobj_pcache _pobj_cached_pool;
/*
 * pmemobj_direct -- returns the direct pointer of an object
 */
void *
pmemobj_direct(PMEMoid oid)
{
	/* the real work is done by the inline variant from the public header */
	void *direct = pmemobj_direct_inline(oid);
	return direct;
}
#else /* _WIN32 */
/*
* XXX - this is a temporary implementation
*
* Seems like we could still use TLS and simply substitute "__thread" with
* "__declspec(thread)", however it's not clear if it would work correctly
* with Windows DLL's.
* Need to verify that once we have the multi-threaded tests ported.
*/
struct _pobj_pcache {
PMEMobjpool *pop;
uint64_t uuid_lo;
int invalidate;
};
static os_once_t Cached_pool_key_once = OS_ONCE_INIT;
static os_tls_key_t Cached_pool_key;
/*
 * _Cached_pool_key_alloc -- (internal) allocate pool cache pthread key
 *
 * The key's destructor is free(), so each thread's cache entry is
 * released automatically on thread exit.
 */
static void
_Cached_pool_key_alloc(void)
{
	if (os_tls_key_create(&Cached_pool_key, free) != 0)
		FATAL("!os_tls_key_create");
}
/*
 * pmemobj_direct -- returns the direct pointer of an object
 *
 * Windows variant: a per-thread cache (TLS slot) maps the OID's pool
 * uuid to the pool's base address; the cache is refreshed whenever the
 * global invalidation counter changes or a different pool is queried.
 */
void *
pmemobj_direct(PMEMoid oid)
{
	if (oid.off == 0 || oid.pool_uuid_lo == 0)
		return NULL;
	struct _pobj_pcache *cache = os_tls_get(Cached_pool_key);
	if (cache == NULL) {
		/* first use on this thread -- allocate the cache entry */
		cache = calloc(sizeof(struct _pobj_pcache), 1);
		if (cache == NULL)
			FATAL("!pcache malloc");
		if (os_tls_set(Cached_pool_key, cache) != 0)
			FATAL("!os_tls_set");
	}
	int stale = _pobj_cache_invalidate != cache->invalidate ||
	    cache->uuid_lo != oid.pool_uuid_lo;
	if (stale) {
		cache->invalidate = _pobj_cache_invalidate;
		cache->pop = pmemobj_pool_by_oid(oid);
		if (cache->pop == NULL) {
			cache->uuid_lo = 0;
			return NULL;
		}
		cache->uuid_lo = oid.pool_uuid_lo;
	}
	return (void *)((uintptr_t)cache->pop + oid.off);
}
#endif /* _WIN32 */
/*
 * obj_ctl_init_and_load -- (static) initializes CTL and loads configuration
 * from env variable and file
 *
 * With pop == NULL only the global (pool-independent) configuration is
 * processed.  Otherwise a fresh CTL instance is created for the pool and
 * the pool-scoped CTL namespaces are registered before the config is read.
 * Returns 0 on success; on failure returns -1 and deletes the pool's CTL
 * instance (if one was created).
 */
static int
obj_ctl_init_and_load(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);
	if (pop != NULL && (pop->ctl = ctl_new()) == NULL) {
		LOG(2, "!ctl_new");
		return -1;
	}
	if (pop) {
		/* register all pool-scoped CTL namespaces */
		tx_ctl_register(pop);
		pmalloc_ctl_register(pop);
		stats_ctl_register(pop);
		debug_ctl_register(pop);
	}
	/* config may come directly from an environment variable ... */
	char *env_config = os_getenv(OBJ_CONFIG_ENV_VARIABLE);
	if (env_config != NULL) {
		if (ctl_load_config_from_string(pop ? pop->ctl : NULL,
				pop, env_config) != 0) {
			LOG(2, "unable to parse config stored in %s "
					"environment variable",
					OBJ_CONFIG_ENV_VARIABLE);
			goto err;
		}
	}
	/* ... and/or from a file named by another environment variable */
	char *env_config_file = os_getenv(OBJ_CONFIG_FILE_ENV_VARIABLE);
	if (env_config_file != NULL && env_config_file[0] != '\0') {
		if (ctl_load_config_from_file(pop ? pop->ctl : NULL,
				pop, env_config_file) != 0) {
			LOG(2, "unable to parse config stored in %s "
					"file (from %s environment variable)",
					env_config_file,
					OBJ_CONFIG_FILE_ENV_VARIABLE);
			goto err;
		}
	}
	return 0;
err:
	if (pop)
		ctl_delete(pop->ctl);
	return -1;
}
/*
 * obj_pool_cmp -- (internal) compares two PMEMobjpool pointers
 *
 * Ordering callback for the pools tree.  The compared pointers belong to
 * distinct mappings, and applying the relational operators to pointers
 * into unrelated objects is undefined behavior in C -- so compare their
 * integer representations instead.
 */
static int
obj_pool_cmp(const void *lhs, const void *rhs)
{
	uintptr_t l = (uintptr_t)lhs;
	uintptr_t r = (uintptr_t)rhs;
	if (l > r)
		return 1;
	if (l < r)
		return -1;
	return 0;
}
/*
 * obj_pool_init -- (internal) allocate global structs holding all opened pools
 *
 * This is invoked on a first call to pmemobj_open() or pmemobj_create().
 * Memory is released in library destructor.
 */
static void
obj_pool_init(void)
{
	LOG(3, NULL);
	/* already initialized by an earlier open/create call */
	if (pools_ht != NULL)
		return;
	if ((pools_ht = cuckoo_new()) == NULL)
		FATAL("!cuckoo_new");
	if ((pools_tree = ravl_new(obj_pool_cmp)) == NULL)
		FATAL("!ravl_new");
}
/*
 * pmemobj_oid -- return a PMEMoid based on the virtual address
 *
 * If the address does not belong to any pool OID_NULL is returned.
 */
PMEMoid
pmemobj_oid(const void *addr)
{
	PMEMobjpool *pop = pmemobj_pool_by_ptr(addr);
	if (pop == NULL)
		return OID_NULL;
	/* the offset is relative to the pool's base address */
	PMEMoid oid = {
		pop->uuid_lo,
		(uintptr_t)addr - (uintptr_t)pop
	};
	return oid;
}
/*
* User may decide to map all pools with MAP_PRIVATE flag using
* PMEMOBJ_COW environment variable.
*/
static int Open_cow;
/*
 * obj_init -- initialization of obj
 *
 * Called by constructor.  Verifies layout/flag invariants at compile
 * time, optionally enables copy-on-write pool mapping, sets up the
 * Windows TLS key, registers and loads the global CTL configuration,
 * and boots the lane info and remote-replication subsystems.
 */
void
obj_init(void)
{
	LOG(3, NULL);
	/* the runtime pool header must exactly fill the reserved area */
	COMPILE_ERROR_ON(sizeof(struct pmemobjpool) !=
		POOL_HDR_SIZE + POOL_DESC_SIZE);
	/* pmemobj flags must alias the libpmem flags they are passed as */
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NODRAIN != PMEM_F_MEM_NODRAIN);
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NONTEMPORAL != PMEM_F_MEM_NONTEMPORAL);
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_TEMPORAL != PMEM_F_MEM_TEMPORAL);
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_WC != PMEM_F_MEM_WC);
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_WB != PMEM_F_MEM_WB);
	COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NOFLUSH != PMEM_F_MEM_NOFLUSH);
#ifdef USE_COW_ENV
	char *env = os_getenv("PMEMOBJ_COW");
	if (env)
		Open_cow = atoi(env);
#endif
#ifdef _WIN32
	/* XXX - temporary implementation (see above) */
	os_once(&Cached_pool_key_once, _Cached_pool_key_alloc);
#endif
	/*
	 * Load global config, ignore any issues. They will be caught on the
	 * subsequent call to this function for individual pools.
	 */
	ctl_global_register();
	if (obj_ctl_init_and_load(NULL))
		FATAL("error: %s", pmemobj_errormsg());
	lane_info_boot();
	util_remote_init();
}
/*
 * obj_fini -- cleanup of obj
 *
 * Called by destructor.  Releases the global pool registries created in
 * obj_pool_init() and tears down the lane-info and remote subsystems.
 */
void
obj_fini(void)
{
	LOG(3, NULL);
	/* the registries exist only if a pool was ever opened/created */
	if (pools_ht)
		cuckoo_delete(pools_ht);
	if (pools_tree)
		ravl_delete(pools_tree);
	lane_info_destroy();
	util_remote_fini();
#ifdef _WIN32
	/* failure is ignored -- the process is shutting down anyway */
	(void) os_tls_key_delete(Cached_pool_key);
#endif
}
/*
 * obj_drain_empty -- (internal) empty function for drain on non-pmem memory
 *
 * msync-based persistence has no separate drain step, so this hook
 * intentionally does nothing.
 */
static void
obj_drain_empty(void)
{
}
/*
 * obj_nopmem_memcpy -- (internal) memcpy followed by an msync
 */
static void *
obj_nopmem_memcpy(void *dest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "dest %p src %p len %zu flags 0x%x", dest, src, len, flags);
	/*
	 * pmem_memcpy (unlike libc memcpy) guarantees that multiple-of-8
	 * byte stores to 8-byte-aligned addresses are fail-safe atomic,
	 * which pmemobj relies on; flushing is skipped here because the
	 * subsequent msync makes the range durable.
	 */
	pmem_memcpy(dest, src, len, PMEM_F_MEM_NOFLUSH);
	pmem_msync(dest, len);
	return dest;
}
/*
 * obj_nopmem_memmove -- (internal) memmove followed by an msync
 *
 * See obj_nopmem_memcpy for the rationale behind using pmem_memmove.
 */
static void *
obj_nopmem_memmove(void *dest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "dest %p src %p len %zu flags 0x%x", dest, src, len, flags);
	pmem_memmove(dest, src, len, PMEM_F_MEM_NOFLUSH);
	pmem_msync(dest, len);
	return dest;
}
/*
 * obj_nopmem_memset -- (internal) memset followed by an msync
 *
 * See obj_nopmem_memcpy for the rationale behind using pmem_memset.
 */
static void *
obj_nopmem_memset(void *dest, int c, size_t len, unsigned flags)
{
	LOG(15, "dest %p c 0x%02x len %zu flags 0x%x", dest, c, len, flags);
	pmem_memset(dest, c, len, PMEM_F_MEM_NOFLUSH);
	pmem_msync(dest, len);
	return dest;
}
/*
 * obj_remote_persist -- (internal) remote persist function
 *
 * Translates addr into an offset within the remote pool and issues an
 * rpmem persist on the given lane.  PMEMOBJ_F_RELAXED is translated to
 * RPMEM_PERSIST_RELAXED.  Returns 0 on success, -1 on failure.
 */
static int
obj_remote_persist(PMEMobjpool *pop, const void *addr, size_t len,
	unsigned lane, unsigned flags)
{
	LOG(15, "pop %p addr %p len %zu lane %u flags %u",
			pop, addr, len, lane, flags);
	ASSERTne(pop->rpp, NULL);
	uintptr_t offset = (uintptr_t)addr - pop->remote_base;
	unsigned rpmem_flags = 0;
	if (flags & PMEMOBJ_F_RELAXED)
		rpmem_flags |= RPMEM_PERSIST_RELAXED;
	int rv = Rpmem_persist(pop->rpp, offset, len, lane, rpmem_flags);
	if (rv) {
		/*
		 * Print the uintptr_t offset with PRIuPTR -- %zu expects
		 * size_t, and passing a mismatched type to a printf-style
		 * function is undefined behavior.
		 */
		ERR("!rpmem_persist(rpp %p offset %" PRIuPTR
			" length %zu lane %u)"
			" FATAL ERROR (returned value %i)",
			pop->rpp, offset, len, lane, rv);
		return -1;
	}
	return 0;
}
/*
* XXX - Consider removing obj_norep_*() wrappers to call *_local()
* functions directly. Alternatively, always use obj_rep_*(), even
* if there are no replicas. Verify the performance penalty.
*/
/*
 * obj_norep_memcpy -- (internal) memcpy w/o replication
 */
static void *
obj_norep_memcpy(void *ctx, void *dest, const void *src, size_t len,
	unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
		flags);
	/* drop any flags libpmem does not understand */
	unsigned masked = flags & PMEM_F_MEM_VALID_FLAGS;
	return pop->memcpy_local(dest, src, len, masked);
}
/*
 * obj_norep_memmove -- (internal) memmove w/o replication
 */
static void *
obj_norep_memmove(void *ctx, void *dest, const void *src, size_t len,
	unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
		flags);
	/* drop any flags libpmem does not understand */
	unsigned masked = flags & PMEM_F_MEM_VALID_FLAGS;
	return pop->memmove_local(dest, src, len, masked);
}
/*
 * obj_norep_memset -- (internal) memset w/o replication
 */
static void *
obj_norep_memset(void *ctx, void *dest, int c, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
		flags);
	/* drop any flags libpmem does not understand */
	unsigned masked = flags & PMEM_F_MEM_VALID_FLAGS;
	return pop->memset_local(dest, c, len, masked);
}
/*
 * obj_norep_persist -- (internal) persist w/o replication
 *
 * The flags argument exists only for interface symmetry with the
 * replicated variant; it is not used here.
 */
static int
obj_norep_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	pop->persist_local(addr, len);
	return 0;
}
/*
 * obj_norep_flush -- (internal) flush w/o replication
 *
 * The flags argument exists only for interface symmetry with the
 * replicated variant; it is not used here.
 */
static int
obj_norep_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	pop->flush_local(addr, len);
	return 0;
}
/*
 * obj_norep_drain -- (internal) drain w/o replication
 */
static void
obj_norep_drain(void *ctx)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p", pop);
	/* forward straight to the pool's local drain hook */
	pop->drain_local();
}
static void obj_pool_cleanup(PMEMobjpool *pop);
/*
 * obj_handle_remote_persist_error -- (internal) handle remote persist
 * fatal error
 *
 * A failed remote persist leaves the replica set in an unknown state,
 * so the pool is cleaned up and the process is aborted via FATAL().
 * This function does not return.
 */
static void
obj_handle_remote_persist_error(PMEMobjpool *pop)
{
	LOG(1, "pop %p", pop);
	ERR("error clean up...");
	obj_pool_cleanup(pop);
	FATAL("Fatal error of remote persist. Aborting...");
}
/*
 * obj_rep_memcpy -- (internal) memcpy with replication
 *
 * Performs the copy on the master replica, then walks the replica chain:
 * local replicas receive the same copy at the corresponding offset, while
 * remote replicas are updated through persist_remote.  A lane is held for
 * the duration only if remote replicas exist (rpmem needs one).
 */
static void *
obj_rep_memcpy(void *ctx, void *dest, const void *src, size_t len,
	unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
		flags);
	unsigned lane = UINT_MAX;
	if (pop->has_remote_replicas)
		lane = lane_hold(pop, NULL);
	void *ret = pop->memcpy_local(dest, src, len, flags);
	PMEMobjpool *rep = pop->replica;
	while (rep) {
		/* translate the master-relative address into this replica */
		void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
		if (rep->rpp == NULL) {
			rep->memcpy_local(rdest, src, len,
				flags & PMEM_F_MEM_VALID_FLAGS);
		} else {
			if (rep->persist_remote(rep, rdest, len, lane, flags))
				obj_handle_remote_persist_error(pop);
		}
		rep = rep->replica;
	}
	if (pop->has_remote_replicas)
		lane_release(pop);
	return ret;
}
/*
 * obj_rep_memmove -- (internal) memmove with replication
 *
 * Same scheme as obj_rep_memcpy: apply to the master replica first, then
 * mirror the change to each local replica and issue a remote persist for
 * each remote one.  A lane is held only when remote replicas exist.
 */
static void *
obj_rep_memmove(void *ctx, void *dest, const void *src, size_t len,
	unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
		flags);
	unsigned lane = UINT_MAX;
	if (pop->has_remote_replicas)
		lane = lane_hold(pop, NULL);
	void *ret = pop->memmove_local(dest, src, len, flags);
	PMEMobjpool *rep = pop->replica;
	while (rep) {
		/* translate the master-relative address into this replica */
		void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
		if (rep->rpp == NULL) {
			rep->memmove_local(rdest, src, len,
				flags & PMEM_F_MEM_VALID_FLAGS);
		} else {
			if (rep->persist_remote(rep, rdest, len, lane, flags))
				obj_handle_remote_persist_error(pop);
		}
		rep = rep->replica;
	}
	if (pop->has_remote_replicas)
		lane_release(pop);
	return ret;
}
/*
 * obj_rep_memset -- (internal) memset with replication
 *
 * Same scheme as obj_rep_memcpy: fill on the master replica first, then
 * mirror the fill to each local replica and issue a remote persist for
 * each remote one.  A lane is held only when remote replicas exist.
 */
static void *
obj_rep_memset(void *ctx, void *dest, int c, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
		flags);
	unsigned lane = UINT_MAX;
	if (pop->has_remote_replicas)
		lane = lane_hold(pop, NULL);
	void *ret = pop->memset_local(dest, c, len, flags);
	PMEMobjpool *rep = pop->replica;
	while (rep) {
		/* translate the master-relative address into this replica */
		void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
		if (rep->rpp == NULL) {
			rep->memset_local(rdest, c, len,
				flags & PMEM_F_MEM_VALID_FLAGS);
		} else {
			if (rep->persist_remote(rep, rdest, len, lane, flags))
				obj_handle_remote_persist_error(pop);
		}
		rep = rep->replica;
	}
	if (pop->has_remote_replicas)
		lane_release(pop);
	return ret;
}
/*
 * obj_rep_persist -- (internal) persist with replication
 *
 * Persists the range on the master replica, then copies the (already
 * written) data to each local replica and issues a remote persist for
 * each remote one.  A lane is held only when remote replicas exist.
 */
static int
obj_rep_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	unsigned lane = UINT_MAX;
	if (pop->has_remote_replicas)
		lane = lane_hold(pop, NULL);
	pop->persist_local(addr, len);
	PMEMobjpool *rep = pop->replica;
	while (rep) {
		/* translate the master-relative address into this replica */
		void *raddr = (char *)rep + (uintptr_t)addr - (uintptr_t)pop;
		if (rep->rpp == NULL) {
			/* local replica: copy the persisted data over */
			rep->memcpy_local(raddr, addr, len, 0);
		} else {
			if (rep->persist_remote(rep, raddr, len, lane, flags))
				obj_handle_remote_persist_error(pop);
		}
		rep = rep->replica;
	}
	if (pop->has_remote_replicas)
		lane_release(pop);
	return 0;
}
/*
 * obj_rep_flush -- (internal) flush with replication
 *
 * Flushes the range on the master replica, then copies the data to each
 * local replica (with NODRAIN, since a separate drain will follow) and
 * issues a remote persist for each remote one.  A lane is held only when
 * remote replicas exist.
 */
static int
obj_rep_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	unsigned lane = UINT_MAX;
	if (pop->has_remote_replicas)
		lane = lane_hold(pop, NULL);
	pop->flush_local(addr, len);
	PMEMobjpool *rep = pop->replica;
	while (rep) {
		/* translate the master-relative address into this replica */
		void *raddr = (char *)rep + (uintptr_t)addr - (uintptr_t)pop;
		if (rep->rpp == NULL) {
			rep->memcpy_local(raddr, addr, len,
				PMEM_F_MEM_NODRAIN);
		} else {
			if (rep->persist_remote(rep, raddr, len, lane, flags))
				obj_handle_remote_persist_error(pop);
		}
		rep = rep->replica;
	}
	if (pop->has_remote_replicas)
		lane_release(pop);
	return 0;
}
/*
 * obj_rep_drain -- (internal) drain with replication
 *
 * Drains the master replica and every local replica in the chain; remote
 * replicas have no local drain hook and are skipped.
 */
static void
obj_rep_drain(void *ctx)
{
	PMEMobjpool *pop = ctx;
	LOG(15, "pop %p", pop);
	pop->drain_local();
	for (PMEMobjpool *rep = pop->replica; rep != NULL; rep = rep->replica) {
		if (rep->rpp == NULL)
			rep->drain_local();
	}
}
#if VG_MEMCHECK_ENABLED
/*
* Arbitrary value. When there's more undefined regions than MAX_UNDEFS, it's
* not worth reporting everything - developer should fix the code.
*/
#define MAX_UNDEFS 1000
/*
 * obj_vg_check_no_undef -- (internal) check whether there are any undefined
 * regions
 *
 * Scans the whole pool, collecting up to MAX_UNDEFS [start, end] ranges of
 * bytes that are addressable but undefined according to memcheck, prints
 * them, and finally triggers a single Valgrind error so the condition is
 * reported.  Error reporting is disabled during the scan itself so the
 * probing requests do not flood the log.
 */
static void
obj_vg_check_no_undef(struct pmemobjpool *pop)
{
	LOG(4, "pop %p", pop);
	struct {
		void *start, *end;
	} undefs[MAX_UNDEFS];
	int num_undefs = 0;
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	char *addr_start = pop->addr;
	char *addr_end = addr_start + pop->set->poolsize;
	/* outer loop: one iteration per addressable region of the pool */
	while (addr_start < addr_end) {
		char *noaccess = (char *)VALGRIND_CHECK_MEM_IS_ADDRESSABLE(
					addr_start, addr_end - addr_start);
		if (noaccess == NULL)
			noaccess = addr_end;
		/* inner loop: find undefined spans inside this region */
		while (addr_start < noaccess) {
			char *undefined =
				(char *)VALGRIND_CHECK_MEM_IS_DEFINED(
					addr_start, noaccess - addr_start);
			if (undefined) {
				addr_start = undefined;
#ifdef VALGRIND_CHECK_MEM_IS_UNDEFINED
				/* jump straight past the undefined span */
				addr_start = (char *)
					VALGRIND_CHECK_MEM_IS_UNDEFINED(
					addr_start, noaccess - addr_start);
				if (addr_start == NULL)
					addr_start = noaccess;
#else
				/* fallback: advance byte by byte */
				while (addr_start < noaccess &&
						VALGRIND_CHECK_MEM_IS_DEFINED(
						addr_start, 1))
					addr_start++;
#endif
				if (num_undefs < MAX_UNDEFS) {
					undefs[num_undefs].start = undefined;
					undefs[num_undefs].end = addr_start - 1;
					num_undefs++;
				}
			} else
				addr_start = noaccess;
		}
#ifdef VALGRIND_CHECK_MEM_IS_UNADDRESSABLE
		/* skip past the unaddressable gap to the next region */
		addr_start = (char *)VALGRIND_CHECK_MEM_IS_UNADDRESSABLE(
				addr_start, addr_end - addr_start);
		if (addr_start == NULL)
			addr_start = addr_end;
#else
		while (addr_start < addr_end &&
				(char *)VALGRIND_CHECK_MEM_IS_ADDRESSABLE(
				addr_start, 1) == addr_start)
			addr_start++;
#endif
	}
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	if (num_undefs) {
		/*
		 * How to resolve this error:
		 * If it's part of the free space Valgrind should be told about
		 * it by VALGRIND_DO_MAKE_MEM_NOACCESS request. If it's
		 * allocated - initialize it or use VALGRIND_DO_MAKE_MEM_DEFINED
		 * request.
		 */
		VALGRIND_PRINTF("Part of the pool is left in undefined state on"
				" boot. This is pmemobj's bug.\nUndefined"
				" regions: [pool address: %p]\n", pop);
		for (int i = 0; i < num_undefs; ++i)
			VALGRIND_PRINTF("  [%p, %p]\n", undefs[i].start,
					undefs[i].end);
		if (num_undefs == MAX_UNDEFS)
			VALGRIND_PRINTF("  ...\n");
		/* Trigger error. */
		VALGRIND_CHECK_MEM_IS_DEFINED(undefs[0].start, 1);
	}
}
/*
* obj_vg_boot -- (internal) notify Valgrind about pool objects
*/
static void
obj_vg_boot(struct pmemobjpool *pop)
{
if (!On_valgrind)
return;
LOG(4, "pop %p", pop);
if (os_getenv("PMEMOBJ_VG_CHECK_UNDEF"))
obj_vg_check_no_undef(pop);
}
#endif
/*
 * obj_runtime_init_common -- (internal) runtime initialization
 *
 * Common routine for create/open and check.  Boots the lanes, replays
 * any recovery work left in the lane sections, and then persistently
 * clears the conversion flags.  Returns 0 on success, errno otherwise.
 */
static int
obj_runtime_init_common(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);
	if ((errno = lane_boot(pop)) != 0) {
		ERR("!lane_boot");
		return errno;
	}
	if ((errno = lane_recover_and_section_boot(pop)) != 0) {
		ERR("!lane_recover_and_section_boot");
		return errno;
	}
	/* recovery done -- persistently clear the conversion flags */
	pop->conversion_flags = 0;
	pmemops_persist(&pop->p_ops,
		&pop->conversion_flags, sizeof(pop->conversion_flags));
	return 0;
}
/*
 * obj_runtime_cleanup_common -- (internal) runtime cleanup
 *
 * Common routine for create/open and check; undoes the lane setup done
 * by obj_runtime_init_common, sections first.
 */
static void
obj_runtime_cleanup_common(PMEMobjpool *pop)
{
	lane_section_cleanup(pop);
	lane_cleanup(pop);
}
/*
 * obj_descr_create -- (internal) create obj pool descriptor
 *
 * Lays out the persistent descriptor that follows the pool header:
 * layout name, lane area, heap, descriptor checksum, and the runtime
 * bootstrap fields (run_id, root object, conversion flags).  Returns 0
 * on success, -1 (with errno set) on failure.
 */
static int
obj_descr_create(PMEMobjpool *pop, const char *layout, size_t poolsize)
{
	LOG(3, "pop %p layout %s poolsize %zu", pop, layout, poolsize);
	ASSERTeq(poolsize % Pagesize, 0);
	/* opaque info lives at the beginning of mapped memory pool */
	void *dscp = (void *)((uintptr_t)pop + sizeof(struct pool_hdr));
	/* create the persistent part of pool's descriptor */
	memset(dscp, 0, OBJ_DSC_P_SIZE);
	/* dscp was just zeroed, so the layout string stays NUL-terminated */
	if (layout)
		strncpy(pop->layout, layout, PMEMOBJ_MAX_LAYOUT - 1);
	struct pmem_ops *p_ops = &pop->p_ops;
	pop->lanes_offset = OBJ_LANES_OFFSET;
	pop->nlanes = OBJ_NLANES;
	/* zero all lanes */
	lane_init_data(pop);
	/* the heap starts right after the lanes, rounded up to a page */
	pop->heap_offset = pop->lanes_offset +
		pop->nlanes * sizeof(struct lane_layout);
	pop->heap_offset = (pop->heap_offset + Pagesize - 1) & ~(Pagesize - 1);
	size_t heap_size = pop->set->poolsize - pop->heap_offset;
	/* initialize heap prior to storing the checksum */
	errno = palloc_init((char *)pop + pop->heap_offset, heap_size,
		&pop->heap_size, p_ops);
	if (errno != 0) {
		ERR("!palloc_init");
		return -1;
	}
	util_checksum(dscp, OBJ_DSC_P_SIZE, &pop->checksum, 1, 0);
	/*
	 * store the persistent part of pool's descriptor (2kB)
	 *
	 * It's safe to use PMEMOBJ_F_RELAXED flag because the entire
	 * structure is protected by checksum.
	 */
	pmemops_xpersist(p_ops, dscp, OBJ_DSC_P_SIZE, PMEMOBJ_F_RELAXED);
	/* initialize run_id, it will be incremented later */
	pop->run_id = 0;
	pmemops_persist(p_ops, &pop->run_id, sizeof(pop->run_id));
	pop->root_offset = 0;
	pmemops_persist(p_ops, &pop->root_offset, sizeof(pop->root_offset));
	pop->root_size = 0;
	pmemops_persist(p_ops, &pop->root_size, sizeof(pop->root_size));
	pop->conversion_flags = 0;
	pmemops_persist(p_ops, &pop->conversion_flags,
		sizeof(pop->conversion_flags));
	/*
	 * It's safe to use PMEMOBJ_F_RELAXED flag because the reserved
	 * area must be entirely zeroed.
	 */
	pmemops_memset(p_ops, pop->pmem_reserved, 0,
		sizeof(pop->pmem_reserved), PMEMOBJ_F_RELAXED);
	return 0;
}
/*
 * obj_descr_check -- (internal) validate obj pool descriptor
 *
 * Verifies the descriptor checksum, the expected layout name (when one
 * is given) and the heap-offset alignment.  For remote pools the
 * descriptor is first fetched from the remote node.  Returns 0 on
 * success, -1 with errno set to EINVAL on a validation failure.
 */
static int
obj_descr_check(PMEMobjpool *pop, const char *layout, size_t poolsize)
{
	LOG(3, "pop %p layout %s poolsize %zu", pop, layout, poolsize);
	void *dscp = (void *)((uintptr_t)pop + sizeof(struct pool_hdr));
	if (pop->rpp) {
		/* read remote descriptor */
		if (obj_read_remote(pop->rpp, pop->remote_base, dscp, dscp,
				OBJ_DSC_P_SIZE)) {
			ERR("!obj_read_remote");
			return -1;
		}
	}
	if (!util_checksum(dscp, OBJ_DSC_P_SIZE, &pop->checksum, 0, 0)) {
		ERR("invalid checksum of pool descriptor");
		errno = EINVAL;
		return -1;
	}
	if (layout &&
	    strncmp(pop->layout, layout, PMEMOBJ_MAX_LAYOUT)) {
		ERR("wrong layout (\"%s\"), "
			"pool created with layout \"%s\"",
			layout, pop->layout);
		errno = EINVAL;
		return -1;
	}
	/* the heap must start on a page boundary */
	if (pop->heap_offset % Pagesize) {
		ERR("unaligned heap: off %" PRIu64, pop->heap_offset);
		errno = EINVAL;
		return -1;
	}
	return 0;
}
/*
 * obj_msync_nofail -- (internal) pmem_msync wrapper that never fails from
 * caller's perspective
 *
 * An msync failure is unrecoverable here, so it aborts the process.
 */
static void
obj_msync_nofail(const void *addr, size_t size)
{
	if (pmem_msync(addr, size) != 0)
		FATAL("!pmem_msync");
}
/*
 * obj_replica_init_local -- (internal) initialize runtime part
 * of the local replicas
 *
 * Selects the persistence hooks for the replica: the direct libpmem
 * primitives when the mapping is pmem, or the msync-based fallbacks
 * otherwise.  Always returns 0.
 */
static int
obj_replica_init_local(PMEMobjpool *rep, int is_pmem, size_t resvsize)
{
	LOG(3, "rep %p is_pmem %d resvsize %zu", rep, is_pmem, resvsize);
	/*
	 * Use some of the memory pool area for run-time info. This
	 * run-time state is never loaded from the file, it is always
	 * created here, so no need to worry about byte-order.
	 */
	rep->is_pmem = is_pmem;
	/* init hooks -- local replicas never persist remotely */
	rep->persist_remote = NULL;
	/*
	 * All replicas, except for master, are ignored as far as valgrind is
	 * concerned. This is to save CPU time and lessen the complexity of
	 * instrumentation.
	 */
	if (!rep->is_master_replica)
		VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(rep, resvsize);
	if (rep->is_pmem) {
		/* true pmem: flush caches directly via libpmem */
		rep->persist_local = pmem_persist;
		rep->flush_local = pmem_flush;
		rep->drain_local = pmem_drain;
		rep->memcpy_local = pmem_memcpy;
		rep->memmove_local = pmem_memmove;
		rep->memset_local = pmem_memset;
	} else {
		/* non-pmem mapping: fall back to msync-based persistence */
		rep->persist_local = obj_msync_nofail;
		rep->flush_local = obj_msync_nofail;
		rep->drain_local = obj_drain_empty;
		rep->memcpy_local = obj_nopmem_memcpy;
		rep->memmove_local = obj_nopmem_memmove;
		rep->memset_local = obj_nopmem_memset;
	}
	return 0;
}
/*
 * obj_replica_init_remote -- (internal) initialize runtime part
 * of a remote replica
 *
 * Copies the remote connection info from the pool set, installs the
 * remote-persist hook and clears all local hooks (a remote replica is
 * never written through them).  Returns 0 on success, -1 on allocation
 * failure.  NOTE(review): the `create' parameter is currently unused
 * here -- presumably kept for interface symmetry; confirm with callers.
 */
static int
obj_replica_init_remote(PMEMobjpool *rep, struct pool_set *set,
	unsigned repidx, int create)
{
	LOG(3, "rep %p set %p repidx %u", rep, set, repidx);
	struct pool_replica *repset = set->replica[repidx];
	ASSERTne(repset->remote->rpp, NULL);
	ASSERTne(repset->remote->node_addr, NULL);
	ASSERTne(repset->remote->pool_desc, NULL);
	/* the replica owns private copies of the connection strings */
	rep->node_addr = Strdup(repset->remote->node_addr);
	if (rep->node_addr == NULL)
		return -1;
	rep->pool_desc = Strdup(repset->remote->pool_desc);
	if (rep->pool_desc == NULL) {
		Free(rep->node_addr);
		return -1;
	}
	rep->rpp = repset->remote->rpp;
	/* remote_base - beginning of the remote pool */
	rep->remote_base = (uintptr_t)rep->addr;
	/* init hooks -- only the remote path is valid for this replica */
	rep->persist_remote = obj_remote_persist;
	rep->persist_local = NULL;
	rep->flush_local = NULL;
	rep->drain_local = NULL;
	rep->memcpy_local = NULL;
	rep->memmove_local = NULL;
	rep->memset_local = NULL;
	rep->p_ops.remote.read = obj_read_remote;
	rep->p_ops.remote.ctx = rep->rpp;
	rep->p_ops.remote.base = rep->remote_base;
	return 0;
}
/*
 * obj_cleanup_remote -- (internal) clean up the remote pools data
 *
 * Walks the replica chain and releases the connection strings of every
 * remote replica.  The freed pointers are cleared so that a second call
 * (e.g. the error path of pmemobj_createU followed by obj_replica_fini)
 * is a harmless no-op instead of a double free.
 */
static void
obj_cleanup_remote(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);
	for (; pop != NULL; pop = pop->replica) {
		if (pop->rpp != NULL) {
			Free(pop->node_addr);
			Free(pop->pool_desc);
			/* clear so repeated cleanup cannot double-free */
			pop->node_addr = NULL;
			pop->pool_desc = NULL;
			pop->rpp = NULL;
		}
	}
}
/*
 * obj_replica_init -- (internal) initialize runtime part of the replica
 *
 * Installs the pmem_ops dispatch table: the master replica (repidx 0)
 * gets the replicating or non-replicating hooks depending on how many
 * replicas exist; non-master replicas get no dispatch table of their own
 * because all writes are driven through the master.  Finally delegates
 * to the local or remote per-replica initializer.
 */
static int
obj_replica_init(PMEMobjpool *rep, struct pool_set *set, unsigned repidx,
	int create)
{
	struct pool_replica *repset = set->replica[repidx];
	if (repidx == 0) {
		/* master replica */
		rep->is_master_replica = 1;
		rep->has_remote_replicas = set->remote;
		if (set->nreplicas > 1) {
			/* replicate every store to the other replicas */
			rep->p_ops.persist = obj_rep_persist;
			rep->p_ops.flush = obj_rep_flush;
			rep->p_ops.drain = obj_rep_drain;
			rep->p_ops.memcpy = obj_rep_memcpy;
			rep->p_ops.memmove = obj_rep_memmove;
			rep->p_ops.memset = obj_rep_memset;
		} else {
			/* single replica: use the cheap direct hooks */
			rep->p_ops.persist = obj_norep_persist;
			rep->p_ops.flush = obj_norep_flush;
			rep->p_ops.drain = obj_norep_drain;
			rep->p_ops.memcpy = obj_norep_memcpy;
			rep->p_ops.memmove = obj_norep_memmove;
			rep->p_ops.memset = obj_norep_memset;
		}
		rep->p_ops.base = rep;
	} else {
		/* non-master replicas */
		rep->is_master_replica = 0;
		rep->has_remote_replicas = 0;
		rep->p_ops.persist = NULL;
		rep->p_ops.flush = NULL;
		rep->p_ops.drain = NULL;
		rep->p_ops.memcpy = NULL;
		rep->p_ops.memmove = NULL;
		rep->p_ops.memset = NULL;
		rep->p_ops.base = NULL;
	}
	rep->is_dev_dax = set->replica[repidx]->part[0].is_dev_dax;
	int ret;
	if (repset->remote)
		ret = obj_replica_init_remote(rep, set, repidx, create);
	else
		ret = obj_replica_init_local(rep, repset->is_pmem,
			set->resvsize);
	if (ret)
		return ret;
	return 0;
}
/*
* obj_replica_fini -- (internal) deinitialize replica
*/
static void
obj_replica_fini(struct pool_replica *repset)
{
PMEMobjpool *rep = repset->part[0].addr;
if (repset->remote)
obj_cleanup_remote(rep);
}
/*
 * obj_runtime_init -- (internal) initialize runtime part of the pool header
 *
 * Bumps and persists run_id, rebuilds all volatile state (tx params,
 * stats, lock lists, lanes), registers the pool in the global lookup
 * structures when booting, and loads the pool's CTL configuration.  On
 * failure everything done so far is unwound via the goto chain and -1 is
 * returned (errno is set).
 */
static int
obj_runtime_init(PMEMobjpool *pop, int rdonly, int boot, unsigned nlanes)
{
	LOG(3, "pop %p rdonly %d boot %d", pop, rdonly, boot);
	struct pmem_ops *p_ops = &pop->p_ops;
	/*
	 * run_id is made unique by incrementing the previous value.
	 * NOTE(review): the step of 2 appears to reserve run_id parity for
	 * another purpose (the second += 2 avoids 0 on wraparound) --
	 * confirm against the lock/lane code before changing.
	 */
	pop->run_id += 2;
	if (pop->run_id == 0)
		pop->run_id += 2;
	pmemops_persist(p_ops, &pop->run_id, sizeof(pop->run_id));
	/*
	 * Use some of the memory pool area for run-time info. This
	 * run-time state is never loaded from the file, it is always
	 * created here, so no need to worry about byte-order.
	 */
	pop->rdonly = rdonly;
	pop->uuid_lo = pmemobj_get_uuid_lo(pop);
	pop->lanes_desc.runtime_nlanes = nlanes;
	pop->tx_params = tx_params_new();
	if (pop->tx_params == NULL)
		goto err_tx_params;
	pop->stats = stats_new(pop);
	if (pop->stats == NULL)
		goto err_stat;
	/* the lock-list heads are volatile state kept inside the mapping */
	VALGRIND_REMOVE_PMEM_MAPPING(&pop->mutex_head,
		sizeof(pop->mutex_head));
	VALGRIND_REMOVE_PMEM_MAPPING(&pop->rwlock_head,
		sizeof(pop->rwlock_head));
	VALGRIND_REMOVE_PMEM_MAPPING(&pop->cond_head,
		sizeof(pop->cond_head));
	pop->mutex_head = NULL;
	pop->rwlock_head = NULL;
	pop->cond_head = NULL;
	if (boot) {
		if ((errno = obj_runtime_init_common(pop)) != 0)
			goto err_boot;
#if VG_MEMCHECK_ENABLED
		if (On_valgrind) {
			/* mark unused part of the pool as not accessible */
			void *end = palloc_heap_end(&pop->heap);
			VALGRIND_DO_MAKE_MEM_NOACCESS(end,
				(char *)pop + pop->set->poolsize - (char *)end);
		}
#endif
		obj_pool_init();
		/* register the pool for by-uuid and by-address lookups */
		if ((errno = cuckoo_insert(pools_ht, pop->uuid_lo, pop)) != 0) {
			ERR("!cuckoo_insert");
			goto err_cuckoo_insert;
		}
		if ((errno = ravl_insert(pools_tree, pop)) != 0) {
			ERR("!ravl_insert");
			goto err_tree_insert;
		}
	}
	if (obj_ctl_init_and_load(pop) != 0) {
		errno = EINVAL;
		goto err_ctl;
	}
	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	RANGE_NONE(pop->addr, sizeof(struct pool_hdr), pop->is_dev_dax);
	return 0;
	/* error unwinding -- undo the steps above in reverse order */
	struct ravl_node *n;
err_ctl:
	n = ravl_find(pools_tree, pop, RAVL_PREDICATE_EQUAL);
	ASSERTne(n, NULL);
	ravl_remove(pools_tree, n);
err_tree_insert:
	cuckoo_remove(pools_ht, pop->uuid_lo);
err_cuckoo_insert:
	obj_runtime_cleanup_common(pop);
err_boot:
	stats_delete(pop, pop->stats);
err_stat:
	tx_params_delete(pop->tx_params);
err_tx_params:
	return -1;
}
/*
 * obj_get_nlanes -- get a number of lanes available at runtime. If the value
 * provided with the PMEMOBJ_NLANES environment variable is greater than 0 and
 * smaller than OBJ_NLANES constant it returns PMEMOBJ_NLANES. Otherwise it
 * returns OBJ_NLANES.
 */
static unsigned
obj_get_nlanes(void)
{
	LOG(3, NULL);
	char *env_nlanes = os_getenv(OBJ_NLANES_ENV_VARIABLE);
	if (env_nlanes) {
		/*
		 * Parse with strtol instead of atoi so that trailing
		 * garbage ("12abc") and out-of-range values are rejected
		 * instead of silently accepted (atoi has undefined
		 * behavior on overflow).
		 */
		char *endp;
		errno = 0;
		long nlanes = strtol(env_nlanes, &endp, 10);
		if (endp == env_nlanes || *endp != '\0' ||
		    errno == ERANGE || nlanes <= 0) {
			ERR("%s variable must be a positive integer",
				OBJ_NLANES_ENV_VARIABLE);
			errno = EINVAL;
			goto no_valid_env;
		}
		/* cap the requested value at the pool's lane count */
		return (unsigned)(OBJ_NLANES < nlanes ? OBJ_NLANES : nlanes);
	}
no_valid_env:
	return OBJ_NLANES;
}
/*
 * pmemobj_createU -- create a transactional memory pool (set)
 *
 * Creates the pool (set) file(s), initializes every replica's runtime
 * state, writes the persistent pool descriptor, boots the runtime, and
 * finally applies the requested mode.  Returns the master-replica pool
 * handle, or NULL with errno set on failure (created parts are deleted).
 */
#ifndef _WIN32
static inline
#endif
PMEMobjpool *
pmemobj_createU(const char *path, const char *layout,
		size_t poolsize, mode_t mode)
{
	LOG(3, "path %s layout %s poolsize %zu mode %o",
			path, layout, poolsize, mode);
	PMEMobjpool *pop;
	struct pool_set *set;
	/* check length of layout */
	if (layout && (strlen(layout) >= PMEMOBJ_MAX_LAYOUT)) {
		ERR("Layout too long");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * A number of lanes available at runtime equals the lowest value
	 * from all reported by remote replicas hosts. In the single host mode
	 * the runtime number of lanes is equal to the total number of lanes
	 * available in the pool or the value provided with PMEMOBJ_NLANES
	 * environment variable whichever is lower.
	 */
	unsigned runtime_nlanes = obj_get_nlanes();
	if (util_pool_create(&set, path, poolsize, PMEMOBJ_MIN_POOL,
			PMEMOBJ_MIN_PART, &Obj_create_attr, &runtime_nlanes,
			REPLICAS_ENABLED) != 0) {
		LOG(2, "cannot create pool or pool set");
		return NULL;
	}
	ASSERT(set->nreplicas > 0);
	/* pop is master replica from now on */
	pop = set->replica[0]->part[0].addr;
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *repset = set->replica[r];
		PMEMobjpool *rep = repset->part[0].addr;
		/* zero the runtime portion of each replica's header */
		size_t rt_size = (uintptr_t)(rep + 1) - (uintptr_t)&rep->addr;
		VALGRIND_REMOVE_PMEM_MAPPING(&rep->addr, rt_size);
		memset(&rep->addr, 0, rt_size);
		rep->addr = rep;
		rep->replica = NULL;
		rep->rpp = NULL;
		/* initialize replica runtime - is_pmem, funcs, ... */
		if (obj_replica_init(rep, set, r, 1 /* create */) != 0) {
			ERR("initialization of replica #%u failed", r);
			goto err;
		}
		/* link replicas */
		if (r < set->nreplicas - 1)
			rep->replica = set->replica[r + 1]->part[0].addr;
	}
	pop->set = set;
	/* create pool descriptor */
	if (obj_descr_create(pop, layout, set->poolsize) != 0) {
		LOG(2, "creation of pool descriptor failed");
		goto err;
	}
	/* initialize runtime parts - lanes, obj stores, ... */
	if (obj_runtime_init(pop, 0, 1 /* boot */,
			runtime_nlanes) != 0) {
		ERR("pool initialization failed");
		goto err;
	}
	if (util_poolset_chmod(set, mode))
		goto err;
	util_poolset_fdclose(set);
	LOG(3, "pop %p", pop);
	return pop;
err:
	LOG(4, "error clean up");
	/* preserve the causing errno across the cleanup calls */
	int oerrno = errno;
	if (set->remote)
		obj_cleanup_remote(pop);
	util_poolset_close(set, DELETE_CREATED_PARTS);
	errno = oerrno;
	return NULL;
}
#ifndef _WIN32
/*
 * pmemobj_create -- create a transactional memory pool (set)
 *
 * Thin public wrapper around pmemobj_createU; only adds API tracing.
 */
PMEMobjpool *
pmemobj_create(const char *path, const char *layout,
	size_t poolsize, mode_t mode)
{
	PMEMOBJ_API_START();
	PMEMobjpool *pop = pmemobj_createU(path, layout, poolsize, mode);
	PMEMOBJ_API_END();
	return pop;
}
#else
/*
 * pmemobj_createW -- create a transactional memory pool (set)
 *
 * Windows wide-character entry point: converts 'path' and the optional
 * 'layout' to UTF-8 before delegating to pmemobj_createU.
 */
PMEMobjpool *
pmemobj_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize,
	mode_t mode)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;
	char *ulayout = NULL;
	if (layout != NULL) {
		ulayout = util_toUTF8(layout);
		if (ulayout == NULL) {
			util_free_UTF8(upath);
			return NULL;
		}
	}
	PMEMobjpool *ret = pmemobj_createU(upath, ulayout, poolsize, mode);
	/* util_free_UTF8(NULL) is a no-op, so ulayout may be NULL here */
	util_free_UTF8(upath);
	util_free_UTF8(ulayout);
	return ret;
}
#endif
/*
 * obj_check_basic_local -- (internal) basic pool consistency check
 * of a local replica
 *
 * Returns 1 when consistent, 0 otherwise; runs all checks even after
 * the first failure so every problem gets logged.
 */
static int
obj_check_basic_local(PMEMobjpool *pop, size_t mapped_size)
{
	LOG(3, "pop %p mapped_size %zu", pop, mapped_size);

	ASSERTeq(pop->rpp, NULL);

	int ok = 1;

	/* an odd run_id means the pool was not closed cleanly */
	if (pop->run_id % 2) {
		ERR("invalid run_id %" PRIu64, pop->run_id);
		ok = 0;
	}

	errno = lane_check(pop);
	if (errno != 0) {
		LOG(2, "!lane_check");
		ok = 0;
	}

	errno = palloc_heap_check((char *)pop + pop->heap_offset,
			mapped_size);
	if (errno != 0) {
		LOG(2, "!heap_check");
		ok = 0;
	}

	return ok;
}
/*
 * obj_read_remote -- read data from remote replica
 *
 * It reads data of size 'length' from the remote replica 'pop'
 * from address 'addr' and saves it at address 'dest'.
 */
int
obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
		size_t length)
{
	LOG(3, "ctx %p base 0x%lx dest %p addr %p length %zu", ctx, base, dest,
			addr, length);

	ASSERTne(ctx, NULL);
	ASSERT((uintptr_t)addr >= base);

	/* translate the absolute address into an offset within the pool */
	uintptr_t off = (uintptr_t)addr - base;

	int rv = Rpmem_read(ctx, dest, off, length, RLANE_DEFAULT);
	if (rv != 0) {
		ERR("!rpmem_read");
		return -1;
	}

	return 0;
}
/*
 * obj_check_basic_remote -- (internal) basic pool consistency check
 * of a remote replica
 *
 * Returns 1 when consistent, 0 when inconsistent, and -1 when the check
 * itself could not be performed (remote read failure).
 */
static int
obj_check_basic_remote(PMEMobjpool *pop, size_t mapped_size)
{
	LOG(3, "pop %p mapped_size %zu", pop, mapped_size);
	ASSERTne(pop->rpp, NULL);
	int consistent = 1;
	/* read pop->run_id */
	if (obj_read_remote(pop->rpp, pop->remote_base, &pop->run_id,
			&pop->run_id, sizeof(pop->run_id))) {
		ERR("!obj_read_remote");
		return -1;
	}
	/* an odd run_id means the pool was not closed cleanly */
	if (pop->run_id % 2) {
		ERR("invalid run_id %" PRIu64, pop->run_id);
		consistent = 0;
	}
	/* XXX add lane_check_remote */
	errno = palloc_heap_check_remote((char *)pop + pop->heap_offset,
		mapped_size, &pop->p_ops.remote);
	if (errno != 0) {
		LOG(2, "!heap_check_remote");
		consistent = 0;
	}
	return consistent;
}
/*
 * obj_check_basic -- (internal) basic pool consistency check
 *
 * Used to check if all the replicas are consistent prior to pool recovery.
 * Dispatches to the local or remote variant based on whether the replica
 * has a remote-pool handle.
 */
static int
obj_check_basic(PMEMobjpool *pop, size_t mapped_size)
{
	LOG(3, "pop %p mapped_size %zu", pop, mapped_size);

	return (pop->rpp == NULL) ?
		obj_check_basic_local(pop, mapped_size) :
		obj_check_basic_remote(pop, mapped_size);
}
/*
 * obj_pool_close -- (internal) close the pool set without deleting parts,
 * preserving errno across the cleanup call
 */
static void
obj_pool_close(struct pool_set *set)
{
	int saved_errno = errno;

	util_poolset_close(set, DO_NOT_DELETE_PARTS);

	errno = saved_errno;
}
/*
 * obj_pool_open -- (internal) open the given pool
 *
 * Returns 0 on success, -1 otherwise (the pool set is closed on failure).
 */
static int
obj_pool_open(struct pool_set **set, const char *path, unsigned flags,
	unsigned *nlanes)
{
	if (util_pool_open(set, path, PMEMOBJ_MIN_PART, &Obj_open_attr,
			nlanes, NULL, flags) != 0) {
		LOG(2, "cannot open pool or pool set");
		return -1;
	}

	ASSERT((*set)->nreplicas > 0);

	/* read-only mode is not supported in libpmemobj */
	if ((*set)->rdonly) {
		ERR("read-only mode is not supported");
		errno = EINVAL;
		obj_pool_close(*set);
		return -1;
	}

	return 0;
}
/*
 * obj_replicas_init -- (internal) initialize all replicas
 *
 * On failure, runtime state of the replicas initialized so far is torn
 * down again and -1 is returned.
 */
static int
obj_replicas_init(struct pool_set *set)
{
	unsigned r;
	for (r = 0; r < set->nreplicas; r++) {
		struct pool_replica *repset = set->replica[r];
		PMEMobjpool *rep = repset->part[0].addr;
		/*
		 * zero the runtime portion of the replica header (everything
		 * from 'addr' up to the end of the struct) before use
		 */
		size_t rt_size = (uintptr_t)(rep + 1) - (uintptr_t)&rep->addr;
		VALGRIND_REMOVE_PMEM_MAPPING(&rep->addr, rt_size);
		memset(&rep->addr, 0, rt_size);
		rep->addr = rep;
		rep->replica = NULL;
		rep->rpp = NULL;
		/* initialize replica runtime - is_pmem, funcs, ... */
		if (obj_replica_init(rep, set, r, 0 /* open */) != 0) {
			ERR("initialization of replica #%u failed", r);
			goto err;
		}
		/* link replicas */
		if (r < set->nreplicas - 1)
			rep->replica = set->replica[r + 1]->part[0].addr;
	}
	return 0;
err:
	/* unwind only the replicas that were successfully initialized */
	for (unsigned p = 0; p < r; p++)
		obj_replica_fini(set->replica[p]);
	return -1;
}
/*
 * obj_replicas_fini -- (internal) deinitialize all replicas,
 * preserving errno across the cleanup
 */
static void
obj_replicas_fini(struct pool_set *set)
{
	int saved_errno = errno;

	unsigned r = 0;
	while (r < set->nreplicas) {
		obj_replica_fini(set->replica[r]);
		r++;
	}

	errno = saved_errno;
}
/*
 * obj_replicas_check_basic -- (internal) perform basic consistency check
 * for all replicas
 *
 * Returns 0 when every replica is consistent, -1 otherwise. On success
 * the master replica's lanes are copied to all other replicas.
 */
static int
obj_replicas_check_basic(PMEMobjpool *pop)
{
	PMEMobjpool *rep;
	for (unsigned r = 0; r < pop->set->nreplicas; r++) {
		rep = pop->set->replica[r]->part[0].addr;
		/*
		 * obj_check_basic() returns 1 when consistent, 0 when
		 * inconsistent and -1 when the check itself failed (e.g.
		 * a remote read error) -- anything other than 1 means the
		 * replica cannot be trusted.  (The original '== 0' test
		 * silently accepted the -1 error case.)
		 */
		if (obj_check_basic(rep, pop->set->poolsize) != 1) {
			ERR("inconsistent replica #%u", r);
			return -1;
		}
	}
	/* copy lanes from the master replica to all the others */
	void *src = (void *)((uintptr_t)pop + pop->lanes_offset);
	size_t len = pop->nlanes * sizeof(struct lane_layout);
	for (unsigned r = 1; r < pop->set->nreplicas; r++) {
		rep = pop->set->replica[r]->part[0].addr;
		void *dst = (void *)((uintptr_t)rep + pop->lanes_offset);
		if (rep->rpp == NULL) {
			rep->memcpy_local(dst, src, len, 0);
		} else {
			if (rep->persist_remote(rep, dst, len,
					RLANE_DEFAULT, 0))
				obj_handle_remote_persist_error(pop);
		}
	}
	return 0;
}
/*
 * obj_open_common -- open a transactional memory pool (set)
 *
 * This routine takes flags and does all the work
 * (flag POOL_OPEN_COW - internal calls can map a read-only pool if required).
 *
 * When 'boot' is nonzero a full consistency check and runtime boot is
 * performed; pmemobj_checkU() passes boot == 0 to defer that work.
 * Returns the master replica handle, or NULL with errno set.
 */
static PMEMobjpool *
obj_open_common(const char *path, const char *layout, unsigned flags, int boot)
{
	LOG(3, "path %s layout %s flags 0x%x", path, layout, flags);
	PMEMobjpool *pop = NULL;
	struct pool_set *set;
	/*
	 * A number of lanes available at runtime equals the lowest value
	 * from all reported by remote replicas hosts. In the single host mode
	 * the runtime number of lanes is equal to the total number of lanes
	 * available in the pool or the value provided with PMEMOBJ_NLANES
	 * environment variable whichever is lower.
	 */
	unsigned runtime_nlanes = obj_get_nlanes();
	if (obj_pool_open(&set, path, flags, &runtime_nlanes))
		return NULL;
	/* pop is master replica from now on */
	pop = set->replica[0]->part[0].addr;
	if (obj_replicas_init(set))
		goto replicas_init;
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *repset = set->replica[r];
		PMEMobjpool *rep = repset->part[0].addr;
		/* check descriptor */
		if (obj_descr_check(rep, layout, set->poolsize) != 0) {
			LOG(2, "descriptor check of replica #%u failed", r);
			goto err_descr_check;
		}
	}
	pop->set = set;
	if (boot) {
		/* check consistency of 'master' replica */
		if (obj_check_basic(pop, pop->set->poolsize) == 0) {
			goto err_check_basic;
		}
	}
	if (set->nreplicas > 1) {
		if (obj_replicas_check_basic(pop))
			goto err_replicas_check_basic;
	}
	/*
	 * before runtime initialization lanes are unavailable, remote persists
	 * should use RLANE_DEFAULT
	 */
	pop->lanes_desc.runtime_nlanes = 0;
#if VG_MEMCHECK_ENABLED
	pop->vg_boot = boot;
#endif
	/* initialize runtime parts - lanes, obj stores, ... */
	if (obj_runtime_init(pop, 0, boot, runtime_nlanes) != 0) {
		ERR("pool initialization failed");
		goto err_runtime_init;
	}
#if VG_MEMCHECK_ENABLED
	if (boot)
		obj_vg_boot(pop);
#endif
	util_poolset_fdclose(set);
	LOG(3, "pop %p", pop);
	return pop;
	/* all failures past replica init share the same unwind sequence */
err_runtime_init:
err_replicas_check_basic:
err_check_basic:
err_descr_check:
	obj_replicas_fini(set);
replicas_init:
	obj_pool_close(set);
	return NULL;
}
/*
 * pmemobj_openU -- open a transactional memory pool
 *
 * Opens with a full boot; maps copy-on-write when the global Open_cow
 * flag is set (testing/debug aid).
 */
#ifndef _WIN32
static inline
#endif
PMEMobjpool *
pmemobj_openU(const char *path, const char *layout)
{
	LOG(3, "path %s layout %s", path, layout);
	return obj_open_common(path, layout, Open_cow ? POOL_OPEN_COW : 0, 1);
}
#ifndef _WIN32
/*
 * pmemobj_open -- open a transactional memory pool
 *
 * Thin public wrapper around pmemobj_openU; only adds API tracing.
 */
PMEMobjpool *
pmemobj_open(const char *path, const char *layout)
{
	PMEMOBJ_API_START();
	PMEMobjpool *pop = pmemobj_openU(path, layout);
	PMEMOBJ_API_END();
	return pop;
}
#else
/*
 * pmemobj_openW -- open a transactional memory pool
 *
 * Windows wide-character entry point: converts 'path' and the optional
 * 'layout' to UTF-8 before delegating to pmemobj_openU.
 */
PMEMobjpool *
pmemobj_openW(const wchar_t *path, const wchar_t *layout)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;
	char *ulayout = NULL;
	if (layout != NULL) {
		ulayout = util_toUTF8(layout);
		if (ulayout == NULL) {
			util_free_UTF8(upath);
			return NULL;
		}
	}
	PMEMobjpool *ret = pmemobj_openU(upath, ulayout);
	/* util_free_UTF8(NULL) is a no-op, so ulayout may be NULL here */
	util_free_UTF8(upath);
	util_free_UTF8(ulayout);
	return ret;
}
#endif
/*
 * obj_replicas_cleanup -- (internal) free resources allocated for replicas
 */
static void
obj_replicas_cleanup(struct pool_set *set)
{
	LOG(3, "set %p", set);

	for (unsigned r = 0; r < set->nreplicas; r++) {
		PMEMobjpool *rep_pop = set->replica[r]->part[0].addr;

		if (rep_pop->rpp == NULL)
			continue;

		/*
		 * remote replica will be closed in util_poolset_close
		 */
		rep_pop->rpp = NULL;

		Free(rep_pop->node_addr);
		Free(rep_pop->pool_desc);
	}
}
/*
 * obj_pool_lock_cleanup -- (internal) Destroy any locks or condition
 * variables that were allocated at run time
 *
 * Walks the three intrusive lists (mutexes, rwlocks, conds), destroys
 * each OS primitive and unlinks the node; finally resets the list heads.
 */
static void
obj_pool_lock_cleanup(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);

	PMEMmutex_internal *m = pop->mutex_head;
	while (m != NULL) {
		PMEMmutex_internal *m_next = m->PMEMmutex_next;
		LOG(4, "mutex %p *mutex %p", &m->PMEMmutex_lock,
			m->PMEMmutex_bsd_mutex_p);
		os_mutex_destroy(&m->PMEMmutex_lock);
		m->PMEMmutex_next = NULL;
		m->PMEMmutex_bsd_mutex_p = NULL;
		m = m_next;
	}
	pop->mutex_head = NULL;

	PMEMrwlock_internal *r = pop->rwlock_head;
	while (r != NULL) {
		PMEMrwlock_internal *r_next = r->PMEMrwlock_next;
		LOG(4, "rwlock %p *rwlock %p", &r->PMEMrwlock_lock,
			r->PMEMrwlock_bsd_rwlock_p);
		os_rwlock_destroy(&r->PMEMrwlock_lock);
		r->PMEMrwlock_next = NULL;
		r->PMEMrwlock_bsd_rwlock_p = NULL;
		r = r_next;
	}
	pop->rwlock_head = NULL;

	PMEMcond_internal *c = pop->cond_head;
	while (c != NULL) {
		PMEMcond_internal *c_next = c->PMEMcond_next;
		LOG(4, "cond %p *cond %p", &c->PMEMcond_cond,
			c->PMEMcond_bsd_cond_p);
		os_cond_destroy(&c->PMEMcond_cond);
		c->PMEMcond_next = NULL;
		c->PMEMcond_bsd_cond_p = NULL;
		c = c_next;
	}
	pop->cond_head = NULL;
}
/*
 * obj_pool_cleanup -- (internal) cleanup the pool and unmap
 *
 * Teardown order matters: runtime structures (stats, tx params, ctl,
 * locks, lanes) are released before the replicas are unmapped.
 */
static void
obj_pool_cleanup(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);
	stats_delete(pop, pop->stats);
	tx_params_delete(pop->tx_params);
	ctl_delete(pop->ctl);
	obj_pool_lock_cleanup(pop);
	lane_section_cleanup(pop);
	lane_cleanup(pop);
	/* unmap all the replicas */
	obj_replicas_cleanup(pop->set);
	util_poolset_close(pop->set, DO_NOT_DELETE_PARTS);
}
/*
 * pmemobj_close -- close a transactional memory pool
 *
 * Unregisters the pool from the global lookup structures (hashtable by
 * uuid, tree by address), invalidates the per-thread pool cache, and
 * tears down the runtime state.
 */
void
pmemobj_close(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);
	PMEMOBJ_API_START();
	/* bump the generation counter so stale cached lookups are rejected */
	_pobj_cache_invalidate++;
	if (cuckoo_remove(pools_ht, pop->uuid_lo) != pop) {
		ERR("cuckoo_remove");
	}
	struct ravl_node *n = ravl_find(pools_tree, pop, RAVL_PREDICATE_EQUAL);
	if (n == NULL) {
		ERR("ravl_find");
	} else {
		ravl_remove(pools_tree, n);
	}
#ifndef _WIN32
	/* drop the thread-local cached pool if it points at this pool */
	if (_pobj_cached_pool.pop == pop) {
		_pobj_cached_pool.pop = NULL;
		_pobj_cached_pool.uuid_lo = 0;
	}
#else /* _WIN32 */
	/* on Windows the per-thread cache lives in TLS */
	struct _pobj_pcache *pcache = os_tls_get(Cached_pool_key);
	if (pcache != NULL) {
		if (pcache->pop == pop) {
			pcache->pop = NULL;
			pcache->uuid_lo = 0;
		}
	}
#endif /* _WIN32 */
	obj_pool_cleanup(pop);
	PMEMOBJ_API_END();
}
/*
 * pmemobj_checkU -- transactional memory pool consistency check
 *
 * Opens the pool copy-on-write (no boot), runs the basic check and a
 * trial runtime initialization. Returns 1 when consistent, 0 when not,
 * -1 when the pool could not be opened at all.
 */
#ifndef _WIN32
static inline
#endif
int
pmemobj_checkU(const char *path, const char *layout)
{
	LOG(3, "path %s layout %s", path, layout);
	PMEMobjpool *pop = obj_open_common(path, layout, POOL_OPEN_COW, 0);
	if (pop == NULL)
		return -1;	/* errno set by obj_open_common() */
	int consistent = 1;
	/*
	 * For replicated pools, basic consistency check is performed
	 * in obj_open_common().
	 */
	if (pop->replica == NULL)
		consistent = obj_check_basic(pop, pop->set->poolsize);
	if (consistent && (errno = obj_runtime_init_common(pop)) != 0) {
		LOG(3, "!obj_boot");
		consistent = 0;
	}
	if (consistent) {
		obj_pool_cleanup(pop);
	} else {
		/*
		 * partial teardown: lanes/locks were never (fully) booted,
		 * so only the structures that are known to exist are freed
		 */
		stats_delete(pop, pop->stats);
		tx_params_delete(pop->tx_params);
		ctl_delete(pop->ctl);
		/* unmap all the replicas */
		obj_replicas_cleanup(pop->set);
		util_poolset_close(pop->set, DO_NOT_DELETE_PARTS);
	}
	if (consistent)
		LOG(4, "pool consistency check OK");
	return consistent;
}
#ifndef _WIN32
/*
 * pmemobj_check -- transactional memory pool consistency check
 *
 * Thin public wrapper around pmemobj_checkU; only adds API tracing.
 */
int
pmemobj_check(const char *path, const char *layout)
{
	PMEMOBJ_API_START();
	int ret = pmemobj_checkU(path, layout);
	PMEMOBJ_API_END();
	return ret;
}
#else
/*
 * pmemobj_checkW -- transactional memory pool consistency check
 *
 * Windows wide-character entry point: converts 'path' and the optional
 * 'layout' to UTF-8 before delegating to pmemobj_checkU.
 */
int
pmemobj_checkW(const wchar_t *path, const wchar_t *layout)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return -1;
	char *ulayout = NULL;
	if (layout != NULL) {
		ulayout = util_toUTF8(layout);
		if (ulayout == NULL) {
			util_free_UTF8(upath);
			return -1;
		}
	}
	int ret = pmemobj_checkU(upath, ulayout);
	/* util_free_UTF8(NULL) is a no-op, so ulayout may be NULL here */
	util_free_UTF8(upath);
	util_free_UTF8(ulayout);
	return ret;
}
#endif
/*
 * pmemobj_pool_by_oid -- returns the pool handle associated with the oid,
 * or NULL when the uuid is not registered
 */
PMEMobjpool *
pmemobj_pool_by_oid(PMEMoid oid)
{
	LOG(3, "oid.off 0x%016" PRIx64, oid.off);

	/* XXX this is a temporary fix, to be fixed properly later */
	if (pools_ht == NULL)
		return NULL;

	PMEMobjpool *pop = cuckoo_get(pools_ht, oid.pool_uuid_lo);
	return pop;
}
/*
 * pmemobj_pool_by_ptr -- returns the pool handle associated with the address
 *
 * Returns NULL when 'addr' does not fall within any registered pool.
 */
PMEMobjpool *
pmemobj_pool_by_ptr(const void *addr)
{
	LOG(3, "addr %p", addr);
	/* fast path for transactions */
	PMEMobjpool *pop = tx_get_pop();
	if ((pop != NULL) && OBJ_PTR_FROM_POOL(pop, addr))
		return pop;
	/* XXX this is a temporary fix, to be fixed properly later */
	if (pools_tree == NULL)
		return NULL;
	/* find the pool whose base address is the closest one below 'addr' */
	struct ravl_node *n = ravl_find(pools_tree, addr,
		RAVL_PREDICATE_LESS_EQUAL);
	if (n == NULL)
		return NULL;
	pop = ravl_data(n);
	/* reject addresses past the end of that pool's mapped range */
	size_t pool_size = pop->heap_offset + pop->heap_size;
	if ((char *)addr >= (char *)pop + pool_size)
		return NULL;
	return pop;
}
/* arguments for constructor_alloc */
struct constr_args {
	int zero_init;			/* nonzero - zero the allocation first */
	pmemobj_constr constructor;	/* user constructor, may be NULL */
	void *arg;			/* opaque argument for the constructor */
};
/*
 * constructor_alloc -- (internal) constructor for obj_alloc_construct
 *
 * Optionally zeroes the new object, then invokes the user constructor
 * (if any) and propagates its return value.
 */
static int
constructor_alloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct constr_args *args = arg;

	if (args->zero_init)
		pmemops_memset(&pop->p_ops, ptr, 0, usable_size, 0);

	return args->constructor ?
		args->constructor(pop, ptr, args->arg) : 0;
}
/*
 * obj_alloc_construct -- (internal) allocates a new object with constructor
 *
 * If 'oidp' is non-NULL it is atomically updated (uuid via a ulog entry,
 * offset directly by the allocator) as part of the same operation.
 * Returns 0 on success, -1 with errno set otherwise.
 */
static int
obj_alloc_construct(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
	type_num_t type_num, uint64_t flags,
	pmemobj_constr constructor, void *arg)
{
	if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
		ERR("requested size too large");
		errno = ENOMEM;
		return -1;
	}
	struct constr_args carg;
	carg.zero_init = flags & POBJ_FLAG_ZERO;
	carg.constructor = constructor;
	carg.arg = arg;
	struct operation_context *ctx = pmalloc_operation_hold(pop);
	/* publish the pool uuid into the user's oid within the same redo log */
	if (oidp)
		operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
				ULOG_OPERATION_SET);
	int ret = palloc_operation(&pop->heap, 0,
			oidp != NULL ? &oidp->off : NULL, size,
			constructor_alloc, &carg, type_num, 0,
			CLASS_ID_FROM_FLAG(flags),
			ctx);
	pmalloc_operation_release(pop);
	return ret;
}
/*
 * pmemobj_alloc -- allocates a new object
 *
 * Rejects zero-sized allocations; delegates to obj_alloc_construct
 * with no extra flags.
 */
int
pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
	uint64_t type_num, pmemobj_constr constructor, void *arg)
{
	LOG(3, "pop %p oidp %p size %zu type_num %llx constructor %p arg %p",
		pop, oidp, size, (unsigned long long)type_num,
		constructor, arg);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (size == 0) {
		ERR("allocation with size 0");
		errno = EINVAL;
		return -1;
	}

	PMEMOBJ_API_START();
	int rv = obj_alloc_construct(pop, oidp, size, type_num,
			0, constructor, arg);
	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_xalloc -- allocates with flags
 *
 * Validates the size and the flag mask before delegating to
 * obj_alloc_construct.
 */
int
pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
	uint64_t type_num, uint64_t flags,
	pmemobj_constr constructor, void *arg)
{
	LOG(3, "pop %p oidp %p size %zu type_num %llx flags %llx "
		"constructor %p arg %p",
		pop, oidp, size, (unsigned long long)type_num,
		(unsigned long long)flags,
		constructor, arg);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (size == 0) {
		ERR("allocation with size 0");
		errno = EINVAL;
		return -1;
	}

	uint64_t bad_flags = flags & ~POBJ_TX_XALLOC_VALID_FLAGS;
	if (bad_flags != 0) {
		ERR("unknown flags 0x%" PRIx64, bad_flags);
		errno = EINVAL;
		return -1;
	}

	PMEMOBJ_API_START();
	int rv = obj_alloc_construct(pop, oidp, size, type_num,
			flags, constructor, arg);
	PMEMOBJ_API_END();
	return rv;
}
/* arguments for constructor_realloc and constructor_zrealloc */
struct carg_realloc {
	void *ptr;			/* current object address */
	size_t old_size;		/* usable size before the resize */
	size_t new_size;		/* requested size */
	int zero_init;			/* nonzero - zero any grown region */
	type_num_t user_type;		/* object's type number */
	pmemobj_constr constructor;	/* user constructor, may be NULL */
	void *arg;			/* opaque argument for the constructor */
};
/*
 * pmemobj_zalloc -- allocates a new zeroed object
 *
 * Same as pmemobj_alloc with POBJ_FLAG_ZERO and no constructor.
 */
int
pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
	uint64_t type_num)
{
	LOG(3, "pop %p oidp %p size %zu type_num %llx",
		pop, oidp, size, (unsigned long long)type_num);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (size == 0) {
		ERR("allocation with size 0");
		errno = EINVAL;
		return -1;
	}

	PMEMOBJ_API_START();
	int rv = obj_alloc_construct(pop, oidp, size, type_num,
			POBJ_FLAG_ZERO, NULL, NULL);
	PMEMOBJ_API_END();
	return rv;
}
/*
 * obj_free -- (internal) free an object
 *
 * Atomically clears the caller's uuid (via a ulog entry) and deallocates
 * the object, zeroing oidp->off in the same operation.
 */
static void
obj_free(PMEMobjpool *pop, PMEMoid *oidp)
{
	ASSERTne(oidp, NULL);
	struct operation_context *ctx = pmalloc_operation_hold(pop);
	operation_add_entry(ctx, &oidp->pool_uuid_lo, 0, ULOG_OPERATION_SET);
	/* size 0 with a nonzero offset means deallocation */
	palloc_operation(&pop->heap, oidp->off, &oidp->off, 0, NULL, NULL,
			0, 0, 0, ctx);
	pmalloc_operation_release(pop);
}
/*
 * constructor_realloc -- (internal) constructor for pmemobj_realloc
 *
 * When zero-init was requested, zeroes only the portion of the new
 * allocation that extends past the old usable size.
 */
static int
constructor_realloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *args = arg;
	if (!args->zero_init)
		return 0;

	if (usable_size > args->old_size) {
		void *grown = (void *)((uintptr_t)ptr + args->old_size);
		size_t grown_len = usable_size - args->old_size;
		pmemops_memset(&pop->p_ops, grown, 0, grown_len, 0);
	}

	return 0;
}
/*
 * obj_realloc_common -- (internal) common routine for resizing
 * existing objects
 *
 * Mirrors realloc() semantics: NULL oid allocates, size 0 frees,
 * otherwise the object is resized in place (or moved by the allocator).
 */
static int
obj_realloc_common(PMEMobjpool *pop,
	PMEMoid *oidp, size_t size, type_num_t type_num, int zero_init)
{
	/* if OID is NULL just allocate memory */
	if (OBJ_OID_IS_NULL(*oidp)) {
		/* if size is 0 - do nothing */
		if (size == 0)
			return 0;
		return obj_alloc_construct(pop, oidp, size, type_num,
				POBJ_FLAG_ZERO, NULL, NULL);
	}
	if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
		ERR("requested size too large");
		errno = ENOMEM;
		return -1;
	}
	/* if size is 0 just free */
	if (size == 0) {
		obj_free(pop, oidp);
		return 0;
	}
	struct carg_realloc carg;
	carg.ptr = OBJ_OFF_TO_PTR(pop, oidp->off);
	carg.new_size = size;
	carg.old_size = pmemobj_alloc_usable_size(*oidp);
	carg.user_type = type_num;
	carg.constructor = NULL;
	carg.arg = NULL;
	carg.zero_init = zero_init;
	struct operation_context *ctx = pmalloc_operation_hold(pop);
	/* the allocator updates oidp->off atomically within the operation */
	int ret = palloc_operation(&pop->heap, oidp->off, &oidp->off,
			size, constructor_realloc, &carg, type_num, 0, 0, ctx);
	pmalloc_operation_release(pop);
	return ret;
}
/*
 * constructor_zrealloc_root -- (internal) constructor for pmemobj_root
 *
 * Zeroes any grown region (via constructor_realloc) and then runs the
 * user constructor, with the whole object bracketed for valgrind's
 * transaction tracking.
 */
static int
constructor_zrealloc_root(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);
	VALGRIND_ADD_TO_TX(ptr, usable_size);
	struct carg_realloc *carg = arg;
	/* carg->zero_init is always set by obj_alloc_root */
	constructor_realloc(pop, ptr, usable_size, arg);
	int ret = 0;
	if (carg->constructor)
		ret = carg->constructor(pop, ptr, carg->arg);
	VALGRIND_REMOVE_FROM_TX(ptr, usable_size);
	return ret;
}
/*
 * pmemobj_realloc -- resizes an existing object
 */
int
pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
		uint64_t type_num)
{
	ASSERTne(oidp, NULL);
	LOG(3, "pop %p oid.off 0x%016" PRIx64 " size %zu type_num %" PRIu64,
		pop, oidp->off, size, type_num);

	PMEMOBJ_API_START();
	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();
	ASSERT(OBJ_OID_IS_VALID(pop, *oidp));

	/* zero_init == 0: grown region is left uninitialized */
	int rv = obj_realloc_common(pop, oidp, size, (type_num_t)type_num, 0);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_zrealloc -- resizes an existing object, any new space is zeroed.
 */
int
pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
		uint64_t type_num)
{
	ASSERTne(oidp, NULL);
	LOG(3, "pop %p oid.off 0x%016" PRIx64 " size %zu type_num %" PRIu64,
		pop, oidp->off, size, type_num);

	PMEMOBJ_API_START();
	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();
	ASSERT(OBJ_OID_IS_VALID(pop, *oidp));

	/* zero_init == 1: grown region is zeroed */
	int rv = obj_realloc_common(pop, oidp, size, (type_num_t)type_num, 1);

	PMEMOBJ_API_END();
	return rv;
}
/* arguments for constructor_strdup */
struct carg_strdup {
	size_t size;	/* size of the string, including the terminator */
	const char *s;	/* source string to duplicate */
};
/*
 * constructor_strdup -- (internal) constructor of pmemobj_strdup
 *
 * Copies the source string (terminator included) into the new object.
 */
static int
constructor_strdup(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_strdup *args = arg;

	/* copy string */
	pmemops_memcpy(&pop->p_ops, ptr, args->s, args->size, 0);

	return 0;
}
/*
 * pmemobj_strdup -- allocates a new object with duplicate of the string s.
 */
int
pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s,
	uint64_t type_num)
{
	LOG(3, "pop %p oidp %p string %s type_num %" PRIu64,
		pop, oidp, s, type_num);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (s == NULL) {
		errno = EINVAL;
		return -1;
	}

	PMEMOBJ_API_START();
	struct carg_strdup carg;
	/* account for the NUL terminator */
	carg.size = (strlen(s) + 1) * sizeof(char);
	carg.s = s;

	int rv = obj_alloc_construct(pop, oidp, carg.size,
		(type_num_t)type_num, 0, constructor_strdup, &carg);

	PMEMOBJ_API_END();
	return rv;
}
/* arguments for constructor_wcsdup */
struct carg_wcsdup {
	size_t size;		/* size in bytes, including the terminator */
	const wchar_t *s;	/* source wide string to duplicate */
};
/*
 * constructor_wcsdup -- (internal) constructor of pmemobj_wcsdup
 *
 * Copies the source wide string (terminator included) into the new object.
 */
static int
constructor_wcsdup(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_wcsdup *args = arg;

	/* copy string */
	pmemops_memcpy(&pop->p_ops, ptr, args->s, args->size, 0);

	return 0;
}
/*
 * pmemobj_wcsdup -- allocates a new object with duplicate of the wide character
 * string s.
 */
int
pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s,
	uint64_t type_num)
{
	LOG(3, "pop %p oidp %p string %S type_num %" PRIu64,
		pop, oidp, s, type_num);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (s == NULL) {
		errno = EINVAL;
		return -1;
	}

	PMEMOBJ_API_START();
	struct carg_wcsdup carg;
	/* account for the NUL terminator; size is in bytes */
	carg.size = (wcslen(s) + 1) * sizeof(wchar_t);
	carg.s = s;

	int rv = obj_alloc_construct(pop, oidp, carg.size,
		(type_num_t)type_num, 0, constructor_wcsdup, &carg);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_free -- frees an existing object
 *
 * Freeing a NULL oid (off == 0) is a no-op, matching free(NULL).
 */
void
pmemobj_free(PMEMoid *oidp)
{
	ASSERTne(oidp, NULL);
	LOG(3, "oid.off 0x%016" PRIx64, oidp->off);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	if (oidp->off == 0)
		return;

	PMEMOBJ_API_START();
	PMEMobjpool *pool = pmemobj_pool_by_oid(*oidp);
	ASSERTne(pool, NULL);
	ASSERT(OBJ_OID_IS_VALID(pool, *oidp));

	obj_free(pool, oidp);
	PMEMOBJ_API_END();
}
/*
 * pmemobj_alloc_usable_size -- returns usable size of object
 * (0 for a NULL oid)
 */
size_t
pmemobj_alloc_usable_size(PMEMoid oid)
{
	LOG(3, "oid.off 0x%016" PRIx64, oid.off);

	if (oid.off == 0)
		return 0;

	PMEMobjpool *pool = pmemobj_pool_by_oid(oid);
	ASSERTne(pool, NULL);
	ASSERT(OBJ_OID_IS_VALID(pool, oid));

	return palloc_usable_size(&pool->heap, oid.off);
}
/*
 * pmemobj_memcpy_persist -- pmemobj version of memcpy
 * (copy is made persistent before returning)
 */
void *
pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src,
	size_t len)
{
	LOG(15, "pop %p dest %p src %p len %zu", pop, dest, src, len);
	PMEMOBJ_API_START();

	void *rv = pmemops_memcpy(&pop->p_ops, dest, src, len, 0);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_memset_persist -- pmemobj version of memset
 * (fill is made persistent before returning)
 */
void *
pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len)
{
	LOG(15, "pop %p dest %p c 0x%02x len %zu", pop, dest, c, len);
	PMEMOBJ_API_START();

	void *rv = pmemops_memset(&pop->p_ops, dest, c, len, 0);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_memcpy -- pmemobj version of memcpy with flags
 */
void *
pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len,
	unsigned flags)
{
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
			flags);
	PMEMOBJ_API_START();

	void *rv = pmemops_memcpy(&pop->p_ops, dest, src, len, flags);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_memmove -- pmemobj version of memmove with flags
 */
void *
pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len,
	unsigned flags)
{
	LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
			flags);
	PMEMOBJ_API_START();

	void *rv = pmemops_memmove(&pop->p_ops, dest, src, len, flags);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_memset -- pmemobj version of memset with flags
 */
void *
pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags)
{
	LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
			flags);
	PMEMOBJ_API_START();

	void *rv = pmemops_memset(&pop->p_ops, dest, c, len, flags);

	PMEMOBJ_API_END();
	return rv;
}
/*
 * pmemobj_persist -- pmemobj version of pmem_persist
 *
 * Flushes the given range to the persistence domain via the pool's ops.
 */
void
pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len)
{
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	pmemops_persist(&pop->p_ops, addr, len);
}
/*
 * pmemobj_flush -- pmemobj version of pmem_flush
 *
 * Initiates flushing of the given range; pair with pmemobj_drain().
 */
void
pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len)
{
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);
	pmemops_flush(&pop->p_ops, addr, len);
}
/*
 * pmemobj_xpersist -- pmemobj version of pmem_persist with additional flags
 * argument
 */
int
pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags)
{
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);

	/* reject any flag bits outside the supported mask */
	if ((flags & ~OBJ_X_VALID_FLAGS) != 0) {
		errno = EINVAL;
		ERR("invalid flags 0x%x", flags);
		return -1;
	}

	return pmemops_xpersist(&pop->p_ops, addr, len, flags);
}
/*
 * pmemobj_xflush -- pmemobj version of pmem_flush with additional flags
 * argument
 */
int
pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags)
{
	LOG(15, "pop %p addr %p len %zu", pop, addr, len);

	/* reject any flag bits outside the supported mask */
	if ((flags & ~OBJ_X_VALID_FLAGS) != 0) {
		errno = EINVAL;
		ERR("invalid flags 0x%x", flags);
		return -1;
	}

	return pmemops_xflush(&pop->p_ops, addr, len, flags);
}
/*
 * pmemobj_drain -- pmemobj version of pmem_drain
 *
 * Waits for any outstanding flushes started with pmemobj_flush().
 */
void
pmemobj_drain(PMEMobjpool *pop)
{
	LOG(15, "pop %p", pop);
	pmemops_drain(&pop->p_ops);
}
/*
 * pmemobj_type_num -- returns type number of object
 */
uint64_t
pmemobj_type_num(PMEMoid oid)
{
	LOG(3, "oid.off 0x%016" PRIx64, oid.off);

	ASSERT(!OID_IS_NULL(oid));

	PMEMobjpool *pool = pmemobj_pool_by_oid(oid);
	ASSERTne(pool, NULL);
	ASSERT(OBJ_OID_IS_VALID(pool, oid));

	/* the type number is stored as the allocation's extra field */
	uint64_t type = palloc_extra(&pool->heap, oid.off);
	return type;
}
/* arguments for constructor_alloc_root */
struct carg_root {
	size_t size;			/* requested root object size */
	pmemobj_constr constructor;	/* user constructor, may be NULL */
	void *arg;			/* opaque argument for the constructor */
};
/*
 * obj_alloc_root -- (internal) allocate or resize the root object
 * (the header previously said "obj_realloc_root" -- stale name)
 *
 * Grows the root object to 'size' bytes, zeroing the new space and
 * running the user constructor; pop->root_size is updated atomically
 * within the same operation.
 */
static int
obj_alloc_root(PMEMobjpool *pop, size_t size,
	pmemobj_constr constructor, void *arg)
{
	LOG(3, "pop %p size %zu", pop, size);
	struct carg_realloc carg;
	carg.ptr = OBJ_OFF_TO_PTR(pop, pop->root_offset);
	carg.old_size = pop->root_size;
	carg.new_size = size;
	carg.user_type = POBJ_ROOT_TYPE_NUM;
	carg.constructor = constructor;
	carg.zero_init = 1;
	carg.arg = arg;
	struct operation_context *ctx = pmalloc_operation_hold(pop);
	/* persist the new root size as part of the same redo log */
	operation_add_entry(ctx, &pop->root_size, size, ULOG_OPERATION_SET);
	int ret = palloc_operation(&pop->heap, pop->root_offset,
			&pop->root_offset, size,
			constructor_zrealloc_root, &carg,
			POBJ_ROOT_TYPE_NUM, OBJ_INTERNAL_OBJECT_MASK, 0, ctx);
	pmalloc_operation_release(pop);
	return ret;
}
/*
 * pmemobj_root_size -- returns size of the root object,
 * 0 when no root object has been allocated yet
 */
size_t
pmemobj_root_size(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);

	if (pop->root_offset == 0 || pop->root_size == 0)
		return 0;

	return pop->root_size;
}
/*
 * pmemobj_root_construct -- returns root object
 *
 * Allocates (or grows) the root object to 'size' bytes under the root
 * lock, running 'constructor' on the new space. The root object can
 * only grow; a smaller 'size' returns the existing root unchanged.
 * Returns OID_NULL with errno set on failure.
 */
PMEMoid
pmemobj_root_construct(PMEMobjpool *pop, size_t size,
	pmemobj_constr constructor, void *arg)
{
	LOG(3, "pop %p size %zu constructor %p args %p", pop, size, constructor,
		arg);
	if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
		ERR("requested size too large");
		errno = ENOMEM;
		return OID_NULL;
	}
	/* a zero size is only valid once the root object already exists */
	if (size == 0 && pop->root_offset == 0) {
		/* fixed wording: was "requested size cannot equals zero" */
		ERR("requested size cannot equal zero");
		errno = EINVAL;
		return OID_NULL;
	}
	PMEMOBJ_API_START();
	PMEMoid root;
	pmemobj_mutex_lock_nofail(pop, &pop->rootlock);
	if (size > pop->root_size &&
		obj_alloc_root(pop, size, constructor, arg)) {
		pmemobj_mutex_unlock_nofail(pop, &pop->rootlock);
		/* fixed name: the failing call is obj_alloc_root */
		LOG(2, "obj_alloc_root failed");
		PMEMOBJ_API_END();
		return OID_NULL;
	}
	root.pool_uuid_lo = pop->uuid_lo;
	root.off = pop->root_offset;
	pmemobj_mutex_unlock_nofail(pop, &pop->rootlock);
	PMEMOBJ_API_END();
	return root;
}
/*
 * pmemobj_root -- returns root object
 * (equivalent to pmemobj_root_construct with no constructor)
 */
PMEMoid
pmemobj_root(PMEMobjpool *pop, size_t size)
{
	LOG(3, "pop %p size %zu", pop, size);

	PMEMOBJ_API_START();
	PMEMoid root = pmemobj_root_construct(pop, size, NULL, NULL);
	PMEMOBJ_API_END();
	return root;
}
/*
 * pmemobj_first - returns first object of specified type
 * (allocator-internal objects are skipped via pmemobj_next)
 */
PMEMoid
pmemobj_first(PMEMobjpool *pop)
{
	LOG(3, "pop %p", pop);

	PMEMoid first = {0, 0};

	uint64_t off = palloc_first(&pop->heap);
	if (off == 0)
		return first;

	first.off = off;
	first.pool_uuid_lo = pop->uuid_lo;

	/* skip allocator-internal objects (e.g. the root object) */
	if (palloc_flags(&pop->heap, off) & OBJ_INTERNAL_OBJECT_MASK)
		return pmemobj_next(first);

	return first;
}
/*
 * pmemobj_next - returns next object of specified type
 *
 * Allocator-internal objects are skipped. Iterates instead of the
 * previous tail recursion so a long run of internal objects cannot
 * grow the stack without bound.
 */
PMEMoid
pmemobj_next(PMEMoid oid)
{
	LOG(3, "oid.off 0x%016" PRIx64, oid.off);
	if (oid.off == 0)
		return OID_NULL;
	PMEMobjpool *pop = pmemobj_pool_by_oid(oid);
	ASSERTne(pop, NULL);
	ASSERT(OBJ_OID_IS_VALID(pop, oid));
	uint64_t off = oid.off;
	while ((off = palloc_next(&pop->heap, off)) != 0) {
		/* return the first object that is not allocator-internal */
		if ((palloc_flags(&pop->heap, off) &
				OBJ_INTERNAL_OBJECT_MASK) == 0) {
			PMEMoid ret = {0, 0};
			ret.off = off;
			ret.pool_uuid_lo = pop->uuid_lo;
			return ret;
		}
	}
	return OID_NULL;
}
/*
 * pmemobj_reserve -- reserves a single object
 * (the reservation is published later via pmemobj_publish)
 */
PMEMoid
pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act,
	size_t size, uint64_t type_num)
{
	LOG(3, "pop %p act %p size %zu type_num %llx",
		pop, act, size,
		(unsigned long long)type_num);

	PMEMOBJ_API_START();
	PMEMoid oid = OID_NULL;

	int err = palloc_reserve(&pop->heap, size, NULL, NULL, type_num,
		0, 0, act);
	if (err == 0) {
		oid.off = act->heap.offset;
		oid.pool_uuid_lo = pop->uuid_lo;
	}

	PMEMOBJ_API_END();
	return oid;
}
/*
 * pmemobj_xreserve -- reserves a single object, with flags
 * (supports zero-init and allocation-class selection)
 */
PMEMoid
pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act,
	size_t size, uint64_t type_num, uint64_t flags)
{
	LOG(3, "pop %p act %p size %zu type_num %llx flags %llx",
		pop, act, size,
		(unsigned long long)type_num, (unsigned long long)flags);

	PMEMoid oid = OID_NULL;

	uint64_t bad_flags = flags & ~POBJ_ACTION_XRESERVE_VALID_FLAGS;
	if (bad_flags != 0) {
		ERR("unknown flags 0x%" PRIx64, bad_flags);
		errno = EINVAL;
		return oid;
	}

	PMEMOBJ_API_START();
	struct constr_args carg;
	carg.zero_init = flags & POBJ_FLAG_ZERO;
	carg.constructor = NULL;
	carg.arg = NULL;

	int err = palloc_reserve(&pop->heap, size, constructor_alloc, &carg,
		type_num, 0, CLASS_ID_FROM_FLAG(flags), act);
	if (err == 0) {
		oid.off = act->heap.offset;
		oid.pool_uuid_lo = pop->uuid_lo;
	}

	PMEMOBJ_API_END();
	return oid;
}
/*
 * pmemobj_set_value -- creates an action to set a value
 *
 * The write to '*ptr' is deferred until the action is published.
 * NOTE(review): unlike the other action APIs, no LOG/API_START
 * instrumentation here -- confirm this is intentional.
 */
void
pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act,
	uint64_t *ptr, uint64_t value)
{
	palloc_set_value(&pop->heap, act, ptr, value);
}
/*
 * pmemobj_defer_free -- creates a deferred free action
 *
 * The free only takes effect once the action is published.
 * 'oid' must not be OID_NULL (debug-build assertion).
 */
void
pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act)
{
	ASSERT(!OID_IS_NULL(oid));
	palloc_defer_free(&pop->heap, oid.off, act);
}
/*
 * pmemobj_publish -- publishes a collection of actions
 *
 * Reserves enough redo-log space for one value entry per action, then
 * publishes all of them atomically. Returns 0 on success, -1 when the
 * log cannot be extended.
 */
int
pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt)
{
	PMEMOBJ_API_START();
	struct operation_context *ctx = pmalloc_operation_hold(pop);

	/* one ulog value entry is needed per published action */
	size_t entries_size = actvcnt * sizeof(struct ulog_entry_val);

	if (operation_reserve(ctx, entries_size) != 0) {
		/*
		 * The original code returned without releasing the held
		 * operation context, leaking it on this error path.
		 */
		pmalloc_operation_release(pop);
		PMEMOBJ_API_END();
		return -1;
	}

	palloc_publish(&pop->heap, actv, actvcnt, ctx);

	pmalloc_operation_release(pop);

	PMEMOBJ_API_END();
	return 0;
}
/*
 * pmemobj_cancel -- cancels collection of actions
 *
 * Undoes actions created by pmemobj_reserve/pmemobj_defer_free/
 * pmemobj_set_value without making their effects persistent.
 */
void
pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt)
{
	PMEMOBJ_API_START();
	palloc_cancel(&pop->heap, actv, actvcnt);
	PMEMOBJ_API_END();
}
/*
 * pmemobj_list_insert -- adds object to a list
 *
 * Inserts 'oid' before or after 'dest' (depending on 'before') on the
 * list anchored at 'head'. 'pe_offset' is the byte offset of the
 * struct list_entry within each element.
 */
int
pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head,
	PMEMoid dest, int before, PMEMoid oid)
{
	LOG(3, "pop %p pe_offset %zu head %p dest.off 0x%016" PRIx64
		" before %d oid.off 0x%016" PRIx64,
		pop, pe_offset, head, dest.off, before, oid.off);
	PMEMOBJ_API_START();

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();
	ASSERT(OBJ_OID_IS_VALID(pop, oid));
	ASSERT(OBJ_OID_IS_VALID(pop, dest));

	/*
	 * NOTE(review): if usable size were smaller than
	 * sizeof(struct list_entry) the size_t subtraction below would
	 * wrap; debug-only ASSERTs, so no release-build impact.
	 */
	ASSERT(pe_offset <= pmemobj_alloc_usable_size(dest)
		- sizeof(struct list_entry));
	ASSERT(pe_offset <= pmemobj_alloc_usable_size(oid)
		- sizeof(struct list_entry));

	int ret = list_insert(pop, (ssize_t)pe_offset, head, dest, before, oid);

	PMEMOBJ_API_END();
	return ret;
}
/*
 * pmemobj_list_insert_new -- adds new object to a list
 *
 * Allocates a new object of 'size'/'type_num' (running the optional
 * user constructor) and links it relative to 'dest'. On failure the
 * returned oid stays OID_NULL ('retoid' is only set on success by
 * list_insert_new_user).
 */
PMEMoid
pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head,
	PMEMoid dest, int before, size_t size,
	uint64_t type_num,
	pmemobj_constr constructor, void *arg)
{
	LOG(3, "pop %p pe_offset %zu head %p dest.off 0x%016" PRIx64
		" before %d size %zu type_num %" PRIu64,
		pop, pe_offset, head, dest.off, before, size, type_num);

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();
	ASSERT(OBJ_OID_IS_VALID(pop, dest));

	ASSERT(pe_offset <= pmemobj_alloc_usable_size(dest)
		- sizeof(struct list_entry));
	/* NOTE(review): size_t underflow if size < sizeof(list_entry) */
	ASSERT(pe_offset <= size - sizeof(struct list_entry));

	if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
		ERR("requested size too large");
		errno = ENOMEM;
		return OID_NULL;
	}

	PMEMOBJ_API_START();

	/* no zero-init; the user constructor initializes the content */
	struct constr_args carg;
	carg.constructor = constructor;
	carg.arg = arg;
	carg.zero_init = 0;

	PMEMoid retoid = OID_NULL;
	list_insert_new_user(pop, pe_offset, head, dest, before, size, type_num,
		constructor_alloc, &carg, &retoid);

	PMEMOBJ_API_END();
	return retoid;
}
/*
 * pmemobj_list_remove -- removes object from a list
 *
 * When 'free' is non-zero the object is also deallocated.
 */
int
pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head,
	PMEMoid oid, int free)
{
	LOG(3, "pop %p pe_offset %zu head %p oid.off 0x%016" PRIx64 " free %d",
		pop, pe_offset, head, oid.off, free);
	PMEMOBJ_API_START();

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();
	ASSERT(OBJ_OID_IS_VALID(pop, oid));

	ASSERT(pe_offset <= pmemobj_alloc_usable_size(oid)
		- sizeof(struct list_entry));

	/* unlink-and-free and plain unlink take different code paths */
	int ret = free ?
		list_remove_free_user(pop, pe_offset, head, &oid) :
		list_remove(pop, (ssize_t)pe_offset, head, oid);

	PMEMOBJ_API_END();
	return ret;
}
/*
 * pmemobj_list_move -- moves object between lists
 *
 * Removes 'oid' from the list anchored at 'head_old' and inserts it
 * relative to 'dest' on the list anchored at 'head_new'. Each list may
 * keep its list_entry at a different offset within the element.
 */
int
pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old,
	size_t pe_new_offset, void *head_new,
	PMEMoid dest, int before, PMEMoid oid)
{
	LOG(3, "pop %p pe_old_offset %zu pe_new_offset %zu"
		" head_old %p head_new %p dest.off 0x%016" PRIx64
		" before %d oid.off 0x%016" PRIx64 "",
		pop, pe_old_offset, pe_new_offset,
		head_old, head_new, dest.off, before, oid.off);
	PMEMOBJ_API_START();

	/* log notice message if used inside a transaction */
	_POBJ_DEBUG_NOTICE_IN_TX();

	ASSERT(OBJ_OID_IS_VALID(pop, oid));
	ASSERT(OBJ_OID_IS_VALID(pop, dest));

	/* both offsets must fit inside both objects (debug-only checks) */
	ASSERT(pe_old_offset <= pmemobj_alloc_usable_size(oid)
		- sizeof(struct list_entry));
	ASSERT(pe_new_offset <= pmemobj_alloc_usable_size(oid)
		- sizeof(struct list_entry));
	ASSERT(pe_old_offset <= pmemobj_alloc_usable_size(dest)
		- sizeof(struct list_entry));
	ASSERT(pe_new_offset <= pmemobj_alloc_usable_size(dest)
		- sizeof(struct list_entry));

	int ret = list_move(pop, pe_old_offset, head_old,
		pe_new_offset, head_new,
		dest, before, oid);

	PMEMOBJ_API_END();
	return ret;
}
/*
 * pmemobj_ctl_getU -- programmatically executes a read ctl query
 *
 * On non-Windows builds these U (UTF-8) variants are internal helpers
 * wrapped by the plain-named API below.
 */
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg)
{
	LOG(3, "pop %p name %s arg %p", pop, name, arg);
	/* pop may be NULL -- the query then runs without a pool's ctl tree */
	return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
		CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}

/*
 * pmemobj_ctl_setU -- programmatically executes a write ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg)
{
	LOG(3, "pop %p name %s arg %p", pop, name, arg);
	return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
		CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}

/*
 * pmemobj_ctl_execU -- programmatically executes a runnable ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg)
{
	LOG(3, "pop %p name %s arg %p", pop, name, arg);
	return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
		CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
 * pmemobj_ctl_get -- programmatically executes a read ctl query
 *
 * NOTE(review): not wrapped in PMEMOBJ_API_START/END, unlike _set and
 * _exec below -- confirm the asymmetry is intentional.
 */
int
pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg)
{
	return pmemobj_ctl_getU(pop, name, arg);
}

/*
 * pmemobj_ctl_set -- programmatically executes a write ctl query
 */
int
pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg)
{
	PMEMOBJ_API_START();

	int ret = pmemobj_ctl_setU(pop, name, arg);

	PMEMOBJ_API_END();
	return ret;
}

/*
 * pmemobj_ctl_exec -- programmatically executes a runnable ctl query
 */
int
pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg)
{
	PMEMOBJ_API_START();

	int ret = pmemobj_ctl_execU(pop, name, arg);

	PMEMOBJ_API_END();
	return ret;
}
#else
/*
 * pmemobj_ctl_getW -- programmatically executes a read ctl query
 *
 * Windows variant: converts the wide-char query name to UTF-8 first.
 */
int
pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemobj_ctl_getU(pop, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemobj_ctl_setW -- programmatically executes a write ctl query
 *
 * Windows variant: converts the wide-char query name to UTF-8 first.
 */
int
pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemobj_ctl_setU(pop, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemobj_ctl_execW -- programmatically executes a runnable ctl query
 *
 * Windows variant: converts the wide-char query name to UTF-8 first.
 */
int
pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemobj_ctl_execU(pop, uname, arg);
	util_free_UTF8(uname);

	return ret;
}
#endif
/*
 * _pobj_debug_notice -- logs notice message if used inside a transaction
 *
 * Effective in DEBUG builds only; release builds compile this to an
 * empty function. 'file' may be NULL, in which case the location part
 * of the message is omitted.
 */
void
_pobj_debug_notice(const char *api_name, const char *file, int line)
{
#ifdef DEBUG
	if (pmemobj_tx_stage() != TX_STAGE_NONE) {
		if (file)
			LOG(4, "Notice: non-transactional API"
				" used inside a transaction (%s in %s:%d)",
				api_name, file, line);
		else
			LOG(4, "Notice: non-transactional API"
				" used inside a transaction (%s)", api_name);
	}
#endif /* DEBUG */
}
| 76,103 | 21.562704 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/list.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
/* per-element links of a doubly-linked persistent list */
struct list_entry {
	PMEMoid pe_next;	/* next element */
	PMEMoid pe_prev;	/* previous element */
};

/* list anchor; 'lock' serializes modifications of the list */
struct list_head {
	PMEMoid pe_first;	/* first element on the list */
	PMEMmutex lock;
};

/* allocates a new object and links it into the user's list */
int list_insert_new_user(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
	size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
	PMEMoid *oidp);

/* links an existing object into a list */
int list_insert(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
	PMEMoid oid);

/* unlinks an object from the user's list and frees it */
int list_remove_free_user(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head,
	PMEMoid *oidp);

/* unlinks an existing object from a list */
int list_remove(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head,
	PMEMoid oid);

/* moves an object from one list to another */
int list_move(PMEMobjpool *pop,
	size_t pe_offset_old, struct list_head *head_old,
	size_t pe_offset_new, struct list_head *head_new,
	PMEMoid dest, int before, PMEMoid oid);

/* moves an object between lists without a user-visible entry offset */
void list_move_oob(PMEMobjpool *pop,
	struct list_head *head_old, struct list_head *head_new,
	PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif
| 2,891 | 29.765957 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memops.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memops.c -- aggregated memory operations helper implementation
*
* The operation collects all of the required memory modifications that
* need to happen in an atomic way (all of them or none), and abstracts
* away the storage type (transient/persistent) and the underlying
* implementation of how it's actually performed - in some cases using
* the redo log is unnecessary and the allocation process can be sped up
* a bit by completely omitting that whole machinery.
*
* The modifications are not visible until the context is processed.
*/
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "valgrind_internal.h"
#include "vecq.h"
/* initial capacity (in bytes) of the DRAM-resident logs */
#define ULOG_BASE_SIZE 1024
/* number of most-recent entries scanned when merging value operations */
#define OP_MERGE_SEARCH 64

struct operation_log {
	size_t capacity; /* capacity of the ulog log */
	size_t offset; /* data offset inside of the log */
	struct ulog *ulog; /* DRAM allocated log of modifications */
};

/*
 * operation_context -- context of an ongoing palloc operation
 */
struct operation_context {
	enum log_type type; /* redo or undo */

	ulog_extend_fn extend; /* function to allocate next ulog */
	ulog_free_fn ulog_free; /* function to free next ulogs */

	const struct pmem_ops *p_ops; /* pool's persistent memory operations */
	struct pmem_ops t_ops; /* used for transient data processing */
	struct pmem_ops s_ops; /* used for shadow copy data processing */

	size_t ulog_curr_offset; /* offset in the log for buffer stores */
	size_t ulog_curr_capacity; /* capacity of the current log */
	struct ulog *ulog_curr; /* current persistent log */
	size_t total_logged; /* total amount of buffer stores in the logs */

	struct ulog *ulog; /* pointer to the persistent ulog log */
	size_t ulog_base_nbytes; /* available bytes in initial ulog log */
	size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */

	struct ulog_next next; /* vector of 'next' fields of persistent ulog */

	int in_progress; /* operation sanity check */

	struct operation_log pshadow_ops; /* shadow copy of persistent ulog */
	struct operation_log transient_ops; /* log of transient changes */

	/* collection used to look for potential merge candidates */
	VECQ(, struct ulog_entry_val *) merge_entries;
};
/*
 * operation_log_transient_init -- (internal) initialize operation log
 * containing transient memory resident changes
 *
 * Returns 0 on success, -1 when the DRAM allocation fails.
 */
static int
operation_log_transient_init(struct operation_log *log)
{
	log->capacity = ULOG_BASE_SIZE;
	log->offset = 0;

	/* the transient log lives entirely in DRAM */
	struct ulog *ulog = Zalloc(sizeof(struct ulog) + ULOG_BASE_SIZE);
	if (ulog == NULL) {
		ERR("!Zalloc");
		return -1;
	}

	/* initialize underlying redo log structure */
	ulog->capacity = ULOG_BASE_SIZE;
	log->ulog = ulog;

	return 0;
}
/*
 * operation_log_persistent_init -- (internal) initialize operation log
 * containing persistent memory resident changes
 *
 * The shadow log is allocated in DRAM with ULOG_BASE_SIZE bytes, but
 * its ulog capacity is set to the base capacity of the persistent log
 * it shadows (ulog_base_nbytes).
 */
static int
operation_log_persistent_init(struct operation_log *log,
	size_t ulog_base_nbytes)
{
	log->capacity = ULOG_BASE_SIZE;
	log->offset = 0;

	struct ulog *src = Zalloc(sizeof(struct ulog) +
		ULOG_BASE_SIZE);
	if (src == NULL) {
		ERR("!Zalloc");
		return -1;
	}

	/* initialize underlying redo log structure */
	src->capacity = ulog_base_nbytes;
	memset(src->unused, 0, sizeof(src->unused));

	log->ulog = src;

	return 0;
}
/*
 * operation_transient_clean -- cleans pmemcheck address state
 *
 * Used as the pmem_ops "flush" callback for transient logs; it only
 * informs Valgrind, no data is actually flushed. 'base' and 'flags'
 * exist to match the callback signature and are unused.
 */
static int
operation_transient_clean(void *base, const void *addr, size_t len,
	unsigned flags)
{
	VALGRIND_SET_CLEAN(addr, len);

	return 0;
}

/*
 * operation_transient_memcpy -- transient memcpy wrapper
 *
 * Plain DRAM memcpy used as the pmem_ops "memcpy" callback for
 * transient logs; 'base' and 'flags' are unused.
 */
static void *
operation_transient_memcpy(void *base, void *dest, const void *src, size_t len,
	unsigned flags)
{
	return memcpy(dest, src, len);
}
/*
 * operation_new -- creates new operation context
 *
 * 'ulog' is the persistent log the context operates on and
 * 'ulog_base_nbytes' the capacity of its first (embedded) portion.
 * Returns NULL when any of the DRAM allocations fail.
 */
struct operation_context *
operation_new(struct ulog *ulog, size_t ulog_base_nbytes,
	ulog_extend_fn extend, ulog_free_fn ulog_free,
	const struct pmem_ops *p_ops, enum log_type type)
{
	struct operation_context *ctx = Zalloc(sizeof(*ctx));
	if (ctx == NULL) {
		ERR("!Zalloc");
		goto error_ctx_alloc;
	}

	ctx->ulog = ulog;
	ctx->ulog_base_nbytes = ulog_base_nbytes;
	/* the total capacity includes all chained 'next' logs */
	ctx->ulog_capacity = ulog_capacity(ulog,
		ulog_base_nbytes, p_ops);
	ctx->extend = extend;
	ctx->ulog_free = ulog_free;
	ctx->in_progress = 0;
	VEC_INIT(&ctx->next);
	ulog_rebuild_next_vec(ulog, &ctx->next, p_ops);
	ctx->p_ops = p_ops;
	ctx->type = type;

	ctx->ulog_curr_offset = 0;
	ctx->ulog_curr_capacity = 0;
	ctx->ulog_curr = NULL;

	/* transient entries are applied with plain DRAM operations */
	ctx->t_ops.base = NULL;
	ctx->t_ops.flush = operation_transient_clean;
	ctx->t_ops.memcpy = operation_transient_memcpy;

	/* shadow entries resolve offsets against the real pool base */
	ctx->s_ops.base = p_ops->base;
	ctx->s_ops.flush = operation_transient_clean;
	ctx->s_ops.memcpy = operation_transient_memcpy;

	VECQ_INIT(&ctx->merge_entries);

	if (operation_log_transient_init(&ctx->transient_ops) != 0)
		goto error_ulog_alloc;

	if (operation_log_persistent_init(&ctx->pshadow_ops,
		ulog_base_nbytes) != 0)
		goto error_ulog_alloc;

	return ctx;

error_ulog_alloc:
	operation_delete(ctx);
error_ctx_alloc:
	return NULL;
}
/*
 * operation_delete -- deletes operation context
 *
 * Frees only the DRAM-resident state; the persistent ulog itself is
 * left untouched.
 */
void
operation_delete(struct operation_context *ctx)
{
	VECQ_DELETE(&ctx->merge_entries);
	VEC_DELETE(&ctx->next);
	Free(ctx->pshadow_ops.ulog);
	Free(ctx->transient_ops.ulog);
	Free(ctx);
}
/*
 * operation_merge -- (internal) performs operation on a field
 *
 * Folds 'value' into an existing log entry according to the
 * operation type.
 */
static inline void
operation_merge(struct ulog_entry_base *entry, uint64_t value,
	ulog_operation_type type)
{
	struct ulog_entry_val *e = (struct ulog_entry_val *)entry;

	if (type == ULOG_OPERATION_AND)
		e->value &= value;
	else if (type == ULOG_OPERATION_OR)
		e->value |= value;
	else if (type == ULOG_OPERATION_SET)
		e->value = value;
	else
		ASSERT(0); /* unreachable */
}
/*
 * operation_try_merge_entry -- tries to merge the incoming log entry with
 *	existing entries
 *
 * Because this requires a reverse foreach, it cannot be implemented using
 * the on-media ulog log structure since there's no way to find what's
 * the previous entry in the log. Instead, the last N entries are stored
 * in a collection and traversed backwards.
 */
static int
operation_try_merge_entry(struct operation_context *ctx,
	void *ptr, uint64_t value, ulog_operation_type type)
{
	uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr);

	struct ulog_entry_val *e;
	VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) {
		if (ulog_entry_offset(&e->base) != offset)
			continue;

		/* same destination: merge only if the operation matches */
		if (ulog_entry_type(&e->base) != type)
			break;

		operation_merge(&e->base, value, type);
		return 1;
	}

	return 0;
}
/*
 * operation_merge_entry_add -- adds a new entry to the merge collection,
 *	keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion.
 */
static void
operation_merge_entry_add(struct operation_context *ctx,
	struct ulog_entry_val *entry)
{
	/* evict the oldest candidate once the search window is full */
	if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH)
		(void) VECQ_DEQUEUE(&ctx->merge_entries);

	if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) {
		/* this is fine, only runtime perf will get slower */
		LOG(2, "out of memory - unable to track entries");
	}
}
/*
 * operation_add_typed_entry -- adds a new entry to the current operation
 *
 * If a recent entry modifies the same address with the same operation
 * type, the value is merged into it instead of appending a new entry.
 * Returns 0 on success, -1 when growing the DRAM log fails.
 */
int
operation_add_typed_entry(struct operation_context *ctx,
	void *ptr, uint64_t value,
	ulog_operation_type type, enum operation_log_type log_type)
{
	struct operation_log *oplog = log_type == LOG_PERSISTENT ?
		&ctx->pshadow_ops : &ctx->transient_ops;

	/*
	 * Always make sure to have one extra spare cacheline so that the
	 * ulog log entry creation has enough room for zeroing.
	 */
	if (oplog->offset + CACHELINE_SIZE == oplog->capacity) {
		size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE;
		struct ulog *ulog = Realloc(oplog->ulog,
			SIZEOF_ULOG(ncapacity));
		if (ulog == NULL)
			return -1;
		oplog->capacity += ULOG_BASE_SIZE;
		oplog->ulog = ulog;

		/*
		 * Realloc invalidated the ulog entries that are inside of this
		 * vector, need to clear it to avoid use after free.
		 */
		VECQ_CLEAR(&ctx->merge_entries);
	}

	/* merging is only attempted for the persistent shadow log */
	if (log_type == LOG_PERSISTENT &&
		operation_try_merge_entry(ctx, ptr, value, type) != 0)
		return 0;

	struct ulog_entry_val *entry = ulog_entry_val_create(
		oplog->ulog, oplog->offset, ptr, value, type,
		log_type == LOG_TRANSIENT ? &ctx->t_ops : &ctx->s_ops);

	if (log_type == LOG_PERSISTENT)
		operation_merge_entry_add(ctx, entry);

	oplog->offset += ulog_entry_size(&entry->base);

	return 0;
}
/*
 * operation_add_entry -- adds new entry to the current operation with
 *	entry type autodetected based on the memory location
 */
int
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
	ulog_operation_type type)
{
	const struct pmem_ops *p_ops = ctx->p_ops;
	PMEMobjpool *pop = (PMEMobjpool *)p_ops->base;

	/* pick the log based on whether the target lies within the pool */
	enum operation_log_type log_type = LOG_TRANSIENT;
	if (OBJ_OFF_IS_VALID(pop, (uintptr_t)ptr - (uintptr_t)p_ops->base))
		log_type = LOG_PERSISTENT;

	return operation_add_typed_entry(ctx, ptr, value, type, log_type);
}
/*
 * operation_add_buffer -- adds a buffer operation to the log
 *
 * A buffer that does not fit into the current persistent log is split
 * across successive logs. The original implementation recursed once per
 * chunk; this version iterates instead, keeping stack usage constant
 * for arbitrarily large buffers. Returns 0 on success, -1 when log
 * space cannot be reserved.
 */
int
operation_add_buffer(struct operation_context *ctx,
	void *dest, void *src, size_t size, ulog_operation_type type)
{
	for (;;) {
		size_t real_size = size + sizeof(struct ulog_entry_buf);

		/* if there's no space left in the log, reserve some more */
		if (ctx->ulog_curr_capacity == 0) {
			if (operation_reserve(ctx,
				ctx->total_logged + real_size) != 0)
				return -1;

			ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog :
				ulog_next(ctx->ulog_curr, ctx->p_ops);

			ctx->ulog_curr_offset = 0;
			ctx->ulog_curr_capacity = ctx->ulog_curr->capacity;
		}

		size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity);
		size_t data_size = curr_size - sizeof(struct ulog_entry_buf);

		/* create a persistent log entry */
		struct ulog_entry_buf *e = ulog_entry_buf_create(
			ctx->ulog_curr,
			ctx->ulog_curr_offset,
			dest, src, data_size,
			type, ctx->p_ops);

		size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE);

		ASSERT(entry_size == ulog_entry_size(&e->base));
		ASSERT(entry_size <= ctx->ulog_curr_capacity);

		ctx->total_logged += entry_size;
		ctx->ulog_curr_offset += entry_size;
		ctx->ulog_curr_capacity -= entry_size;

		if (size == data_size)
			return 0;

		/* continue with the remainder of the buffer */
		dest = (char *)dest + data_size;
		src = (char *)src + data_size;
		size -= data_size;
	}
}
/*
 * operation_process_persistent_redo -- (internal) process using ulog
 *
 * Stores the DRAM shadow log into the persistent ulog, applies it, and
 * then clobbers the persistent log so it cannot be replayed again.
 */
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
	ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);

	ulog_store(ctx->ulog, ctx->pshadow_ops.ulog,
		ctx->pshadow_ops.offset, ctx->ulog_base_nbytes,
		&ctx->next, ctx->p_ops);

	/* apply the shadow copy; offsets are validated against the pool */
	ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
		ctx->p_ops);

	ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops);
}

/*
 * operation_process_persistent_undo -- (internal) process using ulog
 *
 * Undo entries already reside in the persistent log, so they are
 * applied directly from it.
 */
static void
operation_process_persistent_undo(struct operation_context *ctx)
{
	ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);

	ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops);
}
/*
 * operation_reserve -- (internal) reserves new capacity in persistent ulog log
 *
 * Extends the chained persistent log (via the configured extend
 * callback) until it can hold at least 'new_capacity' bytes.
 */
int
operation_reserve(struct operation_context *ctx, size_t new_capacity)
{
	/* already big enough -- nothing to do */
	if (new_capacity <= ctx->ulog_capacity)
		return 0;

	if (ctx->extend == NULL) {
		ERR("no extend function present");
		return -1;
	}

	if (ulog_reserve(ctx->ulog,
	    ctx->ulog_base_nbytes, &new_capacity, ctx->extend,
	    &ctx->next, ctx->p_ops) != 0)
		return -1;

	ctx->ulog_capacity = new_capacity;

	return 0;
}
/*
 * operation_init -- initializes runtime state of an operation
 *
 * Resets both DRAM logs and the merge-candidate queue. The Valgrind
 * annotations mark the reused memory as freshly allocated so stale
 * reads get reported.
 */
void
operation_init(struct operation_context *ctx)
{
	struct operation_log *plog = &ctx->pshadow_ops;
	struct operation_log *tlog = &ctx->transient_ops;

	VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
	VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) +
		tlog->capacity);
	VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) +
		plog->capacity);
	tlog->offset = 0;
	plog->offset = 0;
	VECQ_REINIT(&ctx->merge_entries);

	ctx->ulog_curr_offset = 0;
	ctx->ulog_curr_capacity = 0;
	ctx->ulog_curr = NULL;
	ctx->total_logged = 0;
}
/*
 * operation_start -- initializes and starts a new operation
 */
void
operation_start(struct operation_context *ctx)
{
	operation_init(ctx);

	ASSERTeq(ctx->in_progress, 0);
	ctx->in_progress = 1;
}

/*
 * operation_resume -- initializes and starts an operation that picks up
 * an existing, non-empty persistent log; total_logged is primed with
 * the number of bytes already present in it.
 */
void
operation_resume(struct operation_context *ctx)
{
	operation_init(ctx);

	ASSERTeq(ctx->in_progress, 0);
	ctx->in_progress = 1;

	ctx->total_logged = ulog_base_nbytes(ctx->ulog);
}
/*
 * operation_cancel -- cancels a running operation
 *
 * Only flips the sanity-check flag; accumulated log entries are simply
 * abandoned (they are reset by the next operation_init()).
 */
void
operation_cancel(struct operation_context *ctx)
{
	ASSERTeq(ctx->in_progress, 1);
	ctx->in_progress = 0;
}
/*
 * operation_process -- processes registered operations
 *
 * The order of processing is important: persistent, transient.
 * This is because the transient entries that reside on persistent memory might
 * require write to a location that is currently occupied by a valid persistent
 * state but becomes a transient state after operation is processed.
 */
void
operation_process(struct operation_context *ctx)
{
	/*
	 * If there's exactly one persistent entry there's no need to involve
	 * the redo log. We can simply assign the value, the operation will be
	 * atomic.
	 */
	int redo_process = ctx->type == LOG_TYPE_REDO &&
		ctx->pshadow_ops.offset != 0;
	if (redo_process &&
		ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) {
		struct ulog_entry_base *e = (struct ulog_entry_base *)
			ctx->pshadow_ops.ulog->data;
		ulog_operation_type t = ulog_entry_type(e);
		if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND ||
			t == ULOG_OPERATION_OR) {
			/* apply the lone value entry directly */
			ulog_entry_apply(e, 1, ctx->p_ops);
			redo_process = 0;
		}
	}

	if (redo_process)
		operation_process_persistent_redo(ctx);
	else if (ctx->type == LOG_TYPE_UNDO)
		operation_process_persistent_undo(ctx);

	/* process transient entries with transient memory ops */
	if (ctx->transient_ops.offset != 0)
		ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops);
}
/*
 * operation_finish -- finalizes the operation
 *
 * For redo logs this is the point where pending changes get processed.
 * For undo logs the persistent log is clobbered so the recorded changes
 * can no longer be rolled back, and the capacity bookkeeping is
 * refreshed since clobbering may shrink the log.
 */
void
operation_finish(struct operation_context *ctx)
{
	ASSERTeq(ctx->in_progress, 1);
	ctx->in_progress = 0;

	if (ctx->type == LOG_TYPE_REDO && ctx->pshadow_ops.offset != 0) {
		operation_process(ctx);
	} else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) {
		ulog_clobber_data(ctx->ulog,
			ctx->total_logged, ctx->ulog_base_nbytes,
			&ctx->next, ctx->ulog_free, ctx->p_ops);

		/* clobbering might have shrunk the ulog */
		ctx->ulog_capacity = ulog_capacity(ctx->ulog,
			ctx->ulog_base_nbytes, ctx->p_ops);
		VEC_CLEAR(&ctx->next);
		ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
	}
}
| 16,501 | 27.064626 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/stats.c
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* stats.c -- implementation of statistics
*/
#include "obj.h"
#include "stats.h"
/* read handler exposing the heap_curr_allocated persistent counter */
STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);

/* "stats.heap" subtree of the ctl namespace */
static const struct ctl_node CTL_NODE(heap)[] = {
	STATS_CTL_LEAF(persistent, curr_allocated),

	CTL_NODE_END
};
/*
 * CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled
 */
static int
CTL_READ_HANDLER(enabled)(void *ctx,
	enum ctl_query_source source, void *arg,
	struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;

	/* report the flag as a canonical 0/1 value */
	int *out = arg;
	*out = pop->stats->enabled > 0;

	return 0;
}
/*
 * CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting
 *
 * Any positive input enables counting; everything else disables it.
 */
static int
CTL_WRITE_HANDLER(enabled)(void *ctx,
	enum ctl_query_source source, void *arg,
	struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;

	int arg_in = *(int *)arg;

	pop->stats->enabled = arg_in > 0;

	return 0;
}

/* the 'enabled' leaf takes a boolean-style argument (CTL_ARG_BOOLEAN) */
static struct ctl_argument CTL_ARG(enabled) = CTL_ARG_BOOLEAN;

/* root of the "stats" ctl namespace */
static const struct ctl_node CTL_NODE(stats)[] = {
	CTL_CHILD(heap),
	CTL_LEAF_RW(enabled),

	CTL_NODE_END
};
/*
 * stats_new -- allocates and initializes statistics instance
 *
 * Returns NULL when either allocation fails. The original code
 * dereferenced the Malloc() result without a NULL check, which would
 * crash under memory pressure (CERT MEM32-C).
 */
struct stats *
stats_new(PMEMobjpool *pop)
{
	struct stats *s = Malloc(sizeof(*s));
	if (s == NULL) {
		ERR("!Malloc");
		return NULL;
	}

	s->enabled = 0;
	/* the persistent counters live inside the pool itself */
	s->persistent = &pop->stats_persistent;
	s->transient = Zalloc(sizeof(struct stats_transient));
	if (s->transient == NULL)
		goto error_transient_alloc;

	return s;

error_transient_alloc:
	Free(s);
	return NULL;
}
/*
 * stats_delete -- deletes statistics instance
 *
 * The persistent counters are flushed to media before the DRAM state
 * is freed.
 */
void
stats_delete(PMEMobjpool *pop, struct stats *s)
{
	pmemops_persist(&pop->p_ops, s->persistent,
	sizeof(struct stats_persistent));
	Free(s->transient);
	Free(s);
}

/*
 * stats_ctl_register -- registers ctl nodes for statistics
 */
void
stats_ctl_register(PMEMobjpool *pop)
{
	CTL_REGISTER_MODULE(pop->ctl, stats);
}
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ctl_debug.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl_debug.h -- definitions for the debug CTL namespace
*/
#ifndef LIBPMEMOBJ_CTL_DEBUG_H
#define LIBPMEMOBJ_CTL_DEBUG_H 1
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/* registers the "debug" CTL namespace for the given pool */
void debug_ctl_register(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CTL_DEBUG_H */
| 1,901 | 35.576923 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/heap.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, struct alloc_class *c);
struct bucket *
heap_bucket_acquire_by_id(struct palloc_heap *heap, uint8_t class_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
/*
 * heap_get_chunk_hdr -- returns the chunk header addressed by a memory
 * block's zone/chunk ids
 */
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
	return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
/*
 * heap_get_chunk -- returns the chunk data addressed by a memory block's
 * zone/chunk ids
 */
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
	return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
/*
 * heap_get_chunk_run -- returns the run metadata of the chunk addressed by
 * a memory block's zone/chunk ids
 */
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
	return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 4,468 | 32.350746 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/list.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* list.c -- implementation of persistent atomic lists module
*/
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "sync.h"
#include "valgrind_internal.h"
#include "memops.h"
/*
 * Offsets of the pe_prev.off/pe_next.off fields within a list entry;
 * used to compute redo-log target addresses of the link fields.
 */
#define PREV_OFF (offsetof(struct list_entry, pe_prev) + offsetof(PMEMoid, off))
#define NEXT_OFF (offsetof(struct list_entry, pe_next) + offsetof(PMEMoid, off))
/*
 * list_args_common -- common arguments for operations on list
 *
 * pe_offset - offset to list entry relative to user data
 * obj_doffset - offset to element's data relative to pmemobj pool
 * entry_ptr - list entry structure of element
 */
struct list_args_common {
	ssize_t pe_offset;
	uint64_t obj_doffset;
	struct list_entry *entry_ptr;
};
/*
 * list_args_insert -- arguments for inserting element to list
 *
 * head - list head
 * dest - destination element OID
 * dest_entry_ptr - list entry of destination element
 * before - insert before or after destination element
 */
struct list_args_insert {
	struct list_head *head;
	PMEMoid dest;
	struct list_entry *dest_entry_ptr;
	int before;
};
/*
 * list_args_reinsert -- arguments for reinserting element on list
 *
 * head - list head
 * entry_ptr - list entry of old element
 * obj_doffset - offset to element's data relative to pmemobj pool
 */
struct list_args_reinsert {
	struct list_head *head;
	struct list_entry *entry_ptr;
	uint64_t obj_doffset;
};
/*
 * list_args_remove -- arguments for removing element from list
 *
 * pe_offset - offset to list entry relative to user data
 * obj_doffset - offset to element's data relative to pmemobj pool
 * head - list head
 * entry_ptr - list entry structure of element
 */
struct list_args_remove {
	ssize_t pe_offset;
	uint64_t obj_doffset;
	struct list_head *head;
	struct list_entry *entry_ptr;
};
/*
 * list_mutexes_lock -- (internal) grab one or two locks in ascending
 * address order
 *
 * Returns 0 on success or the error code from pmemobj_mutex_lock.
 * On failure no locks are held.
 */
static inline int
list_mutexes_lock(PMEMobjpool *pop,
	struct list_head *head1, struct list_head *head2)
{
	ASSERTne(head1, NULL);

	/* a single (or identical) head needs only one lock */
	if (head2 == NULL || head1 == head2)
		return pmemobj_mutex_lock(pop, &head1->lock);

	/* order the two locks by address to avoid ABBA deadlocks */
	PMEMmutex *first = &head1->lock;
	PMEMmutex *second = &head2->lock;
	if ((uintptr_t)second < (uintptr_t)first) {
		PMEMmutex *tmp = first;
		first = second;
		second = tmp;
	}

	int err = pmemobj_mutex_lock(pop, first);
	if (err != 0)
		return err;

	err = pmemobj_mutex_lock(pop, second);
	if (err != 0) {
		/* roll back the first lock so the caller holds nothing */
		pmemobj_mutex_unlock(pop, first);
		return err;
	}

	return 0;
}
/*
 * list_mutexes_unlock -- (internal) release one or two locks
 */
static inline void
list_mutexes_unlock(PMEMobjpool *pop,
	struct list_head *head1, struct list_head *head2)
{
	ASSERTne(head1, NULL);

	pmemobj_mutex_unlock_nofail(pop, &head1->lock);

	/* the second lock is dropped only if it is a distinct one */
	if (head2 != NULL && head2 != head1)
		pmemobj_mutex_unlock_nofail(pop, &head2->lock);
}
/*
 * list_get_dest -- (internal) return destination object ID
 *
 * If the input dest is not OID_NULL returns dest.
 * If the input dest is OID_NULL and before is set returns first element.
 * If the input dest is OID_NULL and before is not set returns last element.
 */
static inline PMEMoid
list_get_dest(PMEMobjpool *pop, struct list_head *head, PMEMoid dest,
	ssize_t pe_offset, int before)
{
	/* explicit destination provided by the caller */
	if (dest.off != 0)
		return dest;

	/* empty list, or inserting relative to the head of the list */
	if (head->pe_first.off == 0 || !!before == POBJ_LIST_DEST_HEAD)
		return head->pe_first;

	/* otherwise the destination is the last element: first->prev */
	uintptr_t entry_off =
		(uintptr_t)((ssize_t)head->pe_first.off + pe_offset);
	struct list_entry *first =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop, entry_off);

	return first->pe_prev;
}
/*
 * list_set_oid_redo_log -- (internal) set PMEMoid value using redo log
 *
 * Queues redo-log entries that make *oidp point at the object located at
 * obj_doffset within this pool; the uuid entry is added only when it is
 * missing or stale.  Always returns 0.
 */
static size_t
list_set_oid_redo_log(PMEMobjpool *pop,
	struct operation_context *ctx,
	PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited)
{
	/* the target PMEMoid itself must reside inside this pool */
	ASSERT(OBJ_PTR_IS_VALID(pop, oidp));
	if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) {
		/* an already-initialized oid may only have a zeroed uuid */
		if (oidp_inited)
			ASSERTeq(oidp->pool_uuid_lo, 0);
		operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
			ULOG_OPERATION_SET);
	}
	operation_add_entry(ctx, &oidp->off, obj_doffset,
		ULOG_OPERATION_SET);
	return 0;
}
/*
 * list_update_head -- (internal) update pe_first entry in list head
 *
 * Queues redo-log entries that point head->pe_first at first_offset;
 * the uuid is filled in lazily the first time the head is ever used.
 * Always returns 0.
 */
static size_t
list_update_head(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_head *head, uint64_t first_offset)
{
	LOG(15, NULL);
	operation_add_entry(ctx, &head->pe_first.off, first_offset,
		ULOG_OPERATION_SET);
	if (head->pe_first.pool_uuid_lo == 0) {
		/* initialized once, never cleared afterwards */
		operation_add_entry(ctx, &head->pe_first.pool_uuid_lo,
			pop->uuid_lo, ULOG_OPERATION_SET);
	}
	return 0;
}
/*
 * u64_add_offset -- (internal) add signed offset to unsigned integer and check
 * for overflows
 */
static void
u64_add_offset(uint64_t *value, ssize_t off)
{
	uint64_t before = *value;

	if (off < 0) {
		*value -= (size_t)-off;
		ASSERT(*value < before); /* detect underflow */
	} else {
		*value += (size_t)off;
		ASSERT(*value >= before); /* detect overflow */
	}
}
/*
 * list_fill_entry_persist -- (internal) fill new entry using persist function
 *
 * Used for newly allocated objects.
 */
static void
list_fill_entry_persist(PMEMobjpool *pop, struct list_entry *entry_ptr,
	uint64_t next_offset, uint64_t prev_offset)
{
	LOG(15, NULL);
	/*
	 * Plain stores followed by a single persist are sufficient here --
	 * the object is freshly reserved, so no redo logging is required
	 * (see the caller in list_insert_new).
	 */
	VALGRIND_ADD_TO_TX(entry_ptr, sizeof(*entry_ptr));
	entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
	entry_ptr->pe_next.off = next_offset;
	entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
	entry_ptr->pe_prev.off = prev_offset;
	VALGRIND_REMOVE_FROM_TX(entry_ptr, sizeof(*entry_ptr));
	pmemops_persist(&pop->p_ops, entry_ptr, sizeof(*entry_ptr));
}
/*
 * list_fill_entry_redo_log -- (internal) fill new entry using redo log
 *
 * Used to update entry in existing object.  The pe_next/pe_prev offsets
 * are queued in the redo log; uuids (when set_uuid != 0) are written and
 * persisted directly.  Always returns 0.
 */
static size_t
list_fill_entry_redo_log(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_args_common *args,
	uint64_t next_offset, uint64_t prev_offset, int set_uuid)
{
	LOG(15, NULL);
	struct pmem_ops *ops = &pop->p_ops;
	ASSERTne(args->entry_ptr, NULL);
	ASSERTne(args->obj_doffset, 0);
	if (set_uuid) {
		VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo),
			sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
		VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo),
			sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
		/* don't need to fill pool uuid using redo log */
		args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
		args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
		VALGRIND_REMOVE_FROM_TX(
			&(args->entry_ptr->pe_next.pool_uuid_lo),
			sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
		VALGRIND_REMOVE_FROM_TX(
			&(args->entry_ptr->pe_prev.pool_uuid_lo),
			sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
		pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr));
	} else {
		/* uuids must already match the pool's uuid */
		ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo);
		ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo);
	}
	/* set current->next and current->prev using redo log */
	uint64_t next_off_off = args->obj_doffset + NEXT_OFF;
	uint64_t prev_off_off = args->obj_doffset + PREV_OFF;
	/* pe_offset may be negative, hence the checked signed add */
	u64_add_offset(&next_off_off, args->pe_offset);
	u64_add_offset(&prev_off_off, args->pe_offset);
	void *next_ptr = (char *)pop + next_off_off;
	void *prev_ptr = (char *)pop + prev_off_off;
	operation_add_entry(ctx, next_ptr, next_offset, ULOG_OPERATION_SET);
	operation_add_entry(ctx, prev_ptr, prev_offset, ULOG_OPERATION_SET);
	return 0;
}
/*
 * list_remove_single -- (internal) remove element from single list
 *
 * All pointer updates are queued in the redo log (ctx); nothing is
 * written to pmem directly here.
 */
static size_t
list_remove_single(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_args_remove *args)
{
	LOG(15, NULL);
	if (args->entry_ptr->pe_next.off == args->obj_doffset) {
		/* only one element on list */
		ASSERTeq(args->head->pe_first.off, args->obj_doffset);
		ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset);
		/* emptying the list is just clearing the head */
		return list_update_head(pop, ctx, args->head, 0);
	} else {
		/* set next->prev = prev and prev->next = next */
		uint64_t next_off = args->entry_ptr->pe_next.off;
		uint64_t next_prev_off = next_off + PREV_OFF;
		u64_add_offset(&next_prev_off, args->pe_offset);
		uint64_t prev_off = args->entry_ptr->pe_prev.off;
		uint64_t prev_next_off = prev_off + NEXT_OFF;
		u64_add_offset(&prev_next_off, args->pe_offset);
		void *prev_ptr = (char *)pop + next_prev_off;
		void *next_ptr = (char *)pop + prev_next_off;
		operation_add_entry(ctx, prev_ptr, prev_off,
			ULOG_OPERATION_SET);
		operation_add_entry(ctx, next_ptr, next_off,
			ULOG_OPERATION_SET);
		if (args->head->pe_first.off == args->obj_doffset) {
			/* removing element is the first one */
			return list_update_head(pop, ctx,
				args->head, next_off);
		} else {
			return 0;
		}
	}
}
/*
 * list_insert_before -- (internal) insert element at offset before an element
 *
 * Outputs (via next_offset/prev_offset) the link values the inserted
 * element must take; the neighbors' links are queued in the redo log.
 */
static size_t
list_insert_before(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_args_insert *args, struct list_args_common *args_common,
	uint64_t *next_offset, uint64_t *prev_offset)
{
	LOG(15, NULL);
	/* current->next = dest and current->prev = dest->prev */
	*next_offset = args->dest.off;
	*prev_offset = args->dest_entry_ptr->pe_prev.off;
	/* dest->prev = current and dest->prev->next = current */
	uint64_t dest_prev_off = args->dest.off + PREV_OFF;
	u64_add_offset(&dest_prev_off, args_common->pe_offset);
	uint64_t dest_prev_next_off = args->dest_entry_ptr->pe_prev.off +
		NEXT_OFF;
	u64_add_offset(&dest_prev_next_off, args_common->pe_offset);
	void *dest_prev_ptr = (char *)pop + dest_prev_off;
	void *dest_prev_next_ptr = (char *)pop + dest_prev_next_off;
	operation_add_entry(ctx, dest_prev_ptr, args_common->obj_doffset,
		ULOG_OPERATION_SET);
	operation_add_entry(ctx, dest_prev_next_ptr, args_common->obj_doffset,
		ULOG_OPERATION_SET);
	return 0;
}
/*
 * list_insert_after -- (internal) insert element at offset after an element
 *
 * Mirror image of list_insert_before: outputs the inserted element's
 * link values and queues the neighbors' updates in the redo log.
 */
static size_t
list_insert_after(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_args_insert *args, struct list_args_common *args_common,
	uint64_t *next_offset, uint64_t *prev_offset)
{
	LOG(15, NULL);
	/* current->next = dest->next and current->prev = dest */
	*next_offset = args->dest_entry_ptr->pe_next.off;
	*prev_offset = args->dest.off;
	/* dest->next = current and dest->next->prev = current */
	uint64_t dest_next_off = args->dest.off + NEXT_OFF;
	u64_add_offset(&dest_next_off, args_common->pe_offset);
	uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off +
		PREV_OFF;
	u64_add_offset(&dest_next_prev_off, args_common->pe_offset);
	void *dest_next_ptr = (char *)pop + dest_next_off;
	void *dest_next_prev_ptr = (char *)pop + dest_next_prev_off;
	operation_add_entry(ctx, dest_next_ptr, args_common->obj_doffset,
		ULOG_OPERATION_SET);
	operation_add_entry(ctx, dest_next_prev_ptr, args_common->obj_doffset,
		ULOG_OPERATION_SET);
	return 0;
}
/*
 * list_insert_user -- (internal) insert element at offset to a user list
 *
 * Dispatches to the empty-list / insert-before / insert-after cases and
 * returns (via next_offset/prev_offset) the link values for the new
 * element, which the caller writes with persist or redo log.
 */
static size_t
list_insert_user(PMEMobjpool *pop,
	struct operation_context *ctx,
	struct list_args_insert *args, struct list_args_common *args_common,
	uint64_t *next_offset, uint64_t *prev_offset)
{
	LOG(15, NULL);
	if (args->dest.off == 0) {
		/* inserting the first element on list */
		ASSERTeq(args->head->pe_first.off, 0);
		/* set loop on current element */
		*next_offset = args_common->obj_doffset;
		*prev_offset = args_common->obj_doffset;
		/* update head */
		list_update_head(pop, ctx, args->head,
			args_common->obj_doffset);
	} else {
		if (args->before) {
			/* inserting before dest */
			list_insert_before(pop, ctx, args, args_common,
				next_offset, prev_offset);
			if (args->dest.off == args->head->pe_first.off) {
				/* current element at first position */
				list_update_head(pop, ctx, args->head,
					args_common->obj_doffset);
			}
		} else {
			/* inserting after dest */
			list_insert_after(pop, ctx, args, args_common,
				next_offset, prev_offset);
		}
	}
	return 0;
}
/*
 * list_insert_new -- allocate and insert element to oob and user lists
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, must be locked if not NULL
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation, will be increased by OBJ_OOB_SIZE
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
 *
 * Returns 0 on success, -1 on allocation failure.  The allocation is
 * first only reserved (transient), then published together with all the
 * queued list-link updates in one redo-log transaction.
 */
static int
list_insert_new(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
	size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
	size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
	LOG(3, NULL);
	ASSERT(user_head != NULL);
	int ret;
#ifdef DEBUG
	int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
	ASSERTeq(r, 0);
#endif
	struct lane *lane;
	lane_hold(pop, &lane);
	struct pobj_action reserved;
	if (palloc_reserve(&pop->heap, size, constructor, arg,
		type_num, 0, 0, &reserved) != 0) {
		ERR("!palloc_reserve");
		ret = -1;
		goto err_pmalloc;
	}
	uint64_t obj_doffset = reserved.heap.offset;
	struct operation_context *ctx = lane->external;
	operation_start(ctx);
	ASSERT((ssize_t)pe_offset >= 0);
	dest = list_get_dest(pop, user_head, dest,
		(ssize_t)pe_offset, before);
	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		obj_doffset + pe_offset);
	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		dest.off + pe_offset);
	struct list_args_insert args = {
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.head = user_head,
		.before = before,
	};
	struct list_args_common args_common = {
		.obj_doffset = obj_doffset,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};
	uint64_t next_offset;
	uint64_t prev_offset;
	/* insert element to user list */
	list_insert_user(pop,
		ctx, &args, &args_common,
		&next_offset, &prev_offset);
	/* don't need to use redo log for filling new element */
	list_fill_entry_persist(pop, entry_ptr,
		next_offset, prev_offset);
	if (oidp != NULL) {
		if (OBJ_PTR_IS_VALID(pop, oidp)) {
			/* oidp lives in pmem -- update it transactionally */
			list_set_oid_redo_log(pop, ctx,
				oidp, obj_doffset, 0);
		} else {
			/* transient oidp -- direct stores are fine */
			oidp->off = obj_doffset;
			oidp->pool_uuid_lo = pop->uuid_lo;
		}
	}
	/* allocation + linking become visible atomically here */
	palloc_publish(&pop->heap, &reserved, 1, ctx);
	ret = 0;
err_pmalloc:
	lane_release(pop);
	ASSERT(ret == 0 || ret == -1);
	return ret;
}
/*
 * list_insert_new_user -- allocate and insert element to oob and user lists
 *
 * Locking wrapper around list_insert_new.
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation, will be increased by OBJ_OOB_SIZE
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
 */
int
list_insert_new_user(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
	size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
	size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
	int err = pmemobj_mutex_lock(pop, &user_head->lock);
	if (err != 0) {
		errno = err;
		LOG(2, "pmemobj_mutex_lock failed");
		return -1;
	}

	int ret = list_insert_new(pop, pe_offset, user_head,
		dest, before, size, type_num, constructor, arg, oidp);

	pmemobj_mutex_unlock_nofail(pop, &user_head->lock);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}
/*
 * list_insert -- insert object to a single list
 *
 * pop - pmemobj handle
 * pe_offset - offset to list entry on user list relative to user data
 * head - list head
 * dest - destination object ID
 * before - before/after destination
 * oid - target object ID
 *
 * Returns 0 on success, -1 (with errno set) if the list lock cannot be
 * taken.  All link updates are published atomically via the lane's
 * external redo log.
 */
int
list_insert(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head,
	PMEMoid dest, int before,
	PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head, NULL);
	struct lane *lane;
	lane_hold(pop, &lane);
	int ret;
	if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		ret = -1;
		goto err;
	}
	struct operation_context *ctx = lane->external;
	operation_start(ctx);
	/* resolve implicit (OID_NULL) destination to head or tail */
	dest = list_get_dest(pop, head, dest, pe_offset, before);
	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		(uintptr_t)((ssize_t)oid.off + pe_offset));
	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		(uintptr_t)((ssize_t)dest.off + pe_offset));
	struct list_args_insert args = {
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.head = head,
		.before = before,
	};
	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};
	uint64_t next_offset;
	uint64_t prev_offset;
	/* insert element to user list */
	list_insert_user(pop, ctx,
		&args, &args_common, &next_offset, &prev_offset);
	/* fill entry of existing element using redo log */
	list_fill_entry_redo_log(pop, ctx,
		&args_common, next_offset, prev_offset, 1);
	operation_finish(ctx);
	pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
	lane_release(pop);
	ASSERT(ret == 0 || ret == -1);
	return ret;
}
/*
 * list_remove_free -- remove from two lists and free an object
 *
 * pop - pmemobj pool handle
 * oob_head - oob list head
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, *must* be locked if not NULL
 * oidp - pointer to target object ID
 *
 * The free is only deferred here; unlinking, oid clearing and the actual
 * deallocation are made durable together by palloc_publish.
 */
static void
list_remove_free(PMEMobjpool *pop, size_t pe_offset,
	struct list_head *user_head, PMEMoid *oidp)
{
	LOG(3, NULL);
	ASSERT(user_head != NULL);
#ifdef DEBUG
	int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
	ASSERTeq(r, 0);
#endif
	struct lane *lane;
	lane_hold(pop, &lane);
	struct operation_context *ctx = lane->external;
	operation_start(ctx);
	struct pobj_action deferred;
	palloc_defer_free(&pop->heap, oidp->off, &deferred);
	uint64_t obj_doffset = oidp->off;
	ASSERT((ssize_t)pe_offset >= 0);
	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		obj_doffset + pe_offset);
	struct list_args_remove args = {
		.pe_offset = (ssize_t)pe_offset,
		.head = user_head,
		.entry_ptr = entry_ptr,
		.obj_doffset = obj_doffset
	};
	/* remove from user list */
	list_remove_single(pop, ctx, &args);
	/* clear the oid */
	if (OBJ_PTR_IS_VALID(pop, oidp))
		list_set_oid_redo_log(pop, ctx, oidp, 0, 1);
	else
		oidp->off = 0;
	palloc_publish(&pop->heap, &deferred, 1, ctx);
	lane_release(pop);
}
/*
 * list_remove_free_user -- remove from two lists and free an object
 *
 * Locking wrapper around list_remove_free.
 *
 * pop - pmemobj pool handle
 * oob_head - oob list head
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
 * oidp - pointer to target object ID
 */
int
list_remove_free_user(PMEMobjpool *pop, size_t pe_offset,
	struct list_head *user_head, PMEMoid *oidp)
{
	LOG(3, NULL);

	int err = pmemobj_mutex_lock(pop, &user_head->lock);
	if (err != 0) {
		errno = err;
		LOG(2, "pmemobj_mutex_lock failed");
		return -1;
	}

	list_remove_free(pop, pe_offset, user_head, oidp);

	pmemobj_mutex_unlock_nofail(pop, &user_head->lock);

	return 0;
}
/*
 * list_remove -- remove object from list
 *
 * pop - pmemobj handle
 * pe_offset - offset to list entry on user list relative to user data
 * head - list head
 * oid - target object ID
 *
 * Returns 0 on success, -1 (with errno set) if the list lock cannot be
 * taken.  The object itself is not freed -- only unlinked.
 */
int
list_remove(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head,
	PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head, NULL);
	int ret;
	struct lane *lane;
	lane_hold(pop, &lane);
	if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		ret = -1;
		goto err;
	}
	struct operation_context *ctx = lane->external;
	operation_start(ctx);
	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		oid.off + (size_t)pe_offset);
	struct list_args_remove args = {
		.pe_offset = (ssize_t)pe_offset,
		.head = head,
		.entry_ptr = entry_ptr,
		.obj_doffset = oid.off,
	};
	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};
	/* remove element from user list */
	list_remove_single(pop, ctx, &args);
	/* clear next and prev offsets in removing element using redo log */
	list_fill_entry_redo_log(pop, ctx,
		&args_common, 0, 0, 0);
	operation_finish(ctx);
	pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
	lane_release(pop);
	ASSERT(ret == 0 || ret == -1);
	return ret;
}
/*
 * list_move -- move object between two lists
 *
 * pop - pmemobj handle
 * pe_offset_old - offset to old list entry relative to user data
 * head_old - old list head
 * pe_offset_new - offset to new list entry relative to user data
 * head_new - new list head
 * dest - destination object ID
 * before - before/after destination
 * oid - target object ID
 *
 * Returns 0 on success, -1 (with errno set) on locking failure.  The
 * whole move is published atomically via the lane's external redo log.
 */
int
list_move(PMEMobjpool *pop,
	size_t pe_offset_old, struct list_head *head_old,
	size_t pe_offset_new, struct list_head *head_new,
	PMEMoid dest, int before, PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head_old, NULL);
	ASSERTne(head_new, NULL);
	int ret;
	struct lane *lane;
	lane_hold(pop, &lane);
	/*
	 * Grab locks in specified order to avoid dead-locks.
	 *
	 * XXX performance improvement: initialize oob locks at pool opening
	 */
	if ((ret = list_mutexes_lock(pop, head_new, head_old))) {
		errno = ret;
		LOG(2, "list_mutexes_lock failed");
		ret = -1;
		goto err;
	}
	struct operation_context *ctx = lane->external;
	operation_start(ctx);
	dest = list_get_dest(pop, head_new, dest,
		(ssize_t)pe_offset_new, before);
	struct list_entry *entry_ptr_old =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		oid.off + pe_offset_old);
	struct list_entry *entry_ptr_new =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		oid.off + pe_offset_new);
	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
		dest.off + pe_offset_new);
	if (head_old == head_new) {
		/* moving within the same list */
		if (dest.off == oid.off)
			goto unlock; /* no-op move onto itself */
		if (before && dest_entry_ptr->pe_prev.off == oid.off) {
			/* already directly before dest */
			if (head_old->pe_first.off != dest.off)
				goto unlock;
			/* rotate: make the moved element the new head */
			list_update_head(pop, ctx,
				head_old, oid.off);
			goto redo_last;
		}
		if (!before && dest_entry_ptr->pe_next.off == oid.off) {
			/* already directly after dest */
			if (head_old->pe_first.off != oid.off)
				goto unlock;
			/* rotate: advance the head past the moved element */
			list_update_head(pop, ctx,
				head_old, entry_ptr_old->pe_next.off);
			goto redo_last;
		}
	}
	ASSERT((ssize_t)pe_offset_old >= 0);
	struct list_args_remove args_remove = {
		.pe_offset = (ssize_t)pe_offset_old,
		.head = head_old,
		.entry_ptr = entry_ptr_old,
		.obj_doffset = oid.off,
	};
	struct list_args_insert args_insert = {
		.head = head_new,
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.before = before,
	};
	ASSERT((ssize_t)pe_offset_new >= 0);
	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr_new,
		.pe_offset = (ssize_t)pe_offset_new,
	};
	uint64_t next_offset;
	uint64_t prev_offset;
	/* remove element from user list */
	list_remove_single(pop, ctx, &args_remove);
	/* insert element to user list */
	list_insert_user(pop, ctx, &args_insert,
		&args_common, &next_offset, &prev_offset);
	/* offsets differ, move is between different list entries - set uuid */
	int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0;
	/* fill next and prev offsets of moving element using redo log */
	list_fill_entry_redo_log(pop, ctx,
		&args_common, next_offset, prev_offset, set_uuid);
/* NOTE(review): both labels fall through to the same epilogue here */
redo_last:
unlock:
	operation_finish(ctx);
	list_mutexes_unlock(pop, head_new, head_old);
err:
	lane_release(pop);
	ASSERT(ret == 0 || ret == -1);
	return ret;
}
| 25,726 | 25.604964 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/palloc.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* palloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*
* Lock ordering in the entirety of the allocator is simple, but might be hard
* to follow at times because locks are, by necessity, externalized.
* There are two sets of locks that need to be taken into account:
* - runtime state locks, represented by buckets.
* - persistent state locks, represented by memory block mutexes.
*
* To properly use them, follow these rules:
* - When nesting, always lock runtime state first.
* Doing the reverse might cause deadlocks in other parts of the code.
*
* - When introducing functions that would require runtime state locks,
* always try to move the lock acquiring to the upper most layer. This
* usually means that the functions will simply take "struct bucket" as
* their argument. By doing so most of the locking can happen in
* the frontend part of the allocator and it's easier to follow the first
* rule because all functions in the backend can safely use the persistent
* state locks - the runtime lock, if it is needed, will be already taken
* by the upper layer.
*/
#include "valgrind_internal.h"
#include "heap_layout.h"
#include "heap.h"
#include "alloc_class.h"
#include "out.h"
#include "sys_util.h"
#include "palloc.h"
/*
 * Internal mirror of the public struct pobj_action. The two must stay
 * the same size - palloc_reserve() enforces this with COMPILE_ERROR_ON,
 * so the public action can be reinterpret-cast to this layout.
 */
struct pobj_action_internal {
	/* type of operation (alloc/free vs set) */
	enum pobj_action_type type;
	/* not used */
	uint32_t padding;
	/*
	 * Action-specific lock that needs to be taken for the duration of
	 * an action.
	 */
	os_mutex_t *lock;
	/* action-specific data */
	union {
		/* valid only when type == POBJ_ACTION_TYPE_HEAP */
		struct {
			/* pool offset of the reserved/freed object */
			uint64_t offset;
			/* state the memory block transitions into on publish */
			enum memblock_state new_state;
			struct memory_block m;
			/*
			 * Bucket's active-reservation counter, or NULL;
			 * incremented on reserve, decremented on
			 * cancel/publish.
			 */
			int *resvp;
		};
		/* valid only when type == POBJ_ACTION_TYPE_MEM */
		struct {
			/* destination of the deferred 8-byte store */
			uint64_t *ptr;
			uint64_t value;
		};
		/* padding, not used */
		uint64_t data2[14];
	};
};
/*
 * palloc_set_value -- creates a new set memory action
 *
 * Records a deferred 8-byte store (*ptr = value) that will be applied
 * atomically with the rest of the published actions. No lock is needed
 * for this action type.
 */
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
	uint64_t *ptr, uint64_t value)
{
	struct pobj_action_internal *internal =
		(struct pobj_action_internal *)act;

	internal->type = POBJ_ACTION_TYPE_MEM;
	internal->lock = NULL;
	internal->ptr = ptr;
	internal->value = value;
}
/*
 * alloc_prep_block -- (internal) prepares a memory block for allocation
 *
 * Once the block is fully reserved and it's guaranteed that no one else will
 * be able to write to this memory region it is safe to write the allocation
 * header and call the object construction function.
 *
 * Because the memory block at this stage is only reserved in transient state
 * there's no need to worry about fail-safety of this method because in case
 * of a crash the memory will be back in the free blocks collection.
 *
 * Returns 0 on success; a non-zero constructor return value is passed
 * through to the caller (the reservation must then be rolled back).
 * On success *offset_value receives the pool-relative offset of the
 * user-visible data.
 */
static int
alloc_prep_block(struct palloc_heap *heap, const struct memory_block *m,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags,
	uint64_t *offset_value)
{
	void *uptr = m->m_ops->get_user_data(m);
	size_t usize = m->m_ops->get_user_size(m);
	/* tell valgrind this region is now an allocated, undefined object */
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(uptr, usize);
	VALGRIND_ANNOTATE_NEW_MEMORY(uptr, usize);
	m->m_ops->write_header(m, extra_field, object_flags);
	/*
	 * Set allocated memory with pattern, if debug.heap.alloc_pattern CTL
	 * parameter had been set.
	 */
	if (unlikely(heap->alloc_pattern > PALLOC_CTL_DEBUG_NO_PATTERN)) {
		pmemops_memset(&heap->p_ops, uptr, heap->alloc_pattern,
			usize, 0);
		VALGRIND_DO_MAKE_MEM_UNDEFINED(uptr, usize);
	}
	int ret;
	if (constructor != NULL &&
		(ret = constructor(heap->base, uptr, usize, arg)) != 0) {
		/*
		 * If canceled, revert the block back to the free state in vg
		 * machinery.
		 */
		VALGRIND_DO_MEMPOOL_FREE(heap->layout, uptr);
		return ret;
	}
	/*
	 * To avoid determining the user data pointer twice this method is also
	 * responsible for calculating the offset of the object in the pool that
	 * will be used to set the offset destination pointer provided by the
	 * caller.
	 */
	*offset_value = HEAP_PTR_TO_OFF(heap, uptr);
	return 0;
}
/*
 * palloc_reservation_create -- creates a volatile reservation of a
 *	memory block.
 *
 * The first step in the allocation of a new block is reserving it in
 * the transient heap - which is represented by the bucket abstraction.
 *
 * To provide optimal scaling for multi-threaded applications and reduce
 * fragmentation the appropriate bucket is chosen depending on the
 * current thread context and to which allocation class the requested
 * size falls into.
 *
 * Once the bucket is selected, just enough memory is reserved for the
 * requested size. The underlying block allocation algorithm
 * (best-fit, next-fit, ...) varies depending on the bucket container.
 *
 * Returns 0 on success; on failure returns -1 with errno set
 * (EINVAL for an unusable size/class, ECANCELED if the constructor
 * rejected the block, or an error from the block search).
 */
static int
palloc_reservation_create(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action_internal *out)
{
	int err = 0;
	struct memory_block *new_block = &out->m;
	out->type = POBJ_ACTION_TYPE_HEAP;
	ASSERT(class_id < UINT8_MAX);
	struct alloc_class *c = class_id == 0 ?
		heap_get_best_class(heap, size) :
		alloc_class_by_id(heap_alloc_classes(heap),
			(uint8_t)class_id);
	if (c == NULL) {
		/* %zu - size is a size_t, %lu would be wrong on LLP64 */
		ERR("no allocation class for size %zu bytes", size);
		errno = EINVAL;
		return -1;
	}
	/*
	 * The caller provided size in bytes, but buckets operate in
	 * 'size indexes' which are multiples of the block size in the
	 * bucket.
	 *
	 * For example, to allocate 500 bytes from a bucket that
	 * provides 256 byte blocks two memory 'units' are required.
	 */
	ssize_t size_idx = alloc_class_calc_size_idx(c, size);
	if (size_idx < 0) {
		ERR("allocation class not suitable for size %zu bytes",
			size);
		errno = EINVAL;
		return -1;
	}
	ASSERT(size_idx <= UINT32_MAX);
	*new_block = MEMORY_BLOCK_NONE;
	new_block->size_idx = (uint32_t)size_idx;
	struct bucket *b = heap_bucket_acquire(heap, c);
	err = heap_get_bestfit_block(heap, b, new_block);
	if (err != 0)
		goto out;
	if (alloc_prep_block(heap, new_block, constructor, arg,
		extra_field, object_flags, &out->offset) != 0) {
		/*
		 * Constructor returned non-zero value which means
		 * the memory block reservation has to be rolled back.
		 */
		if (new_block->type == MEMORY_BLOCK_HUGE) {
			bucket_insert_block(b, new_block);
		}
		err = ECANCELED;
		goto out;
	}
	/*
	 * Each as of yet unfulfilled reservation needs to be tracked in the
	 * runtime state.
	 * The memory block cannot be put back into the global state unless
	 * there are no active reservations.
	 */
	if ((out->resvp = bucket_current_resvp(b)) != NULL)
		util_fetch_and_add64(out->resvp, 1);
	out->lock = new_block->m_ops->get_lock(new_block);
	out->new_state = MEMBLOCK_ALLOCATED;
out:
	heap_bucket_release(heap, b);
	if (err == 0)
		return 0;
	errno = err;
	return -1;
}
/*
 * palloc_heap_action_exec -- executes a single heap action (alloc, free)
 *
 * Does not modify persistent state directly - it only appends the
 * required metadata change to the operation context; the change takes
 * effect when the context is processed.
 */
static void
palloc_heap_action_exec(struct palloc_heap *heap,
	const struct pobj_action_internal *act,
	struct operation_context *ctx)
{
#ifdef DEBUG
	/* a block must not already be in the state we are moving it to */
	if (act->m.m_ops->get_state(&act->m) == act->new_state) {
		ERR("invalid operation or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */
	/*
	 * The actual required metadata modifications are chunk-type
	 * dependent, but it always is a modification of a single 8 byte
	 * value - either modification of few bits in a bitmap or
	 * changing a chunk type from free to used or vice versa.
	 */
	act->m.m_ops->prep_hdr(&act->m, act->new_state, ctx);
}
/*
 * palloc_restore_free_chunk_state -- updates the runtime state of a free chunk.
 *
 * This function also takes care of coalescing of huge chunks.
 */
static void
palloc_restore_free_chunk_state(struct palloc_heap *heap,
	struct memory_block *m)
{
	/* only huge chunks are tracked in the default bucket */
	if (m->type != MEMORY_BLOCK_HUGE)
		return;

	struct bucket *defb = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);

	if (heap_free_chunk_reuse(heap, defb, m) != 0) {
		if (errno == EEXIST) {
			FATAL(
				"duplicate runtime chunk state, possible double free");
		} else {
			LOG(2, "unable to track runtime chunk state");
		}
	}

	heap_bucket_release(heap, defb);
}
/*
 * palloc_mem_action_noop -- empty handler for unused memory action funcs
 *
 * POBJ_ACTION_TYPE_MEM actions need no cancel/process/unlock work, so
 * this stub fills those slots in the action_funcs dispatch table.
 * Both parameters are intentionally unused.
 */
static void
palloc_mem_action_noop(struct palloc_heap *heap,
	struct pobj_action_internal *act)
{
}
/*
* palloc_heap_action_on_cancel -- restores the state of the heap
*/
static void
palloc_heap_action_on_cancel(struct palloc_heap *heap,
struct pobj_action_internal *act)
{
if (act->new_state == MEMBLOCK_ALLOCATED) {
VALGRIND_DO_MEMPOOL_FREE(heap->layout,
act->m.m_ops->get_user_data(&act->m));
act->m.m_ops->invalidate(&act->m);
palloc_restore_free_chunk_state(heap, &act->m);
}
if (act->resvp)
util_fetch_and_sub64(act->resvp, 1);
}
/*
 * palloc_heap_action_on_process -- performs finalization steps under a lock
 *	on the persistent state
 *
 * Called after the persistent metadata changes have been processed;
 * updates allocation statistics, releases the reservation counter for
 * allocations, and re-registers freed memory with valgrind/pmemcheck.
 */
static void
palloc_heap_action_on_process(struct palloc_heap *heap,
	struct pobj_action_internal *act)
{
	if (act->new_state == MEMBLOCK_ALLOCATED) {
		STATS_INC(heap->stats, persistent, heap_curr_allocated,
			act->m.m_ops->get_real_size(&act->m));
		/* the reservation is now fulfilled - drop the counter */
		if (act->resvp)
			util_fetch_and_sub64(act->resvp, 1);
	} else if (act->new_state == MEMBLOCK_FREE) {
		if (On_valgrind) {
			void *ptr = act->m.m_ops->get_user_data(&act->m);
			size_t size = act->m.m_ops->get_real_size(&act->m);
			VALGRIND_DO_MEMPOOL_FREE(heap->layout, ptr);
			/*
			 * The sync module, responsible for implementations of
			 * persistent memory resident volatile variables,
			 * de-registers the pmemcheck pmem mapping at the time
			 * of initialization. This is done so that usage of
			 * pmem locks is not reported as an error due to
			 * missing flushes/stores outside of transaction. But,
			 * after we freed an object, we need to reestablish
			 * the pmem mapping, otherwise pmemchek might miss bugs
			 * that occurr in newly allocated memory locations, that
			 * once were occupied by a lock/volatile variable.
			 */
			VALGRIND_REGISTER_PMEM_MAPPING(ptr, size);
		}
		STATS_SUB(heap->stats, persistent, heap_curr_allocated,
			act->m.m_ops->get_real_size(&act->m));
		heap_memblock_on_free(heap, &act->m);
	}
}
/*
 * palloc_heap_action_on_unlock -- performs finalization steps that need to be
 *	performed without a lock on persistent state
 *
 * For frees this returns the chunk to the runtime free-block tracking,
 * which may itself acquire bucket locks.
 */
static void
palloc_heap_action_on_unlock(struct palloc_heap *heap,
	struct pobj_action_internal *act)
{
	if (act->new_state != MEMBLOCK_FREE)
		return;

	palloc_restore_free_chunk_state(heap, &act->m);
}
/*
 * palloc_mem_action_exec -- executes a single memory action (set, and, or)
 *
 * Appends the deferred 8-byte store to the operation context; the value
 * is written persistently when the context is processed.
 */
static void
palloc_mem_action_exec(struct palloc_heap *heap,
	const struct pobj_action_internal *act,
	struct operation_context *ctx)
{
	operation_add_entry(ctx, act->ptr, act->value, ULOG_OPERATION_SET);
}
/*
 * Per-action-type dispatch table; indexed by enum pobj_action_type.
 * POBJ_ACTION_TYPE_MEM needs no cancel/process/unlock work, so those
 * slots point at the noop handler.
 */
static struct {
	/*
	 * Translate action into some number of operation_entry'ies.
	 */
	void (*exec)(struct palloc_heap *heap,
		const struct pobj_action_internal *act,
		struct operation_context *ctx);
	/*
	 * Cancel any runtime state changes. Can be called only when action has
	 * not been translated to persistent operation yet.
	 */
	void (*on_cancel)(struct palloc_heap *heap,
		struct pobj_action_internal *act);
	/*
	 * Final steps after persistent state has been modified. Performed
	 * under action-specific lock.
	 */
	void (*on_process)(struct palloc_heap *heap,
		struct pobj_action_internal *act);
	/*
	 * Final steps after persistent state has been modified. Performed
	 * after action-specific lock has been dropped.
	 */
	void (*on_unlock)(struct palloc_heap *heap,
		struct pobj_action_internal *act);
} action_funcs[POBJ_MAX_ACTION_TYPE] = {
	[POBJ_ACTION_TYPE_HEAP] = {
		.exec = palloc_heap_action_exec,
		.on_cancel = palloc_heap_action_on_cancel,
		.on_process = palloc_heap_action_on_process,
		.on_unlock = palloc_heap_action_on_unlock,
	},
	[POBJ_ACTION_TYPE_MEM] = {
		.exec = palloc_mem_action_exec,
		.on_cancel = palloc_mem_action_noop,
		.on_process = palloc_mem_action_noop,
		.on_unlock = palloc_mem_action_noop,
	}
};
/*
* palloc_action_compare -- compares two actions based on lock address
*/
static int
palloc_action_compare(const void *lhs, const void *rhs)
{
const struct pobj_action_internal *mlhs = lhs;
const struct pobj_action_internal *mrhs = rhs;
uintptr_t vlhs = (uintptr_t)(mlhs->lock);
uintptr_t vrhs = (uintptr_t)(mrhs->lock);
if (vlhs < vrhs)
return -1;
if (vlhs > vrhs)
return 1;
return 0;
}
/*
 * palloc_exec_actions -- perform the provided free/alloc operations
 *
 * Three passes over the (lock-sorted) action array:
 *	1) acquire each distinct lock and append metadata changes to ctx,
 *	2) process the persistent operation, run on_process handlers and
 *	   release the locks,
 *	3) run on_unlock handlers with no persistent-state locks held.
 */
static void
palloc_exec_actions(struct palloc_heap *heap,
	struct operation_context *ctx,
	struct pobj_action_internal *actv,
	size_t actvcnt)
{
	/*
	 * The operations array is sorted so that proper lock ordering is
	 * ensured.
	 */
	qsort(actv, actvcnt, sizeof(struct pobj_action_internal),
		palloc_action_compare);
	struct pobj_action_internal *act;
	for (size_t i = 0; i < actvcnt; ++i) {
		act = &actv[i];
		/*
		 * This lock must be held for the duration between the creation
		 * of the allocation metadata updates in the operation context
		 * and the operation processing. This is because a different
		 * thread might operate on the same 8-byte value of the run
		 * bitmap and override allocation performed by this thread.
		 */
		if (i == 0 || act->lock != actv[i - 1].lock) {
			/* adjacent equal locks are taken only once */
			if (act->lock)
				util_mutex_lock(act->lock);
		}
		/* translate action to some number of operation_entry'ies */
		action_funcs[act->type].exec(heap, act, ctx);
	}
	/* wait for all allocated object headers to be persistent */
	pmemops_drain(&heap->p_ops);
	/* perform all persistent memory operations */
	operation_finish(ctx);
	for (size_t i = 0; i < actvcnt; ++i) {
		act = &actv[i];
		action_funcs[act->type].on_process(heap, act);
		/* unlock mirrors the dedup logic of the lock pass above */
		if (i == 0 || act->lock != actv[i - 1].lock) {
			if (act->lock)
				util_mutex_unlock(act->lock);
		}
	}
	for (size_t i = 0; i < actvcnt; ++i) {
		act = &actv[i];
		action_funcs[act->type].on_unlock(heap, act);
	}
}
/*
 * palloc_reserve -- creates a single reservation
 *
 * Thin public wrapper over palloc_reservation_create(); the public
 * struct pobj_action is reinterpreted as the internal representation,
 * which is guaranteed to have the same size.
 */
int
palloc_reserve(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action *act)
{
	COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
		sizeof(struct pobj_action_internal));

	struct pobj_action_internal *internal =
		(struct pobj_action_internal *)act;

	return palloc_reservation_create(heap, size, constructor, arg,
		extra_field, object_flags, class_id, internal);
}
/*
 * palloc_defer_free_create -- creates an internal deferred free action
 *
 * Translates a pool offset back into a memory block and fills in the
 * action so that publishing it transitions the block to MEMBLOCK_FREE.
 */
static void
palloc_defer_free_create(struct palloc_heap *heap, uint64_t off,
	struct pobj_action_internal *out)
{
	COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
		sizeof(struct pobj_action_internal));

	out->type = POBJ_ACTION_TYPE_HEAP;
	out->offset = off;
	out->m = memblock_from_offset(heap, off);
	out->new_state = MEMBLOCK_FREE;
	out->resvp = NULL;

	/*
	 * For the duration of free we may need to protect surrounding
	 * metadata from being modified.
	 */
	out->lock = out->m.m_ops->get_lock(&out->m);
}
/*
 * palloc_defer_free -- creates a deferred free action
 *
 * Public wrapper over palloc_defer_free_create() operating on the
 * opaque struct pobj_action.
 */
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
	struct pobj_action *act)
{
	COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
		sizeof(struct pobj_action_internal));

	struct pobj_action_internal *internal =
		(struct pobj_action_internal *)act;

	palloc_defer_free_create(heap, off, internal);
}
/*
 * palloc_cancel -- cancels all reservations in the array
 *
 * Runs the type-specific on_cancel handler for every action; none of
 * the actions may have been published yet.
 */
void
palloc_cancel(struct palloc_heap *heap,
	struct pobj_action *actv, size_t actvcnt)
{
	for (size_t i = 0; i < actvcnt; ++i) {
		struct pobj_action_internal *act =
			(struct pobj_action_internal *)&actv[i];
		action_funcs[act->type].on_cancel(heap, act);
	}
}
/*
 * palloc_publish -- publishes all reservations in the array
 *
 * Atomically applies every pending action through the given operation
 * context.
 */
void
palloc_publish(struct palloc_heap *heap,
	struct pobj_action *actv, size_t actvcnt,
	struct operation_context *ctx)
{
	struct pobj_action_internal *internal =
		(struct pobj_action_internal *)actv;

	palloc_exec_actions(heap, ctx, internal, actvcnt);
}
/*
 * palloc_operation -- persistent memory operation. Takes a NULL pointer
 * or an existing memory block and modifies it to occupy, at least, 'size'
 * number of bytes.
 *
 * The malloc, free and realloc routines are implemented in the context of this
 * common operation which encompasses all of the functionality usually done
 * separately in those methods.
 *
 * The first thing that needs to be done is determining which memory blocks
 * will be affected by the operation - this varies depending on the whether the
 * operation will need to modify or free an existing block and/or allocate
 * a new one.
 *
 * Simplified allocation process flow is as follows:
 *	- reserve a new block in the transient heap
 *	- prepare the new block
 *	- create redo log of required modifications
 *		- chunk metadata
 *		- offset of the new object
 *	- commit and process the redo log
 *
 * And similarly, the deallocation process:
 *	- create redo log of required modifications
 *		- reverse the chunk metadata back to the 'free' state
 *		- set the destination of the object offset to zero
 *	- commit and process the redo log
 * There's an important distinction in the deallocation process - it does not
 * return the memory block to the transient container. That is done once no more
 * memory is available.
 *
 * Reallocation is a combination of the above, with one additional step
 * of copying the old content.
 *
 * Returns 0 on success, -1 (with errno set) on failure; on failure the
 * operation context is canceled and no state is modified.
 */
int
palloc_operation(struct palloc_heap *heap,
	uint64_t off, uint64_t *dest_off, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct operation_context *ctx)
{
	size_t user_size = 0;
	/* at most two actions: one dealloc (realloc/free) + one alloc */
	size_t nops = 0;
	struct pobj_action_internal ops[2];
	struct pobj_action_internal *alloc = NULL;
	struct pobj_action_internal *dealloc = NULL;
	/*
	 * The offset of an existing block can be nonzero which means this
	 * operation is either free or a realloc - either way the offset of the
	 * object needs to be translated into memory block, which is a structure
	 * that all of the heap methods expect.
	 */
	if (off != 0) {
		dealloc = &ops[nops++];
		palloc_defer_free_create(heap, off, dealloc);
		user_size = dealloc->m.m_ops->get_user_size(&dealloc->m);
		/* realloc to the same usable size is a no-op */
		if (user_size == size) {
			operation_cancel(ctx);
			return 0;
		}
	}
	/* alloc or realloc */
	if (size != 0) {
		alloc = &ops[nops++];
		if (palloc_reservation_create(heap, size, constructor, arg,
			extra_field, object_flags, class_id, alloc) != 0) {
			operation_cancel(ctx);
			return -1;
		}
	}
	/* realloc */
	if (alloc != NULL && dealloc != NULL) {
		/* copy data to newly allocated memory */
		size_t old_size = user_size;
		size_t to_cpy = old_size > size ? size : old_size;
		VALGRIND_ADD_TO_TX(
			HEAP_OFF_TO_PTR(heap, alloc->offset),
			to_cpy);
		pmemops_memcpy(&heap->p_ops,
			HEAP_OFF_TO_PTR(heap, alloc->offset),
			HEAP_OFF_TO_PTR(heap, off),
			to_cpy,
			0);
		VALGRIND_REMOVE_FROM_TX(
			HEAP_OFF_TO_PTR(heap, alloc->offset),
			to_cpy);
	}
	/*
	 * If the caller provided a destination value to update, it needs to be
	 * modified atomically alongside the heap metadata, and so the operation
	 * context must be used.
	 */
	if (dest_off) {
		operation_add_entry(ctx, dest_off,
			alloc ? alloc->offset : 0, ULOG_OPERATION_SET);
	}
	/* and now actually perform the requested operation! */
	palloc_exec_actions(heap, ctx, ops, nops);
	return 0;
}
/*
 * palloc_usable_size -- returns the number of bytes in the memory block
 */
size_t
palloc_usable_size(struct palloc_heap *heap, uint64_t off)
{
	struct memory_block mb = memblock_from_offset(heap, off);

	return mb.m_ops->get_user_size(&mb);
}
/*
 * palloc_extra -- returns allocation extra field
 */
uint64_t
palloc_extra(struct palloc_heap *heap, uint64_t off)
{
	struct memory_block mb = memblock_from_offset(heap, off);

	return mb.m_ops->get_extra(&mb);
}
/*
 * palloc_flags -- returns allocation flags
 */
uint16_t
palloc_flags(struct palloc_heap *heap, uint64_t off)
{
	struct memory_block mb = memblock_from_offset(heap, off);

	return mb.m_ops->get_flags(&mb);
}
/*
 * pmalloc_search_cb -- (internal) foreach callback.
 *
 * Stores the first object that differs from the search anchor into
 * *arg and stops the iteration.
 */
static int
pmalloc_search_cb(const struct memory_block *m, void *arg)
{
	struct memory_block *found = arg;

	/* the anchor object itself is not a result - keep iterating */
	if (MEMORY_BLOCK_EQUALS(*m, *found))
		return 0;

	*found = *m;

	return 1; /* stop the search */
}
/*
 * palloc_first -- returns the first object from the heap.
 *
 * Returns 0 when the heap contains no objects.
 */
uint64_t
palloc_first(struct palloc_heap *heap)
{
	struct memory_block search = MEMORY_BLOCK_NONE;

	heap_foreach_object(heap, pmalloc_search_cb,
		&search, MEMORY_BLOCK_NONE);

	if (MEMORY_BLOCK_IS_NONE(search))
		return 0;

	return HEAP_PTR_TO_OFF(heap,
		search.m_ops->get_user_data(&search));
}
/*
 * palloc_next -- returns the next object relative to 'off'.
 *
 * Returns 0 when 'off' is the last object in the heap.
 */
uint64_t
palloc_next(struct palloc_heap *heap, uint64_t off)
{
	struct memory_block anchor = memblock_from_offset(heap, off);
	struct memory_block search = anchor;

	heap_foreach_object(heap, pmalloc_search_cb, &search, anchor);

	if (MEMORY_BLOCK_IS_NONE(search) ||
	    MEMORY_BLOCK_EQUALS(search, anchor))
		return 0;

	return HEAP_PTR_TO_OFF(heap,
		search.m_ops->get_user_data(&search));
}
/*
 * palloc_boot -- initializes allocator section
 *
 * Thin pass-through to heap_boot(); restores the volatile heap state
 * from the persistent layout.
 */
int
palloc_boot(struct palloc_heap *heap, void *heap_start,
		uint64_t heap_size, uint64_t *sizep,
		void *base, struct pmem_ops *p_ops, struct stats *stats,
		struct pool_set *set)
{
	return heap_boot(heap, heap_start, heap_size, sizep,
		base, p_ops, stats, set);
}
/*
 * palloc_buckets_init -- initialize buckets
 *
 * Pass-through to heap_buckets_init().
 */
int
palloc_buckets_init(struct palloc_heap *heap)
{
	return heap_buckets_init(heap);
}
/*
 * palloc_init -- initializes palloc heap
 *
 * Pass-through to heap_init(); creates the persistent heap layout.
 */
int
palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
	struct pmem_ops *p_ops)
{
	return heap_init(heap_start, heap_size, sizep, p_ops);
}
/*
 * palloc_heap_end -- returns first address after heap
 */
void *
palloc_heap_end(struct palloc_heap *h)
{
	return heap_end(h);
}
/*
 * palloc_heap_check -- verifies heap state
 *
 * Pass-through to heap_check(); returns non-zero on corruption.
 */
int
palloc_heap_check(void *heap_start, uint64_t heap_size)
{
	return heap_check(heap_start, heap_size);
}
/*
 * palloc_heap_check_remote -- verifies state of remote replica
 */
int
palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
	struct remote_ops *ops)
{
	return heap_check_remote(heap_start, heap_size, ops);
}
/*
 * palloc_heap_cleanup -- cleanups the volatile heap state
 */
void
palloc_heap_cleanup(struct palloc_heap *heap)
{
	heap_cleanup(heap);
}
#if VG_MEMCHECK_ENABLED
/*
 * palloc_vg_register_alloc -- (internal) registers allocation header
 *	in Valgrind
 *
 * heap_vg_open() callback; 'arg' is the palloc heap. Always returns 0
 * so that the iteration continues over all objects.
 */
static int
palloc_vg_register_alloc(const struct memory_block *m, void *arg)
{
	struct palloc_heap *heap = arg;
	m->m_ops->reinit_header(m);
	void *uptr = m->m_ops->get_user_data(m);
	size_t usize = m->m_ops->get_user_size(m);
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
	VALGRIND_DO_MAKE_MEM_DEFINED(uptr, usize);
	return 0;
}
/*
 * palloc_heap_vg_open -- notifies Valgrind about heap layout
 *
 * When 'objects' is non-zero, every live object is also registered.
 */
void
palloc_heap_vg_open(struct palloc_heap *heap, int objects)
{
	heap_vg_open(heap, palloc_vg_register_alloc, heap, objects);
}
#endif
| 25,080 | 26.470975 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/cuckoo.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cuckoo.h -- internal definitions for cuckoo hash table
*/
#ifndef LIBPMEMOBJ_CUCKOO_H
#define LIBPMEMOBJ_CUCKOO_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* opaque cuckoo hash table handle */
struct cuckoo;
/* allocates an empty table; returns NULL on failure */
struct cuckoo *cuckoo_new(void);
/* frees the table (entries' values are not freed) */
void cuckoo_delete(struct cuckoo *c);
/* maps key to value; returns non-zero on failure */
int cuckoo_insert(struct cuckoo *c, uint64_t key, void *value);
/* removes the mapping and returns its value, or NULL if absent */
void *cuckoo_remove(struct cuckoo *c, uint64_t key);
/* returns the value mapped to key, or NULL if absent */
void *cuckoo_get(struct cuckoo *c, uint64_t key);
/* returns the table's capacity-related size */
size_t cuckoo_get_size(struct cuckoo *c);
#ifdef __cplusplus
}
#endif
#endif
| 2,122 | 33.803279 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memops.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
/* which of the context's two logs an entry belongs to */
enum operation_log_type {
	LOG_PERSISTENT, /* log of persistent modifications */
	LOG_TRANSIENT, /* log of transient memory modifications */
	MAX_OPERATION_LOG_TYPE
};
/* semantics of the underlying ulog: undo or redo */
enum log_type {
	LOG_TYPE_UNDO,
	LOG_TYPE_REDO,
	MAX_LOG_TYPE,
};
/* opaque handle for a single aggregated memory operation */
struct operation_context;
/* creates a context backed by the given ulog; caller owns the result */
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
	ulog_extend_fn extend, ulog_free_fn ulog_free,
	const struct pmem_ops *p_ops, enum log_type type);
/* (re)initializes the context's internal state */
void operation_init(struct operation_context *ctx);
/* begins a new operation; entries may be added afterwards */
void operation_start(struct operation_context *ctx);
/* resumes a previously interrupted operation */
void operation_resume(struct operation_context *ctx);
/* destroys the context */
void operation_delete(struct operation_context *ctx);
/* logs a modification of an arbitrarily-sized buffer */
int operation_add_buffer(struct operation_context *ctx,
		void *dest, void *src, size_t size, ulog_operation_type type);
/* logs a modification of a single 8-byte value (persistent log) */
int operation_add_entry(struct operation_context *ctx,
	void *ptr, uint64_t value, ulog_operation_type type);
/* like operation_add_entry, but with an explicit target log */
int operation_add_typed_entry(struct operation_context *ctx,
	void *ptr, uint64_t value,
	ulog_operation_type type, enum operation_log_type log_type);
/* ensures the log can hold at least new_capacity bytes */
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
/* applies all logged entries */
void operation_process(struct operation_context *ctx);
/* processes and then closes the operation */
void operation_finish(struct operation_context *ctx);
/* abandons the operation without applying it */
void operation_cancel(struct operation_context *ctx);
#ifdef __cplusplus
}
#endif
#endif
| 3,212 | 31.785714 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/libpmemobj_main.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemobj_main.c -- entry point for libpmemobj.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
/* library-wide constructor/destructor, defined elsewhere in the library */
void libpmemobj_init(void);
void libpmemobj_fini(void);
/*
 * DllMain -- Windows DLL entry point; runs library init on process
 * attach and cleanup on process detach. Thread attach/detach need no
 * per-thread work here.
 */
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
	switch (dwReason) {
	case DLL_PROCESS_ATTACH:
		libpmemobj_init();
		break;

	case DLL_THREAD_ATTACH:
	case DLL_THREAD_DETACH:
		break;

	case DLL_PROCESS_DETACH:
		libpmemobj_fini();
		break;
	}
	return TRUE;
}
| 2,184 | 34.241935 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmalloc.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
/* allocates 'size' bytes; stores the pool offset in *off */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
	uint64_t extra_field, uint16_t object_flags);
/* like pmalloc, but runs 'constructor' on the new object first */
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
/* resizes (or allocates/frees) the object at *off to 'size' bytes */
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
	uint64_t extra_field, uint16_t object_flags);
/* frees the object at *off and zeroes the offset */
void pfree(PMEMobjpool *pop, uint64_t *off);
/* external operation to be used together with context-aware palloc funcs */
/* acquires the pool's allocation operation context (started) */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
/* same as above, but the operation is not started */
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
/* releases the context acquired by one of the hold functions */
void pmalloc_operation_release(PMEMobjpool *pop);
/* registers allocator-related CTL entry points */
void pmalloc_ctl_register(PMEMobjpool *pop);
/* tears down the allocator's volatile state */
int pmalloc_cleanup(PMEMobjpool *pop);
/* boots the allocator's volatile state */
int pmalloc_boot(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 2,806 | 34.0875 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/recycler.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * recycler.h -- internal definitions of run recycler
 *
 * This is a container that stores runs that are currently not used by any of
 * the buckets.
 */
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
/* vector type used to hand back runs that became completely empty */
VEC(empty_runs, struct memory_block);
/* per-run bookkeeping entry kept by the recycler */
struct recycler_element {
	uint32_t max_free_block;
	uint32_t free_space;
	uint32_t chunk_id;
	uint32_t zone_id;
};
/* creates a recycler instance for the given heap */
struct recycler *recycler_new(struct palloc_heap *layout,
	size_t nallocs);
void recycler_delete(struct recycler *r);
/* computes the recycler element (free-space summary) for a memory block */
struct recycler_element recycler_element_new(struct palloc_heap *heap,
	const struct memory_block *m);
/* stores a run in the recycler; returns non-zero on failure */
int recycler_put(struct recycler *r, const struct memory_block *m,
	struct recycler_element element);
/* retrieves a run from the recycler into 'm'; returns non-zero if empty */
int recycler_get(struct recycler *r, struct memory_block *m);
void
recycler_pending_put(struct recycler *r,
	struct memory_block_reserved *m);
/* recalculates recycler state; returns runs that turned out to be empty */
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
	const struct memory_block *m);
#ifdef __cplusplus
}
#endif
#endif
| 2,734 | 30.802326 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/palloc.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * palloc.h -- internal definitions for persistent allocator
 */
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
/* runtime state of a single persistent heap instance */
struct palloc_heap {
	struct pmem_ops p_ops;	/* persist/flush/memcpy ops for this pool */
	struct heap_layout *layout;	/* on-media heap layout */
	struct heap_rt *rt;	/* volatile runtime heap state */
	uint64_t *sizep;	/* heap size location (see palloc_boot) */
	uint64_t growsize;
	struct stats *stats;
	struct pool_set *set;
	void *base;	/* base address used to resolve offsets */
	int alloc_pattern;	/* debug fill pattern; see PALLOC_CTL_DEBUG_NO_PATTERN */
};
struct memory_block;
/* object constructor; runs on newly allocated space before it is published */
typedef int (*palloc_constr)(void *base, void *ptr,
	size_t usable_size, void *arg);
/* single alloc/free/realloc step executed within operation context 'ctx' */
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
	size_t size, palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct operation_context *ctx);
/* reserves memory without publishing it; paired with palloc_publish/cancel */
int
palloc_reserve(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action *act);
/* records a deferred free in 'act'; executed on publish */
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
	struct pobj_action *act);
/* abandons previously reserved actions */
void
palloc_cancel(struct palloc_heap *heap,
	struct pobj_action *actv, size_t actvcnt);
/* makes previously reserved actions persistent as part of 'ctx' */
void
palloc_publish(struct palloc_heap *heap,
	struct pobj_action *actv, size_t actvcnt,
	struct operation_context *ctx);
/* action that sets '*ptr = value' when published */
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
	uint64_t *ptr, uint64_t value);
/* iteration over all live allocations (offset 0 terminates) */
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
/* per-allocation metadata queries */
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
/* heap lifecycle: boot (open), bucket init, create, check, cleanup */
int palloc_boot(struct palloc_heap *heap, void *heap_start,
	uint64_t heap_size, uint64_t *sizep,
	void *base, struct pmem_ops *p_ops,
	struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
	struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
	struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 4,336 | 30.427536 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * container.h -- internal definitions for block containers
 *
 * A block container is the volatile data structure from which buckets draw
 * free memory blocks; the concrete implementation (and thus the fit policy)
 * is chosen at creation time via 'c_ops'.
 */
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
/* base "class" embedded at the start of every concrete container */
struct block_container {
	struct block_container_ops *c_ops;
	struct palloc_heap *heap;
};
/* virtual operations table implemented by each container flavor */
struct block_container_ops {
	/* inserts a new memory block into the container */
	int (*insert)(struct block_container *c, const struct memory_block *m);
	/* removes exact match memory block */
	int (*get_rm_exact)(struct block_container *c,
		const struct memory_block *m);
	/* removes and returns the best-fit memory block for size */
	int (*get_rm_bestfit)(struct block_container *c,
		struct memory_block *m);
	/* finds exact match memory block */
	int (*get_exact)(struct block_container *c,
		const struct memory_block *m);
	/* checks whether the container is empty */
	int (*is_empty)(struct block_container *c);
	/* removes all elements from the container */
	void (*rm_all)(struct block_container *c);
	/* deletes the container */
	void (*destroy)(struct block_container *c);
};
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 2,751 | 32.560976 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ravl.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ravl.c -- implementation of a RAVL tree
* http://sidsen.azurewebsites.net//papers/ravl-trees-journal.pdf
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "ravl.h"
/* default node payload: just big enough to store one pointer */
#define RAVL_DEFAULT_DATA_SIZE (sizeof(void *))
/* child slot identifiers; RAVL_ROOT means "no parent" */
enum ravl_slot_type {
	RAVL_LEFT,
	RAVL_RIGHT,
	MAX_SLOTS,
	RAVL_ROOT
};
struct ravl_node {
	struct ravl_node *parent;	/* NULL for the root node */
	struct ravl_node *slots[MAX_SLOTS];	/* left/right children */
	int32_t rank; /* cannot be greater than height of the subtree */
	int32_t pointer_based;	/* payload is a stored pointer (see ravl_data) */
	char data[];	/* flexible array member: 'data_size' payload bytes */
};
struct ravl {
	struct ravl_node *root;
	ravl_compare *compare;	/* user-supplied key comparator */
	size_t data_size;	/* payload bytes embedded in every node */
};
/*
 * ravl_new_sized -- allocates a tree whose nodes each embed 'data_size'
 * bytes of user data
 */
struct ravl *
ravl_new_sized(ravl_compare *compare, size_t data_size)
{
	struct ravl *tree = Malloc(sizeof(*tree));
	if (tree != NULL) {
		tree->root = NULL;
		tree->compare = compare;
		tree->data_size = data_size;
	}
	return tree;
}
/*
 * ravl_new -- creates a tree in which every node stores a single pointer
 */
struct ravl *
ravl_new(ravl_compare *compare)
{
	size_t dsize = RAVL_DEFAULT_DATA_SIZE;
	return ravl_new_sized(compare, dsize);
}
/*
 * ravl_clear_node -- (internal) frees the whole subtree rooted at 'node',
 * invoking 'cb' on each payload in in-order (left, self, right) fashion
 */
static void
ravl_clear_node(struct ravl_node *node, ravl_cb cb, void *arg)
{
	if (node == NULL)
		return;

	struct ravl_node *left = node->slots[RAVL_LEFT];
	struct ravl_node *right = node->slots[RAVL_RIGHT];

	ravl_clear_node(left, cb, arg);
	if (cb != NULL)
		cb((void *)node->data, arg);
	ravl_clear_node(right, cb, arg);

	Free(node);
}
/*
 * ravl_clear -- removes every node from the tree, leaving it empty
 */
void
ravl_clear(struct ravl *ravl)
{
	struct ravl_node *root = ravl->root;
	ravl->root = NULL;
	ravl_clear_node(root, NULL, NULL);
}
/*
 * ravl_delete_cb -- destroys the tree, first invoking 'cb' on every element
 */
void
ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg)
{
	struct ravl_node *root = ravl->root;
	ravl_clear_node(root, cb, arg);
	Free(ravl);
}
/*
* ravl_delete -- clears and deletes the given ravl instance
*/
void
ravl_delete(struct ravl *ravl)
{
ravl_delete_cb(ravl, NULL, NULL);
}
/*
 * ravl_empty -- returns non-zero if the tree holds no elements
 */
int
ravl_empty(struct ravl *ravl)
{
	return NULL == ravl->root;
}
/*
 * ravl_node_insert_constructor -- node data constructor for ravl_insert
 *
 * Stores the 'arg' pointer value itself inside the node (the node does not
 * own or copy the pointed-to bytes); data_size is ignored here because the
 * payload is exactly one pointer (RAVL_DEFAULT_DATA_SIZE).
 */
static void
ravl_node_insert_constructor(void *data, size_t data_size, const void *arg)
{
	/* copy only the 'arg' pointer */
	memcpy(data, &arg, sizeof(arg));
}
/*
 * ravl_node_copy_constructor -- node data constructor for ravl_emplace_copy
 *
 * Copies 'data_size' bytes from 'arg' into the node, so the tree owns its
 * own copy of the payload.
 */
static void
ravl_node_copy_constructor(void *data, size_t data_size, const void *arg)
{
	memcpy(data, arg, data_size);
}
/*
 * ravl_new_node -- (internal) allocates a node and constructs its payload
 */
static struct ravl_node *
ravl_new_node(struct ravl *ravl, ravl_constr constr, const void *arg)
{
	struct ravl_node *node = Malloc(sizeof(*node) + ravl->data_size);
	if (node == NULL)
		return NULL;

	node->parent = NULL;
	node->slots[RAVL_LEFT] = NULL;
	node->slots[RAVL_RIGHT] = NULL;
	node->rank = 0;
	/* remember whether the payload is a stored pointer (see ravl_data) */
	node->pointer_based = (constr == ravl_node_insert_constructor);
	constr(node->data, ravl->data_size, arg);

	return node;
}
/*
 * ravl_slot_opposite -- (internal) maps left<->right; must not be called
 * with RAVL_ROOT
 */
static enum ravl_slot_type
ravl_slot_opposite(enum ravl_slot_type t)
{
	ASSERTne(t, RAVL_ROOT);

	if (t == RAVL_LEFT)
		return RAVL_RIGHT;

	return RAVL_LEFT;
}
/*
 * ravl_node_slot_type -- (internal) tells whether the node is its parent's
 * left child, right child, or the tree root
 */
static enum ravl_slot_type
ravl_node_slot_type(struct ravl_node *n)
{
	struct ravl_node *p = n->parent;
	if (p == NULL)
		return RAVL_ROOT;

	if (p->slots[RAVL_LEFT] == n)
		return RAVL_LEFT;

	return RAVL_RIGHT;
}
/*
 * ravl_node_sibling -- (internal) returns the other child of the node's
 * parent, or NULL when the node is the root
 */
static struct ravl_node *
ravl_node_sibling(struct ravl_node *n)
{
	enum ravl_slot_type t = ravl_node_slot_type(n);
	if (t == RAVL_ROOT)
		return NULL;

	return n->parent->slots[ravl_slot_opposite(t)];
}
/*
 * ravl_node_ref -- (internal) returns the address of the slot (parent's
 * child pointer, or the tree's root pointer) that holds this node
 */
static struct ravl_node **
ravl_node_ref(struct ravl *ravl, struct ravl_node *n)
{
	enum ravl_slot_type t = ravl_node_slot_type(n);
	if (t == RAVL_ROOT)
		return &ravl->root;

	return &n->parent->slots[t];
}
/*
 * ravl_rotate -- (internal) performs a rotation around a given node
 *
 * The node n swaps place with its parent. If n is right child, parent becomes
 * the left child of n, otherwise parent becomes right child of n.
 */
static void
ravl_rotate(struct ravl *ravl, struct ravl_node *n)
{
	ASSERTne(n->parent, NULL);
	struct ravl_node *p = n->parent;
	/* where p is currently linked from (its parent's slot or the root) */
	struct ravl_node **pref = ravl_node_ref(ravl, p);
	enum ravl_slot_type t = ravl_node_slot_type(n);
	enum ravl_slot_type t_opposite = ravl_slot_opposite(t);
	/* n takes p's place in the tree ... */
	n->parent = p->parent;
	p->parent = n;
	*pref = n;
	/* ... n's inner subtree moves under p, in the slot n vacated ... */
	if ((p->slots[t] = n->slots[t_opposite]) != NULL)
		p->slots[t]->parent = p;
	/* ... and p becomes n's child on the opposite side */
	n->slots[t_opposite] = p;
}
/*
 * ravl_node_rank -- (internal) rank of the node; NULL nodes rank -1 for
 * the purpose of balancing
 */
static int
ravl_node_rank(struct ravl_node *n)
{
	if (n == NULL)
		return -1;

	return n->rank;
}
/*
 * ravl_node_rank_difference_parent -- (internal) returns the rank
 * difference between parent node p and its child n
 *
 * Every rank difference must be positive.
 *
 * Either of these can be NULL.
 */
static int
ravl_node_rank_difference_parent(struct ravl_node *p, struct ravl_node *n)
{
	int parent_rank = ravl_node_rank(p);
	int child_rank = ravl_node_rank(n);

	return parent_rank - child_rank;
}
/*
 * ravl_node_rank_difference -- (internal) rank difference between the
 * node's parent and the node itself
 *
 * Can be used to check if a given node is an i-child.
 */
static int
ravl_node_rank_difference(struct ravl_node *n)
{
	struct ravl_node *parent = n->parent;
	return ravl_node_rank_difference_parent(parent, n);
}
/*
 * ravl_node_is_i_j -- (internal) checks if a given node is strictly i,j-node
 * (left child at rank difference i, right child at rank difference j)
 */
static int
ravl_node_is_i_j(struct ravl_node *n, int i, int j)
{
	int left_diff = ravl_node_rank_difference_parent(n,
		n->slots[RAVL_LEFT]);
	int right_diff = ravl_node_rank_difference_parent(n,
		n->slots[RAVL_RIGHT]);

	return left_diff == i && right_diff == j;
}
/*
 * ravl_node_is -- (internal) checks if a given node is i,j-node or j,i-node
 */
static int
ravl_node_is(struct ravl_node *n, int i, int j)
{
	if (ravl_node_is_i_j(n, i, j))
		return 1;

	return ravl_node_is_i_j(n, j, i);
}
/*
 * ravl_node_promote -- promotes a given node by increasing its rank
 */
static void
ravl_node_promote(struct ravl_node *n)
{
	++n->rank;
}
/*
 * ravl_node_demote -- demotes a given node by decreasing its rank
 */
static void
ravl_node_demote(struct ravl_node *n)
{
	ASSERT(n->rank > 0);
	--n->rank;
}
/*
 * ravl_balance -- balances the tree after insert
 *
 * This function must restore the invariant that every rank
 * difference is positive.
 */
static void
ravl_balance(struct ravl *ravl, struct ravl_node *n)
{
	/* walk up the tree, promoting nodes */
	while (n->parent && ravl_node_is(n->parent, 0, 1)) {
		ravl_node_promote(n->parent);
		n = n->parent;
	}
	/*
	 * Either the rank rule holds or n is a 0-child whose sibling is an
	 * i-child with i > 1.
	 */
	struct ravl_node *s = ravl_node_sibling(n);
	if (!(ravl_node_rank_difference(n) == 0 &&
	    ravl_node_rank_difference_parent(n->parent, s) > 1))
		return;
	/* y is the parent that violates the rank rule and must lose rank */
	struct ravl_node *y = n->parent;
	/* if n is a left child, let z be n's right child and vice versa */
	enum ravl_slot_type t = ravl_slot_opposite(ravl_node_slot_type(n));
	struct ravl_node *z = n->slots[t];
	if (z == NULL || ravl_node_rank_difference(z) == 2) {
		/* single rotation: lift n above y, then demote y */
		ravl_rotate(ravl, n);
		ravl_node_demote(y);
	} else if (ravl_node_rank_difference(z) == 1) {
		/* double rotation: lift z above both n and y */
		ravl_rotate(ravl, z);
		ravl_rotate(ravl, z);
		ravl_node_promote(z);
		ravl_node_demote(n);
		ravl_node_demote(y);
	}
}
/*
* ravl_insert -- insert data into the tree
*/
int
ravl_insert(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_insert_constructor, data);
}
/*
* ravl_insert -- copy construct data inside of a new tree node
*/
int
ravl_emplace_copy(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_copy_constructor, data);
}
/*
 * ravl_emplace -- construct data inside of a new tree node
 *
 * Returns 0 on success, -1 on failure (errno is set to EEXIST when an
 * element with an equal key is already present).
 */
int
ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg)
{
	LOG(6, NULL);
	struct ravl_node *n = ravl_new_node(ravl, constr, arg);
	if (n == NULL)
		return -1;
	/* walk down the tree and insert the new node into a missing slot */
	struct ravl_node **dstp = &ravl->root;
	struct ravl_node *dst = NULL;
	while (*dstp != NULL) {
		dst = (*dstp);
		int cmp_result = ravl->compare(ravl_data(n), ravl_data(dst));
		if (cmp_result == 0)
			goto error_duplicate;
		/* go right for a greater key, left for a smaller one */
		dstp = &dst->slots[cmp_result > 0];
	}
	n->parent = dst;
	*dstp = n;
	/* restore the rank invariants possibly broken by the insertion */
	ravl_balance(ravl, n);
	return 0;
error_duplicate:
	errno = EEXIST;
	Free(n);
	return -1;
}
/*
 * ravl_node_type_most -- (internal) walks down the 't' side of the subtree
 * and returns its left-most or right-most node
 */
static struct ravl_node *
ravl_node_type_most(struct ravl_node *n, enum ravl_slot_type t)
{
	struct ravl_node *cur = n;
	for (; cur->slots[t] != NULL; cur = cur->slots[t])
		;
	return cur;
}
/*
 * ravl_node_cessor -- (internal) common helper for successor/predecessor:
 * t == RAVL_RIGHT yields the successor, t == RAVL_LEFT the predecessor
 */
static struct ravl_node *
ravl_node_cessor(struct ravl_node *n, enum ravl_slot_type t)
{
	/*
	 * If t child is present, the answer is the t-opposite-most node
	 * of the t child's subtree.
	 */
	struct ravl_node *child = n->slots[t];
	if (child != NULL)
		return ravl_node_type_most(child, ravl_slot_opposite(t));

	/* otherwise climb until we leave the t-side chain of ancestors */
	struct ravl_node *cur = n;
	while (cur->parent != NULL && cur->parent->slots[t] == cur)
		cur = cur->parent;

	return cur->parent;
}
/*
 * ravl_node_successor -- (internal) returns the in-order successor of the
 * node, i.e. the first node larger than n (NULL when there is none)
 */
static struct ravl_node *
ravl_node_successor(struct ravl_node *n)
{
	return ravl_node_cessor(n, RAVL_RIGHT);
}
/*
 * ravl_node_predecessor -- (internal) returns the in-order predecessor of
 * the node, i.e. the first node smaller than n (NULL when there is none)
 */
static struct ravl_node *
ravl_node_predecessor(struct ravl_node *n)
{
	return ravl_node_cessor(n, RAVL_LEFT);
}
/*
 * ravl_predicate_holds -- (internal) verifies the given predicate for
 * the current node in the search path
 *
 * If the predicate holds for the given node or a node that can be directly
 * derived from it, returns 1 (search can stop, *ret is the answer).
 * Otherwise returns 0 -- *ret may still have been updated with the best
 * candidate found so far, to be refined as the search descends.
 *
 * 'ravl' and 'data' are currently unused; 'result' is the comparator's
 * verdict for (data vs n's key).
 */
static int
ravl_predicate_holds(struct ravl *ravl, int result, struct ravl_node **ret,
	struct ravl_node *n, const void *data, enum ravl_predicate flags)
{
	if (flags & RAVL_PREDICATE_EQUAL) {
		if (result == 0) {
			*ret = n;
			return 1;
		}
	}
	if (flags & RAVL_PREDICATE_GREATER) {
		if (result < 0) { /* data < n->data */
			/* n is greater than data: candidate, keep searching */
			*ret = n;
			return 0;
		} else if (result == 0) {
			/* exact match: the strictly-greater node follows it */
			*ret = ravl_node_successor(n);
			return 1;
		}
	}
	if (flags & RAVL_PREDICATE_LESS) {
		if (result > 0) { /* data > n->data */
			/* n is less than data: candidate, keep searching */
			*ret = n;
			return 0;
		} else if (result == 0) {
			/* exact match: the strictly-smaller node precedes it */
			*ret = ravl_node_predecessor(n);
			return 1;
		}
	}
	return 0;
}
/*
 * ravl_find -- searches for a node in the tree that satisfies the given
 * predicate (equal / less / greater) with respect to 'data'
 */
struct ravl_node *
ravl_find(struct ravl *ravl, const void *data, enum ravl_predicate flags)
{
	LOG(6, NULL);
	/* best candidate found so far; NULL when nothing matched */
	struct ravl_node *r = NULL;
	struct ravl_node *n = ravl->root;
	while (n) {
		int result = ravl->compare(data, ravl_data(n));
		/* stop as soon as the predicate is definitely satisfied */
		if (ravl_predicate_holds(ravl, result, &r, n, data, flags))
			return r;
		/* descend right when data > n's key, left otherwise */
		n = n->slots[result > 0];
	}
	return r;
}
/*
 * ravl_remove -- removes the given node from the tree
 *
 * NOTE(review): removal performs no rank rebalancing -- this appears
 * intentional for RAVL trees (see the paper referenced at the top of this
 * file); confirm.
 */
void
ravl_remove(struct ravl *ravl, struct ravl_node *n)
{
	LOG(6, NULL);
	if (n->slots[RAVL_LEFT] != NULL && n->slots[RAVL_RIGHT] != NULL) {
		/* if both children are present, remove the successor instead */
		struct ravl_node *s = ravl_node_successor(n);
		/*
		 * Copy the successor's payload into this node and recurse to
		 * delete the now-duplicate successor node. The successor has
		 * at most one child, so the recursion ends after one step.
		 * NOTE(review): 'pointer_based' is not copied from s; assumes
		 * a single constructor kind is used per tree -- confirm.
		 */
		memcpy(n->data, s->data, ravl->data_size);
		ravl_remove(ravl, s);
	} else {
		/* swap n with the child that may exist */
		struct ravl_node *r = n->slots[RAVL_LEFT] ?
			n->slots[RAVL_LEFT] : n->slots[RAVL_RIGHT];
		if (r != NULL)
			r->parent = n->parent;
		/* relink the parent's (or root's) slot straight to the child */
		*ravl_node_ref(ravl, n) = r;
		Free(n);
	}
}
/*
 * ravl_data -- returns the data contained within the node: either the
 * stored pointer itself (pointer-based nodes) or the address of the
 * embedded payload
 */
void *
ravl_data(struct ravl_node *node)
{
	if (!node->pointer_based)
		return (void *)node->data;

	void *stored;
	memcpy(&stored, node->data, sizeof(stored));
	return stored;
}
| 13,775 | 22.27027 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/stats.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * stats.h -- definitions of statistics
 */
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
/* volatile counters; currently none are defined */
struct stats_transient {
	int unused;
};
/* counters kept in the pool (see heap_curr_allocated users) */
struct stats_persistent {
	uint64_t heap_curr_allocated;
};
/* per-pool statistics; all STATS_* macros are no-ops unless 'enabled' */
struct stats {
	int enabled;
	struct stats_transient *transient;
	struct stats_persistent *persistent;
};
/* atomically adds 'value' to the selected counter (if stats are enabled) */
#define STATS_INC(stats, type, name, value) do {\
	if ((stats)->enabled)\
	util_fetch_and_add64((&(stats)->type->name), (value));\
} while (0)
/* atomically subtracts 'value' from the selected counter */
#define STATS_SUB(stats, type, name, value) do {\
	if ((stats)->enabled)\
	util_fetch_and_sub64((&(stats)->type->name), (value));\
} while (0)
/* atomically stores 'value' into the selected counter (release ordering) */
#define STATS_SET(stats, type, name, value) do {\
	if ((stats)->enabled)\
	util_atomic_store_explicit64((&(stats)->type->name), (value),\
	memory_order_release);\
} while (0)
/* declares a read-only CTL leaf entry backed by the handler below */
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
/* generates a CTL read handler that atomically loads the given counter */
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
	PMEMobjpool *pop = ctx;\
	uint64_t *argv = arg;\
	util_atomic_load_explicit64(&pop->stats->type->varname,\
	argv, memory_order_acquire);\
	return 0;\
}
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 3,087 | 29.27451 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/bucket.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bucket.c -- bucket implementation
*
* Buckets manage volatile state of the heap. They are the abstraction layer
* between the heap-managed chunks/runs and memory allocations.
*
* Each bucket instance can have a different underlying container that is
* responsible for selecting blocks - which means that whether the allocator
* serves memory blocks in best/first/next -fit manner is decided during bucket
* creation.
*/
#include "alloc_class.h"
#include "bucket.h"
#include "heap.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
 * bucket_new -- creates a new bucket instance
 *
 * Takes ownership of 'c' on success; returns NULL on failure (in which case
 * the container is left untouched for the caller). For run-based allocation
 * classes an active-block reservation slot is allocated up front.
 */
struct bucket *
bucket_new(struct block_container *c, struct alloc_class *aclass)
{
	if (c == NULL)
		return NULL;
	struct bucket *b = Malloc(sizeof(*b));
	if (b == NULL)
		return NULL;
	b->container = c;
	b->c_ops = c->c_ops;	/* cached to avoid a double indirection */
	util_mutex_init(&b->lock);
	b->is_active = 0;
	b->active_memory_block = NULL;
	if (aclass && aclass->type == CLASS_RUN) {
		/* run buckets track the currently active (reserved) run */
		b->active_memory_block =
			Zalloc(sizeof(struct memory_block_reserved));
		if (b->active_memory_block == NULL)
			goto error_active_alloc;
	}
	b->aclass = aclass;
	return b;
error_active_alloc:
	/* unwind in reverse order of acquisition */
	util_mutex_destroy(&b->lock);
	Free(b);
	return NULL;
}
/*
 * bucket_insert_block -- inserts a block into the bucket
 *
 * Returns the underlying container's insert result (0 on success).
 */
int
bucket_insert_block(struct bucket *b, const struct memory_block *m)
{
#if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED
	if (On_valgrind) {
		size_t size = m->m_ops->get_real_size(m);
		void *data = m->m_ops->get_real_data(m);
		/*
		 * The block is going back into the free store: mark its
		 * memory inaccessible and reset its history for valgrind so
		 * stale accesses get reported.
		 */
		VALGRIND_DO_MAKE_MEM_NOACCESS(data, size);
		VALGRIND_ANNOTATE_NEW_MEMORY(data, size);
	}
#endif
	return b->c_ops->insert(b->container, m);
}
/*
* bucket_delete -- cleanups and deallocates bucket instance
*/
void
bucket_delete(struct bucket *b)
{
if (b->active_memory_block)
Free(b->active_memory_block);
util_mutex_destroy(&b->lock);
b->c_ops->destroy(b->container);
Free(b);
}
/*
* bucket_current_resvp -- returns the pointer to the current reservation count
*/
int *
bucket_current_resvp(struct bucket *b)
{
return b->active_memory_block ? &b->active_memory_block->nresv : NULL;
}
| 3,750 | 28.077519 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_seglists.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_seglists.c -- implementation of segregated lists block container
*
* This container is constructed from N (up to 64) intrusive lists and a
* single 8 byte bitmap that stores the information whether a given list is
* empty or not.
*/
#include "container_seglists.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#include "vecq.h"
/* number of lists; also the largest size_idx served (see insert's ASSERT) */
#define SEGLIST_BLOCK_LISTS 64U
struct block_container_seglists {
	struct block_container super;	/* base container header (c_ops) */
	struct memory_block m;	/* template block: chunk/zone shared by all entries */
	VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS];	/* FIFO of offsets per size */
	uint64_t nonempty_lists;	/* bit i set <=> blocks[i] is nonempty */
};
/*
* container_seglists_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_seglists_insert_block(struct block_container *bc,
const struct memory_block *m)
{
ASSERT(m->chunk_id < MAX_CHUNK);
ASSERT(m->zone_id < UINT16_MAX);
ASSERTne(m->size_idx, 0);
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
if (c->nonempty_lists == 0)
c->m = *m;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
ASSERT(m->chunk_id == c->m.chunk_id);
ASSERT(m->zone_id == c->m.zone_id);
if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
return -1;
/* marks the list as nonempty */
c->nonempty_lists |= 1ULL << (m->size_idx - 1);
return 0;
}
/*
 * container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
 *	best-fit memory block for size
 *
 * Masks out every list that serves blocks smaller than the request, then
 * picks the lowest set bit - i.e. the tightest fitting nonempty list.
 * Returns ENOMEM when no applicable list has any blocks.
 */
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
	struct memory_block *m)
{
	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);

	/* lists below the requested size index are not applicable */
	uint64_t too_small = (1ULL << (m->size_idx - 1)) - 1;
	uint64_t candidates = c->nonempty_lists & ~too_small;

	if (candidates == 0)
		return ENOMEM;

	/* the least significant set bit is the smallest applicable list */
	uint32_t list_idx = util_lssb_index64(candidates);

	uint32_t off = VECQ_DEQUEUE(&c->blocks[list_idx]);

	if (VECQ_SIZE(&c->blocks[list_idx]) == 0) /* list drained - clear bit */
		c->nonempty_lists &= ~(1ULL << (list_idx));

	*m = c->m;
	m->block_off = off;
	m->size_idx = list_idx + 1;

	return 0;
}
/*
 * container_seglists_is_empty -- (internal) checks whether the container is
 *	empty
 *
 * The nonempty-list bitmap is zero exactly when every list is empty.
 */
static int
container_seglists_is_empty(struct block_container *bc)
{
	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	return c->nonempty_lists == 0 ? 1 : 0;
}
/*
 * container_seglists_rm_all -- (internal) removes all elements from the
 *	container
 *
 * Clears every per-size list and resets the nonempty bitmap; the backing
 * storage of the lists is kept for reuse.
 */
static void
container_seglists_rm_all(struct block_container *bc)
{
	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	unsigned list_id;
	for (list_id = 0; list_id < SEGLIST_BLOCK_LISTS; ++list_id)
		VECQ_CLEAR(&c->blocks[list_id]);

	c->nonempty_lists = 0; /* all lists are now empty */
}
/*
 * container_seglists_destroy -- (internal) deletes the container
 *
 * Releases the storage backing each intrusive list and then frees the
 * container object itself.
 */
static void
container_seglists_destroy(struct block_container *bc)
{
	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	unsigned list_id;
	for (list_id = 0; list_id < SEGLIST_BLOCK_LISTS; ++list_id)
		VECQ_DELETE(&c->blocks[list_id]);

	Free(c);
}
/*
 * This container does not support retrieval of exact memory blocks, but other
 * than provides best-fit in O(1) time for unit sizes that do not exceed 64.
 */
static struct block_container_ops container_seglists_ops = {
	.insert = container_seglists_insert_block,
	.get_rm_exact = NULL, /* exact-block removal not supported */
	.get_rm_bestfit = container_seglists_get_rm_block_bestfit,
	.get_exact = NULL, /* exact-block lookup not supported */
	.is_empty = container_seglists_is_empty,
	.rm_all = container_seglists_rm_all,
	.destroy = container_seglists_destroy,
};
/*
 * container_new_seglists -- allocates and initializes a seglists container
 *
 * Returns a pointer to the embedded block_container on success, or NULL
 * when the allocation fails.
 */
struct block_container *
container_new_seglists(struct palloc_heap *heap)
{
	struct block_container_seglists *bc = Malloc(sizeof(*bc));
	if (bc == NULL)
		return NULL;

	bc->super.heap = heap;
	bc->super.c_ops = &container_seglists_ops;

	for (unsigned list_id = 0; list_id < SEGLIST_BLOCK_LISTS; ++list_id)
		VECQ_INIT(&bc->blocks[list_id]);

	bc->nonempty_lists = 0;

	/* super is the first member, so this is the same address as bc */
	return &bc->super;
}
| 5,744 | 27.440594 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_ravl.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_ravl.h -- internal definitions for ravl-based block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_RAVL_H
#define LIBPMEMOBJ_CONTAINER_RAVL_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_ravl(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_RAVL_H */
| 1,960 | 36 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/tx.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
struct tx_parameters {
	/* capacity of the tx range cache; cf. TX_DEFAULT_RANGE_CACHE_SIZE */
	size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 2,409 | 30.710526 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memblock.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
* chunks with size larger than 1 must have a footer in the last
* corresponding header array - chunk_headers[chunk_id - size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
* The only block that matches that size is at very end of the chunks
* list: |F......R|
*
* As the request was for memory block of size 5, and this ones size is
* 7 there's a need to first split the chunk in two.
* 1) The last chunk header of the new allocation is marked as footer
* and the block after that one is marked as free: |F...RF.R|
* This is allowed and has no impact on the heap because this
* modification is into chunk header that is otherwise unused, in
* other words the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses redo log or changes directly
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
* 2) Update the footer (if needed) information of the last chunk which
* is the memory block being freed or it's neighbor to the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
* The entire chunk header can be updated in a single fail-safe
* atomic operation because it's size is only 64 bytes.
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
* The entire chunk is subdivided into smaller blocks and has an
* additional metadata attached in the form of a bitmap - each bit
* corresponds to a single block.
* In this case there's no need to perform any coalescing or splitting
* on the persistent metadata.
* The bitmap is stored on a variable number of 64 bit values and
* because of the requirement of allocation fail-safe atomicity the
* maximum size index of a memory block from a run is 64 - since that's
* the limit of atomic write guarantee.
*
* The allocation/deallocation process is a single 8 byte write that
* sets/clears the corresponding bits. Depending on the user choice
* it can either be made atomically or using redo-log when grouped with
* other operations.
* It's also important to note that in a case of realloc it might so
* happen that a single 8 byte bitmap value has its bits both set and
* cleared - that's why the run memory block metadata changes operate
* on AND'ing or OR'ing a bitmask instead of directly setting the value.
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Checks the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessery, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
* Size index of the memory block represented in either multiple of
* CHUNKSIZE in the case of a huge chunk or in multiple of a run
* block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
* Number of preceding blocks in the chunk. In other words, the
* position of this memory block in run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
};
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
* the nresv variable can be in reservations made through palloc_reserve(). Only
* if the number of reservations equals 0 the structure can be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
/*
* Number of reservations made from this run, the pointer to this value
* is stored in a user facing pobj_action structure. Decremented once
* the reservation is published or canceled.
*/
int nresv;
};
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
#ifdef __cplusplus
}
#endif
#endif
| 11,746 | 35.481366 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memblock.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memblock.c -- implementation of memory block
*
* Memory block is a representation of persistent object that resides in the
* heap. A valid memory block must be either a huge (free or used) chunk or a
* block inside a run.
*
* Huge blocks are 1:1 correlated with the chunk headers in the zone whereas
* run blocks are represented by bits in corresponding chunk bitmap.
*
* This file contains implementations of abstract operations on memory blocks.
* Instead of storing the mbops structure inside each memory block the correct
* method implementation is chosen at runtime.
*/
#include <inttypes.h>
#include <string.h>

#include "obj.h"
#include "heap.h"
#include "memblock.h"
#include "out.h"
#include "valgrind_internal.h"
/* calculates the size of the entire run, including any additional chunks */
#define SIZEOF_RUN(runp, size_idx)\
(sizeof(*(runp)) + (((size_idx) - 1) * CHUNKSIZE))
/*
 * memblock_header_type -- determines the memory block's header type
 *
 * Decodes the chunk header flags; the compact flag wins over the
 * header-none flag, and legacy is the fallback when neither is set.
 */
static enum header_type
memblock_header_type(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	if (hdr->flags & CHUNK_FLAG_COMPACT_HEADER)
		return HEADER_COMPACT;

	return (hdr->flags & CHUNK_FLAG_HEADER_NONE) ?
		HEADER_NONE : HEADER_LEGACY;
}
/*
 * memblock_header_legacy_get_size --
 *	(internal) returns the size stored in a legacy header
 */
static size_t
memblock_header_legacy_get_size(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);

	return hdr->size;
}

/*
 * memblock_header_compact_get_size --
 *	(internal) returns the size stored in a compact header
 */
static size_t
memblock_header_compact_get_size(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);

	/* strip the flag bits that share the field (see *_get_flags) */
	return hdr->size & ALLOC_HDR_FLAGS_MASK;
}

/*
 * memblock_header_none_get_size --
 *	(internal) determines the sizes of an object without a header
 */
static size_t
memblock_header_none_get_size(const struct memory_block *m)
{
	/* with no header the size is implied by the block itself */
	return m->m_ops->block_size(m);
}

/*
 * memblock_header_legacy_get_extra --
 *	(internal) returns the extra field stored in a legacy header
 */
static uint64_t
memblock_header_legacy_get_extra(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);

	/* legacy headers keep the extra value in type_num (see *_write) */
	return hdr->type_num;
}

/*
 * memblock_header_compact_get_extra --
 *	(internal) returns the extra field stored in a compact header
 */
static uint64_t
memblock_header_compact_get_extra(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);

	return hdr->extra;
}

/*
 * memblock_header_none_get_extra --
 *	(internal) objects without a header don't have an extra field
 */
static uint64_t
memblock_header_none_get_extra(const struct memory_block *m)
{
	return 0;
}

/*
 * memblock_header_legacy_get_flags --
 *	(internal) returns the flags stored in a legacy header
 */
static uint16_t
memblock_header_legacy_get_flags(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);

	/* flags are stashed in the high bits of root_size (see *_write) */
	return (uint16_t)(hdr->root_size >> ALLOC_HDR_SIZE_SHIFT);
}

/*
 * memblock_header_compact_get_flags --
 *	(internal) returns the flags stored in a compact header
 */
static uint16_t
memblock_header_compact_get_flags(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);

	/* flags live above ALLOC_HDR_SIZE_SHIFT in the size field */
	return (uint16_t)(hdr->size >> ALLOC_HDR_SIZE_SHIFT);
}

/*
 * memblock_header_none_get_flags --
 *	(internal) objects without a header do not support flags
 */
static uint16_t
memblock_header_none_get_flags(const struct memory_block *m)
{
	return 0;
}
/*
 * memblock_header_legacy_write --
 *	(internal) writes a legacy header of an object
 *
 * Builds the header on the stack ('size', 'extra' as type_num, 'flags'
 * packed into the high bits of root_size) and copies it to pmem in one go.
 */
static void
memblock_header_legacy_write(const struct memory_block *m,
	size_t size, uint64_t extra, uint16_t flags)
{
	struct allocation_header_legacy hdr;
	hdr.size = size;
	hdr.type_num = extra;
	hdr.root_size = ((uint64_t)flags << ALLOC_HDR_SIZE_SHIFT);

	struct allocation_header_legacy *hdrp = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdrp, sizeof(*hdrp));

	VALGRIND_ADD_TO_TX(hdrp, sizeof(*hdrp));
	/*
	 * Relaxed, non-draining WC copy; presumably the caller performs the
	 * final drain that persists the header - TODO confirm.
	 */
	pmemops_memcpy(&m->heap->p_ops, hdrp, &hdr,
		sizeof(hdr), /* legacy header is 64 bytes in size */
		PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_RELAXED);
	VALGRIND_REMOVE_FROM_TX(hdrp, sizeof(*hdrp));

	/* unused fields of the legacy headers are used as a red zone */
	VALGRIND_DO_MAKE_MEM_NOACCESS(hdrp->unused, sizeof(hdrp->unused));
}
/*
 * memblock_header_compact_write --
 *	(internal) writes a compact header of an object
 *
 * Packs 'flags' into the top bits of the 'size' field (above
 * ALLOC_HDR_SIZE_SHIFT) alongside 'extra', then copies the header to pmem,
 * padding the write to a full cacheline when it is safe to do so.
 */
static void
memblock_header_compact_write(const struct memory_block *m,
	size_t size, uint64_t extra, uint16_t flags)
{
	COMPILE_ERROR_ON(ALLOC_HDR_COMPACT_SIZE > CACHELINE_SIZE);

	struct {
		struct allocation_header_compact hdr;
		uint8_t padding[CACHELINE_SIZE - ALLOC_HDR_COMPACT_SIZE];
	} padded;

	padded.hdr.size = size | ((uint64_t)flags << ALLOC_HDR_SIZE_SHIFT);
	padded.hdr.extra = extra;

	struct allocation_header_compact *hdrp = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdrp, sizeof(*hdrp));

	/*
	 * If possible write the entire header with a single memcpy, this allows
	 * the copy implementation to avoid a cache miss on a partial cache line
	 * write.
	 */
	size_t hdr_size = ALLOC_HDR_COMPACT_SIZE;
	if ((uintptr_t)hdrp % CACHELINE_SIZE == 0 && size >= sizeof(padded))
		hdr_size = sizeof(padded);

	VALGRIND_ADD_TO_TX(hdrp, hdr_size);

	pmemops_memcpy(&m->heap->p_ops, hdrp, &padded, hdr_size,
		PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_RELAXED);
	/* the bytes past the real header carry no meaningful data */
	VALGRIND_DO_MAKE_MEM_UNDEFINED((char *)hdrp + ALLOC_HDR_COMPACT_SIZE,
		hdr_size - ALLOC_HDR_COMPACT_SIZE);

	VALGRIND_REMOVE_FROM_TX(hdrp, hdr_size);
}
/*
 * memblock_header_none_write --
 *	(internal) nothing to write
 */
static void
memblock_header_none_write(const struct memory_block *m,
	size_t size, uint64_t extra, uint16_t flags)
{
	/* NOP */
}

/*
 * memblock_header_legacy_invalidate --
 *	(internal) invalidates a legacy header
 */
static void
memblock_header_legacy_invalidate(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
	/* Valgrind/pmemcheck bookkeeping only */
	VALGRIND_SET_CLEAN(hdr, sizeof(*hdr));
}

/*
 * memblock_header_compact_invalidate --
 *	(internal) invalidates a compact header
 */
static void
memblock_header_compact_invalidate(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
	/* Valgrind/pmemcheck bookkeeping only */
	VALGRIND_SET_CLEAN(hdr, sizeof(*hdr));
}

/*
 * memblock_header_none_invalidate --
 *	(internal) nothing to invalidate
 */
static void
memblock_header_none_invalidate(const struct memory_block *m)
{
	/* NOP */
}

/*
 * memblock_header_legacy_reinit --
 *	(internal) reinitializes a legacy header after a heap restart
 */
static void
memblock_header_legacy_reinit(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* unused fields of the legacy headers are used as a red zone */
	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr->unused, sizeof(hdr->unused));
}

/*
 * memblock_header_compact_reinit --
 *	(internal) reinitializes a compact header after a heap restart
 */
static void
memblock_header_compact_reinit(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
}

/*
 * memblock_header_none_reinit --
 *	(internal) nothing to reinitialize
 */
static void
memblock_header_none_reinit(const struct memory_block *m)
{
	/* NOP */
}
/*
 * memblock_header_ops -- per-header-type operation dispatch table,
 * indexed by enum header_type
 */
static struct {
	/* determines the sizes of an object */
	size_t (*get_size)(const struct memory_block *m);

	/* returns the extra field (if available, 0 if not) */
	uint64_t (*get_extra)(const struct memory_block *m);

	/* returns the flags stored in a header (if available, 0 if not) */
	uint16_t (*get_flags)(const struct memory_block *m);

	/*
	 * Stores size, extra info and flags in header of an object
	 * (if available, does nothing otherwise).
	 */
	void (*write)(const struct memory_block *m,
		size_t size, uint64_t extra, uint16_t flags);
	void (*invalidate)(const struct memory_block *m);

	/*
	 * Reinitializes a header after a heap restart (if available, does
	 * nothing otherwise) (VG).
	 */
	void (*reinit)(const struct memory_block *m);
} memblock_header_ops[MAX_HEADER_TYPES] = {
	[HEADER_LEGACY] = {
		memblock_header_legacy_get_size,
		memblock_header_legacy_get_extra,
		memblock_header_legacy_get_flags,
		memblock_header_legacy_write,
		memblock_header_legacy_invalidate,
		memblock_header_legacy_reinit,
	},
	[HEADER_COMPACT] = {
		memblock_header_compact_get_size,
		memblock_header_compact_get_extra,
		memblock_header_compact_get_flags,
		memblock_header_compact_write,
		memblock_header_compact_invalidate,
		memblock_header_compact_reinit,
	},
	[HEADER_NONE] = {
		memblock_header_none_get_size,
		memblock_header_none_get_extra,
		memblock_header_none_get_flags,
		memblock_header_none_write,
		memblock_header_none_invalidate,
		memblock_header_none_reinit,
	}
};
/*
 * memblock_run_default_nallocs -- returns the number of memory blocks
 *	available in a run with the given parameters, using the default
 *	fixed-size-bitmap algorithm
 *
 * May shrink *size_idx so that the number of units fits in the fixed
 * bitmap. An aligned run sacrifices one unit for alignment padding.
 */
static unsigned
memblock_run_default_nallocs(uint32_t *size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	/* flags do not influence the default bitmap geometry */
	(void) flags;

	unsigned nallocs = (unsigned)
		(RUN_DEFAULT_SIZE_BYTES(*size_idx) / unit_size);

	/*
	 * The fixed bitmap can track at most RUN_DEFAULT_BITMAP_NBITS units;
	 * shrink the run chunk by chunk until the unit count fits, or cap
	 * the count if the run is already a single chunk.
	 * Note: unit_size is uint64_t, so it must be printed with PRIu64
	 * ("%lu" is wrong on LLP64/32-bit targets).
	 */
	while (nallocs > RUN_DEFAULT_BITMAP_NBITS) {
		LOG(3, "tried to create a run (%" PRIu64 ") with number "
			"of units (%u) exceeding the bitmap size (%u)",
			unit_size, nallocs, RUN_DEFAULT_BITMAP_NBITS);
		if (*size_idx > 1) {
			*size_idx -= 1;
			/* recalculate the number of allocations */
			nallocs = (uint32_t)
				(RUN_DEFAULT_SIZE_BYTES(*size_idx) / unit_size);
			LOG(3, "run (%" PRIu64 ") was constructed with "
				"fewer (%u) than requested chunks (%u)",
				unit_size, *size_idx, *size_idx + 1);
		} else {
			LOG(3, "run (%" PRIu64 ") was constructed with "
				"fewer units (%u) than optimal (%u), "
				"this might lead to "
				"inefficient memory utilization!",
				unit_size,
				RUN_DEFAULT_BITMAP_NBITS, nallocs);
			nallocs = RUN_DEFAULT_BITMAP_NBITS;
		}
	}

	/* an aligned run reserves one unit for the alignment padding */
	return nallocs - (alignment ? 1 : 0);
}
/*
 * memblock_run_bitmap -- calculate bitmap parameters for given arguments
 *
 * size_idx [in/out] - number of chunks in the run; the flexible variant never
 *	modifies it, the default variant may shrink it (see
 *	memblock_run_default_nallocs)
 * flags - chunk flags; CHUNK_FLAG_FLEX_BITMAP selects the flexible layout
 * unit_size - size of a single allocation unit in the run
 * alignment - required alignment of the run's data (0 if none)
 * content - start of the run's content, where the bitmap values live
 * b [out] - the calculated bitmap description
 */
void
memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment, void *content,
	struct run_bitmap *b)
{
	ASSERTne(*size_idx, 0);
	/*
	 * Flexible bitmaps have a variably sized values array. The size varies
	 * depending on:
	 *	alignment - initial run alignment might require up-to a unit
	 *	size idx - the larger the run, the more units it carries
	 *	unit_size - the smaller the unit size, the more units per run
	 *
	 * The size of the bitmap also has to be calculated in such a way that
	 * the beginning of allocations data is cacheline aligned. This is
	 * required to perform many optimizations throughout the codebase.
	 * This alignment requirement means that some of the bitmap values might
	 * remain unused and will serve only as a padding for data.
	 */
	if (flags & CHUNK_FLAG_FLEX_BITMAP) {
		/*
		 * First calculate the number of values without accounting for
		 * the bitmap size.
		 */
		size_t content_size = RUN_CONTENT_SIZE_BYTES(*size_idx);
		b->nbits = (unsigned)(content_size / unit_size);
		b->nvalues = util_div_ceil(b->nbits, RUN_BITS_PER_VALUE);
		/*
		 * Then, align the number of values up, so that the cacheline
		 * alignment is preserved.
		 */
		b->nvalues = ALIGN_UP(b->nvalues + RUN_BASE_METADATA_VALUES, 8U)
			- RUN_BASE_METADATA_VALUES;
		/*
		 * This is the total number of bytes needed for the bitmap AND
		 * padding.
		 */
		b->size = b->nvalues * sizeof(*b->values);
		/*
		 * Calculate the number of allocations again, but this time
		 * accounting for the bitmap/padding.
		 */
		b->nbits = (unsigned)((content_size - b->size) / unit_size)
			- (alignment ? 1U : 0U);
		/*
		 * The last step is to calculate how much of the padding
		 * is left at the end of the bitmap.
		 */
		unsigned unused_bits = (b->nvalues * RUN_BITS_PER_VALUE)
			- b->nbits;
		unsigned unused_values = unused_bits / RUN_BITS_PER_VALUE;
		b->nvalues -= unused_values;
		b->values = (uint64_t *)content;
		return;
	}
	/* default, fixed-size bitmap layout */
	b->size = RUN_DEFAULT_BITMAP_SIZE;
	b->nbits = memblock_run_default_nallocs(size_idx, flags,
		unit_size, alignment);
	/* trim the trailing values that no bit can ever fall into */
	unsigned unused_bits = RUN_DEFAULT_BITMAP_NBITS - b->nbits;
	unsigned unused_values = unused_bits / RUN_BITS_PER_VALUE;
	b->nvalues = RUN_DEFAULT_BITMAP_VALUES - unused_values;
	b->values = (uint64_t *)content;
}
/*
 * run_get_bitmap -- fills out the bitmap description of an existing run
 */
static void
run_get_bitmap(const struct memory_block *m, struct run_bitmap *b)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	uint32_t idx = hdr->size_idx;
	memblock_run_bitmap(&idx, hdr->flags, run->hdr.block_size,
		run->hdr.alignment, run->content, b);

	/* an already-created run must not be shrunk by the calculation */
	ASSERTeq(idx, hdr->size_idx);
}
/*
 * huge_block_size -- returns the compile-time constant which defines the
 *	huge memory block size.
 */
static size_t
huge_block_size(const struct memory_block *m)
{
	/* huge blocks are always measured in whole chunks */
	(void) m;
	return CHUNKSIZE;
}
/*
 * run_block_size -- looks for the right chunk and returns the block size
 *	information that is attached to the run block metadata.
 */
static size_t
run_block_size(const struct memory_block *m)
{
	return heap_get_chunk_run(m->heap, m)->hdr.block_size;
}
/*
* huge_get_real_data -- returns pointer to the beginning data of a huge block
*/
static void *
huge_get_real_data(const struct memory_block *m)
{
return heap_get_chunk(m->heap, m)->data;
}
/*
 * run_get_data_start -- (internal) returns the pointer to the beginning of
 *	allocations in a run (i.e. the first byte past the bitmap/padding)
 */
static char *
run_get_data_start(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	if (hdr->flags & CHUNK_FLAG_ALIGNED) {
		/*
		 * Alignment is property of user data in allocations. And
		 * since objects have headers, we need to take them into
		 * account when calculating the address.
		 */
		uintptr_t hsize = header_type_to_size[m->header_type];
		uintptr_t base = (uintptr_t)run->content +
			b.size + hsize;
		/* align the USER pointer, then step back over the header */
		return (char *)(ALIGN_UP(base, run->hdr.alignment) - hsize);
	} else {
		/* without alignment, data starts right after the bitmap */
		return (char *)&run->content + b.size;
	}
}
/*
 * run_get_data_offset -- (internal) returns the number of bytes between
 *	run base metadata and data
 */
static size_t
run_get_data_offset(const struct memory_block *m)
{
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	/*
	 * Convert through uintptr_t: casting a pointer directly to size_t is
	 * not guaranteed to be lossless by the C standard, uintptr_t is the
	 * type defined for pointer round-trips. The resulting difference is
	 * small (bitmap + padding), so the final size_t cast is safe.
	 */
	return (size_t)((uintptr_t)run_get_data_start(m) -
		(uintptr_t)&run->content);
}
/*
 * run_get_real_data -- returns pointer to the beginning data of a run block
 */
static void *
run_get_real_data(const struct memory_block *m)
{
	struct chunk_run *r = heap_get_chunk_run(m->heap, m);
	ASSERT(r->hdr.block_size != 0);

	/* blocks are laid out contiguously past the run metadata */
	char *base = run_get_data_start(m);
	return base + r->hdr.block_size * m->block_off;
}
/*
 * block_get_user_data -- returns pointer to the data of a block
 */
static void *
block_get_user_data(const struct memory_block *m)
{
	/* user data lives immediately after the allocation header */
	char *real = m->m_ops->get_real_data(m);
	return real + header_type_to_size[m->header_type];
}
/*
* chunk_get_chunk_hdr_value -- (internal) get value of a header for redo log
*/
static uint64_t
chunk_get_chunk_hdr_value(uint16_t type, uint16_t flags, uint32_t size_idx)
{
uint64_t val;
COMPILE_ERROR_ON(sizeof(struct chunk_header) != sizeof(uint64_t));
struct chunk_header hdr;
hdr.type = type;
hdr.flags = flags;
hdr.size_idx = size_idx;
memcpy(&val, &hdr, sizeof(val));
return val;
}
/*
 * huge_prep_operation_hdr -- prepares the new value of a chunk header that will
 *	be set after the operation concludes.
 *
 * op - the target state of the chunk (MEMBLOCK_ALLOCATED or MEMBLOCK_FREE)
 * ctx - operation in which the header change participates; when NULL the
 *	change is applied and persisted immediately instead
 */
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	/*
	 * Depending on the operation that needs to be performed a new chunk
	 * header needs to be prepared with the new chunk state.
	 */
	uint64_t val = chunk_get_chunk_hdr_value(
		op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
		hdr->flags,
		m->size_idx);
	if (ctx == NULL) {
		/* immediate path: atomic 8-byte store followed by persist */
		util_atomic_store_explicit64((uint64_t *)hdr, val,
			memory_order_relaxed);
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
	} else {
		operation_add_entry(ctx, hdr, val, ULOG_OPERATION_SET);
	}
	/* interior headers of a multi-chunk block must never be read */
	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
		(hdr->size_idx - 1) * sizeof(struct chunk_header));
	/*
	 * In the case of chunks larger than one unit the footer must be
	 * created immediately AFTER the persistent state is safely updated.
	 */
	if (m->size_idx == 1)
		return;
	struct chunk_header *footer = hdr + m->size_idx - 1;
	VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));
	val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);
	/*
	 * It's only safe to write the footer AFTER the persistent part of
	 * the operation have been successfully processed because the footer
	 * pointer might point to a currently valid persistent state
	 * of a different chunk.
	 * The footer entry change is updated as transient because it will
	 * be recreated at heap boot regardless - it's just needed for runtime
	 * operations.
	 */
	if (ctx == NULL) {
		util_atomic_store_explicit64((uint64_t *)footer, val,
			memory_order_relaxed);
		VALGRIND_SET_CLEAN(footer, sizeof(*footer));
	} else {
		operation_add_typed_entry(ctx,
			footer, val, ULOG_OPERATION_SET, LOG_TRANSIENT);
	}
}
/*
 * run_prep_operation_hdr -- prepares the new value for a select few bytes of
 *	a run bitmap that will be set after the operation concludes.
 *
 * It's VERY important to keep in mind that the particular value of the
 * bitmap this method is modifying must not be changed after this function
 * is called and before the operation is processed.
 *
 * op - MEMBLOCK_ALLOCATED (bits set via OR) or MEMBLOCK_FREE (cleared via AND)
 * ctx - operation the bitmap change is appended to (must not be NULL here)
 */
static void
run_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	/* a single block never spans more than one 64-bit bitmap value */
	ASSERT(m->size_idx <= RUN_BITS_PER_VALUE);
	/*
	 * Free blocks are represented by clear bits and used blocks by set
	 * bits - which is the reverse of the commonly used scheme.
	 *
	 * Here a bit mask is prepared that flips the bits that represent the
	 * memory block provided by the caller - because both the size index and
	 * the block offset are tied 1:1 to the bitmap this operation is
	 * relatively simple.
	 */
	uint64_t bmask;
	if (m->size_idx == RUN_BITS_PER_VALUE) {
		/* full-value mask; 1ULL << 64 would be undefined behavior */
		ASSERTeq(m->block_off % RUN_BITS_PER_VALUE, 0);
		bmask = UINT64_MAX;
	} else {
		bmask = ((1ULL << m->size_idx) - 1ULL) <<
				(m->block_off % RUN_BITS_PER_VALUE);
	}
	/*
	 * The run bitmap is composed of several 8 byte values, so a proper
	 * element of the bitmap array must be selected.
	 */
	unsigned bpos = m->block_off / RUN_BITS_PER_VALUE;
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	/* the bit mask is applied immediately by the add entry operations */
	if (op == MEMBLOCK_ALLOCATED) {
		operation_add_entry(ctx, &b.values[bpos],
			bmask, ULOG_OPERATION_OR);
	} else if (op == MEMBLOCK_FREE) {
		operation_add_entry(ctx, &b.values[bpos],
			~bmask, ULOG_OPERATION_AND);
	} else {
		ASSERT(0);
	}
}
/*
 * huge_get_lock -- because huge memory blocks are always allocated from a
 *	single bucket there's no reason to lock them - the bucket itself is
 *	protected.
 */
static os_mutex_t *
huge_get_lock(const struct memory_block *m)
{
	(void) m;
	return NULL;
}
/*
 * run_get_lock -- gets the per-chunk runtime mutex from the heap.
 */
static os_mutex_t *
run_get_lock(const struct memory_block *m)
{
	struct palloc_heap *h = m->heap;
	return heap_get_run_lock(h, m->chunk_id);
}
/*
 * huge_get_state -- returns whether a huge block is allocated or not
 */
static enum memblock_state
huge_get_state(const struct memory_block *m)
{
	/* the chunk type directly encodes the allocation state */
	switch (heap_get_chunk_hdr(m->heap, m)->type) {
	case CHUNK_TYPE_USED:
		return MEMBLOCK_ALLOCATED;
	case CHUNK_TYPE_FREE:
		return MEMBLOCK_FREE;
	default:
		return MEMBLOCK_STATE_UNKNOWN;
	}
}
/*
 * run_get_state -- returns whether a block from a run is allocated or not
 */
static enum memblock_state
run_get_state(const struct memory_block *m)
{
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	/* locate the bitmap value and the bit range that covers this block */
	unsigned v = m->block_off / RUN_BITS_PER_VALUE;
	uint64_t bitmap = b.values[v];
	unsigned bit = m->block_off % RUN_BITS_PER_VALUE;
	unsigned bit_last = bit + m->size_idx;
	ASSERT(bit_last <= RUN_BITS_PER_VALUE);
	/* any set bit means at least part of the block is in use */
	for (unsigned i = bit; i < bit_last; ++i) {
		if (!BIT_IS_CLR(bitmap, i)) {
			return MEMBLOCK_ALLOCATED;
		}
	}
	return MEMBLOCK_FREE;
}
/*
 * huge_ensure_header_type -- checks the header type of a chunk and modifies
 *	it if necessary. This is fail-safe atomic.
 */
static void
huge_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	/* header types may only be changed on free chunks */
	ASSERTeq(hdr->type, CHUNK_TYPE_FREE);
	if ((hdr->flags & header_type_to_flag[t]) == 0) {
		VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
		uint16_t f = ((uint16_t)header_type_to_flag[t]);
		/* OR-ing a single flag and persisting is fail-safe atomic */
		hdr->flags |= f;
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
		VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));
	}
}
/*
 * run_ensure_header_type -- runs must be created with appropriate header type.
 *
 * Unlike huge chunks, a run's header type is fixed at creation time, so this
 * is a no-op in release builds; debug builds only verify that the requested
 * type matches what the run was created with.
 */
static void
run_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
#ifdef DEBUG
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
	ASSERT((hdr->flags & header_type_to_flag[t]) == header_type_to_flag[t]);
#endif
}
/*
 * block_get_real_size -- returns the size of a memory block that includes all
 *	of the overhead (headers)
 */
static size_t
block_get_real_size(const struct memory_block *m)
{
	/*
	 * There are two valid ways to get a size. If the memory block
	 * initialized properly and the size index is set, the chunk unit size
	 * can be simply multiplied by that index, otherwise we need to look at
	 * the allocation header.
	 */
	if (m->size_idx == 0)
		return memblock_header_ops[m->header_type].get_size(m);

	return m->m_ops->block_size(m) * m->size_idx;
}
/*
 * block_get_user_size -- returns the size of a memory block without overheads,
 *	this is the size of a data block that can be used.
 */
static size_t
block_get_user_size(const struct memory_block *m)
{
	size_t hdr_size = header_type_to_size[m->header_type];
	return block_get_real_size(m) - hdr_size;
}
/*
 * block_write_header -- writes a header of an allocation
 */
static void
block_write_header(const struct memory_block *m,
	uint64_t extra_field, uint16_t flags)
{
	/* the written size always includes the header overhead */
	size_t real_size = block_get_real_size(m);
	memblock_header_ops[m->header_type].write(m, real_size,
		extra_field, flags);
}
/*
 * block_invalidate -- invalidates allocation data and header
 */
static void
block_invalidate(const struct memory_block *m)
{
	void *user_data = m->m_ops->get_user_data(m);
	size_t user_size = m->m_ops->get_user_size(m);

	/* the user data no longer needs to be tracked by memcheck */
	VALGRIND_SET_CLEAN(user_data, user_size);

	memblock_header_ops[m->header_type].invalidate(m);
}
/*
 * block_reinit_header -- reinitializes a block after a heap restart
 */
static void
block_reinit_header(const struct memory_block *m)
{
	/* delegate to the header-type specific reinitialization routine */
	memblock_header_ops[m->header_type].reinit(m);
}
/*
* block_get_extra -- returns the extra field of an allocation
*/
static uint64_t
block_get_extra(const struct memory_block *m)
{
return memblock_header_ops[m->header_type].get_extra(m);
}
/*
* block_get_flags -- returns the flags of an allocation
*/
static uint16_t
block_get_flags(const struct memory_block *m)
{
return memblock_header_ops[m->header_type].get_flags(m);
}
/*
 * run_process_bitmap_value -- (internal) looks for unset bits in the
 *	value, creates a valid memory block out of them and calls the
 *	provided callback on each such block.
 *
 * Returns the first non-zero callback result, or 0 when the entire value
 * has been processed.
 */
static int
run_process_bitmap_value(const struct memory_block *m,
	uint64_t value, uint32_t base_offset, object_callback cb, void *arg)
{
	int ret = 0;
	uint64_t shift = 0; /* already processed bits */
	struct memory_block s = *m;
	do {
		/*
		 * Shift the value so that the next memory block starts on the
		 * least significant position:
		 *	..............0 (free block)
		 * or	..............1 (used block)
		 */
		uint64_t shifted = value >> shift;
		/* all clear or set bits indicate the end of traversal */
		if (shifted == 0) {
			/*
			 * Insert the remaining blocks as free. Remember that
			 * unsigned values are always zero-filled, so we must
			 * take the current shift into account.
			 */
			s.block_off = (uint32_t)(base_offset + shift);
			s.size_idx = (uint32_t)(RUN_BITS_PER_VALUE - shift);
			/*
			 * NOTE(review): unlike the branch below, the block's
			 * runtime state is not rebuilt before the callback
			 * here -- confirm the callers' state is already valid.
			 */
			if ((ret = cb(&s, arg)) != 0)
				return ret;
			break;
		} else if (shifted == UINT64_MAX) {
			break;
		}
		/*
		 * Offset and size of the next free block, either of these
		 * can be zero depending on where the free block is located
		 * in the value.
		 */
		unsigned off = (unsigned)util_lssb_index64(~shifted);
		unsigned size = (unsigned)util_lssb_index64(shifted);
		shift += off + size;
		if (size != 0) { /* zero size means skip to the next value */
			s.block_off = (uint32_t)(base_offset + (shift - size));
			s.size_idx = (uint32_t)(size);
			memblock_rebuild_state(m->heap, &s);
			if ((ret = cb(&s, arg)) != 0)
				return ret;
		}
	} while (shift != RUN_BITS_PER_VALUE);
	return 0;
}
/*
 * run_iterate_free -- iterates over free blocks in a run
 */
static int
run_iterate_free(const struct memory_block *m, object_callback cb, void *arg)
{
	struct run_bitmap b;
	run_get_bitmap(m, &b);

	struct memory_block iter = *m;
	for (unsigned i = 0; i < b.nvalues; ++i) {
		/* the base offset of this value must fit in block_off */
		ASSERT((uint64_t)RUN_BITS_PER_VALUE * (uint64_t)i
			<= UINT32_MAX);
		uint32_t off = RUN_BITS_PER_VALUE * i;

		int ret = run_process_bitmap_value(&iter, b.values[i],
			off, cb, arg);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/*
 * run_iterate_used -- iterates over used blocks in a run
 *
 * The iteration starts at m->block_off, not necessarily at the beginning of
 * the run. Returns 1 as soon as the callback returns non-zero, 0 otherwise.
 */
static int
run_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
	/* start in the middle of the first value, then from bit 0 onwards */
	uint32_t i = m->block_off / RUN_BITS_PER_VALUE;
	uint32_t block_start = m->block_off % RUN_BITS_PER_VALUE;
	uint32_t block_off;
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	struct memory_block iter = *m;
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	for (; i < b.nvalues; ++i) {
		uint64_t v = b.values[i];
		block_off = (uint32_t)(RUN_BITS_PER_VALUE * i);
		for (uint32_t j = block_start; j < RUN_BITS_PER_VALUE; ) {
			/* bits past nbits are padding, not real blocks */
			if (block_off + j >= (uint32_t)b.nbits)
				break;
			if (!BIT_IS_CLR(v, j)) {
				iter.block_off = (uint32_t)(block_off + j);
				/*
				 * The size index of this memory block cannot be
				 * retrieved at this time because the header
				 * might not be initialized in valgrind yet.
				 */
				iter.size_idx = 0;
				if (cb(&iter, arg) != 0)
					return 1;
				/* skip over the remainder of this block */
				iter.size_idx = CALC_SIZE_IDX(
					run->hdr.block_size,
					iter.m_ops->get_real_size(&iter));
				j = (uint32_t)(j + iter.size_idx);
			} else {
				++j;
			}
		}
		block_start = 0;
	}
	return 0;
}
/*
 * huge_iterate_free -- calls cb on the memory block if it's free
 */
static int
huge_iterate_free(const struct memory_block *m, object_callback cb, void *arg)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	if (hdr->type != CHUNK_TYPE_FREE)
		return 0;

	return cb(m, arg);
}
/*
 * huge_iterate_used -- calls cb on the memory block if it's used
 */
static int
huge_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	if (hdr->type != CHUNK_TYPE_USED)
		return 0;

	return cb(m, arg);
}
/*
 * huge_vg_init -- initializes chunk metadata in memcheck state
 *
 * objects - when non-zero, cb is also invoked on the block if allocated
 */
static void
huge_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk *chunk = heap_get_chunk(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
	/*
	 * Mark unused chunk headers as not accessible.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(
		&z->chunk_headers[m->chunk_id + 1],
		(m->size_idx - 1) *
		sizeof(struct chunk_header));
	size_t size = block_get_real_size(m);
	VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);
	/* let the callback register the object's layout with memcheck */
	if (objects && huge_get_state(m) == MEMBLOCK_ALLOCATED) {
		if (cb(m, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
/*
 * run_vg_init -- initializes run metadata in memcheck state
 *
 * objects - when non-zero, cb is also invoked on every used block in the run
 */
static void
run_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
	/* set the run metadata as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	/*
	 * Mark run data headers as defined.
	 */
	for (unsigned j = 1; j < m->size_idx; ++j) {
		struct chunk_header *data_hdr =
			&z->chunk_headers[m->chunk_id + j];
		VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
			sizeof(struct chunk_header));
		ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
	}
	/* first hide the whole run, then re-expose just the metadata/bitmap */
	VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));
	/* set the run bitmap as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);
	if (objects) {
		if (run_iterate_used(m, cb, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
/*
 * run_reinit_chunk -- run reinitialization on first zone traversal
 */
static void
run_reinit_chunk(const struct memory_block *m)
{
	/* runs carry no derived state that needs recreating at boot */
	(void) m;
}
/*
 * huge_write_footer -- (internal) writes a chunk footer
 *
 * The footer is a copy of the chunk header placed in the block's last header
 * slot, marked with CHUNK_TYPE_FOOTER.
 */
static void
huge_write_footer(struct chunk_header *hdr, uint32_t size_idx)
{
	if (size_idx == 1) /* that would overwrite the header */
		return;
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr + size_idx - 1, sizeof(*hdr));
	struct chunk_header f = *hdr;
	f.type = CHUNK_TYPE_FOOTER;
	f.size_idx = size_idx;
	/* written as a single 8-byte struct assignment */
	*(hdr + size_idx - 1) = f;
	/* no need to persist, footers are recreated in heap_populate_buckets */
	VALGRIND_SET_CLEAN(hdr + size_idx - 1, sizeof(f));
}
/*
 * huge_reinit_chunk -- chunk reinitialization on first zone traversal
 */
static void
huge_reinit_chunk(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	/* only used chunks need their (transient) footers recreated */
	if (hdr->type != CHUNK_TYPE_USED)
		return;

	huge_write_footer(hdr, hdr->size_idx);
}
/*
 * run_calc_free -- calculates the number of free units in a run
 *
 * free_space [in/out] - incremented by the total number of free units
 * max_free_block [in/out] - updated with the longest contiguous free block
 *	found, if larger than the current value
 */
static void
run_calc_free(const struct memory_block *m,
	uint32_t *free_space, uint32_t *max_free_block)
{
	struct run_bitmap b;
	run_get_bitmap(m, &b);
	for (unsigned i = 0; i < b.nvalues; ++i) {
		/* invert: from here on set bits represent FREE units */
		uint64_t value = ~b.values[i];
		if (value == 0)
			continue;
		uint32_t free_in_value = util_popcount64(value);
		*free_space = *free_space + free_in_value;
		/*
		 * If this value has less free blocks than already found max,
		 * there's no point in calculating.
		 */
		if (free_in_value < *max_free_block)
			continue;
		/* if the entire value is empty, no point in calculating */
		if (free_in_value == RUN_BITS_PER_VALUE) {
			*max_free_block = RUN_BITS_PER_VALUE;
			continue;
		}
		/* if already at max, no point in calculating */
		if (*max_free_block == RUN_BITS_PER_VALUE)
			continue;
		/*
		 * Calculate the biggest free block in the bitmap.
		 * This algorithm is not the most clever imaginable, but it's
		 * easy to implement and fast enough.
		 * Each AND with the left-shifted value keeps only the bits
		 * that start a run of >= n+1 consecutive set bits, so the
		 * loop executes once per unit of the longest free block.
		 */
		uint16_t n = 0;
		while (value != 0) {
			value &= (value << 1ULL);
			n++;
		}
		if (n > *max_free_block)
			*max_free_block = n;
	}
}
/*
 * mb_ops -- virtual method tables for the two memory block types; the
 * generic block_* callbacks are shared, the rest dispatch on huge vs run.
 */
static const struct memory_block_ops mb_ops[MAX_MEMORY_BLOCK] = {
	[MEMORY_BLOCK_HUGE] = {
		.block_size = huge_block_size,
		.prep_hdr = huge_prep_operation_hdr,
		.get_lock = huge_get_lock,
		.get_state = huge_get_state,
		.get_user_data = block_get_user_data,
		.get_real_data = huge_get_real_data,
		.get_user_size = block_get_user_size,
		.get_real_size = block_get_real_size,
		.write_header = block_write_header,
		.invalidate = block_invalidate,
		.ensure_header_type = huge_ensure_header_type,
		.reinit_header = block_reinit_header,
		.vg_init = huge_vg_init,
		.get_extra = block_get_extra,
		.get_flags = block_get_flags,
		.iterate_free = huge_iterate_free,
		.iterate_used = huge_iterate_used,
		.reinit_chunk = huge_reinit_chunk,
		/* huge chunks have no bitmap, hence no free/bitmap callbacks */
		.calc_free = NULL,
		.get_bitmap = NULL,
	},
	[MEMORY_BLOCK_RUN] = {
		.block_size = run_block_size,
		.prep_hdr = run_prep_operation_hdr,
		.get_lock = run_get_lock,
		.get_state = run_get_state,
		.get_user_data = block_get_user_data,
		.get_real_data = run_get_real_data,
		.get_user_size = block_get_user_size,
		.get_real_size = block_get_real_size,
		.write_header = block_write_header,
		.invalidate = block_invalidate,
		.ensure_header_type = run_ensure_header_type,
		.reinit_header = block_reinit_header,
		.vg_init = run_vg_init,
		.get_extra = block_get_extra,
		.get_flags = block_get_flags,
		.iterate_free = run_iterate_free,
		.iterate_used = run_iterate_used,
		.reinit_chunk = run_reinit_chunk,
		.calc_free = run_calc_free,
		.get_bitmap = run_get_bitmap,
	}
};
/*
 * memblock_huge_init -- initializes a new huge memory block
 *
 * Writes (and persists) a CHUNK_TYPE_FREE header for the chunk range and
 * returns a fully rebuilt memory_block describing it.
 */
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;
	struct chunk_header nhdr = {
		.type = CHUNK_TYPE_FREE,
		.flags = 0,
		.size_idx = size_idx
	};
	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));
	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
	/* transient footer, recreated at boot; no persist needed */
	huge_write_footer(hdr, size_idx);
	memblock_rebuild_state(heap, &m);
	return m;
}
/*
 * memblock_run_init -- initializes a new run memory block
 *
 * Writes the run header, the bitmap, the RUN_DATA headers for the interior
 * chunks and, last, the CHUNK_TYPE_RUN header itself - in that order, so
 * that the run only becomes visible once fully constructed.
 */
struct memory_block
memblock_run_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	ASSERTne(size_idx, 0);
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_run *run = heap_get_chunk_run(heap, &m);
	size_t runsize = SIZEOF_RUN(run, size_idx);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);
	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->hdr.block_size = unit_size;
	run->hdr.alignment = alignment;
	struct run_bitmap b;
	memblock_run_bitmap(&size_idx, flags, unit_size, alignment,
		run->content, &b);
	size_t bitmap_size = b.size;
	/* set all the bits */
	memset(b.values, 0xFF, bitmap_size);
	/* clear only the bits available for allocations from this bucket */
	memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));
	/*
	 * NOTE(review): assumes nbits is never an exact multiple of
	 * RUN_BITS_PER_VALUE; if trailing_bits were 0 the shift below would
	 * leave the last value fully set -- confirm against the bitmap
	 * calculation.
	 */
	unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
	uint64_t last_value = UINT64_MAX << trailing_bits;
	b.values[b.nvalues - 1] = last_value;
	VALGRIND_REMOVE_FROM_TX(run, runsize);
	pmemops_flush(&heap->p_ops, run,
		sizeof(struct chunk_run_header) +
		bitmap_size);
	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;
	VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);
	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < size_idx; ++i) {
		data_hdr = &z->chunk_headers[chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[chunk_id + 1],
		sizeof(struct chunk_header) * (size_idx - 1));
	/* the run header is written last, making the run valid */
	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = flags;
	*hdr = run_hdr;
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
	VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);
	memblock_rebuild_state(heap, &m);
	return m;
}
/*
 * memblock_detect_type -- looks for the corresponding chunk header and
 *	depending on the chunks type returns the right memory block type
 */
static enum memory_block_type
memblock_detect_type(struct palloc_heap *heap, const struct memory_block *m)
{
	switch (heap_get_chunk_hdr(heap, m)->type) {
	case CHUNK_TYPE_RUN:
	case CHUNK_TYPE_RUN_DATA:
		return MEMORY_BLOCK_RUN;
	case CHUNK_TYPE_FREE:
	case CHUNK_TYPE_USED:
	case CHUNK_TYPE_FOOTER:
		return MEMORY_BLOCK_HUGE;
	default:
		/* unreachable */
		FATAL("possible zone chunks metadata corruption");
	}

	/* not reached -- FATAL aborts; this only silences the compiler */
	return MEMORY_BLOCK_HUGE;
}
/*
 * memblock_from_offset_opt -- resolves a memory block data from an offset
 *	that originates from the heap; the size index is only calculated
 *	when the size argument is non-zero
 */
struct memory_block
memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.heap = heap;
	/* peel off each layer of the address: zone, then chunk */
	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0);
	m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);
	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone);
	m.chunk_id = (uint32_t)(off / CHUNKSIZE);
	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);
	/* an interior run chunk points back at the run's first chunk */
	if (hdr->type == CHUNK_TYPE_RUN_DATA)
		m.chunk_id -= hdr->size_idx;
	off -= CHUNKSIZE * m.chunk_id;
	m.header_type = memblock_header_type(&m);
	off -= header_type_to_size[m.header_type];
	/* a huge block's data starts exactly at the chunk, so off is now 0 */
	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
	ASSERTeq(memblock_detect_type(heap, &m), m.type);
	m.m_ops = &mb_ops[m.type];
	uint64_t unit_size = m.m_ops->block_size(&m);
	if (off != 0) { /* run */
		off -= run_get_data_offset(&m);
		off -= RUN_BASE_METADATA_SIZE;
		/*
		 * NOTE(review): the uint16_t cast assumes block offsets fit
		 * in 16 bits -- confirm against maximum run unit counts.
		 */
		m.block_off = (uint16_t)(off / unit_size);
		off -= m.block_off * unit_size;
	}
	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size,
		memblock_header_ops[m.header_type].get_size(&m));
	/* every byte of the offset must have been accounted for */
	ASSERTeq(off, 0);
	return m;
}
/*
 * memblock_from_offset -- resolves a memory block from a heap offset,
 *	including its size index
 */
struct memory_block
memblock_from_offset(struct palloc_heap *heap, uint64_t off)
{
	/* size lookup enabled (third argument) */
	return memblock_from_offset_opt(heap, off, 1);
}
/*
 * memblock_rebuild_state -- fills in the runtime-state related fields of a
 *	memory block structure
 *
 * This function must be called on all memory blocks that were created by hand
 * (as opposed to retrieved from memblock_from_offset function).
 */
void
memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m)
{
	/* the heap pointer must be set before the lookups below use it */
	m->heap = heap;
	m->header_type = memblock_header_type(m);
	m->type = memblock_detect_type(heap, m);
	/* the type determines which virtual method table applies */
	m->m_ops = &mb_ops[m->type];
}
| 40,736 | 26.158 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmalloc.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmalloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "heap.h"
#include "lane.h"
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "palloc.h"
#include "pmalloc.h"
#include "alloc_class.h"
#include "set.h"
#include "mmap.h"
/* which of the per-lane operation contexts a caller acquires */
enum pmalloc_operation_type {
	OPERATION_INTERNAL, /* used only for single, one-off operations */
	OPERATION_EXTERNAL, /* used for everything else, incl. large redos */

	MAX_OPERATION_TYPE,
};

/* per-lane runtime state: one operation context per operation type */
struct lane_alloc_runtime {
	struct operation_context *ctx[MAX_OPERATION_TYPE];
};
/*
 * pmalloc_operation_hold_type -- acquires allocator lane section and returns a
 *	pointer to its operation context, optionally starting the operation
 */
static struct operation_context *
pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type,
	int start)
{
	struct lane *lane;
	lane_hold(pop, &lane);

	struct operation_context *ctx;
	if (type == OPERATION_INTERNAL)
		ctx = lane->internal;
	else
		ctx = lane->external;

	if (start)
		operation_start(ctx);

	return ctx;
}
/*
 * pmalloc_operation_hold_no_start -- acquires the allocator lane section and
 *	returns its operation context without starting an operation
 */
struct operation_context *
pmalloc_operation_hold_no_start(PMEMobjpool *pop)
{
	/* start == 0: the caller is responsible for operation_start() */
	return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0);
}
/*
 * pmalloc_operation_hold -- acquires the allocator lane section and returns
 *	its operation context, ready to accept entries
 */
struct operation_context *
pmalloc_operation_hold(PMEMobjpool *pop)
{
	/* start == 1: the context is started before being handed out */
	return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1);
}
/*
 * pmalloc_operation_release -- releases the allocator lane section acquired
 *	by one of the hold variants
 */
void
pmalloc_operation_release(PMEMobjpool *pop)
{
	lane_release(pop);
}
/*
 * pmalloc -- allocates a new block of memory
 *
 * The pool offset is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
	uint64_t extra_field, uint16_t object_flags)
{
	struct operation_context *op_ctx =
		pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);

	/* an existing offset of 0 requests a fresh allocation, not a resize */
	int err = palloc_operation(&pop->heap, 0, off, size, NULL, NULL,
		extra_field, object_flags, 0, op_ctx);

	pmalloc_operation_release(pop);

	return err;
}
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id)
{
	struct operation_context *op_ctx =
		pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);

	int err = palloc_operation(&pop->heap, 0, off, size, constructor, arg,
		extra_field, object_flags, class_id, op_ctx);

	pmalloc_operation_release(pop);

	return err;
}
/*
 * prealloc -- resizes in-place a previously allocated memory block
 *
 * The block offset is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
	uint64_t extra_field, uint16_t object_flags)
{
	struct operation_context *ctx =
		pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
	/* nonzero old offset (*off) + nonzero size == reallocation */
	int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL,
		extra_field, object_flags, 0, ctx);
	pmalloc_operation_release(pop);
	return ret;
}
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * This function never fails from the caller's perspective: an internal
 * deallocation failure is asserted on. (The previous comment incorrectly
 * claimed a zero/error return -- the function is void.)
 */
void
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct operation_context *ctx =
		pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
	/* nonzero old offset (*off) + size 0 == deallocation */
	int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL,
		0, 0, 0, ctx);
	ASSERTeq(ret, 0);
	pmalloc_operation_release(pop);
}
/*
 * pmalloc_boot -- global runtime init routine of allocator section
 *
 * Boots the palloc heap over the pool's heap region, then initializes the
 * allocation class buckets. Returns 0 on success, otherwise an error value;
 * on bucket-init failure the heap is cleaned up again so no partially
 * initialized state is left behind.
 */
int
pmalloc_boot(PMEMobjpool *pop)
{
	int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset,
			pop->set->poolsize - pop->heap_offset, &pop->heap_size,
			pop, &pop->p_ops,
			pop->stats, pop->set);
	if (ret)
		return ret;
#if VG_MEMCHECK_ENABLED
	/* let the Valgrind instrumentation learn about the heap layout */
	if (On_valgrind)
		palloc_heap_vg_open(&pop->heap, pop->vg_boot);
#endif
	ret = palloc_buckets_init(&pop->heap);
	if (ret)
		palloc_heap_cleanup(&pop->heap); /* undo palloc_boot */
	return ret;
}
/*
 * pmalloc_cleanup -- global cleanup routine of allocator section
 *
 * Tears down the palloc heap; always returns 0.
 */
int
pmalloc_cleanup(PMEMobjpool *pop)
{
	palloc_heap_cleanup(&pop->heap);
	return 0;
}
/*
 * CTL_WRITE_HANDLER(desc) -- creates a new allocation class
 * (the previous comment misnamed this handler "proto")
 *
 * Validates the user-provided pobj_alloc_class_desc, picks or reserves a
 * class id (automatic when accessed through "new", explicit through
 * "class_id"), creates the run-based allocation class and its buckets, and
 * writes the chosen id back into the descriptor.
 *
 * Returns 0 on success; on failure returns -1 with errno set to EINVAL,
 * ERANGE or EEXIST depending on the cause.
 */
static int
CTL_WRITE_HANDLER(desc)(void *ctx,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;
	uint8_t id;
	struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap);
	struct pobj_alloc_class_desc *p = arg;
	/* basic sanity checks of the requested class geometry */
	if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE ||
		p->units_per_block <= 0) {
		errno = EINVAL;
		return -1;
	}
	if (p->alignment != 0 && p->unit_size % p->alignment != 0) {
		ERR("unit size must be evenly divisible by alignment");
		errno = EINVAL;
		return -1;
	}
	if (p->alignment > (MEGABYTE * 2)) {
		ERR("alignment cannot be larger than 2 megabytes");
		errno = EINVAL;
		return -1;
	}
	/* translate the public header type enum into the internal one */
	enum header_type lib_htype = MAX_HEADER_TYPES;
	switch (p->header_type) {
	case POBJ_HEADER_LEGACY:
		lib_htype = HEADER_LEGACY;
		break;
	case POBJ_HEADER_COMPACT:
		lib_htype = HEADER_COMPACT;
		break;
	case POBJ_HEADER_NONE:
		lib_htype = HEADER_NONE;
		break;
	case MAX_POBJ_HEADER_TYPES:
	default:
		ERR("invalid header type");
		errno = EINVAL;
		return -1;
	}
	/*
	 * No index ("heap.alloc_class.new.desc") -> pick the first free id;
	 * otherwise ("heap.alloc_class.<id>.desc") reserve the requested one.
	 */
	if (SLIST_EMPTY(indexes)) {
		if (alloc_class_find_first_free_slot(ac, &id) != 0) {
			ERR("no available free allocation class identifier");
			errno = EINVAL;
			return -1;
		}
	} else {
		struct ctl_index *idx = SLIST_FIRST(indexes);
		ASSERTeq(strcmp(idx->name, "class_id"), 0);
		if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
			ERR("class id outside of the allowed range");
			errno = ERANGE;
			return -1;
		}
		id = (uint8_t)idx->value;
		if (alloc_class_reserve(ac, id) != 0) {
			ERR("attempted to overwrite an allocation class");
			errno = EEXIST;
			return -1;
		}
	}
	/* run size in chunks, including the run metadata header */
	size_t runsize_bytes =
		CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) +
		RUN_BASE_METADATA_SIZE);
	/* aligning the buffer might require up to 'alignment' extra bytes */
	if (p->alignment != 0)
		runsize_bytes += p->alignment;
	uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE);
	if (size_idx > UINT16_MAX)
		size_idx = UINT16_MAX;
	struct alloc_class *c = alloc_class_new(id,
		heap_alloc_classes(&pop->heap), CLASS_RUN,
		lib_htype, p->unit_size, p->alignment, size_idx);
	if (c == NULL) {
		errno = EINVAL;
		return -1;
	}
	if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) {
		alloc_class_delete(ac, c);
		return -1;
	}
	/* report the id that was actually assigned back to the caller */
	p->class_id = c->id;
	return 0;
}
/*
* pmalloc_header_type_parser -- parses the alloc header type argument
*/
static int
pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_header_type *htype = dest;
ASSERTeq(dest_size, sizeof(enum pobj_header_type));
if (strcmp(vstr, "none") == 0) {
*htype = POBJ_HEADER_NONE;
} else if (strcmp(vstr, "compact") == 0) {
*htype = POBJ_HEADER_COMPACT;
} else if (strcmp(vstr, "legacy") == 0) {
*htype = POBJ_HEADER_LEGACY;
} else {
ERR("invalid header type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
 * CTL_READ_HANDLER(desc) -- reads the information about allocation class
 *
 * Looks up the class selected by the "class_id" index and fills the caller's
 * pobj_alloc_class_desc with its parameters. Returns 0 on success, or -1
 * with errno set to ERANGE (id out of range) or ENOENT (no such class).
 */
static int
CTL_READ_HANDLER(desc)(void *ctx,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;
	uint8_t id;
	struct ctl_index *idx = SLIST_FIRST(indexes);
	ASSERTeq(strcmp(idx->name, "class_id"), 0);
	if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
		ERR("class id outside of the allowed range");
		errno = ERANGE;
		return -1;
	}
	id = (uint8_t)idx->value;
	struct alloc_class *c = alloc_class_by_id(
		heap_alloc_classes(&pop->heap), id);
	if (c == NULL) {
		ERR("class with the given id does not exist");
		errno = ENOENT;
		return -1;
	}
	/* translate the internal header type enum into the public one */
	enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES;
	switch (c->header_type) {
	case HEADER_LEGACY:
		user_htype = POBJ_HEADER_LEGACY;
		break;
	case HEADER_COMPACT:
		user_htype = POBJ_HEADER_COMPACT;
		break;
	case HEADER_NONE:
		user_htype = POBJ_HEADER_NONE;
		break;
	default:
		ASSERT(0); /* unreachable */
		break;
	}
	struct pobj_alloc_class_desc *p = arg;
	/* huge classes are not run-based -- they have no units per block */
	p->units_per_block = c->type == CLASS_HUGE ? 0 : c->run.nallocs;
	p->header_type = user_htype;
	p->unit_size = c->unit_size;
	p->class_id = c->id;
	p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->run.alignment : 0;
	return 0;
}
/* argument descriptor for "desc": how to parse each struct field from text */
static struct ctl_argument CTL_ARG(desc) = {
	.dest_size = sizeof(struct pobj_alloc_class_desc),
	.parsers = {
		CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
			unit_size, ctl_arg_integer),
		CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
			alignment, ctl_arg_integer),
		CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
			units_per_block, ctl_arg_integer),
		CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
			header_type, pmalloc_header_type_parser),
		CTL_ARG_PARSER_END
	}
};
/* heap.alloc_class.<class_id>.desc -- read/write an existing class */
static const struct ctl_node CTL_NODE(class_id)[] = {
	CTL_LEAF_RW(desc),
	CTL_NODE_END
};
/* heap.alloc_class.new.desc -- write-only creation with automatic id */
static const struct ctl_node CTL_NODE(new)[] = {
	CTL_LEAF_WO(desc),
	CTL_NODE_END
};
/* heap.alloc_class -- parent node of the allocation class namespace */
static const struct ctl_node CTL_NODE(alloc_class)[] = {
	CTL_INDEXED(class_id),
	CTL_INDEXED(new),
	CTL_NODE_END
};
/*
 * CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size
 *
 * The argument is the number of bytes to grow by; it must be at least
 * PMEMOBJ_MIN_PART. Returns 0 on success, -1 on failure.
 */
static int
CTL_RUNNABLE_HANDLER(extend)(void *ctx,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;
	ssize_t arg_in = *(ssize_t *)arg;
	if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
		ERR("incorrect size for extend, must be larger than %" PRIu64,
			PMEMOBJ_MIN_PART);
		return -1;
	}
	struct palloc_heap *heap = &pop->heap;
	/* the extension is performed through the default allocation class */
	struct bucket *defb = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);
	int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? -1 : 0;
	heap_bucket_release(heap, defb);
	return ret;
}
/*
 * CTL_READ_HANDLER(granularity) -- reads the current heap grow size
 *
 * Writes the pool's automatic-extend granularity into the output argument.
 * Always succeeds.
 */
static int
CTL_READ_HANDLER(granularity)(void *ctx,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	PMEMobjpool *pool = ctx;
	ssize_t *out = arg;

	*out = (ssize_t)pool->heap.growsize;

	return 0;
}
/*
 * CTL_WRITE_HANDLER(granularity) -- changes the heap grow size
 *
 * The value 0 disables automatic extension; any other value must be at
 * least PMEMOBJ_MIN_PART. Returns 0 on success, -1 on an invalid value.
 */
static int
CTL_WRITE_HANDLER(granularity)(void *ctx,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	PMEMobjpool *pop = ctx;
	/*
	 * The argument buffer is filled by CTL_ARG_LONG_LONG (see
	 * CTL_ARG(granularity) below), so it holds a long long. Reading it
	 * through 'int' (as before) truncated values above INT_MAX and read
	 * the wrong half of the buffer on big-endian platforms.
	 */
	ssize_t arg_in = (ssize_t)*(long long *)arg;
	if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
		ERR("incorrect grow size, must be 0 or larger than %" PRIu64,
			PMEMOBJ_MIN_PART);
		return -1;
	}
	pop->heap.growsize = (size_t)arg_in;
	return 0;
}
/* "granularity" is parsed as a long long integer */
static struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG;
/* heap.size -- grow-size knob and the manual extend trigger */
static const struct ctl_node CTL_NODE(size)[] = {
	CTL_LEAF_RW(granularity),
	CTL_LEAF_RUNNABLE(extend),
	CTL_NODE_END
};
/* heap -- root of the allocator's ctl namespace */
static const struct ctl_node CTL_NODE(heap)[] = {
	CTL_CHILD(alloc_class),
	CTL_CHILD(size),
	CTL_NODE_END
};
/*
 * pmalloc_ctl_register -- registers ctl nodes for "heap" module
 *
 * Hooks the "heap" subtree defined above into the pool's ctl namespace.
 */
void
pmalloc_ctl_register(PMEMobjpool *pop)
{
	CTL_REGISTER_MODULE(pop->ctl, heap);
}
| 13,605 | 24.05709 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmemops.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBPMEMOBJ_PMEMOPS_H
#define LIBPMEMOBJ_PMEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
/* persistency primitives; 'base' is the pool the operation acts on */
typedef int (*persist_fn)(void *base, const void *, size_t, unsigned);
typedef int (*flush_fn)(void *base, const void *, size_t, unsigned);
typedef void (*drain_fn)(void *base);
/* persistent memory copy/move/set; 'flags' carry PMEMOBJ_F_* modifiers */
typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len,
		unsigned flags);
/* reads 'length' bytes at 'addr' of a remote replica into 'dest' */
typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr,
		size_t length);
/* dispatch table bundling all persistency operations for a pool */
struct pmem_ops {
	/* for 'master' replica: with or without data replication */
	persist_fn persist;	/* persist function */
	flush_fn flush;		/* flush function */
	drain_fn drain;		/* drain function */
	memcpy_fn memcpy;	/* persistent memcpy function */
	memmove_fn memmove;	/* persistent memmove function */
	memset_fn memset;	/* persistent memset function */
	void *base;		/* pool base address passed to callbacks */
	struct remote_ops {
		remote_read_fn read;	/* remote replica read function */
		void *ctx;		/* opaque context for 'read' */
		uintptr_t base;		/* remote base address */
	} remote;
};
/* pmemops_xpersist -- persists [d, d+s) through the pool's persist op */
static force_inline int
pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s,
		unsigned flags)
{
	return p_ops->persist(p_ops->base, d, s, flags);
}
/* pmemops_persist -- pmemops_xpersist with no extra flags */
static force_inline void
pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s)
{
	(void) pmemops_xpersist(p_ops, d, s, 0);
}
/* pmemops_xflush -- flushes [d, d+s) without draining */
static force_inline int
pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s,
		unsigned flags)
{
	return p_ops->flush(p_ops->base, d, s, flags);
}
/* pmemops_flush -- pmemops_xflush with no extra flags */
static force_inline void
pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s)
{
	(void) pmemops_xflush(p_ops, d, s, 0);
}
/* pmemops_drain -- waits for previously issued flushes to complete */
static force_inline void
pmemops_drain(const struct pmem_ops *p_ops)
{
	p_ops->drain(p_ops->base);
}
/* pmemops_memcpy -- persistent memcpy; 'flags' carry PMEMOBJ_F_* modifiers */
static force_inline void *
pmemops_memcpy(const struct pmem_ops *p_ops, void *dest,
		const void *src, size_t len, unsigned flags)
{
	return p_ops->memcpy(p_ops->base, dest, src, len, flags);
}
/* pmemops_memmove -- persistent memmove for possibly overlapping ranges */
static force_inline void *
pmemops_memmove(const struct pmem_ops *p_ops, void *dest,
		const void *src, size_t len, unsigned flags)
{
	return p_ops->memmove(p_ops->base, dest, src, len, flags);
}
/* pmemops_memset -- persistent memset */
static force_inline void *
pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c,
		size_t len, unsigned flags)
{
	return p_ops->memset(p_ops->base, dest, c, len, flags);
}
#ifdef __cplusplus
}
#endif
#endif
| 4,125 | 29.791045 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ulog.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ulog.c -- unified log implementation
*/
#include <inttypes.h>
#include <string.h>
#include "libpmemobj.h"
#include "ulog.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
/*
 * The operation type is encoded in the three most significant bits of an
 * entry's offset field -- pool offsets never use those bits.
 */
#define ULOG_OPERATION(op) ((uint64_t)(op))
/* mask selecting the operation bits (bits 61..63) */
#define ULOG_OPERATION_MASK ((uint64_t)(0b111ULL << 61ULL))
/* extracts the operation type out of a combined offset field */
#define ULOG_OPERATION_FROM_OFFSET(off) (ulog_operation_type)\
	((off) & ULOG_OPERATION_MASK)
/* mask selecting the actual pool offset */
#define ULOG_OFFSET_MASK (~(ULOG_OPERATION_MASK))
/* rounds 'size' up to a multiple of the cacheline size */
#define CACHELINE_ALIGN(size) ALIGN_UP(size, CACHELINE_SIZE)
/* checks whether 'ptr' sits on a cacheline boundary */
#define IS_CACHELINE_ALIGNED(ptr)\
	(((uintptr_t)(ptr) & (CACHELINE_SIZE - 1)) == 0)
/*
 * ulog_by_offset -- (internal) calculates the ulog pointer
 *
 * Translates a pool offset into a struct ulog pointer, aligning the offset
 * up to a cacheline first. Returns NULL for offset 0 (no log).
 */
static struct ulog *
ulog_by_offset(size_t offset, const struct pmem_ops *p_ops)
{
	if (offset == 0)
		return NULL;
	size_t aligned_offset = CACHELINE_ALIGN(offset);
	return (struct ulog *)((char *)p_ops->base + aligned_offset);
}
/*
 * ulog_next -- retrieves the pointer to the next ulog in the chain
 * (NULL if this is the last one)
 */
struct ulog *
ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops)
{
	return ulog_by_offset(ulog->next, p_ops);
}
/*
 * ulog_entry_type -- returns the type of entry operation
 * (the previous comment misnamed this function "ulog_operation")
 */
ulog_operation_type
ulog_entry_type(const struct ulog_entry_base *entry)
{
	return ULOG_OPERATION_FROM_OFFSET(entry->offset);
}
/*
 * ulog_entry_offset -- returns the pool offset the entry applies to,
 * with the operation bits masked off
 * (the previous comment misnamed this function "ulog_offset")
 */
uint64_t
ulog_entry_offset(const struct ulog_entry_base *entry)
{
	return entry->offset & ULOG_OFFSET_MASK;
}
/*
 * ulog_entry_size -- returns the size of a ulog entry
 *
 * Value entries have a fixed size; buffer entries additionally carry
 * 'size' bytes of payload, rounded up to a cacheline.
 */
size_t
ulog_entry_size(const struct ulog_entry_base *entry)
{
	struct ulog_entry_buf *eb;
	switch (ulog_entry_type(entry)) {
	case ULOG_OPERATION_AND:
	case ULOG_OPERATION_OR:
	case ULOG_OPERATION_SET:
		return sizeof(struct ulog_entry_val);
	case ULOG_OPERATION_BUF_SET:
	case ULOG_OPERATION_BUF_CPY:
		eb = (struct ulog_entry_buf *)entry;
		return CACHELINE_ALIGN(
			sizeof(struct ulog_entry_buf) + eb->size);
	default:
		ASSERT(0);
	}
	return 0;
}
/*
 * ulog_entry_valid -- (internal) checks if a ulog entry is valid
 * Returns 1 if the entry is valid, otherwise 0 is returned.
 *
 * An entry with offset 0 marks the end of the log. Buffer entries carry
 * their own checksum, which is verified here; value entries have none.
 */
static int
ulog_entry_valid(const struct ulog_entry_base *entry)
{
	if (entry->offset == 0)
		return 0;
	size_t size;
	struct ulog_entry_buf *b;
	switch (ulog_entry_type(entry)) {
	case ULOG_OPERATION_BUF_CPY:
	case ULOG_OPERATION_BUF_SET:
		size = ulog_entry_size(entry);
		b = (struct ulog_entry_buf *)entry;
		if (!util_checksum(b, size, &b->checksum, 0, 0))
			return 0;
		break;
	default:
		break;
	}
	return 1;
}
/*
 * ulog_construct -- initializes the ulog structure at the given pool offset
 *
 * Zeroes the header fields and the data area. With 'flush' set the header
 * and data are written through the persistent memset/flush primitives;
 * otherwise a plain memset is used (see the comment below).
 */
void
ulog_construct(uint64_t offset, size_t capacity, int flush,
	const struct pmem_ops *p_ops)
{
	struct ulog *ulog = ulog_by_offset(offset, p_ops);
	VALGRIND_ADD_TO_TX(ulog, SIZEOF_ULOG(capacity));
	ulog->capacity = capacity;
	ulog->checksum = 0;
	ulog->next = 0;
	memset(ulog->unused, 0, sizeof(ulog->unused));
	if (flush) {
		pmemops_xflush(p_ops, ulog, sizeof(*ulog),
			PMEMOBJ_F_RELAXED);
		pmemops_memset(p_ops, ulog->data, 0, capacity,
			PMEMOBJ_F_MEM_NONTEMPORAL |
			PMEMOBJ_F_MEM_NODRAIN |
			PMEMOBJ_F_RELAXED);
	} else {
		/*
		 * We want to avoid replicating zeroes for every ulog of every
		 * lane, to do that, we need to use plain old memset.
		 */
		memset(ulog->data, 0, capacity);
	}
	VALGRIND_REMOVE_FROM_TX(ulog, SIZEOF_ULOG(capacity));
}
/*
 * ulog_foreach_entry -- iterates over every existing entry in the ulog
 *
 * Walks the whole chain of logs and invokes 'cb' for each valid entry.
 * Iteration stops early at the first invalid entry (end of log) or when
 * the callback returns nonzero; that callback value is returned.
 */
int
ulog_foreach_entry(struct ulog *ulog,
	ulog_entry_cb cb, void *arg, const struct pmem_ops *ops)
{
	struct ulog_entry_base *e;
	int ret = 0;
	for (struct ulog *r = ulog; r != NULL; r = ulog_next(r, ops)) {
		for (size_t offset = 0; offset < r->capacity; ) {
			e = (struct ulog_entry_base *)(r->data + offset);
			if (!ulog_entry_valid(e))
				return ret;
			if ((ret = cb(e, arg, ops)) != 0)
				return ret;
			offset += ulog_entry_size(e);
		}
	}
	return ret;
}
/*
* ulog_capacity -- (internal) returns the total capacity of the ulog
*/
size_t
ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops)
{
size_t capacity = ulog_base_bytes;
/* skip the first one, we count it in 'ulog_base_bytes' */
while ((ulog = ulog_next(ulog, p_ops)) != NULL) {
capacity += ulog->capacity;
}
return capacity;
}
/*
 * ulog_rebuild_next_vec -- rebuilds the vector of next entries
 *
 * Collects the 'next' offset of every log in the chain into the transient
 * vector, so chained logs can later be addressed by index.
 */
void
ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
	const struct pmem_ops *p_ops)
{
	do {
		if (ulog->next != 0)
			VEC_PUSH_BACK(next, ulog->next);
	} while ((ulog = ulog_next(ulog, p_ops)) != NULL);
}
/*
 * ulog_reserve -- reserves new capacity in the ulog
 *
 * Sums up the capacity already available in the chain and, while it is
 * smaller than *new_capacity, extends the chain through the 'extend'
 * callback (recording each new log in 'next'). On success *new_capacity
 * is updated to the actual reserved amount and 0 is returned; -1 on
 * extension failure.
 */
int
ulog_reserve(struct ulog *ulog,
	size_t ulog_base_nbytes, size_t *new_capacity, ulog_extend_fn extend,
	struct ulog_next *next,
	const struct pmem_ops *p_ops)
{
	size_t capacity = ulog_base_nbytes;
	uint64_t offset;
	/* account for the capacity of the logs already chained */
	VEC_FOREACH(offset, next) {
		ulog = ulog_by_offset(offset, p_ops);
		capacity += ulog->capacity;
	}
	while (capacity < *new_capacity) {
		if (extend(p_ops->base, &ulog->next) != 0)
			return -1;
		VEC_PUSH_BACK(next, ulog->next);
		ulog = ulog_next(ulog, p_ops);
		capacity += ulog->capacity;
	}
	*new_capacity = capacity;
	return 0;
}
/*
 * ulog_checksum -- (internal) calculates or verifies the ulog checksum
 *
 * With 'insert' set the checksum is computed and stored; otherwise the
 * stored checksum is verified. Covers the header plus 'ulog_base_bytes'
 * of data.
 */
static int
ulog_checksum(struct ulog *ulog, size_t ulog_base_bytes, int insert)
{
	return util_checksum(ulog, SIZEOF_ULOG(ulog_base_bytes),
		&ulog->checksum, insert, 0);
}
/*
 * ulog_store -- stores the transient src ulog in the
 *	persistent dest ulog
 *
 * The source and destination ulogs must be cacheline aligned.
 */
void
ulog_store(struct ulog *dest, struct ulog *src, size_t nbytes,
	size_t ulog_base_nbytes, struct ulog_next *next,
	const struct pmem_ops *p_ops)
{
	/*
	 * First, store all entries over the base capacity of the ulog in
	 * the next logs.
	 * Because the checksum is only in the first part, we don't have to
	 * worry about failsafety here.
	 */
	struct ulog *ulog = dest;
	size_t offset = ulog_base_nbytes;
	/*
	 * Copy at least 8 bytes more than needed. If the user always
	 * properly uses entry creation functions, this will zero-out the
	 * potential leftovers of the previous log. Since all we really need
	 * to zero is the offset, sizeof(struct redo_log_entry_base) is enough.
	 * If the nbytes is aligned, an entire cacheline needs to be
	 * additionally zeroed.
	 * But the checksum must be calculated based solely on actual data.
	 */
	size_t checksum_nbytes = MIN(ulog_base_nbytes, nbytes);
	nbytes = CACHELINE_ALIGN(nbytes + sizeof(struct ulog_entry_base));
	size_t base_nbytes = MIN(ulog_base_nbytes, nbytes);
	size_t next_nbytes = nbytes - base_nbytes;
	size_t nlog = 0;
	/* spill everything past the base capacity into the chained logs */
	while (next_nbytes > 0) {
		ulog = ulog_by_offset(VEC_ARR(next)[nlog++], p_ops);
		ASSERTne(ulog, NULL);
		size_t copy_nbytes = MIN(next_nbytes, ulog->capacity);
		next_nbytes -= copy_nbytes;
		ASSERT(IS_CACHELINE_ALIGNED(ulog->data));
		VALGRIND_ADD_TO_TX(ulog->data, copy_nbytes);
		pmemops_memcpy(p_ops,
			ulog->data,
			src->data + offset,
			copy_nbytes,
			PMEMOBJ_F_MEM_WC |
			PMEMOBJ_F_MEM_NODRAIN |
			PMEMOBJ_F_RELAXED);
		VALGRIND_REMOVE_FROM_TX(ulog->data, copy_nbytes);
		offset += copy_nbytes;
	}
	if (nlog != 0)
		pmemops_drain(p_ops);
	/*
	 * Then, calculate the checksum and store the first part of the
	 * ulog.
	 */
	src->next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
	ulog_checksum(src, checksum_nbytes, 1);
	pmemops_memcpy(p_ops, dest, src,
		SIZEOF_ULOG(base_nbytes),
		PMEMOBJ_F_MEM_WC);
}
/*
 * ulog_entry_val_create -- creates a new log value entry in the ulog
 *
 * This function requires at least a cacheline of space to be available in the
 * ulog.
 */
struct ulog_entry_val *
ulog_entry_val_create(struct ulog *ulog, size_t offset, uint64_t *dest,
	uint64_t value, ulog_operation_type type,
	const struct pmem_ops *p_ops)
{
	struct ulog_entry_val *e =
		(struct ulog_entry_val *)(ulog->data + offset);
	struct {
		struct ulog_entry_val v;
		struct ulog_entry_base zeroes;
	} data;
	COMPILE_ERROR_ON(sizeof(data) != sizeof(data.v) + sizeof(data.zeroes));
	/*
	 * Write a little bit more to the buffer so that the next entry that
	 * resides in the log is erased. This will prevent leftovers from
	 * a previous, clobbered, log from being incorrectly applied.
	 */
	data.zeroes.offset = 0;
	/* encode the destination's pool offset and the operation type */
	data.v.base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
	data.v.base.offset |= ULOG_OPERATION(type);
	data.v.value = value;
	pmemops_memcpy(p_ops, e, &data, sizeof(data),
		PMEMOBJ_F_MEM_NOFLUSH | PMEMOBJ_F_RELAXED);
	return e;
}
/*
 * ulog_entry_buf_create -- atomically creates a buffer entry in the log
 *
 * Builds the entry header (plus the first bytes of payload) in a transient
 * cacheline-sized buffer, streams the rest of the payload directly into the
 * log, computes the checksum over everything, and only then publishes the
 * header cacheline -- so a torn write leaves an entry that fails checksum
 * validation rather than a half-applied one.
 */
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t *dest,
	const void *src, uint64_t size,
	ulog_operation_type type, const struct pmem_ops *p_ops)
{
	struct ulog_entry_buf *e =
		(struct ulog_entry_buf *)(ulog->data + offset);
	/*
	 * Depending on the size of the source buffer, we might need to perform
	 * up to three separate copies:
	 *	1. The first cacheline, 24b of metadata and 40b of data
	 * If there's still data to be logged:
	 *	2. The entire remainder of data aligned down to cacheline,
	 *	for example, if there's 150b left, this step will copy only
	 *	128b.
	 * Now, we are left with between 0 to 63 bytes. If nonzero:
	 *	3. Create a stack allocated cacheline-sized buffer, fill in the
	 *	remainder of the data, and copy the entire cacheline.
	 *
	 * This is done so that we avoid a cache-miss on misaligned writes.
	 */
	struct ulog_entry_buf *b = alloca(CACHELINE_SIZE);
	b->base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
	b->base.offset |= ULOG_OPERATION(type);
	b->size = size;
	b->checksum = 0;
	size_t bdatasize = CACHELINE_SIZE - sizeof(struct ulog_entry_buf);
	size_t ncopy = MIN(size, bdatasize);
	memcpy(b->data, src, ncopy);
	memset(b->data + ncopy, 0, bdatasize - ncopy);
	size_t remaining_size = ncopy > size ? 0 : size - ncopy;
	char *srcof = (char *)src + ncopy;
	size_t rcopy = ALIGN_DOWN(remaining_size, CACHELINE_SIZE);
	size_t lcopy = remaining_size - rcopy;
	uint8_t last_cacheline[CACHELINE_SIZE];
	if (lcopy != 0) {
		memcpy(last_cacheline, srcof + rcopy, lcopy);
		memset(last_cacheline + lcopy, 0, CACHELINE_SIZE - lcopy);
	}
	/* step 2: the cacheline-aligned bulk of the payload */
	if (rcopy != 0) {
		void *dest = e->data + ncopy;
		ASSERT(IS_CACHELINE_ALIGNED(dest));
		VALGRIND_ADD_TO_TX(dest, rcopy);
		pmemops_memcpy(p_ops, dest, srcof, rcopy,
			PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
		VALGRIND_REMOVE_FROM_TX(dest, rcopy);
	}
	/* step 3: the zero-padded trailing cacheline */
	if (lcopy != 0) {
		void *dest = e->data + ncopy + rcopy;
		ASSERT(IS_CACHELINE_ALIGNED(dest));
		VALGRIND_ADD_TO_TX(dest, CACHELINE_SIZE);
		pmemops_memcpy(p_ops, dest, last_cacheline, CACHELINE_SIZE,
			PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
		VALGRIND_REMOVE_FROM_TX(dest, CACHELINE_SIZE);
	}
	/* checksum covers the header cacheline and both payload pieces */
	b->checksum = util_checksum_seq(b, CACHELINE_SIZE, 0);
	if (rcopy != 0)
		b->checksum = util_checksum_seq(srcof, rcopy, b->checksum);
	if (lcopy != 0)
		b->checksum = util_checksum_seq(last_cacheline,
			CACHELINE_SIZE, b->checksum);
	/* step 1 (last): publish the header cacheline, making entry valid */
	ASSERT(IS_CACHELINE_ALIGNED(e));
	VALGRIND_ADD_TO_TX(e, CACHELINE_SIZE);
	pmemops_memcpy(p_ops, e, b, CACHELINE_SIZE,
		PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
	VALGRIND_REMOVE_FROM_TX(e, CACHELINE_SIZE);
	pmemops_drain(p_ops);
	ASSERT(ulog_entry_valid(&e->base));
	return e;
}
/*
 * ulog_entry_apply -- applies modifications of a single ulog entry
 *
 * Decodes the destination from the entry's offset and performs the logged
 * operation on it. With 'persist' set the result is persisted; otherwise
 * it is only flushed (value ops) or written without drain (buffer ops).
 */
void
ulog_entry_apply(const struct ulog_entry_base *e, int persist,
	const struct pmem_ops *p_ops)
{
	ulog_operation_type t = ulog_entry_type(e);
	uint64_t offset = ulog_entry_offset(e);
	size_t dst_size = sizeof(uint64_t);
	uint64_t *dst = (uint64_t *)((uintptr_t)p_ops->base + offset);
	struct ulog_entry_val *ev;
	struct ulog_entry_buf *eb;
	/* value operations use persist or flush depending on 'persist' */
	flush_fn f = persist ? p_ops->persist : p_ops->flush;
	switch (t) {
	case ULOG_OPERATION_AND:
		ev = (struct ulog_entry_val *)e;
		VALGRIND_ADD_TO_TX(dst, dst_size);
		*dst &= ev->value;
		f(p_ops->base, dst, sizeof(uint64_t),
			PMEMOBJ_F_RELAXED);
		break;
	case ULOG_OPERATION_OR:
		ev = (struct ulog_entry_val *)e;
		VALGRIND_ADD_TO_TX(dst, dst_size);
		*dst |= ev->value;
		f(p_ops->base, dst, sizeof(uint64_t),
			PMEMOBJ_F_RELAXED);
		break;
	case ULOG_OPERATION_SET:
		ev = (struct ulog_entry_val *)e;
		VALGRIND_ADD_TO_TX(dst, dst_size);
		*dst = ev->value;
		f(p_ops->base, dst, sizeof(uint64_t),
			PMEMOBJ_F_RELAXED);
		break;
	case ULOG_OPERATION_BUF_SET:
		eb = (struct ulog_entry_buf *)e;
		dst_size = eb->size;
		VALGRIND_ADD_TO_TX(dst, dst_size);
		/* the fill byte is the first byte of the logged buffer */
		pmemops_memset(p_ops, dst, *eb->data, eb->size,
			PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
		break;
	case ULOG_OPERATION_BUF_CPY:
		eb = (struct ulog_entry_buf *)e;
		dst_size = eb->size;
		VALGRIND_ADD_TO_TX(dst, dst_size);
		pmemops_memcpy(p_ops, dst, eb->data, eb->size,
			PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
		break;
	default:
		ASSERT(0);
	}
	VALGRIND_REMOVE_FROM_TX(dst, dst_size);
}
/*
 * ulog_process_entry -- (internal) processes a single ulog entry
 *
 * ulog_foreach_entry callback: applies the entry (flush-only, no persist)
 * and always continues iteration.
 */
static int
ulog_process_entry(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops)
{
	ulog_entry_apply(e, 0, p_ops);
	return 0;
}
/*
 * ulog_clobber -- zeroes the metadata of the ulog
 *
 * Overwrites the header with zeroes while preserving the 'next' link
 * (taken from the transient vector when provided, otherwise from the
 * current header), which invalidates all entries at once.
 */
void
ulog_clobber(struct ulog *dest, struct ulog_next *next,
	const struct pmem_ops *p_ops)
{
	struct ulog empty;
	memset(&empty, 0, sizeof(empty));
	if (next != NULL)
		empty.next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
	else
		empty.next = dest->next;
	pmemops_memcpy(p_ops, dest, &empty, sizeof(empty),
		PMEMOBJ_F_MEM_WC);
}
/*
 * ulog_clobber_data -- zeroes out 'nbytes' of data in the logs, then frees
 * every chained log past the first one
 */
void
ulog_clobber_data(struct ulog *dest,
	size_t nbytes, size_t ulog_base_nbytes,
	struct ulog_next *next, ulog_free_fn ulog_free,
	const struct pmem_ops *p_ops)
{
	size_t rcapacity = ulog_base_nbytes;
	size_t nlog = 0;
	ASSERTne(dest, NULL);
	for (struct ulog *r = dest; r != NULL; ) {
		size_t nzero = MIN(nbytes, rcapacity);
		VALGRIND_ADD_TO_TX(r->data, nzero);
		pmemops_memset(p_ops, r->data, 0, nzero, PMEMOBJ_F_MEM_WC);
		/*
		 * BUGFIX: this was a second ADD_TO_TX, leaving the range
		 * permanently registered with the instrumentation; the
		 * range must be removed once the memset is done.
		 */
		VALGRIND_REMOVE_FROM_TX(r->data, nzero);
		nbytes -= nzero;
		if (nbytes == 0)
			break;
		r = ulog_by_offset(VEC_ARR(next)[nlog++], p_ops);
		/*
		 * NOTE(review): upstream PMDK has no such early exit; this
		 * looks like an NDP-specific cap on the number of clobbered
		 * logs -- confirm it is intentional.
		 */
		if (nlog > 1)
			break;
		ASSERTne(r, NULL);
		rcapacity = r->capacity;
	}
	/*
	 * To make sure that transaction logs do not occupy too much of space,
	 * all of them, except for the first one, are freed at the end of
	 * the operation. The reasoning for this is that pmalloc() is
	 * a relatively cheap operation for transactions where many hundreds of
	 * kilobytes are being snapshot, and so, allocating and freeing the
	 * buffer for each transaction is an acceptable overhead for the average
	 * case.
	 */
	struct ulog *u = ulog_by_offset(dest->next, p_ops);
	if (u == NULL)
		return;
	VEC(, uint64_t *) logs_past_first;
	VEC_INIT(&logs_past_first);
	size_t next_offset;
	while (u != NULL && ((next_offset = u->next) != 0)) {
		if (VEC_PUSH_BACK(&logs_past_first, &u->next) != 0) {
			/* this is fine, it will just use more pmem */
			LOG(1, "unable to free transaction logs memory");
			goto out;
		}
		u = ulog_by_offset(u->next, p_ops);
	}
	uint64_t *ulog_ptr;
	/* free from the tail so each 'next' pointer stays valid until used */
	VEC_FOREACH_REVERSE(ulog_ptr, &logs_past_first) {
		ulog_free(p_ops->base, ulog_ptr);
	}
out:
	VEC_DELETE(&logs_past_first);
}
/*
 * ulog_process -- process (apply) all entries of the ulog chain
 *
 * In debug builds, when a 'check' callback is given, the log is validated
 * first.
 */
void
ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops)
{
	LOG(15, "ulog %p", ulog);
#ifdef DEBUG
	if (check)
		ulog_check(ulog, check, p_ops);
#endif
	ulog_foreach_entry(ulog, ulog_process_entry, NULL, p_ops);
}
/*
 * ulog_base_nbytes -- counts the number of bytes occupied by valid entries
 *	in the first log of the chain
 *
 * Walks consecutive entries from the start of the data area and stops at
 * the first invalid one (or at the capacity limit).
 */
size_t
ulog_base_nbytes(struct ulog *ulog)
{
	size_t used = 0;

	while (used < ulog->capacity) {
		struct ulog_entry_base *entry =
			(struct ulog_entry_base *)(ulog->data + used);
		if (!ulog_entry_valid(entry))
			break;
		used += ulog_entry_size(entry);
	}

	return used;
}
/*
 * ulog_recovery_needed -- checks if the log needs recovery
 *
 * An empty log needs no recovery; with 'verify_checksum' set, a failed
 * checksum also means the log was never fully persisted and must be
 * ignored. Returns 1 when recovery is required, 0 otherwise.
 */
int
ulog_recovery_needed(struct ulog *ulog, int verify_checksum)
{
	size_t nbytes = MIN(ulog_base_nbytes(ulog), ulog->capacity);

	return nbytes != 0 &&
		(!verify_checksum || ulog_checksum(ulog, nbytes, 0));
}
/*
 * ulog_recover -- recovery of ulog
 *
 * The ulog_recover shall be preceded by ulog_check call.
 *
 * If the log is non-empty and its checksum verifies, its entries are
 * re-applied and the log is clobbered afterwards.
 */
void
ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops)
{
	LOG(15, "ulog %p", ulog);
	if (ulog_recovery_needed(ulog, 1)) {
		ulog_process(ulog, check, p_ops);
		ulog_clobber(ulog, NULL, p_ops);
	}
}
/*
 * ulog_check_entry --
 *	(internal) checks consistency of a single ulog entry
 *
 * ulog_foreach_entry callback: validates the entry's destination offset
 * through the caller-supplied 'check' function. Returns 0 when the entry
 * is consistent, -1 otherwise (which stops the iteration).
 */
static int
ulog_check_entry(struct ulog_entry_base *e,
	void *arg, const struct pmem_ops *p_ops)
{
	uint64_t offset = ulog_entry_offset(e);
	ulog_check_offset_fn check = arg;
	if (!check(p_ops->base, offset)) {
		LOG(15, "ulog %p invalid offset %" PRIu64,
				e, e->offset);
		return -1;
	}
	/* offset 0 marks the end of the log -- treat it as a failure too */
	return offset == 0 ? -1 : 0;
}
/*
 * ulog_check -- (internal) check consistency of ulog entries
 *
 * Returns 0 when every entry passes ulog_check_entry, nonzero otherwise.
 */
int
ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops)
{
	LOG(15, "ulog %p", ulog);
	return ulog_foreach_entry(ulog,
			ulog_check_entry, check, p_ops);
}
| 19,076 | 24.538153 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/sync.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sync.h -- internal to obj synchronization API
*/
#ifndef LIBPMEMOBJ_SYNC_H
#define LIBPMEMOBJ_SYNC_H 1
#include <errno.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "out.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * internal definitions of PMEM-locks
 *
 * Each lock is padded out to a full cacheline (_POBJ_CL_SIZE) so that
 * adjacent locks never share a line. The runid field records the pool
 * incarnation in which the volatile OS lock object was last initialized;
 * it is compared against pop->run_id to trigger lazy re-initialization.
 * The bsd_u variant is used on FreeBSD, where initialized locks are
 * additionally linked on a per-pool list (see RECORD_LOCK in sync.c).
 */
typedef union padded_pmemmutex {
	char padding[_POBJ_CL_SIZE];
	struct {
		uint64_t runid;
		union {
			os_mutex_t mutex;
			struct {
				void *bsd_mutex_p;
				union padded_pmemmutex *next;
			} bsd_u;
		} mutex_u;
	} pmemmutex;
} PMEMmutex_internal;
/* shorthand accessors for the deeply nested union members */
#define PMEMmutex_lock pmemmutex.mutex_u.mutex
#define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p
#define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next
typedef union padded_pmemrwlock {
	char padding[_POBJ_CL_SIZE];
	struct {
		uint64_t runid;
		union {
			os_rwlock_t rwlock;
			struct {
				void *bsd_rwlock_p;
				union padded_pmemrwlock *next;
			} bsd_u;
		} rwlock_u;
	} pmemrwlock;
} PMEMrwlock_internal;
#define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock
#define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p
#define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next
typedef union padded_pmemcond {
	char padding[_POBJ_CL_SIZE];
	struct {
		uint64_t runid;
		union {
			os_cond_t cond;
			struct {
				void *bsd_cond_p;
				union padded_pmemcond *next;
			} bsd_u;
		} cond_u;
	} pmemcond;
} PMEMcond_internal;
#define PMEMcond_cond pmemcond.cond_u.cond
#define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p
#define PMEMcond_next pmemcond.cond_u.bsd_u.next
/*
 * pmemobj_mutex_lock_nofail -- lock a pmem resident mutex, aborting the
 * program (via FATAL) if the lock operation fails. From the caller's
 * perspective this function can never fail.
 */
static inline void
pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	int err = pmemobj_mutex_lock(pop, mutexp);
	if (err == 0)
		return;
	errno = err;
	FATAL("!pmemobj_mutex_lock");
}
/*
 * pmemobj_mutex_unlock_nofail -- unlock a pmem resident mutex, aborting
 * the program (via FATAL) if the unlock operation fails. From the
 * caller's perspective this function can never fail.
 */
static inline void
pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	int err = pmemobj_mutex_unlock(pop, mutexp);
	if (err == 0)
		return;
	errno = err;
	FATAL("!pmemobj_mutex_unlock");
}
/* debug helper -- returns 0 iff the mutex is currently held (see sync.c) */
int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp);
#ifdef __cplusplus
}
#endif
#endif
| 4,019 | 27.309859 | 79 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/sync.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sync.c -- persistent memory resident synchronization primitives
*/
#include <inttypes.h>
#include "obj.h"
#include "out.h"
#include "util.h"
#include "sync.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __FreeBSD__
/*
 * RECORD_LOCK -- (internal) on FreeBSD, record a freshly initialized lock
 * on the pool's per-type list of runtime locks, using a lock-free CAS
 * push on the list head. Only the thread that actually performed the
 * initialization (init != 0) records the lock. On other platforms no
 * such bookkeeping is needed, so the macro expands to nothing.
 */
#define RECORD_LOCK(init, type, p) \
	if (init) {\
		PMEM##type##_internal *head = pop->type##_head;\
		while (!util_bool_compare_and_swap64(&pop->type##_head, head,\
				p)) {\
			head = pop->type##_head;\
		}\
		p->PMEM##type##_next = head;\
	}
#else
#define RECORD_LOCK(init, type, p)
#endif
/*
 * _get_value -- (internal) atomically initialize and return a value.
 * Returns -1 on error, 0 if the caller is not the value
 * initializer, 1 if the caller is the value initializer.
 *
 * The runid word acts as a tri-state latch: pop_runid means "initialized
 * for this incarnation of the pool", pop_runid - 1 means "initialization
 * in progress", and any other value means "stale/uninitialized". The CAS
 * to pop_runid - 1 elects exactly one initializing thread; all other
 * threads busy-wait until the latch reaches pop_runid.
 */
static int
_get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg,
	int (*init_value)(void *value, void *arg))
{
	uint64_t tmp_runid;
	int initializer = 0;
	while ((tmp_runid = *runid) != pop_runid) {
		/* another thread is mid-initialization -- spin until done */
		if (tmp_runid == pop_runid - 1)
			continue;
		/* try to become the (single) initializer */
		if (!util_bool_compare_and_swap64(runid, tmp_runid,
				pop_runid - 1))
			continue;
		initializer = 1;
		if (init_value(value, arg)) {
			ERR("error initializing lock");
			/* reset the latch so a later call may retry */
			util_fetch_and_and64(runid, 0);
			return -1;
		}
		/* publish: value is now valid for this run of the pool */
		if (util_bool_compare_and_swap64(runid, pop_runid - 1,
				pop_runid) == 0) {
			ERR("error setting lock runid");
			return -1;
		}
	}
	return initializer;
}
/*
 * get_mutex -- (internal) atomically initialize, record and return a mutex
 *
 * Fast path returns immediately when the mutex was already initialized
 * during this pool incarnation; otherwise _get_value elects one thread
 * to run os_mutex_init and everyone waits for the result.
 */
static inline os_mutex_t *
get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp)
{
	if (likely(imp->pmemmutex.runid == pop->run_id))
		return &imp->PMEMmutex_lock;
	volatile uint64_t *runid = &imp->pmemmutex.runid;
	LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64,
		imp, pop->run_id, *runid);
	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
	COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t));
	/* the lock lives on pmem but its state is runtime-only */
	VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE);
	int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock,
		NULL, (void *)os_mutex_init);
	if (initializer == -1) {
		return NULL;
	}
	/* FreeBSD only: remember the lock for cleanup at pool close */
	RECORD_LOCK(initializer, mutex, imp);
	return &imp->PMEMmutex_lock;
}
/*
 * get_rwlock -- (internal) atomically initialize, record and return a rwlock
 *
 * Same lazy-initialization scheme as get_mutex, applied to os_rwlock_t.
 */
static inline os_rwlock_t *
get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp)
{
	/* fast path: already initialized during this pool incarnation */
	if (likely(irp->pmemrwlock.runid == pop->run_id))
		return &irp->PMEMrwlock_lock;
	volatile uint64_t *runid = &irp->pmemrwlock.runid;
	LOG(5, "PMEMrwlock %p pop->run_id %"\
		PRIu64 " pmemrwlock.runid %" PRIu64,
		irp, pop->run_id, *runid);
	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
	COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMrwlock)
		!= util_alignof(os_rwlock_t));
	/* the lock lives on pmem but its state is runtime-only */
	VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE);
	int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock,
		NULL, (void *)os_rwlock_init);
	if (initializer == -1) {
		return NULL;
	}
	/* FreeBSD only: remember the lock for cleanup at pool close */
	RECORD_LOCK(initializer, rwlock, irp);
	return &irp->PMEMrwlock_lock;
}
/*
 * get_cond -- (internal) atomically initialize, record and return a
 * condition variable
 *
 * Same lazy-initialization scheme as get_mutex, applied to os_cond_t.
 */
static inline os_cond_t *
get_cond(PMEMobjpool *pop, PMEMcond_internal *icp)
{
	/* fast path: already initialized during this pool incarnation */
	if (likely(icp->pmemcond.runid == pop->run_id))
		return &icp->PMEMcond_cond;
	volatile uint64_t *runid = &icp->pmemcond.runid;
	LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64,
		icp, pop->run_id, *runid);
	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
	COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));
	/* the lock lives on pmem but its state is runtime-only */
	VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE);
	int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond,
		NULL, (void *)os_cond_init);
	if (initializer == -1) {
		return NULL;
	}
	/* FreeBSD only: remember the lock for cleanup at pool close */
	RECORD_LOCK(initializer, cond, icp);
	return &icp->PMEMcond_cond;
}
/*
 * pmemobj_mutex_zero -- reset a pmem resident mutex to its initial state
 *
 * Persistently stores runid = 0, which forces lazy re-initialization of
 * the underlying OS mutex on the next lock operation.
 *
 * This function is not MT safe.
 */
void
pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	uint64_t *runid = &((PMEMmutex_internal *)mutexp)->pmemmutex.runid;
	*runid = 0;
	pmemops_persist(&pop->p_ops, runid, sizeof(*runid));
}
/*
 * pmemobj_mutex_lock -- lock a pmem resident mutex
 *
 * Lazily initializes the mutex if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the mutex cannot be initialized.
 */
int
pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	os_mutex_t *lock = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_mutex_t), 0);
	return os_mutex_lock(lock);
}
/*
 * pmemobj_mutex_assert_locked -- checks whether mutex is locked.
 *
 * Returns 0 when mutex is locked.
 * Probes the mutex with trylock: EBUSY means some thread holds it
 * (success); acquiring it means it was NOT locked (reported as ENODEV).
 */
int
pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if (mutex == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
	int ret = os_mutex_trylock(mutex);
	if (ret == EBUSY)
		return 0;
	if (ret == 0) {
		/* we acquired it, so it was not locked -- undo the probe */
		util_mutex_unlock(mutex);
		/*
		 * There's no good error code for this case. EINVAL is used for
		 * something else here.
		 */
		return ENODEV;
	}
	return ret;
}
/*
 * pmemobj_mutex_timedlock -- lock a pmem resident mutex with a timeout
 *
 * Lazily initializes the mutex if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the mutex cannot be initialized.
 */
int
pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	os_mutex_t *lock = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_mutex_t), 0);
	return os_mutex_timedlock(lock, abs_timeout);
}
/*
 * pmemobj_mutex_trylock -- trylock a pmem resident mutex
 *
 * Lazily initializes the mutex if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the mutex cannot be initialized.
 */
int
pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	os_mutex_t *lock = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_mutex_t), 0);
	return os_mutex_trylock(lock);
}
/*
 * pmemobj_mutex_unlock -- unlock a pmem resident mutex
 *
 * Returns EINVAL when the mutex cannot be initialized.
 */
int
pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	/* XXX potential performance improvement - move GET to debug version */
	os_mutex_t *lock = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_mutex_t), 0);
	return os_mutex_unlock(lock);
}
/*
 * pmemobj_rwlock_zero -- reset a pmem resident rwlock to its initial state
 *
 * Persistently stores runid = 0, which forces lazy re-initialization of
 * the underlying OS rwlock on the next lock operation.
 *
 * This function is not MT safe.
 */
void
pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	uint64_t *runid = &((PMEMrwlock_internal *)rwlockp)->pmemrwlock.runid;
	*runid = 0;
	pmemops_persist(&pop->p_ops, runid, sizeof(*runid));
}
/*
 * pmemobj_rwlock_rdlock -- acquire a pmem resident rwlock for reading
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_rdlock(lock);
}
/*
 * pmemobj_rwlock_wrlock -- acquire a pmem resident rwlock for writing
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_wrlock(lock);
}
/*
 * pmemobj_rwlock_timedrdlock -- acquire a pmem resident rwlock for
 * reading, giving up once the absolute timeout expires
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
		abs_timeout->tv_sec, abs_timeout->tv_nsec);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_timedrdlock(lock, abs_timeout);
}
/*
 * pmemobj_rwlock_timedwrlock -- acquire a pmem resident rwlock for
 * writing, giving up once the absolute timeout expires
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
		abs_timeout->tv_sec, abs_timeout->tv_nsec);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_timedwrlock(lock, abs_timeout);
}
/*
 * pmemobj_rwlock_tryrdlock -- try to acquire a pmem resident rwlock for
 * reading without blocking
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_tryrdlock(lock);
}
/*
 * pmemobj_rwlock_trywrlock -- try to acquire a pmem resident rwlock for
 * writing without blocking
 *
 * Lazily initializes the rwlock if needed, then behaves like its POSIX
 * counterpart. Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_trywrlock(lock);
}
/*
 * pmemobj_rwlock_unlock -- unlock a pmem resident rwlock
 *
 * Returns EINVAL when the rwlock cannot be initialized.
 */
int
pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
	/* XXX potential performance improvement - move GET to debug version */
	os_rwlock_t *lock = get_rwlock(pop, (PMEMrwlock_internal *)rwlockp);
	if (lock == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)lock % util_alignof(os_rwlock_t), 0);
	return os_rwlock_unlock(lock);
}
/*
 * pmemobj_cond_zero -- reset a pmem resident condition variable to its
 * initial state
 *
 * Persistently stores runid = 0, which forces lazy re-initialization of
 * the underlying OS condition variable on next use.
 *
 * This function is not MT safe.
 */
void
pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
	uint64_t *runid = &((PMEMcond_internal *)condp)->pmemcond.runid;
	*runid = 0;
	pmemops_persist(&pop->p_ops, runid, sizeof(*runid));
}
/*
 * pmemobj_cond_broadcast -- wake all waiters on a pmem resident
 * condition variable
 *
 * Lazily initializes the condition variable if needed, then behaves like
 * its POSIX counterpart. Returns EINVAL on initialization failure.
 */
int
pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
	os_cond_t *cv = get_cond(pop, (PMEMcond_internal *)condp);
	if (cv == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)cv % util_alignof(os_cond_t), 0);
	return os_cond_broadcast(cv);
}
/*
 * pmemobj_cond_signal -- wake one waiter on a pmem resident condition
 * variable
 *
 * Lazily initializes the condition variable if needed, then behaves like
 * its POSIX counterpart. Returns EINVAL on initialization failure.
 */
int
pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
	os_cond_t *cv = get_cond(pop, (PMEMcond_internal *)condp);
	if (cv == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)cv % util_alignof(os_cond_t), 0);
	return os_cond_signal(cv);
}
/*
 * pmemobj_cond_timedwait -- wait on a pmem resident condition variable,
 * giving up once the absolute timeout expires
 *
 * Lazily initializes both the condition variable and the mutex if
 * needed, then behaves like its POSIX counterpart. Returns EINVAL when
 * either object cannot be initialized.
 */
int
pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
	PMEMmutex *__restrict mutexp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp,
		mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
	os_cond_t *cv = get_cond(pop, (PMEMcond_internal *)condp);
	os_mutex_t *mtx = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (cv == NULL || mtx == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)mtx % util_alignof(os_mutex_t), 0);
	ASSERTeq((uintptr_t)cv % util_alignof(os_cond_t), 0);
	return os_cond_timedwait(cv, mtx, abs_timeout);
}
/*
 * pmemobj_cond_wait -- wait on a pmem resident condition variable
 *
 * Lazily initializes both the condition variable and the mutex if
 * needed, then behaves like its POSIX counterpart. Returns EINVAL when
 * either object cannot be initialized.
 */
int
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
	PMEMmutex *__restrict mutexp)
{
	LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp);
	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
	os_cond_t *cv = get_cond(pop, (PMEMcond_internal *)condp);
	os_mutex_t *mtx = get_mutex(pop, (PMEMmutex_internal *)mutexp);
	if (cv == NULL || mtx == NULL)
		return EINVAL;
	ASSERTeq((uintptr_t)mtx % util_alignof(os_mutex_t), 0);
	ASSERTeq((uintptr_t)cv % util_alignof(os_cond_t), 0);
	return os_cond_wait(cv, mtx);
}
/*
 * pmemobj_volatile -- atomically initialize, record and return a
 * generic value
 *
 * Like get_mutex/get_rwlock/get_cond, but for arbitrary user-supplied
 * runtime state: the caller's constructor runs exactly once per pool
 * incarnation, guarded by the runid latch in _get_value.
 */
void *
pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
	void *ptr, size_t size,
	int (*constr)(void *ptr, void *arg), void *arg)
{
	LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr,
		constr, arg);
	/* fast path: already constructed during this pool incarnation */
	if (likely(vlt->runid == pop->run_id))
		return ptr;
	/* ptr lives on pmem but holds runtime-only state */
	VALGRIND_REMOVE_PMEM_MAPPING(ptr, size);
	/* bracket the runid update so pmemcheck treats it as transactional */
	VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt));
	if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) {
		VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
		return NULL;
	}
	VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
	VALGRIND_SET_CLEAN(vlt, sizeof(*vlt));
	return ptr;
}
| 18,016 | 25.811012 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/lane.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* lane.h -- internal definitions for lanes
*/
#ifndef LIBPMEMOBJ_LANE_H
#define LIBPMEMOBJ_LANE_H 1
#include <stdint.h>
#include "ulog.h"
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Distance between lanes used by threads required to prevent threads from
* false sharing part of lanes array. Used if properly spread lanes are
* available. Otherwise less spread out lanes would be used.
*/
#define LANE_JUMP (64 / sizeof(uint64_t))
/*
 * Number of times the algorithm will try to reacquire the primary lane for the
 * thread. If this threshold is exceeded, a new primary lane is selected for the
 * thread.
 */
#define LANE_PRIMARY_ATTEMPTS 128
#define RLANE_DEFAULT 0
#define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */
/*
 * We have 3 kilobytes to distribute.
 * The smallest capacity is needed for the internal redo log for which we can
 * accurately calculate the maximum number of occupied space: 48 bytes,
 * 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap
 * AND, third for modification of the destination pointer. For future needs,
 * this has been bumped up to 12 ulog entries.
 *
 * The remaining part has to be split between transactional redo and undo logs,
 * and since by far the most space consuming operations are transactional
 * snapshots, most of the space, 2 kilobytes, is assigned to the undo log.
 * After that, the remainder, 640 bytes, or 40 ulog entries, is left for the
 * transactional redo logs.
 * Thanks to this distribution, all small and medium transactions should be
 * entirely performed without allocating any additional metadata.
 */
#define LANE_UNDO_SIZE 2048
#define LANE_REDO_EXTERNAL_SIZE 640
#define LANE_REDO_INTERNAL_SIZE 192
/* persistent on-media layout of a single lane */
struct lane_layout {
	/*
	 * Redo log for self-contained and 'one-shot' allocator operations.
	 * Cannot be extended.
	 */
	struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
	/*
	 * Redo log for large operations/transactions.
	 * Can be extended by the use of internal ulog.
	 */
	struct ULOG(LANE_REDO_EXTERNAL_SIZE) external;
	/*
	 * Undo log for snapshots done in a transaction.
	 * Can be extended/shrunk by the use of internal ulog.
	 */
	struct ULOG(LANE_UNDO_SIZE) undo;
};
/* runtime state of a lane: the layout plus one context per ulog */
struct lane {
	struct lane_layout *layout; /* pointer to persistent layout */
	struct operation_context *internal; /* context for internal ulog */
	struct operation_context *external; /* context for external ulog */
	struct operation_context *undo; /* context for undo ulog */
};
struct lane_descriptor {
	/*
	 * Number of lanes available at runtime must be <= total number of lanes
	 * available in the pool. Number of lanes can be limited by shortage of
	 * other resources e.g. available RNIC's submission queue sizes.
	 */
	unsigned runtime_nlanes;
	unsigned next_lane_idx;
	uint64_t *lane_locks;
	struct lane *lane;
};
typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length);
typedef void *(*section_constr)(PMEMobjpool *pop, void *data);
typedef void (*section_destr)(PMEMobjpool *pop, void *rt);
typedef int (*section_global_op)(PMEMobjpool *pop);
/* per-section lifecycle callbacks */
struct section_operations {
	section_constr construct_rt;
	section_destr destroy_rt;
	section_layout_op check;
	section_layout_op recover;
	section_global_op boot;
	section_global_op cleanup;
};
/* per-thread cache entry describing which lane the thread holds */
struct lane_info {
	uint64_t pop_uuid_lo;
	uint64_t lane_idx;
	unsigned long nest_count;
	/*
	 * The index of the primary lane for the thread. A thread will always
	 * try to acquire the primary lane first, and only if that fails it will
	 * look for a different available lane.
	 */
	uint64_t primary;
	int primary_attempts;
	struct lane_info *prev, *next;
};
/* process-wide bootstrap/teardown of the per-thread lane info cache */
void lane_info_boot(void);
void lane_info_destroy(void);
/* pool-scoped lane lifecycle */
void lane_init_data(PMEMobjpool *pop);
int lane_boot(PMEMobjpool *pop);
void lane_cleanup(PMEMobjpool *pop);
int lane_recover_and_section_boot(PMEMobjpool *pop);
int lane_section_cleanup(PMEMobjpool *pop);
int lane_check(PMEMobjpool *pop);
/* acquire/release a lane for the calling thread */
unsigned lane_hold(PMEMobjpool *pop, struct lane **lane);
void lane_release(PMEMobjpool *pop);
void lane_attach(PMEMobjpool *pop, unsigned lane);
unsigned lane_detach(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 5,804 | 32.554913 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/bucket.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bucket.h -- internal definitions for bucket
*/
#ifndef LIBPMEMOBJ_BUCKET_H
#define LIBPMEMOBJ_BUCKET_H 1
#include <stddef.h>
#include <stdint.h>
#include "container.h"
#include "memblock.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * CALC_SIZE_IDX -- number of allocation units of _unit_size bytes needed
 * to satisfy a request of _size bytes (0 bytes maps to 0 units)
 */
#define CALC_SIZE_IDX(_unit_size, _size)\
((_size) == 0 ? 0 : (uint32_t)((((_size) - 1) / (_unit_size)) + 1))
/* a bucket of free memory blocks belonging to one allocation class */
struct bucket {
	os_mutex_t lock; /* serializes access to the bucket state below */
	struct alloc_class *aclass; /* allocation class served by this bucket */
	struct block_container *container; /* storage of free memory blocks */
	struct block_container_ops *c_ops; /* operations on the container */
	struct memory_block_reserved *active_memory_block;
	int is_active;
};
/* constructor/destructor and basic operations -- defined in bucket.c */
struct bucket *bucket_new(struct block_container *c,
	struct alloc_class *aclass);
int *bucket_current_resvp(struct bucket *b);
int bucket_insert_block(struct bucket *b, const struct memory_block *m);
void bucket_delete(struct bucket *b);
#ifdef __cplusplus
}
#endif
#endif
| 2,466 | 29.8375 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ulog.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ulog.h -- unified log public interface
*/
#ifndef LIBPMEMOBJ_ULOG_H
#define LIBPMEMOBJ_ULOG_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
/* common header of every ulog entry */
struct ulog_entry_base {
	uint64_t offset; /* offset with operation type flag */
};
/*
 * ulog_entry_val -- log entry
 */
struct ulog_entry_val {
	struct ulog_entry_base base;
	uint64_t value; /* value to be applied */
};
/*
 * ulog_entry_buf - ulog buffer entry
 */
struct ulog_entry_buf {
	struct ulog_entry_base base; /* offset with operation type flag */
	uint64_t checksum; /* checksum of the entire log entry */
	uint64_t size; /* size of the buffer to be modified */
	uint8_t data[]; /* content to fill in */
};
/*
 * This structure *must* be located at a cacheline boundary. To achieve this,
 * the next field is always allocated with extra padding, and then the offset
 * is additionally aligned.
 */
#define ULOG(capacity_bytes) {\
	/* 64 bytes of metadata */\
	uint64_t checksum; /* checksum of ulog header and its entries */\
	uint64_t next; /* offset of ulog extension */\
	uint64_t capacity; /* capacity of this ulog in bytes */\
	uint64_t unused[5]; /* must be 0 */\
	uint8_t data[capacity_bytes]; /* N bytes of data */\
}\
#define SIZEOF_ULOG(base_capacity)\
(sizeof(struct ulog) + base_capacity)
/* use this for allocations of aligned ulog extensions */
#define SIZEOF_ALIGNED_ULOG(base_capacity)\
(SIZEOF_ULOG(base_capacity) + CACHELINE_SIZE)
struct ulog ULOG(0);
VEC(ulog_next, uint64_t);
/* operation type is encoded in the topmost bits of the entry offset */
typedef uint64_t ulog_operation_type;
#define ULOG_OPERATION_SET (0b000ULL << 61ULL)
#define ULOG_OPERATION_AND (0b001ULL << 61ULL)
#define ULOG_OPERATION_OR (0b010ULL << 61ULL)
#define ULOG_OPERATION_BUF_SET (0b101ULL << 61ULL)
#define ULOG_OPERATION_BUF_CPY (0b110ULL << 61ULL)
#define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR)
/* callback types used by the walk/extend/free operations below */
typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset);
typedef int (*ulog_extend_fn)(void *, uint64_t *);
typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops);
typedef void (*ulog_free_fn)(void *base, uint64_t *next);
/* navigation and construction of the ulog chain */
struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops);
void ulog_construct(uint64_t offset, size_t capacity, int flush,
	const struct pmem_ops *p_ops);
size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
	const struct pmem_ops *p_ops);
void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
	const struct pmem_ops *p_ops);
int ulog_foreach_entry(struct ulog *ulog,
	ulog_entry_cb cb, void *arg, const struct pmem_ops *ops);
/* capacity management and persistence of the log as a whole */
int ulog_reserve(struct ulog *ulog,
	size_t ulog_base_nbytes, size_t *new_capacity_bytes,
	ulog_extend_fn extend, struct ulog_next *next,
	const struct pmem_ops *p_ops);
void ulog_store(struct ulog *dest,
	struct ulog *src, size_t nbytes, size_t ulog_base_nbytes,
	struct ulog_next *next, const struct pmem_ops *p_ops);
void ulog_clobber(struct ulog *dest, struct ulog_next *next,
	const struct pmem_ops *p_ops);
void ulog_clobber_data(struct ulog *dest,
	size_t nbytes, size_t ulog_base_nbytes,
	struct ulog_next *next, ulog_free_fn ulog_free,
	const struct pmem_ops *p_ops);
void ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);
size_t ulog_base_nbytes(struct ulog *ulog);
int ulog_recovery_needed(struct ulog *ulog, int verify_checksum);
/* per-entry accessors and constructors */
uint64_t ulog_entry_offset(const struct ulog_entry_base *entry);
ulog_operation_type ulog_entry_type(
	const struct ulog_entry_base *entry);
struct ulog_entry_val *ulog_entry_val_create(struct ulog *ulog,
	size_t offset, uint64_t *dest, uint64_t value,
	ulog_operation_type type,
	const struct pmem_ops *p_ops);
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
	uint64_t *dest, const void *src, uint64_t size,
	ulog_operation_type type, const struct pmem_ops *p_ops);
void ulog_entry_apply(const struct ulog_entry_base *e, int persist,
	const struct pmem_ops *p_ops);
size_t ulog_entry_size(const struct ulog_entry_base *entry);
/* recovery and consistency checking */
void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);
int ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);
#endif
| 5,859 | 33.674556 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/libpmemobj.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemobj.c -- pmem entry points for libpmemobj
*/
#include "pmemcommon.h"
#include "obj.h"
/*
 * libpmemobj_init -- load-time initialization for obj
 *
 * Called automatically by the run-time loader.
 */
ATTR_CONSTRUCTOR
void
libpmemobj_init(void)
{
/* common_init sets up logging first; LOG is only usable afterwards */
common_init(PMEMOBJ_LOG_PREFIX, PMEMOBJ_LOG_LEVEL_VAR,
PMEMOBJ_LOG_FILE_VAR, PMEMOBJ_MAJOR_VERSION,
PMEMOBJ_MINOR_VERSION);
LOG(3, NULL);
obj_init();
}
/*
 * libpmemobj_fini -- libpmemobj cleanup routine
 *
 * Called automatically when the process terminates.
 * Teardown is the mirror of init: obj state first, common state last.
 */
ATTR_DESTRUCTOR
void
libpmemobj_fini(void)
{
LOG(3, NULL);
obj_fini();
common_fini();
}
/*
 * pmemobj_check_versionU -- see if lib meets application version requirements
 *
 * Returns NULL when the library is compatible; otherwise sets the last error
 * message (via ERR) and returns it. Major versions must match exactly, while
 * the library's minor version only has to be >= the required one.
 */
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_check_versionU(unsigned major_required, unsigned minor_required)
{
	LOG(3, "major_required %u minor_required %u",
	    major_required, minor_required);

	const char *errstr = NULL;

	if (major_required != PMEMOBJ_MAJOR_VERSION) {
		ERR("libpmemobj major version mismatch (need %u, found %u)",
			major_required, PMEMOBJ_MAJOR_VERSION);
		errstr = out_get_errormsg();
	} else if (minor_required > PMEMOBJ_MINOR_VERSION) {
		ERR("libpmemobj minor version mismatch (need %u, found %u)",
			minor_required, PMEMOBJ_MINOR_VERSION);
		errstr = out_get_errormsg();
	}

	return errstr;
}
#ifndef _WIN32
/*
 * pmemobj_check_version -- see if lib meets application version requirements
 *
 * POSIX entry point; thin alias for the UTF-8 variant.
 */
const char *
pmemobj_check_version(unsigned major_required, unsigned minor_required)
{
return pmemobj_check_versionU(major_required, minor_required);
}
#else
/*
 * pmemobj_check_versionW -- see if lib meets application version requirements
 *
 * Windows wide-char entry point; converts the error message to wchar_t.
 */
const wchar_t *
pmemobj_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemobj_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
 * pmemobj_set_funcs -- allow overriding libpmemobj's call to malloc, etc.
 *
 * NULL entries leave the corresponding default allocator in place
 * (presumably -- behavior is defined by util_set_alloc_funcs; verify there).
 */
void
pmemobj_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
 * pmemobj_errormsgU -- return last error message
 */
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
 * pmemobj_errormsg -- return last error message
 */
const char *
pmemobj_errormsg(void)
{
return pmemobj_errormsgU();
}
#else
/*
 * pmemobj_errormsgW -- return last error message as wchar_t
 */
const wchar_t *
pmemobj_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,294 | 24.873494 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/cuckoo.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cuckoo.c -- implementation of cuckoo hash table
*/
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include "cuckoo.h"
#include "out.h"
/* number of alternative slots each key can occupy (one per hash function) */
#define MAX_HASH_FUNCS 2
/* table growth multiplier applied on each rehash attempt */
#define GROWTH_FACTOR 1.2f
/* initial number of slots in a freshly created table */
#define INITIAL_SIZE 8
/* max eviction hops before an insert gives up and requests a grow */
#define MAX_INSERTS 8
/* max consecutive grow/rehash attempts before insert fails for good */
#define MAX_GROWS 32
struct cuckoo_slot {
uint64_t key;
void *value;
};
struct cuckoo {
size_t size; /* number of hash table slots */
struct cuckoo_slot *tab;
};
/* sentinel written into a slot on removal; value == NULL marks it empty */
static const struct cuckoo_slot null_slot = {0, NULL};
/*
 * hash_mod -- (internal) first hash function
 *
 * Plain modulo of the key by the current table size.
 */
static size_t
hash_mod(struct cuckoo *c, uint64_t key)
{
	size_t nslots = c->size;

	return (size_t)(key % nslots);
}
/*
 * hash_mixer -- (internal) second hash function
 *
 * Based on Austin Appleby MurmurHash3 64-bit finalizer: xor-shift/multiply
 * avalanche of the key, reduced modulo the current table size.
 */
static size_t
hash_mixer(struct cuckoo *c, uint64_t key)
{
	uint64_t k = key;

	k ^= k >> 33;
	k *= 0xff51afd7ed558ccd;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53;
	k ^= k >> 33;

	return (size_t)(k % c->size);
}
/* the two candidate hash functions; index order matters for eviction (h[0]) */
static size_t
(*hash_funcs[MAX_HASH_FUNCS])(struct cuckoo *c, uint64_t key) = {
hash_mod,
hash_mixer
};
/*
 * cuckoo_new -- allocates and initializes cuckoo hash table
 *
 * Returns NULL on allocation failure. The compile-time check guarantees
 * that growing the initial size actually produces a larger table.
 */
struct cuckoo *
cuckoo_new(void)
{
COMPILE_ERROR_ON((size_t)(INITIAL_SIZE * GROWTH_FACTOR)
== INITIAL_SIZE);
struct cuckoo *c = Malloc(sizeof(struct cuckoo));
if (c == NULL) {
ERR("!Malloc");
goto error_cuckoo_malloc;
}
c->size = INITIAL_SIZE;
size_t tab_rawsize = c->size * sizeof(struct cuckoo_slot);
/* Zalloc zero-fills, so every slot starts empty (value == NULL) */
c->tab = Zalloc(tab_rawsize);
if (c->tab == NULL)
goto error_tab_malloc;
return c;
error_tab_malloc:
Free(c);
error_cuckoo_malloc:
return NULL;
}
/*
 * cuckoo_delete -- cleanups and deallocates cuckoo hash table
 *
 * Frees only the table and handle; stored values are owned by the caller.
 */
void
cuckoo_delete(struct cuckoo *c)
{
ASSERTne(c, NULL);
Free(c->tab);
Free(c);
}
/*
 * cuckoo_insert_try -- (internal) try inserting into the existing hash table
 *
 * Classic cuckoo insertion: try each candidate slot; if both are taken,
 * evict the occupant of the first slot and re-insert it, repeating up to
 * MAX_INSERTS hops. Returns 0 on success, EINVAL on duplicate key, and
 * EAGAIN when the table needs to grow. On EAGAIN *src holds the last
 * evicted element, which is still pending insertion.
 */
static int
cuckoo_insert_try(struct cuckoo *c, struct cuckoo_slot *src)
{
struct cuckoo_slot srct;
size_t h[MAX_HASH_FUNCS] = {0};
for (int n = 0; n < MAX_INSERTS; ++n) {
for (int i = 0; i < MAX_HASH_FUNCS; ++i) {
h[i] = hash_funcs[i](c, src->key);
if (c->tab[h[i]].value == NULL) {
c->tab[h[i]] = *src;
return 0;
} else if (c->tab[h[i]].key == src->key) {
return EINVAL;
}
}
/* both slots occupied -- evict h[0]'s occupant and retry with it */
srct = c->tab[h[0]];
c->tab[h[0]] = *src;
src->key = srct.key;
src->value = srct.value;
}
return EAGAIN;
}
/*
 * cuckoo_grow -- (internal) rehashes the table with GROWTH_FACTOR * size
 *
 * Retries with progressively larger tables (up to MAX_GROWS) until every
 * old element re-inserts cleanly. On any failure the original table and
 * size are restored, so the structure stays valid.
 */
static int
cuckoo_grow(struct cuckoo *c)
{
size_t oldsize = c->size;
struct cuckoo_slot *oldtab = c->tab;
int n;
for (n = 0; n < MAX_GROWS; ++n) {
size_t nsize = (size_t)((float)c->size * GROWTH_FACTOR);
size_t tab_rawsize = nsize * sizeof(struct cuckoo_slot);
c->tab = Zalloc(tab_rawsize);
if (c->tab == NULL) {
c->tab = oldtab;
return ENOMEM;
}
c->size = nsize;
unsigned i;
for (i = 0; i < oldsize; ++i) {
struct cuckoo_slot s = oldtab[i];
if (s.value != NULL && (cuckoo_insert_try(c, &s) != 0))
break;
}
/* i == oldsize means every element was migrated successfully */
if (i == oldsize)
break;
else
Free(c->tab);
}
if (n == MAX_GROWS) {
c->tab = oldtab;
c->size = oldsize;
return EINVAL;
}
Free(oldtab);
return 0;
}
/*
 * cuckoo_insert -- inserts key-value pair into the hash table
 *
 * Returns 0 on success, EINVAL on duplicate key or when growing is
 * exhausted, ENOMEM on allocation failure.
 */
int
cuckoo_insert(struct cuckoo *c, uint64_t key, void *value)
{
ASSERTne(c, NULL);
int err;
struct cuckoo_slot src = {key, value};
for (int n = 0; n < MAX_GROWS; ++n) {
if ((err = cuckoo_insert_try(c, &src)) != EAGAIN)
return err;
if ((err = cuckoo_grow(c)) != 0)
return err;
}
return EINVAL;
}
/*
* cuckoo_find_slot -- (internal) finds the hash table slot of key
*/
static struct cuckoo_slot *
cuckoo_find_slot(struct cuckoo *c, uint64_t key)
{
for (int i = 0; i < MAX_HASH_FUNCS; ++i) {
size_t h = hash_funcs[i](c, key);
if (c->tab[h].key == key)
return &c->tab[h];
}
return NULL;
}
/*
 * cuckoo_remove -- removes key-value pair from the hash table
 *
 * Returns the stored value (which the caller owns), or NULL when the key
 * is absent. The slot is reset to the empty sentinel.
 */
void *
cuckoo_remove(struct cuckoo *c, uint64_t key)
{
	ASSERTne(c, NULL);

	struct cuckoo_slot *slot = cuckoo_find_slot(c, key);
	if (slot == NULL)
		return NULL;

	void *value = slot->value;
	*slot = null_slot;

	return value;
}
/*
 * cuckoo_get -- returns the value of a key
 *
 * NULL when the key is not in the table.
 */
void *
cuckoo_get(struct cuckoo *c, uint64_t key)
{
	ASSERTne(c, NULL);

	struct cuckoo_slot *slot = cuckoo_find_slot(c, key);
	if (slot == NULL)
		return NULL;

	return slot->value;
}
/*
 * cuckoo_get_size -- returns the size of the underlying table, useful for
 * calculating load factor and predicting possible rehashes
 *
 * Note: this is the slot count, not the number of stored elements.
 */
size_t
cuckoo_get_size(struct cuckoo *c)
{
ASSERTne(c, NULL);
return c->size;
}
| 6,169 | 21.114695 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ravl.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * ravl.h -- internal definitions for ravl tree
 */
#ifndef LIBPMEMOBJ_RAVL_H
#define LIBPMEMOBJ_RAVL_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* opaque tree and node handles */
struct ravl;
struct ravl_node;
/* lookup relations for ravl_find; the *_EQUAL variants are bitwise combos */
enum ravl_predicate {
RAVL_PREDICATE_EQUAL = 1 << 0,
RAVL_PREDICATE_GREATER = 1 << 1,
RAVL_PREDICATE_LESS = 1 << 2,
RAVL_PREDICATE_LESS_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS,
RAVL_PREDICATE_GREATER_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER,
};
/* element comparator; strcmp-like contract presumably -- verify in ravl.c */
typedef int ravl_compare(const void *lhs, const void *rhs);
/* per-element callback used by ravl_delete_cb */
typedef void ravl_cb(void *data, void *arg);
/* in-place constructor for emplace-style insertion */
typedef void ravl_constr(void *data, size_t data_size, const void *arg);
/* constructors; the _sized variant copies data of data_size into nodes */
struct ravl *ravl_new(ravl_compare *compare);
struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size);
void ravl_delete(struct ravl *ravl);
void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg);
int ravl_empty(struct ravl *ravl);
void ravl_clear(struct ravl *ravl);
/* insertion variants: by-pointer, constructed in place, or copied */
int ravl_insert(struct ravl *ravl, const void *data);
int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg);
int ravl_emplace_copy(struct ravl *ravl, const void *data);
/* lookup and removal */
struct ravl_node *ravl_find(struct ravl *ravl, const void *data,
enum ravl_predicate predicate_flags);
void *ravl_data(struct ravl_node *node);
void ravl_remove(struct ravl *ravl, struct ravl_node *node);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_RAVL_H */
| 3,005 | 35.216867 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/lane.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* lane.c -- lane implementation
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include "libpmemobj.h"
#include "cuckoo.h"
#include "lane.h"
#include "out.h"
#include "util.h"
#include "obj.h"
#include "os_thread.h"
#include "valgrind_internal.h"
#include "memops.h"
#include "palloc.h"
#include "tx.h"
/* TLS key shared by all threads; the per-thread data hangs off it */
static os_tls_key_t Lane_info_key;
/* per-thread hash table: pool uuid_lo -> struct lane_info */
static __thread struct cuckoo *Lane_info_ht;
/* per-thread doubly-linked list of all lane_info records (for cleanup) */
static __thread struct lane_info *Lane_info_records;
/* one-entry cache of the most recently used record */
static __thread struct lane_info *Lane_info_cache;
/*
 * lane_info_create -- (internal) constructor for thread shared data
 */
static inline void
lane_info_create(void)
{
Lane_info_ht = cuckoo_new();
if (Lane_info_ht == NULL)
FATAL("cuckoo_new");
}
/*
 * lane_info_delete -- (internal) deletes lane info hash table
 *
 * Frees every record on the per-thread list and resets all TLS pointers.
 */
static inline void
lane_info_delete(void)
{
if (unlikely(Lane_info_ht == NULL))
return;
cuckoo_delete(Lane_info_ht);
struct lane_info *record;
struct lane_info *head = Lane_info_records;
while (head != NULL) {
record = head;
head = head->next;
Free(record);
}
Lane_info_ht = NULL;
Lane_info_records = NULL;
Lane_info_cache = NULL;
}
/*
 * lane_info_ht_boot -- (internal) boot lane info and add it to thread shared
 * data
 */
static inline void
lane_info_ht_boot(void)
{
lane_info_create();
/* register the table with the TLS key so the destructor can find it */
int result = os_tls_set(Lane_info_key, Lane_info_ht);
if (result != 0) {
errno = result;
FATAL("!os_tls_set");
}
}
/*
 * lane_info_ht_destroy -- (internal) destructor for thread shared data
 *
 * Note: the 'ht' argument is unused; cleanup goes through the TLS globals.
 */
static inline void
lane_info_ht_destroy(void *ht)
{
lane_info_delete();
}
/*
 * lane_info_boot -- initialize lane info hash table and lane info key
 */
void
lane_info_boot(void)
{
int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy);
if (result != 0) {
errno = result;
FATAL("!os_tls_key_create");
}
}
/*
 * lane_info_destroy -- destroy lane info hash table
 */
void
lane_info_destroy(void)
{
lane_info_delete();
(void) os_tls_key_delete(Lane_info_key);
}
/*
 * lane_info_cleanup -- remove lane info record regarding pool being deleted
 *
 * Unlinks the record from the per-thread list and invalidates the cache.
 */
static inline void
lane_info_cleanup(PMEMobjpool *pop)
{
if (unlikely(Lane_info_ht == NULL))
return;
struct lane_info *info = cuckoo_remove(Lane_info_ht, pop->uuid_lo);
if (likely(info != NULL)) {
if (info->prev)
info->prev->next = info->next;
if (info->next)
info->next->prev = info->prev;
if (Lane_info_cache == info)
Lane_info_cache = NULL;
if (Lane_info_records == info)
Lane_info_records = info->next;
Free(info);
}
}
/*
 * lane_get_layout -- (internal) calculates the real pointer of the lane layout
 *
 * Lane layouts are a contiguous array starting at pop->lanes_offset.
 */
static struct lane_layout *
lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx)
{
return (void *)((char *)pop + pop->lanes_offset +
sizeof(struct lane_layout) * lane_idx);
}
/*
 * lane_ulog_constructor -- (internal) constructor of a ulog extension
 *
 * Initializes a freshly allocated object as a ulog, with the capacity
 * rounded down to a whole number of cachelines.
 */
static int
lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = base;
const struct pmem_ops *p_ops = &pop->p_ops;
size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
CACHELINE_SIZE);
ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity, 1, p_ops);
return 0;
}
/*
 * lane_undo_extend -- allocates a new undo log
 *
 * Size follows the per-pool transaction cache size parameter.
 */
static int
lane_undo_extend(void *base, uint64_t *redo)
{
PMEMobjpool *pop = base;
struct tx_parameters *params = pop->tx_params;
size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, NULL,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * lane_redo_extend -- allocates a new redo log
 */
static int
lane_redo_extend(void *base, uint64_t *redo)
{
size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, NULL,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * lane_init -- (internal) initializes a single lane runtime variables
 *
 * Creates the three operation contexts (internal redo, external redo, undo).
 * On failure, previously created contexts are torn down in reverse order.
 */
static int
lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout)
{
ASSERTne(lane, NULL);
lane->layout = layout;
/* internal redo log is fixed-size: no extend/free callbacks */
lane->internal = operation_new((struct ulog *)&layout->internal,
LANE_REDO_INTERNAL_SIZE,
NULL, NULL, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->internal == NULL)
goto error_internal_new;
lane->external = operation_new((struct ulog *)&layout->external,
LANE_REDO_EXTERNAL_SIZE,
lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->external == NULL)
goto error_external_new;
lane->undo = operation_new((struct ulog *)&layout->undo,
LANE_UNDO_SIZE,
lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_UNDO);
if (lane->undo == NULL)
goto error_undo_new;
return 0;
error_undo_new:
operation_delete(lane->external);
error_external_new:
operation_delete(lane->internal);
error_internal_new:
return -1;
}
/*
 * lane_destroy -- cleanups a single lane runtime variables
 */
static void
lane_destroy(PMEMobjpool *pop, struct lane *lane)
{
operation_delete(lane->undo);
operation_delete(lane->internal);
operation_delete(lane->external);
}
/*
 * lane_boot -- initializes all lanes
 *
 * Allocates the volatile lane array and the lock array, then initializes
 * every lane. On failure all work done so far is rolled back.
 */
int
lane_boot(PMEMobjpool *pop)
{
int err = 0;
pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes);
if (pop->lanes_desc.lane == NULL) {
err = ENOMEM;
ERR("!Malloc of volatile lanes");
goto error_lanes_malloc;
}
pop->lanes_desc.next_lane_idx = 0;
/* Zalloc: all lane locks start out released (0) */
pop->lanes_desc.lane_locks =
Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes);
if (pop->lanes_desc.lane_locks == NULL) {
ERR("!Malloc for lane locks");
goto error_locks_malloc;
}
/* add lanes to pmemcheck ignored list */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
(sizeof(struct lane_layout) * pop->nlanes));
uint64_t i;
for (i = 0; i < pop->nlanes; ++i) {
struct lane_layout *layout = lane_get_layout(pop, i);
if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) {
ERR("!lane_init");
goto error_lane_init;
}
}
return 0;
error_lane_init:
/* destroy only the lanes that were successfully initialized ([0, i)) */
for (; i >= 1; --i)
lane_destroy(pop, &pop->lanes_desc.lane[i - 1]);
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
error_locks_malloc:
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
error_lanes_malloc:
return err;
}
/*
 * lane_init_data -- initalizes ulogs for all the lanes
 *
 * Constructs the three on-media ulogs of every lane, then persists the
 * whole layout range in one relaxed flush.
 */
void
lane_init_data(PMEMobjpool *pop)
{
struct lane_layout *layout;
for (uint64_t i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal),
LANE_REDO_INTERNAL_SIZE, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external),
LANE_REDO_EXTERNAL_SIZE, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo),
LANE_UNDO_SIZE, 0, &pop->p_ops);
}
layout = lane_get_layout(pop, 0);
pmemops_xpersist(&pop->p_ops, layout,
pop->nlanes * sizeof(struct lane_layout),
PMEMOBJ_F_RELAXED);
}
/*
 * lane_cleanup -- destroys all lanes
 */
void
lane_cleanup(PMEMobjpool *pop)
{
for (uint64_t i = 0; i < pop->nlanes; ++i)
lane_destroy(pop, &pop->lanes_desc.lane[i]);
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
lane_info_cleanup(pop);
}
/*
 * lane_recover_and_section_boot -- performs initialization and recovery of all
 * lanes
 *
 * Returns 0 on success, a non-zero error code otherwise.
 */
int
lane_recover_and_section_boot(PMEMobjpool *pop)
{
	COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) +
		SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) +
		SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE);
	int err = 0;
	uint64_t i; /* lane index */
	struct lane_layout *layout;
	/*
	 * First we need to recover the internal/external redo logs so that the
	 * allocator state is consistent before we boot it.
	 */
	for (i = 0; i < pop->nlanes; ++i) {
		layout = lane_get_layout(pop, i);
		ulog_recover((struct ulog *)&layout->internal,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
		ulog_recover((struct ulog *)&layout->external,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
	}
	if ((err = pmalloc_boot(pop)) != 0)
		return err;
	/*
	 * Undo logs must be processed after the heap is initialized since
	 * a undo recovery might require deallocation of the next ulogs.
	 */
	for (i = 0; i < pop->nlanes; ++i) {
		layout = lane_get_layout(pop, i);
		struct ulog *undo = (struct ulog *)&layout->undo;
		struct operation_context *ctx = operation_new(
			undo,
			LANE_UNDO_SIZE,
			lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
			LOG_TYPE_UNDO);
		if (ctx == NULL) {
			/*
			 * BUG FIX: 'err' was still 0 here, so an allocation
			 * failure in operation_new was silently reported as
			 * success. Return ENOMEM so the caller aborts boot.
			 */
			err = ENOMEM;
			LOG(2, "undo recovery failed %" PRIu64 " %d",
				i, err);
			return err;
		}
		operation_resume(ctx);
		operation_process(ctx);
		operation_finish(ctx);
		operation_delete(ctx);
	}
	return 0;
}
/*
 * lane_section_cleanup -- performs runtime cleanup of all lanes
 *
 * Currently just tears down the allocator runtime state.
 */
int
lane_section_cleanup(PMEMobjpool *pop)
{
return pmalloc_cleanup(pop);
}
/*
 * lane_check -- performs check of all lanes
 *
 * Returns 0 when the internal redo log of every lane passes ulog_check,
 * -1 on the first lane that fails.
 */
int
lane_check(PMEMobjpool *pop)
{
	int err = 0;
	uint64_t j; /* lane index */
	struct lane_layout *layout;
	for (j = 0; j < pop->nlanes; ++j) {
		layout = lane_get_layout(pop, j);
		if (ulog_check((struct ulog *)&layout->internal,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops) != 0) {
			/*
			 * BUG FIX: 'err' was never assigned, so a failed
			 * check returned 0 (success). Signal the failure.
			 */
			err = -1;
			LOG(2, "lane %" PRIu64 " internal redo failed: %d",
				j, err);
			return err;
		}
	}
	return 0;
}
/*
 * get_lane -- (internal) get free lane index
 *
 * Spins over the lock array starting from the thread's primary lane,
 * acquiring the first free lock via CAS 0->1. After LANE_PRIMARY_ATTEMPTS
 * consecutive misses on the primary, the thread adopts the lane it actually
 * got as its new primary (reduces contention). Yields between full sweeps.
 */
static inline void
get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks)
{
info->lane_idx = info->primary;
while (1) {
do {
info->lane_idx %= nlocks;
if (likely(util_bool_compare_and_swap64(
&locks[info->lane_idx], 0, 1))) {
if (info->lane_idx == info->primary) {
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
} else if (info->primary_attempts == 0) {
info->primary = info->lane_idx;
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
}
return;
}
if (info->lane_idx == info->primary &&
info->primary_attempts > 0) {
info->primary_attempts--;
}
++info->lane_idx;
} while (info->lane_idx < nlocks);
sched_yield();
}
}
/*
 * get_lane_info_record -- (internal) get lane record attached to memory pool
 * or first free
 *
 * Fast path: one-entry TLS cache. Slow path: per-thread hash table lookup,
 * allocating and linking a fresh record on first use of a pool.
 */
static inline struct lane_info *
get_lane_info_record(PMEMobjpool *pop)
{
if (likely(Lane_info_cache != NULL &&
Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) {
return Lane_info_cache;
}
if (unlikely(Lane_info_ht == NULL)) {
lane_info_ht_boot();
}
struct lane_info *info = cuckoo_get(Lane_info_ht, pop->uuid_lo);
if (unlikely(info == NULL)) {
info = Malloc(sizeof(struct lane_info));
if (unlikely(info == NULL)) {
FATAL("Malloc");
}
info->pop_uuid_lo = pop->uuid_lo;
/* UINT64_MAX marks "no lane assigned yet" (see lane_hold) */
info->lane_idx = UINT64_MAX;
info->nest_count = 0;
info->next = Lane_info_records;
info->prev = NULL;
info->primary = 0;
info->primary_attempts = LANE_PRIMARY_ATTEMPTS;
if (Lane_info_records) {
Lane_info_records->prev = info;
}
Lane_info_records = info;
if (unlikely(cuckoo_insert(
Lane_info_ht, pop->uuid_lo, info) != 0)) {
FATAL("cuckoo_insert");
}
}
Lane_info_cache = info;
return info;
}
/*
 * lane_hold -- grabs a per-thread lane in a round-robin fashion
 *
 * Returns the index of the held lane; when 'lanep' is non-NULL the lane
 * itself is returned and its operation contexts are (re)initialized on the
 * outermost hold. Holds nest: only the first hold acquires the lock.
 */
unsigned
lane_hold(PMEMobjpool *pop, struct lane **lanep)
{
/*
 * Before runtime lane initialization all remote operations are
 * executed using RLANE_DEFAULT.
 */
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
if (lanep != NULL)
FATAL("cannot obtain section before lane's init");
return RLANE_DEFAULT;
}
struct lane_info *lane = get_lane_info_record(pop);
while (unlikely(lane->lane_idx == UINT64_MAX)) {
/* initial wrap to next CL */
lane->primary = lane->lane_idx = util_fetch_and_add32(
&pop->lanes_desc.next_lane_idx, LANE_JUMP);
} /* handles wraparound */
uint64_t *llocks = pop->lanes_desc.lane_locks;
/* grab next free lane from lanes available at runtime */
if (!lane->nest_count++) {
get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes);
}
struct lane *l = &pop->lanes_desc.lane[lane->lane_idx];
/* reinitialize lane's content only if in outermost hold */
if (lanep && lane->nest_count == 1) {
VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l));
VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout));
operation_init(l->external);
operation_init(l->internal);
operation_init(l->undo);
}
if (lanep)
*lanep = l;
return (unsigned)lane->lane_idx;
}
/*
 * lane_attach -- attaches the lane with the given index to the current thread
 *
 * Used to transfer a held lane between threads (e.g. for transactions);
 * the lock itself is assumed to already be owned -- TODO confirm at callers.
 */
void
lane_attach(PMEMobjpool *pop, unsigned lane)
{
struct lane_info *info = get_lane_info_record(pop);
info->nest_count = 1;
info->lane_idx = lane;
}
/*
 * lane_detach -- detaches the currently held lane from the current thread
 *
 * Must be called at nesting depth 1; returns the detached lane's index
 * without releasing its lock.
 */
unsigned
lane_detach(PMEMobjpool *pop)
{
struct lane_info *lane = get_lane_info_record(pop);
lane->nest_count -= 1;
ASSERTeq(lane->nest_count, 0);
return (unsigned)lane->lane_idx;
}
/*
 * lane_release -- drops the per-thread lane
 *
 * Decrements the nesting counter; the lock is released (CAS 1->0) only
 * when the outermost hold ends.
 */
void
lane_release(PMEMobjpool *pop)
{
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
return;
}
struct lane_info *lane = get_lane_info_record(pop);
ASSERTne(lane, NULL);
ASSERTne(lane->lane_idx, UINT64_MAX);
if (unlikely(lane->nest_count == 0)) {
FATAL("lane_release");
} else if (--(lane->nest_count) == 0) {
if (unlikely(!util_bool_compare_and_swap64(
&pop->lanes_desc.lane_locks[lane->lane_idx],
1, 0))) {
FATAL("util_bool_compare_and_swap64");
}
}
}
| 15,147 | 22.78022 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/common_badblock.sh
|
#!/usr/bin/env bash
#
# Copyright 2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# src/test/common_badblock.sh -- commons for the following tests:
# - util_badblock
# - pmempool_create
# - pmempool_info
#
LOG=out${UNITTEST_NUM}.log
COMMAND_NDCTL_NFIT_TEST_INIT="\
sudo ndctl disable-region all &>>$PREP_LOG_FILE && \
sudo modprobe -r nfit_test &>>$PREP_LOG_FILE && \
sudo modprobe nfit_test &>>$PREP_LOG_FILE && \
sudo ndctl disable-region all &>>$PREP_LOG_FILE && \
sudo ndctl zero-labels all &>>$PREP_LOG_FILE && \
sudo ndctl enable-region all &>>$PREP_LOG_FILE"
COMMAND_NDCTL_NFIT_TEST_FINI="\
sudo ndctl disable-region all &>>$PREP_LOG_FILE && \
sudo modprobe -r nfit_test &>>$PREP_LOG_FILE"
#
# ndctl_nfit_test_init -- reset all regions and reload the nfit_test module
#
function ndctl_nfit_test_init() {
expect_normal_exit $COMMAND_NDCTL_NFIT_TEST_INIT
}
#
# ndctl_nfit_test_init_node -- reset all regions and reload the nfit_test module on a remote node
#
function ndctl_nfit_test_init_node() {
# $1 - number of the remote node to run the init sequence on
expect_normal_exit run_on_node $1 "$COMMAND_NDCTL_NFIT_TEST_INIT"
}
#
# ndctl_nfit_test_fini -- disable all regions, remove the nfit_test module
# and (optionally) umount the pmem block device
#
# Input argument:
# 1) pmem mount directory to be umounted
#
function ndctl_nfit_test_fini() {
MOUNT_DIR=$1
# umount only when a mount directory was given; failures go to the log
[ $MOUNT_DIR ] && sudo umount $MOUNT_DIR &>> $PREP_LOG_FILE
expect_normal_exit $COMMAND_NDCTL_NFIT_TEST_FINI
}
#
# ndctl_nfit_test_fini_node -- disable all regions, remove the nfit_test module
# and (optionally) umount the pmem block device on a remote node
#
# Input arguments:
# 1) node number
# 2) pmem mount directory to be umounted
#
function ndctl_nfit_test_fini_node() {
# $1 - node number, $2 - optional pmem mount directory
MOUNT_DIR=$2
[ $MOUNT_DIR ] && expect_normal_exit run_on_node $1 "sudo umount $MOUNT_DIR &>> $PREP_LOG_FILE"
expect_normal_exit run_on_node $1 "$COMMAND_NDCTL_NFIT_TEST_FINI"
}
#
# ndctl_nfit_test_mount_pmem -- mount a pmem block device
#
# Input arguments:
# 1) path of a pmem block device
# 2) mount directory
#
function ndctl_nfit_test_mount_pmem() {
FULLDEV=$1
MOUNT_DIR=$2
# create an ext4 filesystem on the device, mount it and make the mount
# point world-writable so the tests do not need root access
expect_normal_exit "\
sudo mkfs.ext4 $FULLDEV &>>$PREP_LOG_FILE && \
sudo mkdir -p $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo mount $FULLDEV $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo chmod 0777 $MOUNT_DIR"
}
#
# ndctl_nfit_test_mount_pmem_node -- mount a pmem block device on a remote node
#
# Input arguments:
# 1) number of a node
# 2) path of a pmem block device
# 3) mount directory
#
function ndctl_nfit_test_mount_pmem_node() {
# $1 - node number, $2 - pmem block device, $3 - mount directory
FULLDEV=$2
MOUNT_DIR=$3
# same as ndctl_nfit_test_mount_pmem, executed on the remote node
expect_normal_exit run_on_node $1 "\
sudo mkfs.ext4 $FULLDEV &>>$PREP_LOG_FILE && \
sudo mkdir -p $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo mount $FULLDEV $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo chmod 0777 $MOUNT_DIR"
}
#
# ndctl_nfit_test_get_device -- create a namespace and get name of the pmem device
# of the nfit_test module
#
# Input argument:
# 1) mode of the namespace (devdax or fsdax)
#
function ndctl_nfit_test_get_device() {
MODE=$1
DEVTYPE=""
# map the namespace mode to the JSON key ndctl uses for the device name
[ "$MODE" == "devdax" ] && DEVTYPE="chardev"
[ "$MODE" == "fsdax" ] && DEVTYPE="blockdev"
[ "$DEVTYPE" == "" ] && echo "ERROR: wrong namespace mode: $MODE" >&2 && exit 1
BUS="nfit_test.0"
# pick the last pmem region of the nfit_test bus
REGION=$(ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
# create a fresh namespace (4096B alignment) and extract the device name
DEVICE=$(sudo ndctl create-namespace -b $BUS -r $REGION -f -m $MODE -a 4096 | sed "/$DEVTYPE/!d;s/[\", ]//g;s/$DEVTYPE://g")
echo $DEVICE
}
#
# ndctl_nfit_test_get_device_node -- create a namespace and get name of the pmem device
# of the nfit_test module on a remote node
#
# Input arguments:
# 1) number of a node
# 2) mode of the namespace (devdax or fsdax)
#
function ndctl_nfit_test_get_device_node() {
# $1 - node number, $2 - namespace mode (devdax or fsdax)
MODE=$2
DEVTYPE=""
# map the namespace mode to the JSON key ndctl uses for the device name
[ "$MODE" == "devdax" ] && DEVTYPE="chardev"
[ "$MODE" == "fsdax" ] && DEVTYPE="blockdev"
[ "$DEVTYPE" == "" ] && echo "ERROR: wrong namespace mode: $MODE" >&2 && exit 1
BUS="nfit_test.0"
# pick the last pmem region of the remote nfit_test bus
REGION=$(expect_normal_exit run_on_node $1 ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
# create a fresh namespace (4096B alignment) and extract the device name
DEVICE=$(expect_normal_exit run_on_node $1 sudo ndctl create-namespace -b $BUS -r $REGION -f -m $MODE -a 4096 | sed "/$DEVTYPE/!d;s/[\", ]//g;s/$DEVTYPE://g")
echo $DEVICE
}
#
# ndctl_nfit_test_get_dax_device -- create a namespace and get name of the dax device
# of the nfit_test module
#
function ndctl_nfit_test_get_dax_device() {
# XXX needed by libndctl (it should be removed when it is not needed)
sudo chmod o+rw /dev/ndctl*
DEVICE=$(ndctl_nfit_test_get_device devdax)
# make the new device-dax char device accessible to the test user
sudo chmod o+rw /dev/$DEVICE
echo $DEVICE
}
#
# ndctl_nfit_test_get_block_device -- create a namespace and get name of the pmem block device
# of the nfit_test module
#
function ndctl_nfit_test_get_block_device() {
# thin wrapper: a block device corresponds to the fsdax namespace mode
DEVICE=$(ndctl_nfit_test_get_device fsdax)
echo $DEVICE
}
#
# ndctl_nfit_test_get_block_device_node -- create a namespace and get name of the pmem block device
# of the nfit_test module on a remote node
#
function ndctl_nfit_test_get_block_device_node() {
# $1 - node number; a block device corresponds to the fsdax namespace mode
DEVICE=$(ndctl_nfit_test_get_device_node $1 fsdax)
echo $DEVICE
}
#
# ndctl_nfit_test_grant_access -- grant accesses required by libndctl
#
# XXX needed by libndctl (it should be removed when these extra access rights are not needed)
#
# Input argument:
# 1) a name of pmem device
#
function ndctl_nfit_test_grant_access() {
# NOTE(review): $1 (device name) is assigned but never used below --
# the access rights are granted per-region, not per-device
DEVICE=$1
BUS="nfit_test.0"
# find the last pmem region; its sysfs resource files must be readable
REGION=$(ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
expect_normal_exit "\
sudo chmod o+rw /dev/nmem* && \
sudo chmod o+r /sys/devices/platform/$BUS/ndbus?/$REGION/*/resource && \
sudo chmod o+r /sys/devices/platform/$BUS/ndbus?/$REGION/resource"
}
#
# ndctl_nfit_test_grant_access_node -- grant accesses required by libndctl on a node
#
# XXX needed by libndctl (it should be removed when these extra access rights are not needed)
#
# Input arguments:
# 1) node number
# 2) name of pmem device
#
function ndctl_nfit_test_grant_access_node() {
# $1 - node number, $2 - device name (per the header comment above).
# Fix: DEVICE was incorrectly read from $1, which holds the node number.
# The variable is otherwise unused; kept for interface symmetry with
# ndctl_nfit_test_grant_access.
DEVICE=$2
BUS="nfit_test.0"
# find the last pmem region on the remote node; its sysfs resource
# files must be made readable for libndctl
REGION=$(expect_normal_exit run_on_node $1 ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
expect_normal_exit run_on_node $1 "\
sudo chmod o+rw /dev/nmem* && \
sudo chmod o+r /sys/devices/platform/$BUS/ndbus?/$REGION/*/resource && \
sudo chmod o+r /sys/devices/platform/$BUS/ndbus?/$REGION/resource"
}
#
# ndctl_nfit_test_get_namespace_of_device -- get namespace of the pmem device
#
# Input argument:
# 1) a name of pmem device
#
function ndctl_nfit_test_get_namespace_of_device() {
DEVICE=$1
# find the namespace entry directly preceding the device entry in the
# ndctl JSON listing and strip it down to the bare namespace name
NAMESPACE=$(ndctl list | grep -e "$DEVICE" -e namespace | grep -B1 -e "$DEVICE" | head -n1 | cut -d'"' -f4)
# XXX needed by libndctl (it should be removed when it is not needed)
ndctl_nfit_test_grant_access $DEVICE
echo $NAMESPACE
}
#
# ndctl_nfit_test_get_namespace_of_device_node -- get namespace of the pmem device on a remote node
#
# Input arguments:
# 1) node number
# 2) name of pmem device
#
function ndctl_nfit_test_get_namespace_of_device_node() {
# $1 - node number, $2 - device name
DEVICE=$2
# same JSON scrape as the local variant, executed on the remote node
NAMESPACE=$(expect_normal_exit run_on_node $1 ndctl list | grep -e "$DEVICE" -e namespace | grep -B1 -e "$DEVICE" | head -n1 | cut -d'"' -f4)
# XXX needed by libndctl (it should be removed when it is not needed)
ndctl_nfit_test_grant_access_node $1 $DEVICE
echo $NAMESPACE
}
#
# ndctl_inject_error -- inject error (bad blocks) to the namespace
#
# Input arguments:
# 1) namespace
# 2) the first bad block
# 3) number of bad blocks
#
function ndctl_inject_error() {
local NAMESPACE=$1
local BLOCK=$2
local COUNT=$3
# echo each command into the prep log so failures can be replayed by hand
echo "# sudo ndctl inject-error --block=$BLOCK --count=$COUNT $NAMESPACE" >> $PREP_LOG_FILE
sudo ndctl inject-error --block=$BLOCK --count=$COUNT $NAMESPACE &>> $PREP_LOG_FILE
echo "# sudo ndctl start-scrub" >> $PREP_LOG_FILE
sudo ndctl start-scrub &>> $PREP_LOG_FILE
# wait for the scrub to finish so the injected bad blocks become visible
echo "# sudo ndctl wait-scrub" >> $PREP_LOG_FILE
sudo ndctl wait-scrub &>> $PREP_LOG_FILE
echo "(done: ndctl wait-scrub)" >> $PREP_LOG_FILE
}
#
# print_bad_blocks -- print all bad blocks (count, offset and length)
# or "No bad blocks found" if there are no bad blocks
#
function print_bad_blocks {
# XXX sudo should be removed when it is not needed
# grep fails (non-zero) when nothing matches, hence the fallback message
sudo ndctl list -M | grep -e "badblock_count" -e "offset" -e "length" >> $LOG || echo "No bad blocks found" >> $LOG
}
#
# expect_bad_blocks -- verify if there are required bad blocks
# and fail if they are not there
#
function expect_bad_blocks {
# XXX sudo should be removed when it is not needed
# '&& true' keeps 'set -e'-style runners from aborting on a grep miss
sudo ndctl list -M | grep -e "badblock_count" -e "offset" -e "length" >> $LOG && true
if [ $? -ne 0 ]; then
# no bad blocks found -- dump the preparation log to aid debugging
# XXX sudo should be removed when it is not needed
sudo ndctl list -M &>> $PREP_LOG_FILE && true
msg "====================================================================="
msg "Error occurred, the preparation log ($PREP_LOG_FILE) is listed below:"
msg ""
cat $PREP_LOG_FILE
msg "====================================================================="
msg ""
fatal "Error: ndctl failed to inject or retain bad blocks"
fi
}
#
# expect_bad_blocks_node -- verify on a remote node if there are required
# bad blocks and fail if they are not there
#
function expect_bad_blocks_node {
# $1 - node number
# XXX sudo should be removed when it is not needed
expect_normal_exit run_on_node $1 sudo ndctl list -M | grep -e "badblock_count" -e "offset" -e "length" >> $LOG || fatal "Error: ndctl failed to inject or retain bad blocks"
}
| 11,240 | 32.257396 | 174 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/libpmempool_rm_remote/config.sh
|
#!/usr/bin/env bash
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# libpmempool_rm_remote/config.sh -- test configuration
#
# run on any filesystem type
CONF_GLOBAL_FS_TYPE=any
# run for both debug and nondebug builds
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
# 'all' presumably selects every available rpmem provider -- TODO confirm
CONF_GLOBAL_RPMEM_PROVIDER=all
# 'all' presumably selects every persistency method -- TODO confirm
CONF_GLOBAL_RPMEM_PMETHOD=all
| 1,771 | 41.190476 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_lane/obj_lane.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_lane.c -- unit test for lanes
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "tx.h"
#include "unittest.h"
#include "pmemcommon.h"
/* number of lanes in the mocked pool */
#define MAX_MOCK_LANES 5
/* arbitrary non-NULL marker used as a lane layout pointer */
#define MOCK_LAYOUT (void *)(0xAAA)
/* pool pointer shared with child threads (see test_separate_thread) */
static void *base_ptr;
/* a pool header immediately followed by its in-pool lane layouts */
struct mock_pop {
PMEMobjpool p;
struct lane_layout l[MAX_MOCK_LANES];
};
/*
 * mock_flush -- flush callback stub for the mocked pmem_ops; ignores its
 * arguments and always reports success
 */
static int
mock_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * mock_persist -- persist callback stub for the mocked pmem_ops; ignores
 * its arguments and always reports success
 */
static int
mock_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * mock_memset -- memset callback for the mocked pmem_ops; fills the range
 * with a plain (volatile) memset and returns the destination pointer
 */
static void *
mock_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	(void) ctx;
	(void) flags;
	memset(ptr, c, sz);
	return ptr;
}
/*
 * mock_drain -- drain callback stub for the mocked pmem_ops; intentionally
 * a no-op
 */
static void
mock_drain(void *ctx)
{
	(void) ctx;
}
/*
 * test_lane_boot_cleanup_ok -- lane_boot() on a mocked pool assigns one
 * layout per lane; lane_cleanup() releases all runtime lane state
 */
static void
test_lane_boot_cleanup_ok(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
base_ptr = &pop->p;
/* offset from the pool start to the in-pool lane layouts */
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
/* each runtime lane must point at its corresponding in-pool layout */
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
lane_cleanup(&pop->p);
/* cleanup must null out both the lane array and the lock array */
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
/* jump buffer used to recover from an expected SIGABRT */
static ut_jmp_buf_t Jmp;
/*
 * signal_handler -- longjmp back to the test when the signal arrives
 */
static void
signal_handler(int sig)
{
ut_siglongjmp(Jmp);
}
static void
test_lane_hold_release(void)
{
struct ulog *mock_ulog = ZALLOC(SIZEOF_ULOG(1024));
struct pmem_ops p_ops;
struct operation_context *ctx = operation_new(mock_ulog, 1024,
NULL, NULL, &p_ops, LOG_TYPE_REDO);
struct lane mock_lane = {
.layout = MOCK_LAYOUT,
.internal = ctx,
.external = ctx,
.undo = ctx,
};
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = 1;
pop->p.lanes_desc.runtime_nlanes = 1,
pop->p.lanes_desc.lane = &mock_lane;
pop->p.lanes_desc.next_lane_idx = 0;
pop->p.lanes_desc.lane_locks = CALLOC(OBJ_NLANES, sizeof(uint64_t));
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.uuid_lo = 123456;
base_ptr = &pop->p;
struct lane *lane;
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_release(&pop->p);
lane_release(&pop->p);
struct sigaction v, old;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGABRT, &v, &old);
if (!ut_sigsetjmp(Jmp)) {
lane_release(&pop->p); /* only two sections were held */
UT_ERR("we should not get here");
}
SIGACTION(SIGABRT, &old, NULL);
FREE(pop->p.lanes_desc.lane_locks);
FREE(pop);
operation_delete(ctx);
FREE(mock_ulog);
}
/*
 * test_lane_sizes -- compile-time check that the on-media lane layout
 * occupies exactly LANE_TOTAL_SIZE bytes
 */
static void
test_lane_sizes(void)
{
UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) != LANE_TOTAL_SIZE);
}
/* kinds of work a child thread can be asked to perform */
enum thread_work_type {
LANE_INFO_DESTROY,
LANE_CLEANUP
};
/* argument block passed to test_separate_thread */
struct thread_data {
enum thread_work_type work;
};
/*
* test_separate_thread -- child thread input point for multithreaded
* scenarios
*/
static void *
test_separate_thread(void *arg)
{
UT_ASSERTne(arg, NULL);
struct thread_data *data = arg;
/* dispatch on the requested work type; always returns NULL */
switch (data->work) {
case LANE_INFO_DESTROY:
lane_info_destroy();
break;
case LANE_CLEANUP:
/* lane_cleanup() operates on the pool set up by the parent thread */
UT_ASSERTne(base_ptr, NULL);
lane_cleanup(base_ptr);
break;
default:
UT_FATAL("Unimplemented thread work type: %d", data->work);
}
return NULL;
}
/*
* test_lane_info_destroy_in_separate_thread -- lane info boot from one thread
* and lane info destroy from another
*/
static void
test_lane_info_destroy_in_separate_thread(void)
{
lane_info_boot();
struct thread_data data;
data.work = LANE_INFO_DESTROY;
os_thread_t thread;
/* the child thread performs the destroy; join before the final destroy */
os_thread_create(&thread, NULL, test_separate_thread, &data);
os_thread_join(&thread, NULL);
lane_info_destroy();
}
/*
* test_lane_cleanup_in_separate_thread -- lane boot from one thread and lane
* cleanup from another
*/
/*
 * test_lane_cleanup_in_separate_thread -- boot the lanes in this thread,
 * then verify lane_cleanup() works when called from a different thread
 */
static void
test_lane_cleanup_in_separate_thread(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
base_ptr = &pop->p;
/* offset from the pool start to the in-pool lane layouts */
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
/* hand the cleanup off to a child thread (pool passed via base_ptr) */
struct thread_data data;
data.work = LANE_CLEANUP;
os_thread_t thread;
os_thread_create(&thread, NULL, test_separate_thread, &data);
os_thread_join(&thread, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
/*
 * usage -- print the usage string and abort the test
 */
static void
usage(const char *app)
{
UT_FATAL("usage: %s [scenario: s/m]", app);
}
/*
 * main -- selects the single-threaded ('s') or multithreaded ('m') set of
 * lane unit tests based on argv[1]
 */
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_lane");
obj_init();
if (argc != 2)
usage(argv[0]);
switch (argv[1][0]) {
case 's':
/* single thread scenarios */
test_lane_boot_cleanup_ok();
test_lane_hold_release();
test_lane_sizes();
break;
case 'm':
/* multithreaded scenarios */
test_lane_info_destroy_in_separate_thread();
test_lane_cleanup_in_separate_thread();
break;
default:
usage(argv[0]);
}
obj_fini();
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 7,562 | 21.114035 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_common.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_common.h -- common declarations for rpmem_obc test
*/
#include "unittest.h"
#include "out.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define BUFF_SIZE 8192
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT;
/* a mocked out-of-band-connection server endpoint used by the tests */
struct server {
int fd_in; /* presumably the fd requests are read from -- see srv_recv */
int fd_out; /* presumably the fd responses are written to -- see srv_send */
};
void set_rpmem_cmd(const char *fmt, ...);
struct server *srv_init(void);
void srv_fini(struct server *s);
void srv_recv(struct server *s, void *buff, size_t len);
void srv_send(struct server *s, const void *buff, size_t len);
void srv_wait_disconnect(struct server *s);
void client_connect_wait(struct rpmem_obc *rpc, char *target);
/*
* Since the server may disconnect the connection at any moment
* from the client's perspective, execute the test in a loop so
* the moment when the connection is closed will be possibly different.
*/
#define ECONNRESET_LOOP 10
void server_econnreset(struct server *s, const void *msg, size_t len);
TEST_CASE_DECLARE(client_enotconn);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(client_monitor);
TEST_CASE_DECLARE(server_monitor);
TEST_CASE_DECLARE(server_wait);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_create);
TEST_CASE_DECLARE(server_create_econnreset);
TEST_CASE_DECLARE(server_create_eproto);
TEST_CASE_DECLARE(server_create_error);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(server_open_econnreset);
TEST_CASE_DECLARE(server_open_eproto);
TEST_CASE_DECLARE(server_open_error);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(server_close_econnreset);
TEST_CASE_DECLARE(server_close_eproto);
TEST_CASE_DECLARE(server_close_error);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server_set_attr);
TEST_CASE_DECLARE(server_set_attr_econnreset);
TEST_CASE_DECLARE(server_set_attr_eproto);
TEST_CASE_DECLARE(server_set_attr_error);
| 4,466 | 31.369565 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_close.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_close.c -- test cases for rpmem_obj_close function
*/
#include "rpmem_obc_test_common.h"
/* template of a successful (status == 0) close response, in host order */
static const struct rpmem_msg_close_resp CLOSE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE_RESP,
.size = sizeof(struct rpmem_msg_close_resp),
.status = 0,
},
};
/*
 * check_close_msg -- validate the header of a received close request;
 * a close request carries no payload beyond the header
 */
static void
check_close_msg(struct rpmem_msg_close *msg)
{
	UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CLOSE);
	UT_ASSERTeq(msg->hdr.size, sizeof(struct rpmem_msg_close));
}
/*
 * server_close_handle -- receive one close request, validate it and send
 * back the caller-provided response
 */
static void
server_close_handle(struct server *s, const struct rpmem_msg_close_resp *resp)
{
struct rpmem_msg_close msg;
srv_recv(s, &msg, sizeof(msg));
/* the request arrives in network byte order */
rpmem_ntoh_msg_close(&msg);
check_close_msg(&msg);
srv_send(s, resp, sizeof(*resp));
}
/*
 * client_close_errno -- issue a close request and verify it either
 * succeeds (ex_errno == 0) or fails with exactly the expected errno
 */
static void
client_close_errno(char *target, int ex_errno)
{
	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);

	client_connect_wait(rpc, target);

	int ret = rpmem_obc_close(rpc, 0);
	if (ex_errno == 0) {
		UT_ASSERTeq(ret, 0);
	} else {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	}

	rpmem_obc_disconnect(rpc);
	rpmem_obc_fini(rpc);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_close_eproto function.
*/
#define CLOSE_EPROTO_COUNT 5
/*
* server_close_eproto -- send invalid create request responses to a client
*/
/*
 * server_close_eproto -- send one of CLOSE_EPROTO_COUNT malformed close
 * responses (selected by argv[0]) to provoke EPROTO on the client side
 */
int
server_close_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CLOSE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
/* cases must stay in sync with CLOSE_EPROTO_COUNT */
switch (i) {
case 0:
/* out-of-range message type */
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
/* valid but mismatched message type */
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
/* truncated size field */
resp.hdr.size -= 1;
break;
case 3:
/* oversized size field */
resp.hdr.size += 1;
break;
case 4:
/* out-of-range status */
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
/* one command-line argument consumed */
return 1;
}
/*
 * client_close_error -- for every protocol error code, verify the client
 * maps the returned status to the matching errno value
 */
static void
client_close_error(char *target)
{
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_close_error %d", e);

		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		client_connect_wait(rpc, target);

		int ret = rpmem_obc_close(rpc, 0);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, rpmem_util_proto_errno(e));

		rpmem_obc_disconnect(rpc);
		rpmem_obc_fini(rpc);
	}
}
/*
 * client_close -- client side of the close request tests: exercises
 * connection resets, malformed responses, error statuses and finally the
 * successful path
 */
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	/* connection dropped by the server at varying moments */
	for (int it = 0; it < ECONNRESET_LOOP; it++) {
		set_rpmem_cmd("server_close_econnreset %d", it % 2);
		client_close_errno(target, ECONNRESET);
	}

	/* malformed responses */
	for (int it = 0; it < CLOSE_EPROTO_COUNT; it++) {
		set_rpmem_cmd("server_close_eproto %d", it);
		client_close_errno(target, EPROTO);
	}

	/* every protocol error status */
	client_close_error(target);

	/* the successful path */
	set_rpmem_cmd("server_close");
	client_close_errno(target, 0);

	return 1;
}
/*
* server_close_error -- return error status in close response message
*/
int
server_close_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
 * server_close_econnreset -- drop the connection, optionally after
 * sending only half a response, to trigger ECONNRESET on the client
 */
int
server_close_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);

	int send_partial = atoi(argv[0]);

	struct server *srv = srv_init();

	if (send_partial) {
		struct rpmem_msg_close_resp resp = CLOSE_RESP;
		rpmem_hton_msg_close_resp(&resp);
		/* half a message -- the client must treat this as a reset */
		srv_send(srv, &resp, sizeof(resp) / 2);
	}

	srv_fini(srv);
	return 1;
}
/*
 * server_close -- server side of a successful close request: answer the
 * incoming request with a success-status response
 */
int
server_close(const struct test_case *tc, int argc, char *argv[])
{
	struct server *srv = srv_init();

	struct rpmem_msg_close_resp resp = CLOSE_RESP;
	rpmem_hton_msg_close_resp(&resp);
	server_close_handle(srv, &resp);

	srv_fini(srv);
	/* no command-line arguments consumed */
	return 0;
}
| 6,240 | 21.777372 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_set_attr.c
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_set_attr.c -- test cases for rpmem_set_attr function
*/
#include "rpmem_obc_test_common.h"
/* template of a successful (status == 0) set-attr response, in host order */
static const struct rpmem_msg_set_attr_resp SET_ATTR_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_SET_ATTR_RESP,
.size = sizeof(struct rpmem_msg_set_attr_resp),
.status = 0,
}
};
/*
* check_set_attr_msg -- check set attributes message
*/
static void
check_set_attr_msg(struct rpmem_msg_set_attr *msg)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_SET_ATTR);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
 * server_set_attr_handle -- receive one set-attributes request, validate
 * it and send back the caller-provided response
 */
static void
server_set_attr_handle(struct server *s,
const struct rpmem_msg_set_attr_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
/* the request arrives in network byte order */
rpmem_ntoh_msg_set_attr(msg);
check_set_attr_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_set_attr_eproto function.
*/
#define SET_ATTR_EPROTO_COUNT 5
/*
* server_set_attr_eproto -- send invalid set attributes request responses to
* a client
*/
/*
 * server_set_attr_eproto -- send one of SET_ATTR_EPROTO_COUNT malformed
 * set-attr responses (selected by argv[0]) to provoke EPROTO on the client
 */
int
server_set_attr_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, SET_ATTR_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
/* cases must stay in sync with SET_ATTR_EPROTO_COUNT */
switch (i) {
case 0:
/* out-of-range message type */
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
/* valid but mismatched message type */
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
/* truncated size field */
resp.hdr.size -= 1;
break;
case 3:
/* oversized size field */
resp.hdr.size += 1;
break;
case 4:
/* out-of-range status */
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
/* one command-line argument consumed */
return 1;
}
/*
 * server_set_attr_error -- return error status in set attributes response
 * message; the error code is taken from the command line
 */
int
server_set_attr_error(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
	enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
	resp.hdr.status = e;
	rpmem_hton_msg_set_attr_resp(&resp);
	server_set_attr_handle(s, &resp);
	srv_fini(s);
	return 1;
}
/*
 * server_set_attr_econnreset -- test case for closing connection - server side
 *
 * argv[0] == 0: close the connection without sending anything;
 * argv[0] == 1: send only half of the response before closing.
 */
int
server_set_attr_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);
	int do_send = atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
	rpmem_hton_msg_set_attr_resp(&resp);
	if (do_send)
		srv_send(s, &resp, sizeof(resp) / 2);
	srv_fini(s);
	return 1;
}
/*
 * server_set_attr -- test case for rpmem_obc_set_attr - server side
 */
int
server_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	struct server *s = srv_init();
	/* reply with a valid, successful set-attr response */
	struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
	rpmem_hton_msg_set_attr_resp(&resp);
	server_set_attr_handle(s, &resp);
	srv_fini(s);
	return 0;
}
/*
 * client_set_attr_init -- initialize communication - client side
 *
 * Returns a connected rpmem_obc handle; the caller releases it with
 * client_set_attr_fini.
 */
static struct rpmem_obc *
client_set_attr_init(char *target)
{
	/*
	 * The original version declared and zeroed a local
	 * struct rpmem_pool_attr that was never used -- removed.
	 */
	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	client_connect_wait(rpc, target);
	return rpc;
}
/*
 * client_set_attr_fini -- finalize communication - client side
 * (disconnects and releases the handle created by client_set_attr_init)
 */
static void
client_set_attr_fini(struct rpmem_obc *rpc)
{
	rpmem_obc_disconnect(rpc);
	rpmem_obc_fini(rpc);
}
/*
 * client_set_attr_errno -- perform set attributes request operation and expect
 * specified errno. If ex_errno is zero the operation is expected to succeed.
 */
static void
client_set_attr_errno(char *target, int ex_errno)
{
	struct rpmem_obc *rpc = client_set_attr_init(target);
	/* must match the attributes checked by the server's check_set_attr_msg */
	const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
	int ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
	if (ex_errno) {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	} else {
		UT_ASSERTeq(ret, 0);
	}
	client_set_attr_fini(rpc);
}
/*
 * client_set_attr_error -- check if valid errno is set if error status
 * returned; iterates over every protocol error code (0 is success, so
 * the loop starts at 1)
 */
static void
client_set_attr_error(char *target)
{
	int ret;
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_set_attr_error %d", e);
		/* expected errno corresponding to the protocol error */
		int ex_errno = rpmem_util_proto_errno(e);
		struct rpmem_obc *rpc = client_set_attr_init(target);
		const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
		ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
		client_set_attr_fini(rpc);
	}
}
/*
 * client_set_attr -- test case for set attributes request operation - client
 * side; exercises connection-reset, protocol-error, error-status and
 * success scenarios against the matching server_set_attr_* test cases
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		/* alternate between no response and a truncated response */
		set_rpmem_cmd("server_set_attr_econnreset %d", i % 2);
		client_set_attr_errno(target, ECONNRESET);
	}
	for (int i = 0; i < SET_ATTR_EPROTO_COUNT; i++) {
		set_rpmem_cmd("server_set_attr_eproto %d", i);
		client_set_attr_errno(target, EPROTO);
	}
	client_set_attr_error(target);
	set_rpmem_cmd("server_set_attr");
	client_set_attr_errno(target, 0);
	return 1;
}
| 7,199 | 22.684211 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_create.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_create.c -- test cases for rpmem_obc_create function
*/
#include "rpmem_obc_test_common.h"
/*
 * CREATE_RESP -- a valid create response used as the base for the
 * server-side test cases below
 */
static const struct rpmem_msg_create_resp CREATE_RESP = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CREATE_RESP,
		.size = sizeof(struct rpmem_msg_create_resp),
		.status = 0,
	},
	.ibc = {
		.port = PORT,
		.rkey = RKEY,
		.raddr = RADDR,
		.persist_method = RPMEM_PM_GPSPM,
		.nlanes = NLANES_RESP,
	},
};
/*
 * check_create_msg -- check that every field of a create request matches
 * the values the client-side test is expected to send
 */
static void
check_create_msg(struct rpmem_msg_create *msg)
{
	/* pool descriptor is a NUL-terminated string appended to the message */
	size_t pool_desc_size = strlen(POOL_DESC) + 1;
	size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE);
	UT_ASSERTeq(msg->hdr.size, msg_size);
	UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
	UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
	UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
	UT_ASSERTeq(msg->c.provider, PROVIDER);
	UT_ASSERTeq(msg->c.nlanes, NLANES);
	UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
	UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
	UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
	UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
 * server_create_handle -- handle a create request message and reply with
 * the given response
 */
static void
server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp)
{
	size_t msg_size = sizeof(struct rpmem_msg_create) +
			strlen(POOL_DESC) + 1;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	srv_recv(s, msg, msg_size);
	/* request arrives in network byte order -- convert before checking */
	rpmem_ntoh_msg_create(msg);
	check_create_msg(msg);
	srv_send(s, resp, sizeof(*resp));
	FREE(msg);
}
/*
 * Number of cases for EPROTO test. Must be kept in sync with the
 * switch statement in the server_create_eproto function below.
 */
#define CREATE_EPROTO_COUNT 8
/*
 * server_create_eproto -- send invalid create request responses to a client;
 * the case index selects which field of the response is corrupted
 */
int
server_create_eproto(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1);
	int i = atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_create_resp resp = CREATE_RESP;
	switch (i) {
	case 0:
		/* message type out of range */
		resp.hdr.type = MAX_RPMEM_MSG_TYPE;
		break;
	case 1:
		/* valid type, but not a create response */
		resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
		break;
	case 2:
		/* size too small */
		resp.hdr.size -= 1;
		break;
	case 3:
		/* size too large */
		resp.hdr.size += 1;
		break;
	case 4:
		/* status out of range */
		resp.hdr.status = MAX_RPMEM_ERR;
		break;
	case 5:
		/* invalid port number - too small */
		resp.ibc.port = 0;
		break;
	case 6:
		/* invalid port number - too large */
		resp.ibc.port = UINT16_MAX + 1;
		break;
	case 7:
		/* persist method out of range */
		resp.ibc.persist_method = MAX_RPMEM_PM;
		break;
	default:
		UT_ASSERT(0);
		break;
	}
	rpmem_hton_msg_create_resp(&resp);
	server_create_handle(s, &resp);
	srv_fini(s);
	return 1;
}
/*
 * server_create_error -- return an error status in create response message;
 * the error code is taken from the command line
 */
int
server_create_error(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
	enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_create_resp resp = CREATE_RESP;
	resp.hdr.status = e;
	rpmem_hton_msg_create_resp(&resp);
	server_create_handle(s, &resp);
	srv_fini(s);
	return 1;
}
/*
 * server_create_econnreset -- test case for closing connection - server side
 *
 * argv[0] == 0: close the connection without sending anything;
 * argv[0] == 1: send only half of the response before closing.
 */
int
server_create_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);
	int do_send = atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_create_resp resp = CREATE_RESP;
	rpmem_hton_msg_create_resp(&resp);
	if (do_send)
		srv_send(s, &resp, sizeof(resp) / 2);
	srv_fini(s);
	return 1;
}
/*
 * server_create -- test case for rpmem_obc_create function - server side
 *
 * Takes no arguments; replies with a valid, successful create response.
 */
int
server_create(const struct test_case *tc, int argc, char *argv[])
{
	/*
	 * The original checked `argc < 0`, which can never be true --
	 * dead code removed for consistency with server_open and
	 * server_set_attr, which take no arguments either.
	 */
	struct server *s = srv_init();
	struct rpmem_msg_create_resp resp = CREATE_RESP;
	rpmem_hton_msg_create_resp(&resp);
	server_create_handle(s, &resp);
	srv_fini(s);
	return 0;
}
/*
 * client_create_errno -- perform create request operation and expect
 * specified errno. If ex_errno is zero expect success and certain values
 * in the res struct (mirroring CREATE_RESP).
 */
static void
client_create_errno(char *target, int ex_errno)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	struct rpmem_resp_attr res;
	int ret;
	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	client_connect_wait(rpc, target);
	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	if (ex_errno) {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	} else {
		UT_ASSERTeq(ret, 0);
		/* on success the response fields must match CREATE_RESP */
		UT_ASSERTeq(res.port, CREATE_RESP.ibc.port);
		UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey);
		UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr);
		UT_ASSERTeq(res.persist_method,
				CREATE_RESP.ibc.persist_method);
		UT_ASSERTeq(res.nlanes,
				CREATE_RESP.ibc.nlanes);
	}
	rpmem_obc_disconnect(rpc);
	rpmem_obc_fini(rpc);
}
/*
 * client_create_error -- check if valid errno is set if error status
 * returned; iterates over every protocol error code (0 is success, so
 * the loop starts at 1)
 */
static void
client_create_error(char *target)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	struct rpmem_resp_attr res;
	int ret;
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_create_error %d", e);
		/* expected errno corresponding to the protocol error */
		int ex_errno = rpmem_util_proto_errno(e);
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		client_connect_wait(rpc, target);
		ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
		rpmem_obc_disconnect(rpc);
		rpmem_obc_fini(rpc);
	}
}
/*
 * client_create -- test case for create request operation - client side;
 * exercises connection-reset, protocol-error, error-status and success
 * scenarios against the matching server_create_* test cases
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		/* alternate between no response and a truncated response */
		set_rpmem_cmd("server_create_econnreset %d", i % 2);
		client_create_errno(target, ECONNRESET);
	}
	for (int i = 0; i < CREATE_EPROTO_COUNT; i++) {
		set_rpmem_cmd("server_create_eproto %d", i);
		client_create_errno(target, EPROTO);
	}
	client_create_error(target);
	set_rpmem_cmd("server_create");
	client_create_errno(target, 0);
	return 1;
}
| 8,157 | 23.136095 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_common.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_common.c -- common definitions for rpmem_obc tests
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include "rpmem_obc_test_common.h"
#include "os.h"
#define CMD_BUFF_SIZE 4096
static const char *rpmem_cmd;
/*
 * set_rpmem_cmd -- set RPMEM_CMD variable
 *
 * Builds "<original RPMEM_CMD> <formatted suffix>" and stores it back in
 * the RPMEM_CMD environment variable, then re-initializes librpmem's
 * internal copy of it.
 */
void
set_rpmem_cmd(const char *fmt, ...)
{
	static char cmd_buff[CMD_BUFF_SIZE];
	/* capture the original RPMEM_CMD value once, on first use */
	if (!rpmem_cmd) {
		char *cmd = os_getenv(RPMEM_CMD_ENV);
		UT_ASSERTne(cmd, NULL);
		rpmem_cmd = STRDUP(cmd);
	}
	ssize_t ret;
	size_t cnt = 0;
	va_list ap;
	va_start(ap, fmt);
	ret = snprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
			"%s ", rpmem_cmd);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	va_end(ap);
	ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
	UT_ASSERTeq(ret, 0);
	/*
	 * Rpmem has an internal RPMEM_CMD variable copy and it is assumed
	 * RPMEM_CMD will not change its value during execution. To refresh the
	 * internal copy it must be destroyed and an instance must be
	 * initialized manually.
	 */
	rpmem_util_cmds_fini();
	rpmem_util_cmds_init();
}
/*
 * srv_init -- allocate a server context bound to stdin/stdout and send
 * the initial zero status word to the client
 */
struct server *
srv_init(void)
{
	struct server *s = MALLOC(sizeof(*s));
	s->fd_in = STDIN_FILENO;
	s->fd_out = STDOUT_FILENO;
	/* signal readiness to the client with a zero status */
	uint32_t status = 0;
	srv_send(s, &status, sizeof(status));
	return s;
}
/*
 * srv_fini -- release the server context (the underlying stdin/stdout
 * descriptors are not closed here)
 */
void
srv_fini(struct server *s)
{
	FREE(s);
}
/*
 * srv_recv -- read a message from the client, looping until exactly
 * len bytes have been received (read may return short counts)
 */
void
srv_recv(struct server *s, void *buff, size_t len)
{
	size_t rd = 0;
	uint8_t *cbuf = buff;
	while (rd < len) {
		ssize_t ret = read(s->fd_in, &cbuf[rd], len - rd);
		/* aborts on EOF or error as well */
		UT_ASSERT(ret > 0);
		rd += (size_t)ret;
	}
}
/*
 * srv_send -- send a message to the client, looping until exactly
 * len bytes have been written (write may return short counts)
 */
void
srv_send(struct server *s, const void *buff, size_t len)
{
	size_t wr = 0;
	const uint8_t *cbuf = buff;
	while (wr < len) {
		ssize_t ret = write(s->fd_out, &cbuf[wr], len - wr);
		UT_ASSERT(ret > 0);
		wr += (size_t)ret;
	}
}
/*
 * client_connect_wait -- wait until client connects to the server,
 * retrying rpmem_obc_connect in a busy loop until it succeeds
 */
void
client_connect_wait(struct rpmem_obc *rpc, char *target)
{
	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	while (rpmem_obc_connect(rpc, info))
		;
	rpmem_target_free(info);
}
/*
 * server_econnreset -- disconnect from client during performing an
 * operation
 *
 * Repeatedly sends the given message; presumably the client drops the
 * connection partway through, making a send fail -- TODO confirm against
 * the client-side test cases.
 */
void
server_econnreset(struct server *s, const void *msg, size_t len)
{
	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		srv_send(s, msg, len);
	}
}
| 4,138 | 23.204678 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test.c -- unit test for rpmem_obc module
*/
#include "rpmem_obc_test_common.h"
#include "pmemcommon.h"
/*
 * test_cases -- available test cases (both client- and server-side);
 * selected by name on the command line via TEST_CASE_PROCESS
 */
static struct test_case test_cases[] = {
	TEST_CASE(client_enotconn),
	TEST_CASE(client_connect),
	TEST_CASE(client_create),
	TEST_CASE(server_create),
	TEST_CASE(server_create_econnreset),
	TEST_CASE(server_create_eproto),
	TEST_CASE(server_create_error),
	TEST_CASE(client_open),
	TEST_CASE(server_open),
	TEST_CASE(server_open_econnreset),
	TEST_CASE(server_open_eproto),
	TEST_CASE(server_open_error),
	TEST_CASE(client_close),
	TEST_CASE(server_close),
	TEST_CASE(server_close_econnreset),
	TEST_CASE(server_close_eproto),
	TEST_CASE(server_close_error),
	TEST_CASE(client_monitor),
	TEST_CASE(server_monitor),
	TEST_CASE(client_set_attr),
	TEST_CASE(server_set_attr),
	TEST_CASE(server_set_attr_econnreset),
	TEST_CASE(server_set_attr_eproto),
	TEST_CASE(server_set_attr_error),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize common test/logging infrastructure, dispatch the
 * test cases named on the command line, and clean up
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_obc");
	common_init("rpmem_obc",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmem_util_cmds_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	rpmem_util_cmds_fini();
	common_fini();
	DONE(NULL);
}
| 2,903 | 29.893617 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_open.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_open.c -- test cases for rpmem_obj_open function
*/
#include "rpmem_obc_test_common.h"
/*
 * OPEN_RESP -- a valid open response used as the base for the
 * server-side test cases below
 */
static const struct rpmem_msg_open_resp OPEN_RESP = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_OPEN_RESP,
		.size = sizeof(struct rpmem_msg_open_resp),
		.status = 0,
	},
	.ibc = {
		.port = PORT,
		.rkey = RKEY,
		.raddr = RADDR,
		.persist_method = RPMEM_PM_GPSPM,
		.nlanes = NLANES_RESP,
	},
	.pool_attr = POOL_ATTR_INIT,
};
/*
 * check_open_msg -- check that every field of an open request matches
 * the values the client-side test is expected to send
 */
static void
check_open_msg(struct rpmem_msg_open *msg)
{
	/* pool descriptor is a NUL-terminated string appended to the message */
	size_t pool_desc_size = strlen(POOL_DESC) + 1;
	size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
	UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN);
	UT_ASSERTeq(msg->hdr.size, msg_size);
	UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
	UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
	UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
	UT_ASSERTeq(msg->c.provider, PROVIDER);
	UT_ASSERTeq(msg->c.nlanes, NLANES);
	UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
	UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
	UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
}
/*
 * server_open_handle -- handle an open request message and reply with
 * the given response
 */
static void
server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp)
{
	size_t msg_size = sizeof(struct rpmem_msg_open) +
			strlen(POOL_DESC) + 1;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	srv_recv(s, msg, msg_size);
	/* request arrives in network byte order -- convert before checking */
	rpmem_ntoh_msg_open(msg);
	check_open_msg(msg);
	srv_send(s, resp, sizeof(*resp));
	FREE(msg);
}
/*
 * Number of cases for EPROTO test. Must be kept in sync with the
 * switch statement in the server_open_eproto function below.
 */
#define OPEN_EPROTO_COUNT 8
/*
 * server_open_eproto -- send invalid open request responses to a client;
 * the case index selects which field of the response is corrupted
 */
int
server_open_eproto(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1);
	int i = atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_open_resp resp = OPEN_RESP;
	switch (i) {
	case 0:
		/* message type out of range */
		resp.hdr.type = MAX_RPMEM_MSG_TYPE;
		break;
	case 1:
		/* valid type, but not an open response */
		resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
		break;
	case 2:
		/* size too small */
		resp.hdr.size -= 1;
		break;
	case 3:
		/* size too large */
		resp.hdr.size += 1;
		break;
	case 4:
		/* status out of range */
		resp.hdr.status = MAX_RPMEM_ERR;
		break;
	case 5:
		/* invalid port number - too small */
		resp.ibc.port = 0;
		break;
	case 6:
		/* invalid port number - too large */
		resp.ibc.port = UINT16_MAX + 1;
		break;
	case 7:
		/* persist method out of range */
		resp.ibc.persist_method = MAX_RPMEM_PM;
		break;
	default:
		UT_ASSERT(0);
		break;
	}
	rpmem_hton_msg_open_resp(&resp);
	server_open_handle(s, &resp);
	srv_fini(s);
	return 1;
}
/*
 * server_open_error -- return error status in open response message;
 * the error code is taken from the command line
 */
int
server_open_error(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
	enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_open_resp resp = OPEN_RESP;
	resp.hdr.status = e;
	rpmem_hton_msg_open_resp(&resp);
	server_open_handle(s, &resp);
	srv_fini(s);
	return 1;
}
/*
 * server_open_econnreset -- test case for closing connection - server side
 *
 * argv[0] == 0: close the connection without sending anything;
 * argv[0] == 1: send only half of the response before closing.
 */
int
server_open_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);
	int do_send = atoi(argv[0]);
	struct server *s = srv_init();
	struct rpmem_msg_open_resp resp = OPEN_RESP;
	rpmem_hton_msg_open_resp(&resp);
	if (do_send)
		srv_send(s, &resp, sizeof(resp) / 2);
	srv_fini(s);
	return 1;
}
/*
 * server_open -- test case for open request message - server side
 */
int
server_open(const struct test_case *tc, int argc, char *argv[])
{
	struct server *s = srv_init();
	/* reply with a valid, successful open response */
	struct rpmem_msg_open_resp resp = OPEN_RESP;
	rpmem_hton_msg_open_resp(&resp);
	server_open_handle(s, &resp);
	srv_fini(s);
	return 0;
}
/*
 * client_open_errno -- perform open request operation and expect
 * specified errno, repeat the operation specified number of times.
 * If ex_errno is zero expect success and certain values in the res
 * struct and pool attributes (mirroring OPEN_RESP).
 */
static void
client_open_errno(char *target, int ex_errno)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};
	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));
	struct rpmem_resp_attr res;
	int ret;
	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	client_connect_wait(rpc, target);
	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	if (ex_errno) {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	} else {
		UT_ASSERTeq(ret, 0);
		/* response fields must match OPEN_RESP */
		UT_ASSERTeq(res.port, OPEN_RESP.ibc.port);
		UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey);
		UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr);
		UT_ASSERTeq(res.persist_method,
			OPEN_RESP.ibc.persist_method);
		UT_ASSERTeq(res.nlanes,
			OPEN_RESP.ibc.nlanes);
		/* returned pool attributes must match OPEN_RESP as well */
		UT_ASSERTeq(memcmp(pool_attr.signature,
			OPEN_RESP.pool_attr.signature,
			RPMEM_POOL_HDR_SIG_LEN), 0);
		UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major);
		UT_ASSERTeq(pool_attr.compat_features,
			OPEN_RESP.pool_attr.compat_features);
		UT_ASSERTeq(pool_attr.incompat_features,
			OPEN_RESP.pool_attr.incompat_features);
		UT_ASSERTeq(pool_attr.ro_compat_features,
			OPEN_RESP.pool_attr.ro_compat_features);
		UT_ASSERTeq(memcmp(pool_attr.poolset_uuid,
			OPEN_RESP.pool_attr.poolset_uuid,
			RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.uuid,
			OPEN_RESP.pool_attr.uuid,
			RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.next_uuid,
			OPEN_RESP.pool_attr.next_uuid,
			RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.prev_uuid,
			OPEN_RESP.pool_attr.prev_uuid,
			RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.user_flags,
			OPEN_RESP.pool_attr.user_flags,
			RPMEM_POOL_USER_FLAGS_LEN), 0);
	}
	rpmem_obc_disconnect(rpc);
	rpmem_obc_fini(rpc);
}
/*
 * client_open_error -- check if valid errno is set if error status
 * returned; iterates over every protocol error code (0 is success, so
 * the loop starts at 1)
 */
static void
client_open_error(char *target)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};
	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));
	struct rpmem_resp_attr res;
	int ret;
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_open_error %d", e);
		/* expected errno corresponding to the protocol error */
		int ex_errno = rpmem_util_proto_errno(e);
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		client_connect_wait(rpc, target);
		ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
		rpmem_obc_disconnect(rpc);
		rpmem_obc_fini(rpc);
	}
}
/*
 * client_open -- test case for open request message - client side;
 * exercises connection-reset, protocol-error, error-status and success
 * scenarios against the matching server_open_* test cases
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		/* alternate between no response and a truncated response */
		set_rpmem_cmd("server_open_econnreset %d", i % 2);
		client_open_errno(target, ECONNRESET);
	}
	for (int i = 0; i < OPEN_EPROTO_COUNT; i++) {
		set_rpmem_cmd("server_open_eproto %d", i);
		client_open_errno(target, EPROTO);
	}
	client_open_error(target);
	set_rpmem_cmd("server_open");
	client_open_errno(target, 0);
	return 1;
}
| 8,942 | 23.70442 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_misc.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_misc.c -- miscellaneous test cases for rpmem_obc module
*/
#include <netdb.h>
#include "rpmem_obc_test_common.h"
/*
 * CLOSE_RESP -- a valid, successful close response used by the
 * monitor test cases below
 */
static const struct rpmem_msg_close_resp CLOSE_RESP = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CLOSE_RESP,
		.size = sizeof(struct rpmem_msg_close_resp),
		.status = 0,
	},
};
/*
 * client_enotconn -- check if ENOTCONN error is returned after
 * calling rpmem_obc API without connecting to the server.
 */
int
client_enotconn(const struct test_case *tc, int argc, char *argv[])
{
	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
	};
	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));
	struct rpmem_resp_attr res;
	int ret;
	/* monitor on a never-connected handle reports 0, not an error */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 0);
	/* every other operation must fail with ENOTCONN */
	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOTCONN);
	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOTCONN);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOTCONN);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOTCONN);
	rpmem_obc_fini(rpc);
	return 0;
}
/*
 * client_connect -- try to connect to the server at specified address and
 * port; reports (but does not fail on) unreachable targets
 */
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]...", tc->name);
	/* each argument is a separate target to try */
	for (int i = 0; i < argc; i++) {
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		struct rpmem_target_info *info;
		info = rpmem_target_parse(argv[i]);
		UT_ASSERTne(info, NULL);
		int ret = rpmem_obc_connect(rpc, info);
		if (ret) {
			UT_OUT("not connected: %s: %s", argv[i],
					out_get_errormsg());
		} else {
			UT_OUT("    connected: %s", argv[i]);
			rpmem_obc_disconnect(rpc);
		}
		rpmem_target_free(info);
		rpmem_obc_fini(rpc);
	}
	return argc;
}
/*
 * server_monitor -- server side of the monitor test: receive a close
 * request and reply with a successful close response
 */
int
server_monitor(const struct test_case *tc, int argc, char *argv[])
{
	struct server *s = srv_init();
	struct rpmem_msg_close close;
	struct rpmem_msg_close_resp resp = CLOSE_RESP;
	rpmem_hton_msg_close_resp(&resp);
	srv_recv(s, &close, sizeof(close));
	srv_send(s, &resp, sizeof(resp));
	srv_fini(s);
	return 0;
}
/*
 * client_monitor -- test case for rpmem_obc_monitor function - client side
 */
int
client_monitor(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_monitor");
	{
		/*
		 * Connect to target node, check connection state before
		 * and after disconnecting.
		 */
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		struct rpmem_target_info *info;
		info = rpmem_target_parse(target);
		UT_ASSERTne(info, NULL);
		int ret = rpmem_obc_connect(rpc, info);
		UT_ASSERTeq(ret, 0);
		/* nonblocking monitor: 1 means still connected */
		ret = rpmem_obc_monitor(rpc, 1);
		UT_ASSERTeq(ret, 1);
		ret = rpmem_obc_disconnect(rpc);
		UT_ASSERTeq(ret, 0);
		ret = rpmem_obc_monitor(rpc, 1);
		UT_ASSERTne(ret, 1);
		rpmem_target_free(info);
		rpmem_obc_fini(rpc);
	}
	{
		/*
		 * Connect to target node and expect that server will
		 * disconnect.
		 */
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);
		struct rpmem_target_info *info;
		info = rpmem_target_parse(target);
		UT_ASSERTne(info, NULL);
		int ret = rpmem_obc_connect(rpc, info);
		UT_ASSERTeq(ret, 0);
		ret = rpmem_obc_monitor(rpc, 1);
		UT_ASSERTeq(ret, 1);
		ret = rpmem_obc_close(rpc, 0);
		UT_ASSERTeq(ret, 0);
		/* blocking monitor: returns when the server disconnects */
		ret = rpmem_obc_monitor(rpc, 0);
		UT_ASSERTne(ret, 1);
		rpmem_obc_disconnect(rpc);
		rpmem_target_free(info);
		rpmem_obc_fini(rpc);
	}
	return 1;
}
| 5,490 | 23.846154 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/setup.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# src/test/rpmem_obc/setup.sh -- common setup for rpmem_obc tests
#
set -e
# two nodes: node 0 runs the rpmem_obc server binary, node 1 the client
require_nodes 2
require_node_log_files 1 $RPMEM_LOG_FILE
# command the client-side test launches on the remote node (node 0);
# the string is quoted twice because it is passed through ssh
RPMEM_CMD="\"cd ${NODE_TEST_DIR[0]} && UNITTEST_FORCE_QUIET=1 \
	LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:$REMOTE_LD_LIBRARY_PATH:${NODE_LD_LIBRARY_PATH[0]} \
	./rpmem_obc$EXESUFFIX\""
export_vars_node 1 RPMEM_CMD
| 1,934 | 40.170213 | 87 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_out_of_memory/obj_out_of_memory.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_out_of_memory.c -- allocate objects until OOM
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "out_of_memory"
/* cargs -- constructor argument: the requested allocation size */
struct cargs {
	size_t size;
};

/*
 * test_constructor -- object constructor; persistently fills the first
 * half of the freshly allocated object with a random byte value
 */
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
	const struct cargs *cargs = args;
	pmemobj_memset_persist(pop, addr, rand() % 256, cargs->size / 2);
	return 0;
}
/*
 * test_alloc -- allocate objects of the given size until the pool is
 * exhausted and report how many allocations succeeded
 */
static void
test_alloc(PMEMobjpool *pop, size_t size)
{
	unsigned long cnt;
	for (cnt = 0; ; cnt++) {
		struct cargs args = { size };
		int ret = pmemobj_alloc(pop, NULL, size, 0,
				test_constructor, &args);
		if (ret != 0)
			break; /* out of memory -- cnt holds the successes */
	}
	UT_OUT("size: %zu allocs: %lu", size, cnt);
}
/*
 * test_free -- free every object in the pool
 */
static void
test_free(PMEMobjpool *pop)
{
	PMEMoid oid;
	PMEMoid next;
	/* _SAFE variant: the current object is freed during iteration */
	POBJ_FOREACH_SAFE(pop, oid, next)
		pmemobj_free(&oid);
}
/*
 * main -- for each pool file: create it, allocate objects until OOM,
 * verify pool consistency, then reopen and free everything.
 *
 * usage: obj_out_of_memory size filename ...
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_out_of_memory");
	if (argc < 3)
		UT_FATAL("usage: %s size filename ...", argv[0]);
	size_t size = ATOUL(argv[1]);
	for (int i = 2; i < argc; i++) {
		const char *path = argv[i];
		PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, 0,
				S_IWUSR | S_IRUSR);
		if (pop == NULL)
			UT_FATAL("!pmemobj_create: %s", path);
		test_alloc(pop, size);
		pmemobj_close(pop);
		/* a completely filled pool must still pass consistency check */
		UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1);
		/*
		 * To prevent subsequent opens from receiving exactly the same
		 * volatile memory addresses a dummy malloc has to be made.
		 * This can expose issues in which traces of previous volatile
		 * state are leftover in the persistent pool.
		 */
		void *heap_touch = MALLOC(1);
		UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL);
		test_free(pop);
		pmemobj_close(pop);
		FREE(heap_touch);
	}
	DONE(NULL);
}
| 3,270 | 25.379032 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmemd_db/rpmemd_db_test.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_db_test.c -- unit test for pool set database
*
* usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2>
*/
#include "file.h"
#include "unittest.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#define POOL_MODE 0644
#define FAILED_FUNC(func_name) \
UT_ERR("!%s(): %s() failed", __func__, func_name);
#define FAILED_FUNC_PARAM(func_name, param) \
UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param);
#define NPOOLS_DUAL 2
#define POOL_ATTR_CREATE 0
#define POOL_ATTR_OPEN 1
#define POOL_ATTR_SET_ATTR 2
#define POOL_STATE_INITIAL 0
#define POOL_STATE_CREATED 1
#define POOL_STATE_OPENED 2
#define POOL_STATE_CLOSED POOL_STATE_CREATED
#define POOL_STATE_REMOVED POOL_STATE_INITIAL
/*
 * fill_rand -- fill a buffer with random lowercase letters
 *
 * The PRNG is seeded only once per process. The original code called
 * srand(time(NULL)) on every invocation, so two calls within the same
 * second restarted the generator from the same seed and produced
 * identical "random" buffers.
 */
static void
fill_rand(void *addr, size_t len)
{
	static int seeded;
	unsigned char *buff = addr;
	if (!seeded) {
		srand(time(NULL));
		seeded = 1;
	}
	for (size_t i = 0; i < len; i++)
		buff[i] = (rand() % ('z' - 'a')) + 'a';
}
/*
 * test_init -- check that rpmemd_db_init() / rpmemd_db_fini() succeed
 */
static int
test_init(const char *root_dir)
{
	struct rpmemd_db *db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db != NULL) {
		rpmemd_db_fini(db);
		return 0;
	}
	FAILED_FUNC("rpmemd_db_init");
	return -1;
}
/*
 * test_check_dir -- test rpmemd_db_check_dir()
 */
static int
test_check_dir(const char *root_dir)
{
	struct rpmemd_db *db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	int ret = rpmemd_db_check_dir(db);
	if (ret != 0) {
		FAILED_FUNC("rpmemd_db_check_dir");
	}
	rpmemd_db_fini(db);
	return ret;
}
/*
 * test_create -- test rpmemd_db_pool_create()
 *
 * Creates a pool, closes it, then removes it; any step failing is
 * reported and the database handle is always released.
 */
static int
test_create(const char *root_dir, const char *pool_desc)
{
	struct rpmem_pool_attr attr;
	memset(&attr, 0, sizeof(attr));
	/* incompat_features = 2 marks the no-header (NOHDRS) pool layout */
	attr.incompat_features = 2;
	struct rpmemd_db_pool *prp;
	struct rpmemd_db *db;
	int ret = -1;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_create");
		goto fini;
	}
	rpmemd_db_pool_close(db, prp);
	ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
	if (ret) {
		FAILED_FUNC("rpmemd_db_pool_remove");
	}
fini:
	rpmemd_db_fini(db);
	return ret;
}
/*
 * test_create_dual -- dual test for rpmemd_db_pool_create()
 *
 * Creates two pools under the same database, then removes them in
 * reverse order; error paths unwind whatever was already created.
 */
static int
test_create_dual(const char *root_dir, const char *pool_desc_1,
	const char *pool_desc_2)
{
	struct rpmem_pool_attr attr1;
	memset(&attr1, 0, sizeof(attr1));
	/* incompat_features = 2 marks the no-header (NOHDRS) pool layout */
	attr1.incompat_features = 2;
	struct rpmemd_db_pool *prp1, *prp2;
	struct rpmemd_db *db;
	int ret = -1;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	/* test dual create */
	prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1);
	if (prp1 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
		goto err_create_1;
	}
	prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1);
	if (prp2 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
		goto err_create_2;
	}
	rpmemd_db_pool_close(db, prp2);
	rpmemd_db_pool_close(db, prp1);
	ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
	if (ret) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
		goto err_remove_2;
	}
	ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
	if (ret) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
	}
	goto fini;
err_create_2:
	/* second create failed -- close and remove the first pool */
	rpmemd_db_pool_close(db, prp1);
err_remove_2:
	rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
	rpmemd_db_fini(db);
	return ret;
}
/*
 * compare_attr -- compare two pool attribute sets field by field and
 * abort the test (with errno = EINVAL) on the first mismatch
 */
static void
compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2)
{
	const char *field = NULL;

	if (a1->major != a2->major)
		field = "major";
	else if (a1->compat_features != a2->compat_features)
		field = "compat_features";
	else if (a1->incompat_features != a2->incompat_features)
		field = "incompat_features";
	else if (a1->ro_compat_features != a2->ro_compat_features)
		field = "ro_compat_features";
	else if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN))
		field = "signature";
	else if (memcmp(a1->poolset_uuid, a2->poolset_uuid,
			RPMEM_POOL_HDR_UUID_LEN))
		field = "poolset_uuid";
	else if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN))
		field = "uuid";
	else if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN))
		field = "next_uuid";
	else if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN))
		field = "prev_uuid";

	if (field == NULL)
		return;

	errno = EINVAL;
	UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, field);
}
/*
 * test_open -- test rpmemd_db_pool_open()
 *
 * Creates a pool with random attributes, reopens it and verifies the
 * attributes read back match the ones it was created with.
 */
static int
test_open(const char *root_dir, const char *pool_desc)
{
	struct rpmem_pool_attr attr1, attr2;
	struct rpmemd_db_pool *prp;
	struct rpmemd_db *db;
	int ret = -1;
	fill_rand(&attr1, sizeof(attr1));
	/* only major == 1 / incompat_features == 2 are accepted on open */
	attr1.major = 1;
	attr1.incompat_features = 2;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_create");
		goto fini;
	}
	rpmemd_db_pool_close(db, prp);
	prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_open");
		goto fini;
	}
	rpmemd_db_pool_close(db, prp);
	/* attributes read on open must equal those used at create */
	compare_attr(&attr1, &attr2);
	ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
	if (ret) {
		FAILED_FUNC("rpmemd_db_pool_remove");
	}
fini:
	rpmemd_db_fini(db);
	return ret;
}
/*
 * test_open_dual -- dual test for rpmemd_db_pool_open()
 *
 * Creates two pools, opens both simultaneously, verifies the attributes
 * of each, then removes both; error paths unwind partial state.
 */
static int
test_open_dual(const char *root_dir, const char *pool_desc_1,
	const char *pool_desc_2)
{
	struct rpmem_pool_attr attr1a, attr2a, attr1b, attr2b;
	struct rpmemd_db_pool *prp1, *prp2;
	struct rpmemd_db *db;
	int ret = -1;
	fill_rand(&attr1a, sizeof(attr1a));
	fill_rand(&attr1b, sizeof(attr1b));
	/* only major == 1 / incompat_features == 2 are accepted on open */
	attr1a.major = 1;
	attr1a.incompat_features = 2;
	attr1b.major = 1;
	attr1b.incompat_features = 2;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a);
	if (prp1 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
		goto err_create_1;
	}
	rpmemd_db_pool_close(db, prp1);
	prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b);
	if (prp2 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
		goto err_create_2;
	}
	rpmemd_db_pool_close(db, prp2);
	/* test dual open */
	prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a);
	if (prp1 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1);
		goto err_open_1;
	}
	prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b);
	if (prp2 == NULL) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2);
		goto err_open_2;
	}
	rpmemd_db_pool_close(db, prp1);
	rpmemd_db_pool_close(db, prp2);
	/* attributes read on open must equal those used at create */
	compare_attr(&attr1a, &attr2a);
	compare_attr(&attr1b, &attr2b);
	ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
	if (ret) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
		goto err_remove_2;
	}
	ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
	if (ret) {
		FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
	}
	goto fini;
err_open_2:
	/* second open failed -- close the first, then remove both pools */
	rpmemd_db_pool_close(db, prp1);
err_open_1:
	rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
err_create_2:
err_remove_2:
	rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
	rpmemd_db_fini(db);
	return ret;
}
/*
 * test_set_attr -- test rpmemd_db_pool_set_attr()
 *
 * Creates a pool, verifies its attributes on open, overwrites them with
 * rpmemd_db_pool_set_attr() and verifies the new attributes survive a
 * close/reopen cycle.
 */
static int
test_set_attr(const char *root_dir, const char *pool_desc)
{
	/* indexed by POOL_ATTR_CREATE / POOL_ATTR_OPEN / POOL_ATTR_SET_ATTR */
	struct rpmem_pool_attr attr[3];
	struct rpmemd_db_pool *prp;
	struct rpmemd_db *db;
	int ret = -1;
	fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE]));
	fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR]));
	/* only major == 1 / incompat_features == 2 are accepted on open */
	attr[POOL_ATTR_CREATE].major = 1;
	attr[POOL_ATTR_CREATE].incompat_features = 2;
	attr[POOL_ATTR_SET_ATTR].major = 1;
	attr[POOL_ATTR_SET_ATTR].incompat_features = 2;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_create");
		goto err_create;
	}
	rpmemd_db_pool_close(db, prp);
	prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_open");
		goto err_open;
	}
	compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]);
	ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]);
	if (ret) {
		FAILED_FUNC("rpmemd_db_pool_set_attr");
		goto err_set_attr;
	}
	rpmemd_db_pool_close(db, prp);
	/* reopen: the new attributes must have been persisted */
	prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
	if (prp == NULL) {
		FAILED_FUNC("rpmemd_db_pool_open");
		goto err_open;
	}
	compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]);
	rpmemd_db_pool_close(db, prp);
	ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
	if (ret) {
		FAILED_FUNC("rpmemd_db_pool_remove");
	}
	goto fini;
err_set_attr:
	rpmemd_db_pool_close(db, prp);
err_open:
	rpmemd_db_pool_remove(db, pool_desc, 0, 0);
err_create:
fini:
	rpmemd_db_fini(db);
	return ret;
}
/*
 * test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr()
 *
 * Runs the set-attr scenario on two pools at once, tracking the
 * lifecycle of each pool in pool_state[] so the cleanup path under the
 * err label can unwind exactly as far as each pool progressed.
 */
static int
test_set_attr_dual(const char *root_dir, const char *pool_desc_1,
	const char *pool_desc_2)
{
	/* per pool: POOL_ATTR_CREATE / POOL_ATTR_OPEN / POOL_ATTR_SET_ATTR */
	struct rpmem_pool_attr attr[NPOOLS_DUAL][3];
	struct rpmemd_db_pool *prp[NPOOLS_DUAL];
	const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2};
	unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL};
	struct rpmemd_db *db;
	int ret = -1;
	/* initialize rpmem database */
	db = rpmemd_db_init(root_dir, POOL_MODE);
	if (db == NULL) {
		FAILED_FUNC("rpmemd_db_init");
		return -1;
	}
	for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
		/*
		 * generate random pool attributes for create and set
		 * attributes operations
		 */
		fill_rand(&attr[p][POOL_ATTR_CREATE],
				sizeof(attr[p][POOL_ATTR_CREATE]));
		fill_rand(&attr[p][POOL_ATTR_SET_ATTR],
				sizeof(attr[p][POOL_ATTR_SET_ATTR]));
		attr[p][POOL_ATTR_CREATE].major = 1;
		attr[p][POOL_ATTR_CREATE].incompat_features = 2;
		attr[p][POOL_ATTR_SET_ATTR].major = 1;
		attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2;
		/* create pool */
		prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0,
				&attr[p][POOL_ATTR_CREATE]);
		if (prp[p] == NULL) {
			FAILED_FUNC_PARAM("rpmemd_db_pool_create",
					pool_desc[p]);
			goto err;
		}
		rpmemd_db_pool_close(db, prp[p]);
		pool_state[p] = POOL_STATE_CREATED;
	}
	/* open pools and check pool attributes */
	for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
		prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
				&attr[p][POOL_ATTR_OPEN]);
		if (prp[p] == NULL) {
			FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
			goto err;
		}
		pool_state[p] = POOL_STATE_OPENED;
		compare_attr(&attr[p][POOL_ATTR_CREATE],
				&attr[p][POOL_ATTR_OPEN]);
	}
	/* set attributes and close pools */
	for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
		ret = rpmemd_db_pool_set_attr(prp[p],
				&attr[p][POOL_ATTR_SET_ATTR]);
		if (ret) {
			FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr",
					pool_desc[p]);
			goto err;
		}
		rpmemd_db_pool_close(db, prp[p]);
		pool_state[p] = POOL_STATE_CLOSED;
	}
	/* open pools and check attributes */
	for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
		prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
				&attr[p][POOL_ATTR_OPEN]);
		if (prp[p] == NULL) {
			FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
			goto err;
		}
		pool_state[p] = POOL_STATE_OPENED;
		compare_attr(&attr[p][POOL_ATTR_SET_ATTR],
				&attr[p][POOL_ATTR_OPEN]);
	}
err:
	/* unwind each pool according to how far its lifecycle progressed */
	for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
		if (pool_state[p] == POOL_STATE_OPENED) {
			rpmemd_db_pool_close(db, prp[p]);
			pool_state[p] = POOL_STATE_CLOSED;
		}
		if (pool_state[p] == POOL_STATE_CREATED) {
			ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0);
			if (ret) {
				FAILED_FUNC_PARAM("rpmemd_db_pool_remove",
						pool_desc[p]);
			}
			pool_state[p] = POOL_STATE_REMOVED;
		}
	}
	rpmemd_db_fini(db);
	return ret;
}
/*
 * exists_cb -- poolset part callback; returns 1 when the part file exists
 */
static int
exists_cb(struct part_file *pf, void *arg)
{
	(void) arg; /* unused */
	return util_file_exists(pf->part->path);
}
/*
 * noexists_cb -- poolset part callback; returns 1 when the part file does
 * NOT exist, propagating errors (< 0) unchanged
 */
static int
noexists_cb(struct part_file *pf, void *arg)
{
	(void) arg; /* unused */
	int exists = util_file_exists(pf->part->path);
	return (exists < 0) ? -1 : !exists;
}
/*
 * test_remove -- test for rpmemd_db_pool_remove()
 *
 * Verifies that a non-force remove deletes every part file of the pool
 * set, and that a force remove (last arg == 1) deletes the pool set
 * file itself as well.
 */
static void
test_remove(const char *root_dir, const char *pool_desc)
{
	struct rpmem_pool_attr attr;
	struct rpmemd_db_pool *prp;
	struct rpmemd_db *db;
	int ret;
	char path[PATH_MAX];
	snprintf(path, PATH_MAX, "%s/%s", root_dir, pool_desc);
	fill_rand(&attr, sizeof(attr));
	strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid));
	/* incompat_features = 2 marks the no-header (NOHDRS) pool layout */
	attr.incompat_features = 2;
	db = rpmemd_db_init(root_dir, POOL_MODE);
	UT_ASSERTne(db, NULL);
	prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
	UT_ASSERTne(prp, NULL);
	rpmemd_db_pool_close(db, prp);
	/* all part files must exist after create */
	ret = util_poolset_foreach_part(path, exists_cb, NULL);
	UT_ASSERTeq(ret, 1);
	ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
	UT_ASSERTeq(ret, 0);
	/* ... and none may remain after remove */
	ret = util_poolset_foreach_part(path, noexists_cb, NULL);
	UT_ASSERTeq(ret, 1);
	prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
	UT_ASSERTne(prp, NULL);
	rpmemd_db_pool_close(db, prp);
	/* force remove (pool_set arg == 1) also deletes the set file */
	ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1);
	UT_ASSERTeq(ret, 0);
	ret = util_file_exists(path);
	UT_ASSERTne(ret, 1);
	rpmemd_db_fini(db);
}
/*
 * main -- run all pool-set-database unit tests
 *
 * usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2>
 */
int
main(int argc, char *argv[])
{
	char *pool_desc[2], *log_file;
	char root_dir[PATH_MAX];
	START(argc, argv, "rpmemd_db");
	util_init();
	out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0);
	if (argc != 5)
		UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>"
				" <pool_desc_2>", argv[0]);
	log_file = argv[1];
	if (realpath(argv[2], root_dir) == NULL)
		/* bug fix: report argv[2] (the failing path), not argv[1] */
		UT_FATAL("!realpath(%s)", argv[2]);
	pool_desc[0] = argv[3];
	pool_desc[1] = argv[4];
	if (rpmemd_log_init("rpmemd error: ", log_file, 0))
		FAILED_FUNC("rpmemd_log_init");
	/* each test reports its own failures via FAILED_FUNC/UT_FATAL */
	test_init(root_dir);
	test_check_dir(root_dir);
	test_create(root_dir, pool_desc[0]);
	test_create_dual(root_dir, pool_desc[0], pool_desc[1]);
	test_open(root_dir, pool_desc[0]);
	test_open_dual(root_dir, pool_desc[0], pool_desc[1]);
	test_set_attr(root_dir, pool_desc[0]);
	test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]);
	test_remove(root_dir, pool_desc[0]);
	rpmemd_log_close();
	out_fini();
	DONE(NULL);
}
| 16,549 | 23.701493 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/pmempool_sync_remote/config.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# pmempool_sync_remote/config.sh -- test configuration
#
# run on any filesystem type
CONF_GLOBAL_FS_TYPE=any
# exercise both debug and nondebug builds
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
# test every available rpmem provider and persistency method
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 1,775 | 41.285714 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/pmempool_sync_remote/common.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# pmempool_sync_remote/common.sh -- pmempool sync with remote replication
#
set -e
# two nodes: node 0 is the rpmem target, node 1 runs pmempool/pmemobjcli
require_nodes 2
require_node_libfabric 0 $RPMEM_PROVIDER
require_node_libfabric 1 $RPMEM_PROVIDER
setup
init_rpmem_on_node 1 0
require_node_log_files 1 pmemobj$UNITTEST_NUM.log
require_node_log_files 1 pmempool$UNITTEST_NUM.log
# script of pmemobjcli commands used to populate the pool
PMEMOBJCLI_SCRIPT="pmemobjcli.script"
copy_files_to_node 1 ${NODE_TEST_DIR[1]} $PMEMOBJCLI_SCRIPT
POOLSET_LOCAL="local_pool.set"
#
# configure_poolsets -- configure pool set files for test
# usage: configure_poolsets <local replicas> <remote replicas>
#
# Builds a master pool set on node 1 with <local replicas> extra local
# replicas and <remote replicas> remote replicas hosted on node 0, then
# creates the obj pool and populates it via pmemobjcli.
#
function configure_poolsets() {
	local n_local=$1
	local n_remote=$2
	local poolset_args="8M:${NODE_DIR[1]}/pool.part.1:x
8M:${NODE_DIR[1]}/pool.part.2:x"
	# append one local replica ("R ...") per requested local replica
	for i in $(seq 0 $((n_local - 1))); do
		poolset_args="$poolset_args R 8M:${NODE_DIR[1]}/pool.$i.part.1:x
8M:${NODE_DIR[1]}/pool.$i.part.2:x"
	done
	# create one remote pool set per remote replica and reference it
	# from the master set ("m <node>:<set>")
	for i in $(seq 0 $((n_remote - 1))); do
		POOLSET_REMOTE[$i]="remote_pool.$i.set"
		create_poolset $DIR/${POOLSET_REMOTE[$i]}\
			8M:${NODE_DIR[0]}remote.$i.part.1:x\
			8M:${NODE_DIR[0]}remote.$i.part.2:x
		copy_files_to_node 0 ${NODE_DIR[0]} $DIR/${POOLSET_REMOTE[$i]}
		poolset_args="$poolset_args m ${NODE_ADDR[0]}:${POOLSET_REMOTE[$i]}"
	done
	create_poolset $DIR/$POOLSET_LOCAL $poolset_args
	copy_files_to_node 1 ${NODE_DIR[1]} $DIR/$POOLSET_LOCAL
	expect_normal_exit run_on_node 1 ../pmempool rm -sf ${NODE_DIR[1]}$POOLSET_LOCAL
	expect_normal_exit run_on_node 1 ../pmempool create obj ${NODE_DIR[1]}$POOLSET_LOCAL
	expect_normal_exit run_on_node 1 ../pmemobjcli -s $PMEMOBJCLI_SCRIPT ${NODE_DIR[1]}$POOLSET_LOCAL > /dev/null
}
# base pmempool-info invocation used to dump pool metadata for diffing
DUMP_INFO_LOG="../pmempool info -lHZCOoAa"
# remote replicas have no pool set header -- force the obj format
DUMP_INFO_LOG_REMOTE="$DUMP_INFO_LOG -f obj"
# strip fields that legitimately differ between replicas (checksums,
# creation time, and for remote dumps the part-linkage UUIDs)
DUMP_INFO_SED="sed -e '/^Checksum/d' -e '/^Creation/d'"
DUMP_INFO_SED_REMOTE="$DUMP_INFO_SED -e '/^Previous part UUID/d' -e '/^Next part UUID/d'"
# dump_info_log -- dump filtered pool info for replica <rep> of <poolset>
# on node <node> into file <name>; optional 5th arg is an extra line
# prefix to filter out
function dump_info_log() {
	local node=$1
	local rep=$2
	local poolset=$3
	local name=$4
	local ignore=$5
	local sed_cmd="$DUMP_INFO_SED"
	if [ -n "$ignore" ]; then
		sed_cmd="$sed_cmd -e '/^$ignore/d'"
	fi
	expect_normal_exit run_on_node $node "\"$DUMP_INFO_LOG -p $rep $poolset | $sed_cmd > $name\""
}
# dump_info_log_remote -- like dump_info_log but for a remote replica
# pool set (uses the obj-format dump and the remote sed filter)
function dump_info_log_remote() {
	local node=$1
	local poolset=$2
	local name=$3
	local ignore=$4
	local sed_cmd="$DUMP_INFO_SED_REMOTE"
	if [ -n "$ignore" ]; then
		sed_cmd="$sed_cmd -e '/^$ignore/d'"
	fi
	expect_normal_exit run_on_node $node "\"$DUMP_INFO_LOG_REMOTE $poolset | $sed_cmd > $name\""
}
# diff_log -- on node <node>, fail unless files <f1> and <f2> are both
# non-empty and identical
function diff_log() {
	local node=$1
	local f1=$2
	local f2=$3
	expect_normal_exit run_on_node $node "\"[ -s $f1 ] && [ -s $f2 ] && diff $f1 $f2\""
}
# exec_pmemobjcli_script -- run pmemobjcli <script> against <poolset> on
# node <node>, capturing its stdout in <out>
exec_pmemobjcli_script() {
	local node=$1
	local script=$2
	local poolset=$3
	local out=$4
	expect_normal_exit run_on_node $node "\"../pmemobjcli -s $script $poolset > $out \""
}
| 4,397 | 30.414286 | 110 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_test.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip_test.c -- tests for rpmem_fip and rpmemd_fip modules
*/
#include <netdb.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip_common.h"
#include "rpmem_fip_oob.h"
#include "rpmemd_fip.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "rpmem_fip.h"
#include "os.h"
#define SIZE_PER_LANE 64
#define COUNT_PER_LANE 32
#define NLANES 1024
#define SOCK_NLANES 32
#define NTHREADS 32
#define TOTAL_PER_LANE (SIZE_PER_LANE * COUNT_PER_LANE)
#define POOL_SIZE (NLANES * TOTAL_PER_LANE)
static uint8_t lpool[POOL_SIZE];
static uint8_t rpool[POOL_SIZE];
TEST_CASE_DECLARE(client_init);
TEST_CASE_DECLARE(server_init);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(server_connect);
TEST_CASE_DECLARE(server_process);
TEST_CASE_DECLARE(client_persist);
TEST_CASE_DECLARE(client_persist_mt);
TEST_CASE_DECLARE(client_read);
/*
 * get_persist_method -- map a persist method name ("GPSPM" or "APM")
 * to its enum value; aborts the test on anything else
 */
static enum rpmem_persist_method
get_persist_method(const char *pm)
{
	if (strcmp(pm, "GPSPM") == 0)
		return RPMEM_PM_GPSPM;
	if (strcmp(pm, "APM") == 0)
		return RPMEM_PM_APM;

	UT_FATAL("unknown method");
}
/*
 * get_provider -- get provider for given target
 *
 * Probes the target for available libfabric providers; "any" prefers
 * verbs over sockets. On success *nlanes may be reduced -- the sockets
 * provider is capped at SOCK_NLANES to keep the test duration sane.
 */
static enum rpmem_provider
get_provider(const char *target, const char *prov_name, unsigned *nlanes)
{
	struct rpmem_fip_probe probe;
	int ret;
	int any = 0;
	if (strcmp(prov_name, "any") == 0)
		any = 1;
	ret = rpmem_fip_probe_get(target, &probe);
	UT_ASSERTeq(ret, 0);
	/* at least one provider must be available on the target */
	UT_ASSERT(rpmem_fip_probe_any(probe));
	enum rpmem_provider provider;
	if (any) {
		/* return verbs in first place */
		if (rpmem_fip_probe(probe,
				RPMEM_PROV_LIBFABRIC_VERBS))
			provider = RPMEM_PROV_LIBFABRIC_VERBS;
		else if (rpmem_fip_probe(probe,
				RPMEM_PROV_LIBFABRIC_SOCKETS))
			provider = RPMEM_PROV_LIBFABRIC_SOCKETS;
		else
			UT_ASSERT(0);
	} else {
		/* an explicitly requested provider must be available */
		provider = rpmem_provider_from_str(prov_name);
		UT_ASSERTne(provider, RPMEM_PROV_UNKNOWN);
		UT_ASSERT(rpmem_fip_probe(probe, provider));
	}
	/*
	 * Decrease number of lanes for socket provider because
	 * the test may be too long.
	 */
	if (provider == RPMEM_PROV_LIBFABRIC_SOCKETS)
		*nlanes = min(*nlanes, SOCK_NLANES);
	return provider;
}
/*
 * set_pool_data -- fill the pool with a deterministic per-block pattern
 *
 * Each SIZE_PER_LANE block is memset with (lane + block), optionally
 * bit-inverted, so the local and remote copies can be cross-checked.
 */
static void
set_pool_data(uint8_t *pool, int inverse)
{
	for (unsigned lane = 0; lane < NLANES; lane++) {
		for (unsigned blk = 0; blk < COUNT_PER_LANE; blk++) {
			size_t off = lane * TOTAL_PER_LANE +
				blk * SIZE_PER_LANE;
			unsigned pattern = lane + blk;
			if (inverse)
				pattern = ~pattern;
			memset(&pool[off], (int)pattern, SIZE_PER_LANE);
		}
	}
}
/*
 * persist_arg -- arguments for client persist thread
 */
struct persist_arg {
	struct rpmem_fip *fip;	/* fabric connection the thread persists on */
	unsigned lane;		/* lane index this thread owns */
};
/*
 * client_persist_thread -- thread callback for persist operation
 *
 * Writes the well-known pattern into this thread's lane of the local pool
 * and persists each block to the remote side over its own lane.
 */
static void *
client_persist_thread(void *arg)
{
	struct persist_arg *args = arg;
	int ret;
	/* persist with len == 0 should always succeed */
	ret = rpmem_fip_persist(args->fip, args->lane * TOTAL_PER_LANE,
			0, args->lane, RPMEM_PERSIST_WRITE);
	UT_ASSERTeq(ret, 0);
	for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
		size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE;
		/* same pattern as set_pool_data() with inverse == 0 */
		unsigned val = args->lane + i;
		memset(&lpool[offset], (int)val, SIZE_PER_LANE);
		ret = rpmem_fip_persist(args->fip, offset,
				SIZE_PER_LANE, args->lane, RPMEM_PERSIST_WRITE);
		UT_ASSERTeq(ret, 0);
	}
	return NULL;
}
/*
 * client_init -- test case for client initialization
 *
 * Exchanges connection attributes with the server-side counterpart
 * (server_init) and initializes -- but does not connect -- the fip client.
 * Consumes 3 arguments: <target> <provider> <persist method>.
 */
int
client_init(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);
	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];
	/* run server_init on the remote node via the out-of-band channel */
	set_rpmem_cmd("server_init %s", persist_method);
	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	unsigned nlanes = NLANES;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);
	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, nlanes, provider, &resp);
	/* build fip attributes from the server's response */
	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};
	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);
	/*
	 * Tune the maximum number of lanes according to environment.
	 */
	rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);
	/* init/fini only -- no rpmem_fip_connect() in this test case */
	client_close_begin(client);
	client_close_end(client);
	rpmem_fip_fini(fip);
	rpmem_target_free(info);
	return 3;
}
/*
 * server_init -- test case for server initialization
 *
 * Counterpart of client_init: receives lane/provider parameters over the
 * out-of-band channel, initializes the server-side fip and replies with the
 * connection attributes. Consumes 1 argument: <persist method>.
 */
int
server_init(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);
	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	int ret;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);
	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.nthreads = NTHREADS,
	};
	/* let rpmemd pick the effective persist/flush routines */
	ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
			&attr.memcpy_persist,
			1 /* is pmem */);
	UT_ASSERTeq(ret, 0);
	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;
	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);
	server_exchange_end(resp);
	server_close_begin();
	server_close_end();
	rpmemd_fip_fini(fip);
	FREE(addr);
	return 1;
}
/*
 * client_connect -- test case for establishing connection - client side
 *
 * Same setup as client_init but additionally performs a full
 * rpmem_fip_connect() / rpmem_fip_close() round-trip.
 * Consumes 3 arguments: <target> <provider> <persist method>.
 */
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);
	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];
	/* run server_connect on the remote node */
	set_rpmem_cmd("server_connect %s", persist_method);
	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	unsigned nlanes = NLANES;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);
	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, nlanes, provider, &resp);
	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};
	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);
	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);
	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);
	/* signal the server before closing so both sides tear down in step */
	client_close_begin(client);
	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	client_close_end(client);
	rpmem_fip_fini(fip);
	rpmem_target_free(info);
	return 3;
}
/*
 * server_connect -- test case for establishing connection - server side
 *
 * Counterpart of client_connect: accepts one connection, waits for the
 * client to disconnect and shuts down. Consumes 1 argument:
 * <persist method>.
 */
int
server_connect(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);
	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);
	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.nthreads = NTHREADS,
	};
	int ret;
	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;
	ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
			&attr.memcpy_persist,
			1 /* is pmem */);
	UT_ASSERTeq(ret, 0);
	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);
	server_exchange_end(resp);
	/* -1 == wait without timeout */
	ret = rpmemd_fip_accept(fip, -1);
	UT_ASSERTeq(ret, 0);
	server_close_begin();
	server_close_end();
	ret = rpmemd_fip_wait_close(fip, -1);
	UT_ASSERTeq(ret, 0);
	ret = rpmemd_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	rpmemd_fip_fini(fip);
	FREE(addr);
	return 1;
}
/*
 * server_process -- test case for processing data on server side
 *
 * Accepts a connection and runs the fip processing loop (serving persist
 * and read requests) until the client signals close. The remote pool is
 * pre-filled with the inverted pattern so successful client writes are
 * distinguishable from untouched data. Consumes 1 argument:
 * <persist method>.
 */
int
server_process(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);
	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
	set_pool_data(rpool, 1);
	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);
	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.nthreads = NTHREADS,
	};
	int ret;
	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;
	ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
			&attr.memcpy_persist,
			1 /* is pmem */);
	UT_ASSERTeq(ret, 0);
	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);
	server_exchange_end(resp);
	ret = rpmemd_fip_accept(fip, -1);
	UT_ASSERTeq(ret, 0);
	ret = rpmemd_fip_process_start(fip);
	/* fix: the return value was stored but never verified */
	UT_ASSERTeq(ret, 0);
	server_close_begin();
	ret = rpmemd_fip_process_stop(fip);
	UT_ASSERTeq(ret, 0);
	server_close_end();
	ret = rpmemd_fip_wait_close(fip, -1);
	UT_ASSERTeq(ret, 0);
	ret = rpmemd_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	rpmemd_fip_fini(fip);
	FREE(addr);
	return 1;
}
/*
 * client_persist -- test case for single-threaded persist operation
 *
 * Runs the persist worker on lane 0, reads the whole remote pool back and
 * verifies it matches the local pool byte for byte.
 * Consumes 3 arguments: <target> <provider> <persist method>.
 */
int
client_persist(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);
	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];
	set_rpmem_cmd("server_process %s", persist_method);
	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	int ret;
	/* both pools start with the inverted pattern */
	set_pool_data(lpool, 1);
	set_pool_data(rpool, 1);
	unsigned nlanes = NLANES;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);
	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, nlanes, provider, &resp);
	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};
	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);
	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);
	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);
	struct persist_arg arg = {
		.fip = fip,
		.lane = 0,
	};
	/* run the worker inline -- single-threaded variant */
	client_persist_thread(&arg);
	ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	client_close_begin(client);
	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	client_close_end(client);
	rpmem_fip_fini(fip);
	/* local copy of the remote pool must match what we persisted */
	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	return 3;
}
/*
 * client_persist_mt -- test case for multi-threaded persist operation
 *
 * Spawns one persist worker per lane, then reads the whole remote pool
 * back and verifies it matches the local pool byte for byte.
 * Consumes 3 arguments: <target> <provider> <persist method>.
 */
int
client_persist_mt(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);
	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];
	set_rpmem_cmd("server_process %s", persist_method);
	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	set_pool_data(lpool, 1);
	set_pool_data(rpool, 1);
	unsigned nlanes = NLANES;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);
	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, nlanes, provider, &resp);
	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};
	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);
	struct rpmem_fip *fip;
	/* rpmem_fip_init() may update nlanes -- the final value is the
	 * number of worker threads spawned below */
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);
	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);
	/*
	 * fix: size the arrays by the loop bound (nlanes) instead of
	 * resp.nlanes -- the two may differ after rpmem_fip_init(),
	 * which previously risked out-of-bounds writes
	 */
	os_thread_t *persist_thread = MALLOC(nlanes * sizeof(os_thread_t));
	struct persist_arg *args = MALLOC(nlanes *
			sizeof(struct persist_arg));
	for (unsigned i = 0; i < nlanes; i++) {
		args[i].fip = fip;
		args[i].lane = i;
		PTHREAD_CREATE(&persist_thread[i], NULL,
				client_persist_thread, &args[i]);
	}
	for (unsigned i = 0; i < nlanes; i++)
		PTHREAD_JOIN(&persist_thread[i], NULL);
	ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	client_close_begin(client);
	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	client_close_end(client);
	rpmem_fip_fini(fip);
	FREE(persist_thread);
	FREE(args);
	/* local copy of the remote pool must match what we persisted */
	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	return 3;
}
/*
 * client_read -- test case for read operation
 *
 * Local pool holds the plain pattern, remote pool the inverted one; after
 * reading the remote pool into lpool the two buffers must be identical.
 * Consumes 3 arguments: <target> <provider> <persist method>.
 */
int
client_read(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);
	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];
	set_rpmem_cmd("server_process %s", persist_method);
	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	/* different patterns so a successful read is observable */
	set_pool_data(lpool, 0);
	set_pool_data(rpool, 1);
	unsigned nlanes = NLANES;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);
	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, nlanes, provider, &resp);
	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};
	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);
	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);
	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);
	/* read with len == 0 should always succeed */
	ret = rpmem_fip_read(fip, lpool, 0, 0, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	client_close_begin(client);
	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);
	client_close_end(client);
	rpmem_fip_fini(fip);
	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	return 3;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(client_init),
	TEST_CASE(server_init),
	TEST_CASE(client_connect),
	TEST_CASE(server_connect),
	TEST_CASE(client_persist),
	TEST_CASE(client_persist_mt),
	TEST_CASE(server_process),
	TEST_CASE(client_read),
};

/* number of entries in test_cases[] */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
	/* workaround for left-opened files by libfabric */
	rpmem_fip_probe_get("localhost", NULL);
	/*
	 * NOTE(review): the START tag says "rpmem_obc" although this is the
	 * rpmem_fip test -- looks like a copy-paste leftover; confirm whether
	 * any test matcher depends on it before changing.
	 */
	START(argc, argv, "rpmem_obc");
	common_init("rpmem_fip",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmem_util_cmds_init();
	/* rpmemd logging is exercised by the server-side test cases */
	rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
	rpmemd_log_level = rpmemd_log_level_from_str(
			os_getenv("RPMEMD_LOG_LEVEL"));
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	common_fini();
	rpmemd_log_close();
	rpmem_util_cmds_fini();
	DONE(NULL);
}
| 17,818 | 21.903599 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/config.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# src/test/rpmem_fip/config.sh -- test configuration
#
# no filesystem needed -- the test only exercises the network transport
CONF_GLOBAL_FS_TYPE=none
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
# run against every available provider and persistency method
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 1,774 | 41.261905 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_oob.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip_sock.h -- simple oob connection implementation for exchanging
* required RDMA related data
*/
#include <stdint.h>
#include <netinet/in.h>

/* the out-of-band client channel is an ssh connection to the server side */
typedef struct rpmem_ssh client_t;

/* client side: exchange lanes/provider for the server's response attrs */
client_t *client_exchange(struct rpmem_target_info *info,
	unsigned nlanes,
	enum rpmem_provider provider,
	struct rpmem_resp_attr *resp);

/* client side: two-phase close handshake */
void client_close_begin(client_t *c);
void client_close_end(client_t *c);

/* server side: counterparts of the client calls, driven over stdin/stdout */
void server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
	char **addr);
void server_exchange_end(struct rpmem_resp_attr resp);

void server_close_begin(void);
void server_close_end(void);

/* compose and export the RPMEM_CMD environment variable */
void set_rpmem_cmd(const char *fmt, ...);
| 2,258 | 37.948276 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/setup.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# src/test/rpmem_fip/setup.sh -- common setup for rpmem_fip tests
#
set -e

# node 1 runs the client side, node 0 the server side of rpmem_fip
require_nodes 2
require_node_libfabric 0 $RPMEM_PROVIDER
require_node_libfabric 1 $RPMEM_PROVIDER
require_node_log_files 0 $RPMEM_LOG_FILE $RPMEMD_LOG_FILE
require_node_log_files 1 $RPMEM_LOG_FILE $RPMEMD_LOG_FILE
SRV=srv${UNITTEST_NUM}.pid
clean_remote_node 0 $SRV
# command executed over ssh on node 0 in place of the real rpmemd daemon
RPMEM_CMD="\"cd ${NODE_TEST_DIR[0]} && RPMEMD_LOG_LEVEL=\$RPMEMD_LOG_LEVEL RPMEMD_LOG_FILE=\$RPMEMD_LOG_FILE UNITTEST_FORCE_QUIET=1 \
	LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:$REMOTE_LD_LIBRARY_PATH:${NODE_LD_LIBRARY_PATH[0]} \
	./rpmem_fip$EXESUFFIX\""
export_vars_node 1 RPMEM_CMD
# fix: the expansion must be quoted -- an unquoted empty expansion collapses
# to the single-argument test `[ -n ]`, which is always true, so the guard
# never skipped exporting an unset RPMEM_MAX_NLANES
if [ -n "${RPMEM_MAX_NLANES+x}" ]; then
	export_vars_node 1 RPMEM_MAX_NLANES
fi
| 2,292 | 39.946429 | 133 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_oob.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip_oob.c -- simple oob connection implementation for exchanging
* required RDMA related data
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <string.h>
#include "rpmem_common.h"
#include "rpmem_proto.h"
#include "rpmem_fip_oob.h"
#include "rpmem_ssh.h"
#include "unittest.h"
#include "rpmem_util.h"
#include "os.h"
#define CMD_BUFF_SIZE 4096
static const char *rpmem_cmd;
/*
 * set_rpmem_cmd -- set RPMEM_CMD variable
 *
 * Appends the printf-style suffix (the server-side test case and its
 * arguments) to the base command taken from the environment on first call,
 * then re-exports RPMEM_CMD and refreshes librpmem's cached copy.
 */
void
set_rpmem_cmd(const char *fmt, ...)
{
	static char cmd_buff[CMD_BUFF_SIZE];
	if (!rpmem_cmd) {
		/* cache the base command from the environment once */
		char *cmd = os_getenv(RPMEM_CMD_ENV);
		UT_ASSERTne(cmd, NULL);
		rpmem_cmd = STRDUP(cmd);
	}
	ssize_t ret;
	size_t cnt = 0;
	va_list ap;
	va_start(ap, fmt);
	ret = snprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
			"%s ", rpmem_cmd);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	va_end(ap);
	ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
	UT_ASSERTeq(ret, 0);
	/*
	 * Rpmem has an internal RPMEM_CMD variable copy and it is assumed
	 * RPMEM_CMD will not change its value during execution. To refresh the
	 * internal copy it must be destroyed and an instance must be
	 * initialized manually.
	 */
	rpmem_util_cmds_fini();
	rpmem_util_cmds_init();
}
/*
 * client_exchange -- connect to remote host and exchange required information
 *
 * Opens the ssh out-of-band channel, sends the requested number of lanes
 * and provider, and receives the server's response attributes into *resp.
 * The returned handle is closed by client_close_end().
 */
client_t *
client_exchange(struct rpmem_target_info *info,
	unsigned nlanes,
	enum rpmem_provider provider,
	struct rpmem_resp_attr *resp)
{
	struct rpmem_ssh *ssh = rpmem_ssh_open(info);
	UT_ASSERTne(ssh, NULL);
	int ret;
	ret = rpmem_ssh_send(ssh, &nlanes, sizeof(nlanes));
	UT_ASSERTeq(ret, 0);
	ret = rpmem_ssh_send(ssh, &provider, sizeof(provider));
	UT_ASSERTeq(ret, 0);
	ret = rpmem_ssh_recv(ssh, resp, sizeof(*resp));
	UT_ASSERTeq(ret, 0);
	return ssh;
}
/*
 * client_close_begin -- begin closing connection
 *
 * Sends the close command (1) and waits for the server's acknowledgment (0)
 * so both sides tear down in lockstep.
 */
void
client_close_begin(client_t *c)
{
	int cmd = 1;
	int ret;
	ret = rpmem_ssh_send(c, &cmd, sizeof(cmd));
	UT_ASSERTeq(ret, 0);
	ret = rpmem_ssh_recv(c, &cmd, sizeof(cmd));
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(cmd, 0);
}
/*
 * client_close_end -- end closing connection
 */
void
client_close_end(client_t *c)
{
	/* closes the ssh channel opened by client_exchange() */
	rpmem_ssh_close(c);
}
/*
 * server_exchange_begin -- accept a connection and read required information
 *
 * Returns the client's connection address in *addr (heap-allocated; the
 * caller frees it) and reads the requested lanes and provider from stdin.
 * NOTE(review): allocated with plain strdup() while callers release it with
 * the unittest FREE() macro -- confirm the two pair up in this framework.
 */
void
server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
	char **addr)
{
	UT_ASSERTne(addr, NULL);
	char *conn = rpmem_get_ssh_conn_addr();
	UT_ASSERTne(conn, NULL);
	*addr = strdup(conn);
	UT_ASSERTne(*addr, NULL);
	/* status 0 tells the client the server side is ready */
	uint32_t status = 0;
	WRITE(STDOUT_FILENO, &status, sizeof(status));
	READ(STDIN_FILENO, lanes, sizeof(*lanes));
	READ(STDIN_FILENO, provider, sizeof(*provider));
}
/*
 * server_exchange_end -- send response to client
 */
void
server_exchange_end(struct rpmem_resp_attr resp)
{
	WRITE(STDOUT_FILENO, &resp, sizeof(resp));
}
/*
 * server_close_begin -- wait for close command
 *
 * Blocks until the client sends the close command (1).
 */
void
server_close_begin(void)
{
	int cmd = 0;
	READ(STDIN_FILENO, &cmd, sizeof(cmd));
	UT_ASSERTeq(cmd, 1);
}
/*
 * server_close_end -- send close response and wait for disconnect
 */
void
server_close_end(void)
{
	/* acknowledge with 0 -- matches the check in client_close_begin() */
	int cmd = 0;
	WRITE(STDOUT_FILENO, &cmd, sizeof(cmd));
}
| 4,863 | 23.199005 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmemd_log/rpmemd_log_test.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_log_test.c -- unit tests for rpmemd_log
*/
#include <stddef.h>
#include <sys/param.h>
#include <syslog.h>
#include "unittest.h"
#include "rpmemd_log.h"
#define PREFIX "prefix"

/* file that the syslog/vsyslog mocks redirect messages into */
static FILE *syslog_fh;
/*
 * openlog -- mock for openlog function which logs its usage
 */
FUNC_MOCK(openlog, void, const char *ident, int option, int facility)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("openlog: ident = %s, option = %d, facility = %d",
			ident, option, facility);
}
FUNC_MOCK_END
/*
 * closelog -- mock for closelog function which logs its usage
 */
FUNC_MOCK(closelog, void, void)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("closelog");
}
FUNC_MOCK_END
/*
 * syslog -- mock for syslog function which redirects message to a file
 */
FUNC_MOCK(syslog, void, int priority, const char *format, ...)
FUNC_MOCK_RUN_DEFAULT {
	/* only the priorities rpmemd_log is expected to emit */
	UT_ASSERT(priority == LOG_ERR ||
		priority == LOG_WARNING ||
		priority == LOG_NOTICE ||
		priority == LOG_INFO ||
		priority == LOG_DEBUG);
	va_list ap;
	va_start(ap, format);
	vfprintf(syslog_fh, format, ap);
	va_end(ap);
}
FUNC_MOCK_END
/*
 * vsyslog -- mock for vsyslog function which redirects message to a file
 */
FUNC_MOCK(vsyslog, void, int priority, const char *format, va_list ap)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERT(priority == LOG_ERR ||
		priority == LOG_WARNING ||
		priority == LOG_NOTICE ||
		priority == LOG_INFO ||
		priority == LOG_DEBUG);
	vfprintf(syslog_fh, format, ap);
}
FUNC_MOCK_END
/*
 * l2s -- level to string
 *
 * Thin shorthand used to keep the log-message format strings readable.
 */
static const char *
l2s(enum rpmemd_log_level level)
{
	return rpmemd_log_level_to_str(level);
}
/*
 * test_log_messages -- test log messages on specified level
 *
 * Sets the global threshold, then emits one message at every severity;
 * only those at or above the threshold should reach the output.
 */
static void
test_log_messages(enum rpmemd_log_level level)
{
	rpmemd_log_level = level;
	RPMEMD_LOG(ERR, "ERR message on %s level", l2s(level));
	RPMEMD_LOG(WARN, "WARN message on %s level", l2s(level));
	RPMEMD_LOG(NOTICE, "NOTICE message on %s level", l2s(level));
	RPMEMD_LOG(INFO, "INFO message on %s level", l2s(level));
	RPMEMD_DBG("DBG message on %s level", l2s(level));
}
/*
 * test_all_log_messages -- test log messages on all levels, with and without
 * a prefix.
 */
static void
test_all_log_messages(void)
{
	/* first pass: no prefix */
	rpmemd_prefix(NULL);
	test_log_messages(RPD_LOG_ERR);
	test_log_messages(RPD_LOG_WARN);
	test_log_messages(RPD_LOG_NOTICE);
	test_log_messages(RPD_LOG_INFO);
	test_log_messages(_RPD_LOG_DBG);
	/* second pass: every message tagged with "[prefix]" */
	rpmemd_prefix("[%s]", PREFIX);
	test_log_messages(RPD_LOG_ERR);
	test_log_messages(RPD_LOG_WARN);
	test_log_messages(RPD_LOG_NOTICE);
	test_log_messages(RPD_LOG_INFO);
	test_log_messages(_RPD_LOG_DBG);
}
/* print the command-line usage of this test binary */
#define USAGE() do {\
	UT_ERR("usage: %s fatal|log|assert "\
			"stderr|file|syslog <file>", argv[0]);\
} while (0)

/* where the log output is routed during the test */
enum test_log_type {
	TEST_STDERR,
	TEST_FILE,
	TEST_SYSLOG,
};
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmemd_log");
	if (argc < 4) {
		USAGE();
		return 1;
	}
	/* argv[1]: operation, argv[2]: log sink, argv[3]: capture file */
	const char *log_op = argv[1];
	const char *log_type = argv[2];
	const char *file = argv[3];
	int do_fatal = 0;
	int do_assert = 0;
	if (strcmp(log_op, "fatal") == 0) {
		do_fatal = 1;
	} else if (strcmp(log_op, "assert") == 0) {
		do_assert = 1;
	} else if (strcmp(log_op, "log") == 0) {
	} else {
		USAGE();
		return 1;
	}
	enum test_log_type type;
	if (strcmp(log_type, "stderr") == 0) {
		type = TEST_STDERR;
	} else if (strcmp(log_type, "file") == 0) {
		type = TEST_FILE;
	} else if (strcmp(log_type, "syslog") == 0) {
		type = TEST_SYSLOG;
	} else {
		USAGE();
		return 1;
	}
	int fd_stderr = -1;
	FILE *stderr_fh = NULL;
	switch (type) {
	case TEST_STDERR:
		/*
		 * Duplicate stderr file descriptor (fd 2) in order to
		 * preserve it after redirecting the stream to a file.
		 */
		fd_stderr = dup(2);
		UT_ASSERTne(fd_stderr, -1);
		os_close(2);
		/* fd 2 is now free, so fopen() reuses it for the file */
		stderr_fh = os_fopen(file, "a");
		UT_ASSERTne(stderr_fh, NULL);
		break;
	case TEST_SYSLOG:
		/* the syslog/vsyslog mocks write into this file */
		syslog_fh = os_fopen(file, "a");
		UT_ASSERTne(syslog_fh, NULL);
		break;
	default:
		break;
	}
	int ret;
	switch (type) {
	case TEST_STDERR:
		ret = rpmemd_log_init("rpmemd_log", NULL, 0);
		UT_ASSERTeq(ret, 0);
		break;
	case TEST_SYSLOG:
		ret = rpmemd_log_init("rpmemd_log", NULL, 1);
		UT_ASSERTeq(ret, 0);
		break;
	case TEST_FILE:
		ret = rpmemd_log_init("rpmemd_log", file, 0);
		UT_ASSERTeq(ret, 0);
		break;
	}
	if (do_fatal) {
		RPMEMD_FATAL("fatal");
	} else if (do_assert) {
		/* passing assert first, then a failing one that aborts */
		RPMEMD_ASSERT(1);
		RPMEMD_ASSERT(0);
	} else {
		test_all_log_messages();
	}
	rpmemd_log_close();
	switch (type) {
	case TEST_STDERR:
		/* restore the original stderr file descriptor */
		fclose(stderr_fh);
		UT_ASSERTeq(dup2(fd_stderr, 2), 2);
		os_close(fd_stderr);
		break;
	case TEST_SYSLOG:
		fclose(syslog_fh);
		break;
	default:
		break;
	}
	DONE(NULL);
}
| 6,270 | 23.02682 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/log_pool_win/log_pool_win.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* log_pool.c -- unit test for pmemlog_create() and pmemlog_open()
*
* usage: log_pool op path [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- create a pmemlog pool and report its parameters
 *
 * On success prints file size, usable space and permissions, then runs a
 * consistency check; on failure prints the errno-style message.
 */
static void
pool_create(const wchar_t *path, size_t poolsize, unsigned mode)
{
	/* the unittest output is UTF-8, so convert the wide path once */
	char *upath = ut_toUTF8(path);
	PMEMlogpool *plp = pmemlog_createW(path, poolsize, mode);
	if (plp == NULL)
		UT_OUT("!%s: pmemlog_create", upath);
	else {
		os_stat_t stbuf;
		STATW(path, &stbuf);
		UT_OUT("%s: file size %zu usable space %zu mode 0%o",
				upath, stbuf.st_size,
				pmemlog_nbyte(plp),
				stbuf.st_mode & 0777);
		pmemlog_close(plp);
		int result = pmemlog_checkW(path);
		if (result < 0)
			UT_OUT("!%s: pmemlog_check", upath);
		else if (result == 0)
			UT_OUT("%s: pmemlog_check: not consistent", upath);
	}
	free(upath);
}
/*
 * pool_open -- open an existing pmemlog pool (wide-char path) and report
 * success or failure; the pool is closed immediately after opening.
 */
static void
pool_open(const wchar_t *path)
{
	char *upath = ut_toUTF8(path);
	PMEMlogpool *plp = pmemlog_openW(path);
	if (plp == NULL)
		UT_OUT("!%s: pmemlog_open", upath);
	else {
		UT_OUT("%s: pmemlog_open: Success", upath);
		pmemlog_close(plp);
	}
	free(upath);
}
/*
 * wmain -- usage: log_pool_win op path [poolsize mode]
 *
 * op is 'c' (create, which requires poolsize and mode) or 'o' (open).
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "log_pool_win");
	if (argc < 3)
		UT_FATAL("usage: %s op path [poolsize mode]",
			ut_toUTF8(argv[0]));
	size_t poolsize;
	unsigned mode;
	switch (argv[1][0]) {
	case 'c':
		/*
		 * BUGFIX: "create" reads argv[3] and argv[4]; the argc < 3
		 * guard above was not enough and allowed an out-of-bounds
		 * read when poolsize/mode were omitted.
		 */
		if (argc < 5)
			UT_FATAL("usage: %s c path poolsize mode",
				ut_toUTF8(argv[0]));
		poolsize = wcstoul(argv[3], NULL, 0) * MB; /* in megabytes */
		mode = (unsigned)wcstoul(argv[4], NULL, 8);
		pool_create(argv[2], poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2]);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONEW(NULL);
}
| 3,277 | 25.650407 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/util_poolset_size/util_poolset_size.c
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util_poolset_size.c -- unit test for util_poolset_size
*
* usage: util_poolset_size file...
*/
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
/*
 * main -- for each file argument, print the poolset size computed by
 * util_poolset_size().
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_poolset_size");
	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
			MAJOR_VERSION, MINOR_VERSION);
	if (argc < 2)
		UT_FATAL("usage: %s file...",
			argv[0]);
	for (int i = 1; i < argc; i++) {
		char *fname = argv[i];
		size_t size = util_poolset_size(fname);
		/* %zu is the portable size_t conversion (%lu is wrong on LLP64) */
		UT_OUT("util_poolset_size(%s): %zu", fname, size);
	}
	common_fini();
	DONE(NULL);
}
| 2,351 | 31.666667 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemobjcli/pmemobjcli.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobjcli.c -- CLI interface for pmemobj API
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdarg.h>
#include <unistd.h>
#include <stdbool.h>
#include <limits.h>
#include <setjmp.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "common.h"
#include "os.h"
#include "vec.h"
#define POCLI_ENV_EXIT_ON_ERROR "PMEMOBJCLI_EXIT_ON_ERROR"
#define POCLI_ENV_ECHO_MODE "PMEMOBJCLI_ECHO_MODE"
#define POCLI_ENV_COMMENTS "PMEMOBJCLI_COMMENTS"
#define POCLI_ENV_EMPTY_CMDS "PMEMOBJCLI_EMPTY_CMDS"
#define POCLI_ENV_LONG_NAMES "PMEMOBJCLI_LONG_NAMES"
#define POCLI_ENV_HELP "PMEMOBJCLI_HELP"
#define POCLI_CMD_DELIM " "
#define POCLI_CMD_PROMPT "pmemobjcli $ "
#define POCLI_INBUF_LEN 4096
struct pocli;
TOID_DECLARE(struct item, 1);
/*
 * item -- structure used to connect elements in lists.
 */
struct item {
	POBJ_LIST_ENTRY(struct item) field;
};
/*
 * plist -- structure used as a list entry.
 */
POBJ_LIST_HEAD(plist, struct item);
/*
 * struct pocli_ctx -- pmemobjcli context structure for commands
 */
struct pocli_ctx {
	PMEMobjpool *pop;	/* pool all commands operate on */
	PMEMoid root;		/* cached descriptor of the root object */
	FILE *err;		/* stream for error messages */
	FILE *out;		/* stream for command output */
	struct pocli *pocli;	/* back-pointer to the owning CLI */
	bool tx_aborted;	/* last transaction ended by abort */
	VEC(, struct pocli_args *) free_on_abort; /* args freed on tx abort */
};
/*
 * struct pocli_args -- arguments for pmemobjcli command
 */
struct pocli_args {
	int argc;	/* number of entries in argv */
	char *argv[];	/* flexible array of argument strings */
};
/*
 * enum pocli_ret -- return values
 */
enum pocli_ret {
	POCLI_RET_OK,		/* success */
	POCLI_ERR_ARGS,		/* wrong argument count or value */
	POCLI_ERR_PARS,		/* argument parsing failed */
	POCLI_ERR_CMD,		/* command execution failed */
	POCLI_ERR_MALLOC,	/* out of memory */
	POCLI_RET_QUIT,		/* quit command requested */
};
/*
 * pocli_cmd_fn -- function prototype for pmemobjcli commands
 */
typedef enum pocli_ret (*pocli_cmd_fn)(struct pocli_ctx *ctx,
		struct pocli_args *args);
/*
 * struct pocli_cmd -- pmemobjcli command descriptor
 */
struct pocli_cmd {
	const char *name; /* long name of command */
	const char *name_short; /* short name of command */
	const char *usage; /* usage string */
	pocli_cmd_fn func; /* command's entry point */
};
/*
 * struct pocli_opts -- configuration options for pmemobjcli
 */
struct pocli_opts {
	bool exit_on_error; /* exit when error occurred */
	bool echo_mode; /* print every command from input */
	bool enable_comments; /* enable comments on input */
	bool enable_empty_cmds; /* enable empty lines */
	bool enable_long_names; /* enable long names */
	bool enable_help; /* enable printing help */
};
/*
 * struct pocli -- main context of pmemobjcli
 */
struct pocli {
	FILE *in; /* input file handle */
	const char *fname; /* pool's file name */
	char *inbuf; /* input buffer */
	size_t inbuf_len; /* input buffer length */
	struct pocli_ctx ctx; /* context for commands */
	const struct pocli_cmd *cmds; /* available commands */
	size_t ncmds; /* number of available commands */
	int istty; /* stdout is tty */
	struct pocli_opts opts; /* configuration options */
};
/* main REPL loop; forward-declared for tx_begin's "jmp" mode */
int pocli_process(struct pocli *pcli);
/*
* pocli_err -- print error message
*/
static enum pocli_ret
pocli_err(struct pocli_ctx *ctx, enum pocli_ret ret, const char *fmt, ...)
{
fprintf(ctx->err, "error: ");
va_list ap;
va_start(ap, fmt);
vfprintf(ctx->err, fmt, ap);
va_end(ap);
return ret;
}
/*
* pocli_printf -- print message
*/
static void
pocli_printf(struct pocli_ctx *ctx, const char *fmt, ...)
{
	va_list argp;

	/* forward the varargs to the context's output stream */
	va_start(argp, fmt);
	vfprintf(ctx->out, fmt, argp);
	va_end(argp);
}
/*
* pocli_args_number -- parse type number
*/
static enum pocli_ret
pocli_args_number(struct pocli_args *args, int arg, uint64_t *type_num)
{
	assert(args != NULL);
	assert(arg >= 0 && arg < args->argc);
	assert(type_num != NULL);

	uint64_t value;
	char trailing;

	/* a second successful conversion means garbage followed the digits */
	if (sscanf(args->argv[arg], "%" SCNu64 "%c", &value, &trailing) != 1)
		return POCLI_ERR_PARS;

	*type_num = value;
	return POCLI_RET_OK;
}
/*
* pocli_args_size -- parse size
*/
static enum pocli_ret
pocli_args_size(struct pocli_args *args, int arg, size_t *sizep)
{
	assert(args != NULL);
	assert(arg >= 0 && arg < args->argc);
	assert(sizep != NULL);

	/* util_parse_size() returns non-zero on a malformed size string */
	return util_parse_size(args->argv[arg], sizep) ?
		POCLI_ERR_PARS : POCLI_RET_OK;
}
/*
* pocli_args_alloc -- split line into array of arguments
*/
static struct pocli_args *
pocli_args_alloc(char *cmdstr, char *argstr, char *delim)
{
	size_t size = sizeof(struct pocli_args);
	struct pocli_args *args = NULL;

	/* optional command name becomes argv[0] */
	if (cmdstr) {
		size += sizeof(char *);
		args = (struct pocli_args *)malloc(size);
		if (args == NULL)
			return NULL;
		args->argv[0] = cmdstr;
		args->argc = 1;
	}

	/* grow the flexible array by one slot per token */
	for (char *tok = strtok(argstr, delim); tok != NULL;
			tok = strtok(NULL, delim)) {
		int slot = (args == NULL) ? 0 : args->argc++;

		size += sizeof(char *);
		struct pocli_args *grown =
			(struct pocli_args *)realloc(args, size);
		if (grown == NULL) {
			free(args);
			return NULL;
		}
		if (args == NULL)
			grown->argc = 1;
		args = grown;
		args->argv[slot] = tok;
	}

	/* NULL when there was neither a command name nor any token */
	return args;
}
/*
* pocli_args_obj_root -- parse object's descriptor from root object
*/
/*
 * pocli_args_obj_root -- parse an object descriptor of the form
 * "r[.<index>...]", walking PMEMoid arrays starting from the root object.
 * On success *oidp points at the selected PMEMoid inside the pool (or at
 * ctx->root itself for plain "r").
 *
 * BUGFIX: the strdup()'d working copy used to leak on the !oidp and
 * failed-pocli_args_alloc() early returns; all exits now go through the
 * common cleanup.
 */
static enum pocli_ret
pocli_args_obj_root(struct pocli_ctx *ctx, char *in, PMEMoid **oidp)
{
	if (!oidp)
		return POCLI_ERR_PARS;

	/* work on a copy -- pocli_args_alloc()/strtok() mutate the string */
	char *input = strdup(in);
	if (!input)
		return POCLI_ERR_MALLOC;

	enum pocli_ret ret = POCLI_RET_OK;
	struct pocli_args *args = pocli_args_alloc(NULL, input, ".");
	if (!args) {
		ret = POCLI_ERR_PARS;
		goto out;
	}

	if (strcmp(args->argv[0], "r") != 0) {
		ret = POCLI_ERR_PARS;
		goto out;
	}

	PMEMoid *oid = &ctx->root;
	size_t size = pmemobj_root_size(ctx->pop);

	for (int i = 1; i < args->argc; i++) {
		unsigned ind;
		char c;
		/* each path component must be a bare unsigned index */
		int n = sscanf(args->argv[i], "%u%c", &ind, &c);
		if (n != 1) {
			ret = POCLI_ERR_PARS;
			goto out;
		}

		/* index must fit inside the current object's PMEMoid array */
		size_t max_ind = size / sizeof(PMEMoid);
		if (!max_ind || ind >= max_ind) {
			ret = POCLI_ERR_PARS;
			goto out;
		}

		PMEMoid *oids = (PMEMoid *)pmemobj_direct(*oid);
		oid = &oids[ind];
		size = pmemobj_alloc_usable_size(*oid);
	}

	*oidp = oid;
out:
	free(input);
	free(args);
	return ret;
}
/*
* pocli_args_obj -- parse object's descriptor
*/
static enum pocli_ret
pocli_args_obj(struct pocli_ctx *ctx, struct pocli_args *args,
	int arg, PMEMoid **oidp)
{
	assert(args != NULL);
	assert(arg >= 0 && arg < args->argc);
	assert(oidp != NULL);
	assert(ctx != NULL);

	char *spec = args->argv[arg];

	/* "r" selects the cached root object descriptor */
	if (strcmp(spec, "r") == 0) {
		*oidp = &ctx->root;
		return POCLI_RET_OK;
	}

	/* "0" and "NULL" both stand for a missing object */
	if (strcmp(spec, "0") == 0 || strcmp(spec, "NULL") == 0) {
		*oidp = NULL;
		return POCLI_RET_OK;
	}

	/* anything else starting with 'r' is a root-relative path */
	if (spec[0] == 'r')
		return pocli_args_obj_root(ctx, args->argv[arg], oidp);

	return pocli_err(ctx, POCLI_ERR_PARS,
		"invalid object specified -- '%s'\n", spec);
}
/*
* pocli_args_list_elm -- parse object's descriptor and checks if it's on list
*/
static enum pocli_ret
pocli_args_list_elm(struct pocli_ctx *ctx, struct pocli_args *args,
		int arg, PMEMoid **oidp, struct plist *head)
{
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, arg, oidp);
	if (ret)
		return ret;
	/* a NULL descriptor ("0"/"NULL") is accepted without a membership check */
	if (*oidp == NULL)
		return POCLI_RET_OK;
	/* walk the list and verify the object really is linked on it */
	TOID(struct item) tmp;
	POBJ_LIST_FOREACH(tmp, head, field) {
		if (OID_EQUALS(tmp.oid, **oidp))
			return POCLI_RET_OK;
	}
	return pocli_err(ctx, POCLI_ERR_PARS,
		"object %s is not member of given list\n", args->argv[arg]);
}
/*
* parse_stage -- return proper string variable referring to transaction state
*/
static const char *
parse_stage(void)
{
	/* map the current transaction stage to its symbolic name */
	switch (pmemobj_tx_stage()) {
	case TX_STAGE_NONE:
		return "TX_STAGE_NONE";
	case TX_STAGE_WORK:
		return "TX_STAGE_WORK";
	case TX_STAGE_ONCOMMIT:
		return "TX_STAGE_ONCOMMIT";
	case TX_STAGE_ONABORT:
		return "TX_STAGE_ONABORT";
	case TX_STAGE_FINALLY:
		return "TX_STAGE_FINALLY";
	default:
		assert(0); /* unreachable */
		return "";
	}
}
/*
* pocli_pmemobj_direct -- pmemobj_direct() command
*/
static enum pocli_ret
pocli_pmemobj_direct(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> -- print the object's offset, uuid and pointer */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	void *obj = pmemobj_direct(*oidp);
	pocli_printf(ctx, "%s(%s): off = 0x%jx uuid = 0x%jx ptr = %p\n",
		args->argv[0], args->argv[1],
		oidp->off, oidp->pool_uuid_lo, obj);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_type_num -- pmemobj_type_num() command
*/
static enum pocli_ret
pocli_pmemobj_type_num(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> -- print the object's type number */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	uint64_t type_num = pmemobj_type_num(*oidp);
	pocli_printf(ctx, "%s(%s): type num = %llu\n",
		args->argv[0], args->argv[1], type_num);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_alloc_usable_size -- pmemobj_alloc_usable_size() command
*/
static enum pocli_ret
pocli_pmemobj_alloc_usable_size(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> -- print the allocation's usable size */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	size_t size = pmemobj_alloc_usable_size(*oidp);
	pocli_printf(ctx, "%s(%s): size = %zu\n",
		args->argv[0], args->argv[1], size);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_root -- pmemobj_root() command
*/
static enum pocli_ret
pocli_pmemobj_root(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <size> -- (re)allocate the root object */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_size(args, 1, &size);
	if (ret)
		return ret;
	PMEMoid root = pmemobj_root(ctx->pop, size);
	if (OID_IS_NULL(root))
		return pocli_err(ctx, POCLI_ERR_CMD, "pmemobj_root failed\n");
	/* cache the descriptor so later "r..." specs can resolve against it */
	ctx->root = root;
	pocli_printf(ctx, "%s(%zu): off = 0x%jx uuid = 0x%jx\n",
		args->argv[0], size, ctx->root.off,
		ctx->root.pool_uuid_lo);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_root_size -- pmemobj_root_size() command
*/
/*
 * pocli_pmemobj_root_size -- pmemobj_root_size() command; prints the
 * current root object size (0 when no root object exists yet).
 */
static enum pocli_ret
pocli_pmemobj_root_size(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	size_t size = pmemobj_root_size(ctx->pop);
	/* BUGFIX: %zu is the portable size_t conversion (%lu is wrong on LLP64) */
	pocli_printf(ctx, "%s: size = %zu\n",
		args->argv[0], size);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_do_alloc -- pmemobj_alloc and pmemobj_zalloc() common part
*/
static enum pocli_ret
pocli_pmemobj_do_alloc(struct pocli_ctx *ctx, struct pocli_args *args,
	int (fn_alloc)(PMEMobjpool *pop, PMEMoid *oid, size_t size,
	uint64_t type_num))
{
	/* usage: <cmd> <obj> <type_num> <size> */
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	uint64_t type_num = 0;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	/* the root object descriptor must not be overwritten by an alloc */
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot allocate to root object\n");
	ret = pocli_args_number(args, 2, &type_num);
	if (ret)
		return ret;
	ret = pocli_args_size(args, 3, &size);
	if (ret)
		return ret;
	/* fn_alloc is do_alloc (plain) or pmemobj_zalloc (zeroed) */
	int r = fn_alloc(ctx->pop, oidp, size, type_num);
	pocli_printf(ctx, "%s(%s, %zu, %llu): %d\n",
		args->argv[0], args->argv[1], size, type_num, r);
	return ret;
}
/*
* do_alloc -- wrapper for pmemobj_alloc() function with default constructor.
*/
static int
do_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num)
{
	/* NULL constructor and argument: allocation content is not initialized */
	return pmemobj_alloc(pop, oidp, size, type_num, NULL, NULL);
}
/*
* pocli_pmemobj_alloc -- pmemobj_alloc() command
*/
static enum pocli_ret
pocli_pmemobj_alloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared alloc helper with the plain allocator */
	return pocli_pmemobj_do_alloc(ctx, args, do_alloc);
}
/*
* pocli_pmemobj_zalloc -- pmemobj_zalloc() command
*/
static enum pocli_ret
pocli_pmemobj_zalloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared alloc helper with the zeroing allocator */
	return pocli_pmemobj_do_alloc(ctx, args, pmemobj_zalloc);
}
/*
* pocli_pmemobj_do_realloc -- pmemobj_realloc and pmemobj_zrealloc() commands
* common part
*/
static enum pocli_ret
pocli_pmemobj_do_realloc(struct pocli_ctx *ctx, struct pocli_args *args,
	int (*fn_realloc)(PMEMobjpool *pop, PMEMoid *oid, size_t size,
	uint64_t type_num))
{
	/* usage: <cmd> <obj> <type_num> <size> */
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	uint64_t type_num = 0;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot realloc with NULL oid pointer\n");
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot reallocate to root object\n");
	ret = pocli_args_number(args, 2, &type_num);
	if (ret)
		return ret;
	ret = pocli_args_size(args, 3, &size);
	if (ret)
		return ret;
	/* fn_realloc is pmemobj_realloc or pmemobj_zrealloc */
	int r = fn_realloc(ctx->pop, oidp, size, type_num);
	pocli_printf(ctx, "%s(%s, %zu, %llu): %d off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], args->argv[1], size, type_num,
		r, oidp->off, oidp->pool_uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_realloc -- pmemobj_realloc() command
*/
static enum pocli_ret
pocli_pmemobj_realloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared realloc helper */
	return pocli_pmemobj_do_realloc(ctx, args, pmemobj_realloc);
}
/*
* pocli_pmemobj_zrealloc -- pmemobj_zrealloc() command
*/
static enum pocli_ret
pocli_pmemobj_zrealloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared realloc helper (zeroing variant) */
	return pocli_pmemobj_do_realloc(ctx, args, pmemobj_zrealloc);
}
/*
* pocli_pmemobj_free -- pmemobj_free() command
*/
static enum pocli_ret
pocli_pmemobj_free(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"NULL pointer not allowed here\n");
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot free root object\n");
	/* capture the direct pointer before the free invalidates the OID */
	void *oidp_tmp = pmemobj_direct(*oidp);
	pmemobj_free(oidp);
	/*
	 * NOTE(review): pmemobj_free() presumably clears *oidp, so the
	 * off/uuid printed here would be the cleared values -- confirm
	 * this is the intended output.
	 */
	pocli_printf(ctx, "%s(%p): off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], oidp_tmp, oidp->off, oidp->pool_uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_strdup -- pmemobj_strdup() command
*/
static enum pocli_ret
pocli_pmemobj_strdup(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> <string> <type_num> */
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp = NULL;
	uint64_t type_num;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot use root object\n");
	ret = pocli_args_number(args, 3, &type_num);
	if (ret)
		return ret;
	/* argv[2] is duplicated verbatim into a new pmemobj allocation */
	int r = pmemobj_strdup(ctx->pop, oidp, args->argv[2], type_num);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_strdup() failed\n");
	pocli_printf(ctx, "%s(%s, %s, %llu): %d\n",
		args->argv[0], args->argv[1], args->argv[2],
		type_num, r);
	return ret;
}
/*
* pocli_str_root_copy -- copy a string into a root object data
*/
static enum pocli_ret
pocli_str_root_copy(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <offset> <string> */
	if (args->argc != 3)
		return POCLI_ERR_ARGS;
	size_t offset = 0;
	enum pocli_ret ret = pocli_args_size(args, 1, &offset);
	if (ret)
		return ret;
	const char *str = args->argv[2];
	if (str == NULL)
		return POCLI_ERR_ARGS;
	size_t len = strlen(str);
	size_t root_size = pmemobj_root_size(ctx->pop);
	/*
	 * NOTE(review): offset + len could wrap around for a huge offset,
	 * bypassing this bounds check -- consider comparing the parts.
	 */
	if (offset + len > root_size)
		return POCLI_ERR_ARGS;
	PMEMoid root = pmemobj_root(ctx->pop, root_size);
	assert(!OID_IS_NULL(root));
	char *root_data = (char *)pmemobj_direct(root);
	/* copy the string (without its terminating NUL) and persist it */
	pmemobj_memcpy_persist(ctx->pop, root_data + offset, str, len);
	return ret;
}
/*
* pocli_str_root_print -- print a string stored in the root object data
*/
static enum pocli_ret
pocli_str_root_print(struct pocli_ctx *ctx, struct pocli_args *args)
{
if (args->argc != 3)
return POCLI_ERR_ARGS;
size_t offset = 0;
enum pocli_ret ret = pocli_args_size(args, 1, &offset);
if (ret)
return ret;
size_t len = 0;
ret = pocli_args_number(args, 2, &len);
if (ret)
return ret;
size_t root_size = pmemobj_root_size(ctx->pop);
if (offset + len > root_size)
return POCLI_ERR_ARGS;
PMEMoid root = pmemobj_root(ctx->pop, root_size);
assert(!OID_IS_NULL(root));
char *root_data = (char *)pmemobj_direct(root);
char *buff = (char *)malloc(len + 1);
assert(buff != NULL);
memcpy(buff, root_data + offset, len);
buff[len] = '\0';
printf("%s\n", buff);
free(buff);
return ret;
}
/*
* pocli_pmemobj_first -- pmemobj_first() command
*/
static enum pocli_ret
pocli_pmemobj_first(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> -- print the first object in the pool */
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	PMEMoid oidp = pmemobj_first(ctx->pop);
	if (OID_IS_NULL(oidp))
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_first() failed\n");
	pocli_printf(ctx, "%s: off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], oidp.off, oidp.pool_uuid_lo);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_next -- pmemobj_next() command
*/
static enum pocli_ret
pocli_pmemobj_next(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> -- print the object following <obj> */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp;
	PMEMoid oidp_next;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	oidp_next = pmemobj_next(*oidp);
	pocli_printf(ctx, "%s(%p): off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], pmemobj_direct(*oidp), oidp_next.off,
		oidp_next.pool_uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_memcpy_persist -- pmemobj_memcpy_persist() command
*/
/*
 * pocli_pmemobj_memcpy_persist -- pmemobj_memcpy_persist() command
 *
 * usage: <cmd> <dest-obj> <dest-off> <src-obj> <src-off> <len>
 *
 * BUGFIX: `len` is uint64_t but was printed with %u -- a format/argument
 * mismatch (undefined behavior); use PRIu64.
 */
static enum pocli_ret
pocli_pmemobj_memcpy_persist(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 6)
		return POCLI_ERR_ARGS;
	PMEMoid *dest;
	PMEMoid *src;
	enum pocli_ret ret;
	uint64_t offset;
	uint64_t len;
	if ((ret = pocli_args_obj(ctx, args, 1, &dest)))
		return ret;
	if ((ret = pocli_args_number(args, 2, &offset)))
		return ret;
	if (dest == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	char *dest_p = (char *)pmemobj_direct(*dest);
	dest_p += offset;
	if ((ret = pocli_args_obj(ctx, args, 3, &src)))
		return ret;
	/* offset is reused for the source-side displacement */
	if ((ret = pocli_args_number(args, 4, &offset)))
		return ret;
	if (src == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[3]);
	char *src_p = (char *)pmemobj_direct(*src);
	src_p += offset;
	if ((ret = pocli_args_number(args, 5, &len)))
		return ret;
	void *result = pmemobj_memcpy_persist(ctx->pop, dest_p, src_p, len);
	pocli_printf(ctx, "%s(%p, %p, %" PRIu64 "): ptr = %p\n",
		args->argv[0], dest_p, src_p, len, result);
	return ret;
}
/*
* pocli_pmemobj_memset_persist -- pmemobj_memset_persist() command
*/
/*
 * pocli_pmemobj_memset_persist -- pmemobj_memset_persist() command
 *
 * usage: <cmd> <obj> <offset> <byte-value> <len>
 *
 * BUGFIX: `c` and `len` are uint64_t but were printed with %u/%d --
 * format/argument mismatches (undefined behavior); use PRIu64.
 */
static enum pocli_ret
pocli_pmemobj_memset_persist(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 5)
		return POCLI_ERR_ARGS;
	PMEMoid *oid;
	enum pocli_ret ret;
	uint64_t offset;
	uint64_t len;
	uint64_t c;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if ((ret = pocli_args_number(args, 2, &offset)))
		return ret;
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	char *dest_p = (char *)pmemobj_direct(*oid);
	dest_p += offset;
	if ((ret = pocli_args_number(args, 3, &c)))
		return ret;
	if ((ret = pocli_args_number(args, 4, &len)))
		return ret;
	/* UINT64_MAX would be rejected here as an obviously bogus length */
	if (len == UINT64_MAX)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[4]);
	void *result = pmemobj_memset_persist(ctx->pop, dest_p, (int)c, len);
	pocli_printf(ctx, "%s(%p, %" PRIu64 ", %" PRIu64 "): ptr = %p\n",
		args->argv[0], dest_p, c, len, result);
	return ret;
}
/*
* pocli_pmemobj_do_persist -- common part of pmemobj_persist() and
* pmemobj_flush() command
*/
/*
 * pocli_pmemobj_do_persist -- common part of pmemobj_persist() and
 * pmemobj_flush() commands
 *
 * usage: <cmd> <obj> <offset> <len>
 *
 * BUGFIX: `len` is uint64_t but was printed with %u -- a format/argument
 * mismatch (undefined behavior); use PRIu64.
 */
static enum pocli_ret
pocli_pmemobj_do_persist(struct pocli_ctx *ctx, struct pocli_args *args,
	void (*fn_persist)(PMEMobjpool *pop, const void *addr, size_t len))
{
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oid;
	enum pocli_ret ret;
	uint64_t offset;
	uint64_t len;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if ((ret = pocli_args_number(args, 2, &offset)))
		return ret;
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	char *dest_p = (char *)pmemobj_direct(*oid);
	dest_p += offset;
	if ((ret = pocli_args_number(args, 3, &len)))
		return ret;
	/* fn_persist is pmemobj_persist or pmemobj_flush */
	fn_persist(ctx->pop, dest_p, len);
	pocli_printf(ctx, "%s(%p, %" PRIu64 ")\n",
		args->argv[0], dest_p, len);
	return ret;
}
/*
* pocli_pmemobj_persist -- pmemobj_persist() command
*/
static enum pocli_ret
pocli_pmemobj_persist(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared helper with pmemobj_persist */
	return pocli_pmemobj_do_persist(ctx, args, pmemobj_persist);
}
/*
* pocli_pmemobj_flush -- pmemobj_flush() command
*/
static enum pocli_ret
pocli_pmemobj_flush(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* delegate to the shared helper with pmemobj_flush */
	return pocli_pmemobj_do_persist(ctx, args, pmemobj_flush);
}
/*
* pocli_pmemobj_drain -- pmemobj_drain() command
*/
static enum pocli_ret
pocli_pmemobj_drain(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> -- takes no arguments */
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	pmemobj_drain(ctx->pop);
	pocli_printf(ctx, "%s\n", args->argv[0]);
	return POCLI_RET_OK;
}
/*
* pocli_pmemobj_pool_by_ptr -- pmemobj_pool_by_ptr() command
*/
static enum pocli_ret
pocli_pmemobj_pool_by_ptr(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> <offset> -- look up the pool owning the address */
	if (args->argc != 3)
		return POCLI_ERR_ARGS;
	PMEMoid *oid;
	enum pocli_ret ret;
	uint64_t offset;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if ((ret = pocli_args_number(args, 2, &offset)))
		return ret;
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	char *dest_p = (char *)pmemobj_direct(*oid);
	dest_p += offset;
	PMEMobjpool *pop = pmemobj_pool_by_ptr(dest_p);
	/* reaches into the pool's internal uuid_lo field for the output */
	pocli_printf(ctx, "%s(%p): uuid = 0x%llx\n",
		args->argv[0], dest_p, pop->uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_pool_by_oid -- pmemobj_pool_by_oid() command
*/
static enum pocli_ret
pocli_pmemobj_pool_by_oid(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> -- look up the pool owning the object */
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	PMEMoid *oid;
	enum pocli_ret ret;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	PMEMobjpool *pop = pmemobj_pool_by_oid(*oid);
	/* reaches into the pool's internal uuid_lo field for the output */
	pocli_printf(ctx, "%s(%p): uuid = 0x%llx\n",
		args->argv[0], pmemobj_direct(*oid), pop->uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_list_insert -- pmemobj_list_insert() command
*/
/*
 * pocli_pmemobj_list_insert -- pmemobj_list_insert() command
 *
 * usage: <cmd> <obj> <head> <dest> <before>
 *
 * BUGFIX: the result of parsing the head object was discarded (the call
 * was missing "ret ="), so a parse failure fell through with a stale
 * POCLI_RET_OK and then dereferenced the unset head_oid pointer.
 */
static enum pocli_ret
pocli_pmemobj_list_insert(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 5)
		return POCLI_ERR_ARGS;
	PMEMoid nulloid = OID_NULL;
	PMEMoid *dest;
	PMEMoid *oid;
	PMEMoid *head_oid;
	enum pocli_ret ret;
	uint64_t before;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if ((ret = pocli_args_obj(ctx, args, 2, &head_oid)))
		return ret;
	if (head_oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[2]);
	struct plist *head = (struct plist *)pmemobj_direct(*head_oid);
	/* destination element must already be a member of the list */
	if ((ret = pocli_args_list_elm(ctx, args, 3, &dest, head)))
		return ret;
	if (dest == NULL)
		dest = &nulloid;
	if ((ret = pocli_args_number(args, 4, &before)))
		return ret;
	if (before > 1)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"Before flag different than 0 or 1\n");
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	int r = pmemobj_list_insert(ctx->pop, offsetof(struct item, field),
		head, *dest, (int)before, *oid);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_list_insert() failed\n");
	pocli_printf(ctx, "%s(%p, %s, %p, %u): %d\n",
		args->argv[0], pmemobj_direct(*oid), args->argv[2],
		dest, before, r);
	return ret;
}
/*
* pocli_pmemobj_list_insert_new -- pmemobj_list_insert_new() command
*/
static enum pocli_ret
pocli_pmemobj_list_insert_new(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> <head> <dest> <before> <type_num> <size> */
	if (args->argc != 7)
		return POCLI_ERR_ARGS;
	PMEMoid nulloid = OID_NULL;
	PMEMoid *dest;
	PMEMoid *oid;
	PMEMoid *head_oid;
	enum pocli_ret ret;
	uint64_t before;
	uint64_t type_num;
	uint64_t size;
	if ((ret = pocli_args_obj(ctx, args, 1, &oid)))
		return ret;
	if (oid == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot allocate to root object\n");
	if ((ret = pocli_args_obj(ctx, args, 2, &head_oid)))
		return ret;
	if (head_oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[2]);
	struct plist *head = (struct plist *)pmemobj_direct(*head_oid);
	/* destination element must already be a member of the list */
	if ((ret = pocli_args_list_elm(ctx, args, 3, &dest, head)))
		return ret;
	if (dest == NULL)
		dest = &nulloid;
	if ((ret = pocli_args_number(args, 4, &before)))
		return ret;
	if (before > 1)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"Before flag different than 0 or 1\n");
	if ((ret = pocli_args_number(args, 5, &type_num)))
		return ret;
	if ((ret = pocli_args_number(args, 6, &size)))
		return ret;
	*oid = pmemobj_list_insert_new(ctx->pop, offsetof(struct item, field),
		head, *dest, (int)before, size, type_num, NULL, NULL);
	/* the returned OID may live in pmem -- persist the stored copy */
	pmemobj_persist(ctx->pop, oid, sizeof(PMEMoid));
	if (OID_IS_NULL(*oid))
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_list_insert_new() failed\n");
	pocli_printf(ctx, "%s(%s, %p, %u, %llu, %zu): off = 0x%jx uuid = 0x%jx"
		" ptr = %p\n", args->argv[0], args->argv[2],
		dest, before, type_num, size, oid->off,
		oid->pool_uuid_lo, pmemobj_direct(*oid));
	return ret;
}
/*
* pocli_pmemobj_list_remove -- pmemobj_list_remove() command
*/
static enum pocli_ret
pocli_pmemobj_list_remove(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> <head> <free-flag> */
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oid;
	PMEMoid *head_oid;
	enum pocli_ret ret;
	uint64_t if_free;
	if ((ret = pocli_args_obj(ctx, args, 2, &head_oid)))
		return ret;
	if (head_oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[2]);
	struct plist *head = (struct plist *)pmemobj_direct(*head_oid);
	/* the object being removed must be a member of the list */
	if ((ret = pocli_args_list_elm(ctx, args, 1, &oid, head)))
		return ret;
	if ((ret = pocli_args_number(args, 3, &if_free)))
		return ret;
	if (if_free > 1)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"Free flag different than 0 or 1\n");
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	/* capture the direct pointer before removal (and possible free) */
	void *oidp = pmemobj_direct(*oid);
	int r = pmemobj_list_remove(ctx->pop, offsetof(struct item, field),
		head, *oid, (int)if_free);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_list_remove() failed\n");
	pocli_printf(ctx, "%s(%p, %s, %u): off = 0x%jx uuid = 0x%jx\n",
		args->argv[0], oidp, args->argv[2], if_free,
		oid->off, oid->pool_uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_list_move -- pmemobj_list_move() command
*/
static enum pocli_ret
pocli_pmemobj_list_move(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* usage: <cmd> <obj> <src-head> <dest-head> <dest> <before> */
	if (args->argc != 6)
		return POCLI_ERR_ARGS;
	PMEMoid nulloid = OID_NULL;
	PMEMoid *dest;
	PMEMoid *oid;
	PMEMoid *head_oid;
	enum pocli_ret ret;
	uint64_t before;
	/* both lists link through the same `field` member of struct item */
	size_t offset = offsetof(struct item, field);
	if ((ret = pocli_args_obj(ctx, args, 2, &head_oid)))
		return ret;
	if (head_oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[2]);
	struct plist *head_src = (struct plist *)pmemobj_direct(*head_oid);
	if ((ret = pocli_args_obj(ctx, args, 3, &head_oid)))
		return ret;
	if (head_oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[3]);
	struct plist *head_dest = (struct plist *)pmemobj_direct(*head_oid);
	/* the moved object must be on the source list ... */
	if ((ret = pocli_args_list_elm(ctx, args, 1, &oid, head_src)))
		return ret;
	/* ... and the destination anchor on the destination list */
	if ((ret = pocli_args_list_elm(ctx, args, 4, &dest, head_dest)))
		return ret;
	if (dest == NULL)
		dest = &nulloid;
	if ((ret = pocli_args_number(args, 5, &before)))
		return ret;
	if (before > 1)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"Before flag different than 0 or 1\n");
	if (oid == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	int r = pmemobj_list_move(ctx->pop, offset, head_src, offset, head_dest,
		*dest, (int)before, *oid);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_list_move() failed\n");
	pocli_printf(ctx, "%s(%p, %s, %s, %p, %u): %d\n", args->argv[0],
		pmemobj_direct(*oid), args->argv[2], args->argv[3],
		pmemobj_direct(*dest), before, r);
	return ret;
}
/*
 * pocli_pmemobj_tx_begin -- pmemobj_tx_begin() command
 *
 * With no argument, starts a transaction with no recovery point.
 * With the single argument "jmp", starts the transaction with a jmp_buf
 * and recursively processes further commands; an abort (or tx end while
 * inside) longjmps back here instead of terminating the process.
 */
static enum pocli_ret
pocli_pmemobj_tx_begin(struct pocli_ctx *ctx, struct pocli_args *args)
{
	enum pocli_ret ret = POCLI_RET_OK;
	int r;
	switch (args->argc) {
	case 1: {
		r = pmemobj_tx_begin(ctx->pop, NULL, TX_PARAM_NONE);
		if (r != POCLI_RET_OK)
			return pocli_err(ctx, POCLI_ERR_ARGS,
				"pmemobj_tx_begin() failed");
		pocli_printf(ctx, "%s: %d\n", args->argv[0], r);
	}
	break;
	case 2: {
		if (strcmp(args->argv[1], "jmp") != 0)
			return POCLI_ERR_ARGS;
		jmp_buf jmp;
		if (setjmp(jmp)) {
			/* longjmp target: reached on abort or tx end */
			const char *command = ctx->tx_aborted ?
				"pmemobj_tx_abort" : "pmemobj_tx_end";
			pocli_printf(ctx, "%s: %d\n",
				command, pmemobj_tx_errno());
			/*
			 * Free all objects, except the one we currently
			 * use.
			 */
			while (VEC_SIZE(&ctx->free_on_abort) > 1) {
				free(VEC_BACK(&ctx->free_on_abort));
				VEC_POP_BACK(&ctx->free_on_abort);
			}
			return POCLI_RET_OK;
		} else {
			r = pmemobj_tx_begin(ctx->pop, jmp,
				TX_PARAM_NONE);
			if (r != POCLI_RET_OK)
				return pocli_err(ctx, POCLI_ERR_ARGS,
					"pmemobj_tx_begin() failed");
		}
		pocli_printf(ctx, "%s(jmp): %d\n", args->argv[0], r);
		/* read and execute commands inside this transaction */
		ret = (enum pocli_ret)pocli_process(ctx->pocli);
		if (ret)
			return ret;
	}
	break;
	default:
		return POCLI_ERR_ARGS;
	}
	return ret;
}
/*
* pocli_pmemobj_tx_end -- pmemobj_tx_end() command
*/
static enum pocli_ret
pocli_pmemobj_tx_end(struct pocli_ctx *ctx, struct pocli_args *args)
{
if (args->argc != 1)
return POCLI_ERR_ARGS;
if (pmemobj_tx_stage() == TX_STAGE_NONE ||
pmemobj_tx_stage() == TX_STAGE_WORK)
return pocli_err(ctx, POCLI_ERR_ARGS,
"transaction in improper stage\n");
ctx->tx_aborted = false;
int ret = pmemobj_tx_end();
pocli_printf(ctx, "%s: %d\n", args->argv[0], ret);
return POCLI_RET_OK;
}
/*
* pocli_pmemobj_tx_commit -- pmemobj_tx_commit() command
*/
static enum pocli_ret
pocli_pmemobj_tx_commit(struct pocli_ctx *ctx, struct pocli_args *args)
{
if (args->argc != 1)
return POCLI_ERR_ARGS;
if (pmemobj_tx_stage() != TX_STAGE_WORK)
return pocli_err(ctx, POCLI_ERR_ARGS,
"cannot use in stage different than TX_STAGE_WORK\n");
pmemobj_tx_commit();
pocli_printf(ctx, "%s\n", args->argv[0]);
return POCLI_RET_OK;
}
/*
 * pocli_pmemobj_tx_abort -- pmemobj_tx_abort() command
 *
 * Aborts the current transaction with the user-supplied errnum; valid
 * only in TX_STAGE_WORK.  When the transaction was started with a
 * jmp_buf, pmemobj_tx_abort() longjmps back to tx_begin and the code
 * below the call is not reached.
 */
static enum pocli_ret
pocli_pmemobj_tx_abort(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	if (pmemobj_tx_stage() != TX_STAGE_WORK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot use in stage different than TX_STAGE_WORK\n");
	int err;
	int count = sscanf(args->argv[1], "%d", &err);
	if (count != 1)
		return POCLI_ERR_PARS;
	ctx->tx_aborted = true;
	pmemobj_tx_abort(err);
	/*
	 * NOTE(review): no trailing '\n' here unlike every other command's
	 * output -- presumably intentional (line may be unreachable after a
	 * longjmp-ing abort); confirm before changing.
	 */
	pocli_printf(ctx, "pmemobj_tx_abort: %d", err);
	return POCLI_RET_OK;
}
/*
 * pocli_pmemobj_tx_stage -- pmemobj_tx_stage() command
 *
 * Prints the current transaction stage as text (via parse_stage()).
 */
static enum pocli_ret
pocli_pmemobj_tx_stage(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	pocli_printf(ctx, "%s: %s\n", args->argv[0], parse_stage());
	return POCLI_RET_OK;
}
/*
 * pocli_pmemobj_tx_add_range -- pmemobj_tx_add_range() command
 *
 * Syntax: ptar <obj> <offset> <size>
 * Snapshots the given range of the object into the transaction undo log.
 */
static enum pocli_ret
pocli_pmemobj_tx_add_range(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp;
	size_t offset = 0;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot add NULL pointer\n");
	ret = pocli_args_size(args, 2, &offset);
	if (ret)
		return ret;
	ret = pocli_args_size(args, 3, &size);
	if (ret)
		return ret;
	int r = pmemobj_tx_add_range(*oidp, offset, size);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_tx_add_range() failed");
	/*
	 * Bug fix: the format string has five conversions but six arguments
	 * were passed ('ret' was consumed by "%d" and 'r' dangled); print
	 * the library result 'r'.  Output is unchanged since both are 0 here.
	 */
	pocli_printf(ctx, "%s(%s, %zu, %zu): %d\n", args->argv[0],
		args->argv[1], offset, size, r);
	return ret;
}
/*
 * pocli_pmemobj_tx_add_range_direct -- pmemobj_tx_add_range_direct() command
 *
 * Syntax: ptard <obj> <offset> <size>
 * Snapshots a raw range starting at the object's direct pointer + offset.
 */
static enum pocli_ret
pocli_pmemobj_tx_add_range_direct(struct pocli_ctx *ctx,
		struct pocli_args *args)
{
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	PMEMoid *oidp;
	size_t off = 0;
	size_t size = 0;
	enum pocli_ret ret;
	if ((ret = pocli_args_obj(ctx, args, 1, &oidp)))
		return ret;
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot add NULL pointer\n");
	char *ptr = (char *)pmemobj_direct(*oidp);
	if ((ret = pocli_args_size(args, 2, &off)))
		return ret;
	if ((ret = pocli_args_size(args, 3, &size)))
		return ret;
	int r = pmemobj_tx_add_range_direct(ptr + off, size);
	if (r != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_tx_add_range_direct() failed");
	pocli_printf(ctx, "%s(%p, %zu, %zu): %d\n", args->argv[0], ptr,
		off, size, r);
	return ret;
}
/*
 * pocli_pmemobj_do_tx_alloc -- pmemobj_tx_alloc() and pmemobj_tx_zalloc()
 * commands common part
 *
 * Syntax: <obj> <size> <type_num>; must be called in TX_STAGE_WORK.
 * The resulting OID is stored under the name given by argv[1].
 * (Comment fixed: it previously named tx_zalloc twice.)
 */
static enum pocli_ret
pocli_pmemobj_do_tx_alloc(struct pocli_ctx *ctx, struct pocli_args *args,
	PMEMoid (*fn_alloc)(size_t size, uint64_t type_num))
{
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	if (pmemobj_tx_stage() != TX_STAGE_WORK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot use in stage different than TX_STAGE_WORK\n");
	PMEMoid *oidp = NULL;
	uint64_t type_num = 0;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot allocate to root object\n");
	/*
	 * Bug fix: guard against an unresolved object name before the
	 * dereference below -- the sibling commands (tx_free, tx_strdup)
	 * perform the same check.
	 */
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	ret = pocli_args_size(args, 2, &size);
	if (ret)
		return ret;
	ret = pocli_args_number(args, 3, &type_num);
	if (ret)
		return ret;
	*oidp = fn_alloc(size, type_num);
	pocli_printf(ctx, "%s(%zu, %llu): off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], size, type_num, oidp->off, oidp->pool_uuid_lo);
	return ret;
}
/*
 * pocli_pmemobj_tx_alloc -- pmemobj_tx_alloc() command
 */
static enum pocli_ret
pocli_pmemobj_tx_alloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* thin wrapper: delegate to the common allocation helper */
	return pocli_pmemobj_do_tx_alloc(ctx, args, pmemobj_tx_alloc);
}
/*
 * pocli_pmemobj_tx_zalloc -- pmemobj_tx_zalloc() command
 */
static enum pocli_ret
pocli_pmemobj_tx_zalloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* zeroing variant of tx_alloc */
	return pocli_pmemobj_do_tx_alloc(ctx, args, pmemobj_tx_zalloc);
}
/*
 * pocli_pmemobj_do_tx_realloc -- pmemobj_tx_realloc() and
 * pmemobj_tx_zrealloc() commands common part
 *
 * Syntax: <obj> <size> <type_num>; must be called in TX_STAGE_WORK.
 * (Comment fixed: it previously named tx_zrealloc twice.)
 */
static enum pocli_ret
pocli_pmemobj_do_tx_realloc(struct pocli_ctx *ctx, struct pocli_args *args,
	PMEMoid (*fn_realloc)(PMEMoid oid, size_t size, uint64_t type_num))
{
	if (args->argc != 4)
		return POCLI_ERR_ARGS;
	if (pmemobj_tx_stage() != TX_STAGE_WORK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot use in stage different than TX_STAGE_WORK\n");
	PMEMoid *oidp = NULL;
	uint64_t type_num = 0;
	size_t size = 0;
	enum pocli_ret ret;
	ret = pocli_args_obj(ctx, args, 1, &oidp);
	if (ret)
		return ret;
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot reallocate root object\n");
	/*
	 * Bug fix: guard against an unresolved object name before *oidp is
	 * read and written below -- the sibling commands perform the same
	 * check.
	 */
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	ret = pocli_args_size(args, 2, &size);
	if (ret)
		return ret;
	ret = pocli_args_number(args, 3, &type_num);
	if (ret)
		return ret;
	*oidp = fn_realloc(*oidp, size, type_num);
	pocli_printf(ctx, "%s(%p, %zu, %llu): off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], oidp, size, type_num,
		oidp->off, oidp->pool_uuid_lo);
	return ret;
}
/*
 * pocli_pmemobj_tx_realloc -- pmemobj_tx_realloc() command
 */
static enum pocli_ret
pocli_pmemobj_tx_realloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* thin wrapper: delegate to the common reallocation helper */
	return pocli_pmemobj_do_tx_realloc(ctx, args, pmemobj_tx_realloc);
}
/*
 * pocli_pmemobj_tx_zrealloc -- pmemobj_tx_zrealloc() command
 */
static enum pocli_ret
pocli_pmemobj_tx_zrealloc(struct pocli_ctx *ctx, struct pocli_args *args)
{
	/* zeroing variant of tx_realloc */
	return pocli_pmemobj_do_tx_realloc(ctx, args, pmemobj_tx_zrealloc);
}
/*
 * pocli_pmemobj_tx_free -- pmemobj_tx_free() command
 *
 * Syntax: ptf <obj>; frees the object transactionally.  The root object
 * cannot be freed.  Valid only in TX_STAGE_WORK.
 */
static enum pocli_ret
pocli_pmemobj_tx_free(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 2)
		return POCLI_ERR_ARGS;
	if (pmemobj_tx_stage() != TX_STAGE_WORK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot use in stage different than TX_STAGE_WORK\n");
	PMEMoid *oidp = NULL;
	enum pocli_ret ret;
	if ((ret = pocli_args_obj(ctx, args, 1, &oidp)))
		return ret;
	if (oidp == &ctx->root)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"cannot free root object\n");
	if (oidp == NULL)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"invalid object -- '%s'\n", args->argv[1]);
	if (pmemobj_tx_free(*oidp) != POCLI_RET_OK)
		return pocli_err(ctx, POCLI_ERR_ARGS,
			"pmemobj_tx_free() failed\n");
	pocli_printf(ctx, "%s(%p): off = 0x%llx uuid = 0x%llx\n",
		args->argv[0], oidp,
		oidp->off, oidp->pool_uuid_lo);
	return ret;
}
/*
* pocli_pmemobj_tx_strdup -- pmemobj_tx_strdup() command
*/
static enum pocli_ret
pocli_pmemobj_tx_strdup(struct pocli_ctx *ctx, struct pocli_args *args)
{
if (args->argc != 4)
return POCLI_ERR_ARGS;
if (pmemobj_tx_stage() != TX_STAGE_WORK)
return pocli_err(ctx, POCLI_ERR_ARGS,
"cannot use in stage different than TX_STAGE_WORK\n");
PMEMoid *oidp = NULL;
uint64_t type_num;
enum pocli_ret ret;
ret = pocli_args_obj(ctx, args, 1, &oidp);
if (ret)
return ret;
if (oidp == &ctx->root)
return pocli_err(ctx, POCLI_ERR_ARGS,
"cannot use root object\n");
ret = pocli_args_number(args, 3, &type_num);
if (ret)
return ret;
if (oidp == NULL)
return pocli_err(ctx, POCLI_ERR_ARGS,
"invalid object -- '%s'\n", args->argv[1]);
*oidp = pmemobj_tx_strdup(args->argv[2], type_num);
pocli_printf(ctx, "%s(%s, %llu): off = 0x%llx uuid = 0x%llx\n",
args->argv[0], args->argv[2], type_num,
oidp->off, oidp->pool_uuid_lo);
return ret;
}
/*
 * pocli_pmemobj_tx_process -- pmemobj_tx_process() command
 *
 * Advances the current transaction to its next stage.
 */
static enum pocli_ret
pocli_pmemobj_tx_process(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	pmemobj_tx_process();
	pocli_printf(ctx, "%s\n", args->argv[0]);
	return POCLI_RET_OK;
}
/*
 * pocli_pmemobj_tx_errno -- pmemobj_tx_errno() command
 *
 * Prints the error number of the last transaction.
 */
static enum pocli_ret
pocli_pmemobj_tx_errno(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (args->argc != 1)
		return POCLI_ERR_ARGS;
	pocli_printf(ctx, "%s: %d\n", args->argv[0], pmemobj_tx_errno());
	return POCLI_RET_OK;
}
/*
* pocli_get_cmd -- find command of given name
*/
static const struct pocli_cmd *
pocli_get_cmd(struct pocli *pcli, const char *cmds)
{
for (size_t i = 0; i < pcli->ncmds; i++) {
const char *name = pcli->cmds[i].name;
const char *name_short = pcli->cmds[i].name_short;
if (strcmp(cmds, name_short) == 0 ||
(pcli->opts.enable_long_names &&
strcmp(cmds, name) == 0)) {
return &pcli->cmds[i];
}
}
return NULL;
}
/*
 * pocli_print_cmd -- print description of specified command
 *
 * Output format: "[short] long-name - usage: long-name usage".
 */
static void
pocli_print_cmd(struct pocli_ctx *ctx, const struct pocli_cmd *cmd)
{
	pocli_printf(ctx, "[%-5s] %-32s - usage: %s %s\n",
		cmd->name_short,
		cmd->name,
		cmd->name,
		cmd->usage);
}
/*
 * pocli_print_cmd_usage -- print usage of specified command
 */
static void
pocli_print_cmd_usage(struct pocli_ctx *ctx, const struct pocli_cmd *cmd)
{
	pocli_printf(ctx, "usage: %s %s\n",
		cmd->name,
		cmd->usage);
}
/*
 * pocli_help -- help command
 *
 * Without an argument lists every command; with a command name prints
 * that command's usage line.  Disabled entirely via the help option.
 */
static enum pocli_ret
pocli_help(struct pocli_ctx *ctx, struct pocli_args *args)
{
	if (!ctx->pocli->opts.enable_help)
		return POCLI_ERR_CMD;
	if (args->argc != 1 && args->argc != 2)
		return POCLI_ERR_ARGS;
	if (args->argc == 2) {
		const struct pocli_cmd *cmd =
			pocli_get_cmd(ctx->pocli, args->argv[1]);
		if (cmd == NULL)
			return POCLI_ERR_PARS;
		pocli_print_cmd_usage(ctx, cmd);
		return POCLI_RET_OK;
	}
	for (size_t i = 0; i < ctx->pocli->ncmds; i++)
		pocli_print_cmd(ctx, &ctx->pocli->cmds[i]);
	return POCLI_RET_OK;
}
/*
 * pocli_quit -- quit command (takes no arguments)
 */
static enum pocli_ret
pocli_quit(struct pocli_ctx *ctx, struct pocli_args *args)
{
	return (args->argc == 1) ? POCLI_RET_QUIT : POCLI_ERR_ARGS;
}
/*
 * pocli_commands -- list of available commands
 *
 * Each entry maps a long name and a short alias to its handler together
 * with the usage string printed by the help command.
 */
static struct pocli_cmd pocli_commands[] = {
	/* shell control */
	{
		"help", /* name */
		"h", /* name_short */
		"[<cmd>]", /* usage */
		pocli_help, /* func */
	},
	{
		"quit",
		"q",
		"",
		pocli_quit,
	},
	/* root object and direct access */
	{
		"pmemobj_root",
		"pr",
		"<size>",
		pocli_pmemobj_root,
	},
	{
		"pmemobj_root_size",
		"prs",
		"",
		pocli_pmemobj_root_size,
	},
	{
		"pmemobj_direct",
		"pdr",
		"<obj>",
		pocli_pmemobj_direct,
	},
	{
		"pmemobj_alloc_usable_size",
		"paus",
		"<obj>",
		pocli_pmemobj_alloc_usable_size,
	},
	/* atomic (non-transactional) allocation */
	{
		"pmemobj_alloc",
		"pa",
		"<obj> <type_num> <size>",
		pocli_pmemobj_alloc,
	},
	{
		"pmemobj_zalloc",
		"pza",
		"<obj> <type_num> <size>",
		pocli_pmemobj_zalloc,
	},
	{
		"pmemobj_realloc",
		"pre",
		"<obj> <type_num> <size>",
		pocli_pmemobj_realloc,
	},
	{
		"pmemobj_zrealloc",
		"pzre",
		"<obj> <type_num> <size>",
		pocli_pmemobj_zrealloc,
	},
	{
		"pmemobj_free",
		"pf",
		"<obj>",
		pocli_pmemobj_free,
	},
	{
		"pmemobj_type_num",
		"ptn",
		"<obj>",
		pocli_pmemobj_type_num,
	},
	{
		"pmemobj_strdup",
		"psd",
		"<obj> <string> <type_num>",
		pocli_pmemobj_strdup,
	},
	/* object iteration */
	{
		"pmemobj_first",
		"pfi",
		"<type_num>",
		pocli_pmemobj_first,
	},
	{
		"pmemobj_next",
		"pn",
		"<obj>",
		pocli_pmemobj_next,
	},
	/* persistent memory operations */
	{
		"pmemobj_memcpy_persist",
		"pmcp",
		"<dest> <off_dest> <src> <off_src> <len>",
		pocli_pmemobj_memcpy_persist,
	},
	{
		"pmemobj_memset_persist",
		"pmsp",
		"<obj> <offset> <pattern> <len>",
		pocli_pmemobj_memset_persist,
	},
	{
		"pmemobj_persist",
		"pp",
		"<obj> <offset> <len>",
		pocli_pmemobj_persist,
	},
	{
		"pmemobj_flush",
		"pfl",
		"<obj> <offset> <len>",
		pocli_pmemobj_flush,
	},
	{
		"pmemobj_drain",
		"pd",
		"",
		pocli_pmemobj_drain,
	},
	/* pool lookup */
	{
		"pmemobj_pool_by_oid",
		"ppbo",
		"<obj>",
		pocli_pmemobj_pool_by_oid,
	},
	{
		"pmemobj_pool_by_ptr",
		"ppbp",
		"<obj> <offset>",
		pocli_pmemobj_pool_by_ptr,
	},
	/* atomic lists */
	{
		"pmemobj_list_insert",
		"pli",
		"<obj> <head> <dest> <before>",
		pocli_pmemobj_list_insert,
	},
	{
		"pmemobj_list_insert_new",
		"plin",
		"<obj> <head> <dest> <before>"
		" <size> <type_num>",
		pocli_pmemobj_list_insert_new,
	},
	{
		"pmemobj_list_remove",
		"plr",
		"<obj> <head> <free>",
		pocli_pmemobj_list_remove,
	},
	{
		"pmemobj_list_move",
		"plm",
		"<obj> <head_src> <head_dest> "
		"<dest> <before>",
		pocli_pmemobj_list_move,
	},
	/* transactions */
	{
		"pmemobj_tx_begin",
		"ptb",
		"[<jmp>]",
		pocli_pmemobj_tx_begin,
	},
	{
		"pmemobj_tx_end",
		"pte",
		"",
		pocli_pmemobj_tx_end,
	},
	{
		"pmemobj_tx_abort",
		"ptab",
		"<errnum>",
		pocli_pmemobj_tx_abort,
	},
	{
		"pmemobj_tx_commit",
		"ptc",
		"",
		pocli_pmemobj_tx_commit,
	},
	{
		"pmemobj_tx_stage",
		"pts",
		"",
		pocli_pmemobj_tx_stage,
	},
	{
		"pmemobj_tx_add_range",
		"ptar",
		"<obj> <offset> <size>",
		pocli_pmemobj_tx_add_range,
	},
	{
		"pmemobj_tx_add_range_direct",
		"ptard",
		"<obj> <offset> <size>",
		pocli_pmemobj_tx_add_range_direct,
	},
	{
		"pmemobj_tx_process",
		"ptp",
		"",
		pocli_pmemobj_tx_process,
	},
	{
		"pmemobj_tx_alloc",
		"ptal",
		"<obj> <size> <type_num>",
		pocli_pmemobj_tx_alloc,
	},
	{
		"pmemobj_tx_zalloc",
		"ptzal",
		"<obj> <size> <type_num>",
		pocli_pmemobj_tx_zalloc,
	},
	{
		"pmemobj_tx_realloc",
		"ptre",
		"<obj> <size> <type_num>",
		pocli_pmemobj_tx_realloc,
	},
	{
		"pmemobj_tx_zrealloc",
		"ptzre",
		"<obj> <size> <type_num>",
		pocli_pmemobj_tx_zrealloc,
	},
	{
		"pmemobj_tx_strdup",
		"ptsd",
		"<obj> <string> <type_num>",
		pocli_pmemobj_tx_strdup,
	},
	{
		"pmemobj_tx_free",
		"ptf",
		"<obj>",
		pocli_pmemobj_tx_free,
	},
	{
		"pmemobj_tx_errno",
		"pter",
		"",
		pocli_pmemobj_tx_errno,
	},
	/* string-in-root helpers */
	{
		"str_root_copy",
		"srcp",
		"<size> <string>",
		pocli_str_root_copy,
	},
	{
		"str_root_print",
		"srpr",
		/*
		 * NOTE(review): usage "<size> <size>" looks odd next to
		 * str_root_copy's "<size> <string>" -- confirm against
		 * pocli_str_root_print (not visible in this chunk).
		 */
		"<size> <size>",
		pocli_str_root_print,
	}
};
/* number of entries in pocli_commands */
#define POCLI_NCOMMANDS	(sizeof(pocli_commands) / sizeof(pocli_commands[0]))
/*
 * pocli_evn_parse_bool -- parse environment variable as boolean (1/0)
 *
 * Leaves *value untouched when the variable is unset; returns -1 for any
 * value other than the single character '0' or '1'.
 */
static int
pocli_env_parse_bool(const char *envname, bool *value)
{
	const char *env = os_getenv(envname);
	if (env == NULL)
		return 0;
	int valid = (env[0] == '0' || env[0] == '1') && env[1] == '\0';
	if (!valid) {
		fprintf(stderr, "invalid value specified for %s -- '%s'\n",
			envname, env);
		return -1;
	}
	*value = (env[0] == '1');
	return 0;
}
/*
 * pocli_read_opts -- read options from env variables
 *
 * Starts from the built-in defaults and lets each POCLI_ENV_* variable
 * override one flag; returns non-zero when any variable is malformed.
 */
static int
pocli_read_opts(struct pocli_opts *opts)
{
	/* default values */
	opts->exit_on_error = false;
	opts->echo_mode = false;
	opts->enable_comments = true;
	opts->enable_empty_cmds = true;
	opts->enable_long_names = true;
	opts->enable_help = true;
	struct {
		const char *env;
		bool *field;
	} vars[] = {
		{ POCLI_ENV_EXIT_ON_ERROR, &opts->exit_on_error },
		{ POCLI_ENV_ECHO_MODE, &opts->echo_mode },
		{ POCLI_ENV_COMMENTS, &opts->enable_comments },
		{ POCLI_ENV_EMPTY_CMDS, &opts->enable_empty_cmds },
		{ POCLI_ENV_LONG_NAMES, &opts->enable_long_names },
		{ POCLI_ENV_HELP, &opts->enable_help },
	};
	for (size_t i = 0; i < sizeof(vars) / sizeof(vars[0]); i++) {
		int ret = pocli_env_parse_bool(vars[i].env, vars[i].field);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * pocli_alloc -- allocate main context
 *
 * Reads the option environment variables, opens the pool at 'fname',
 * looks up its root object (if any) and allocates the input-line
 * buffer.  Returns NULL on any failure; on success the caller must
 * release the result with pocli_free().
 */
static struct pocli *
pocli_alloc(FILE *input, const char *fname, const struct pocli_cmd *cmds,
	size_t ncmds, size_t inbuf_len)
{
	/* inbuf_len is later passed to fgets(), which takes an int */
	assert(inbuf_len < INT_MAX);
	struct pocli_opts opts;
	if (pocli_read_opts(&opts))
		return NULL;
	struct pocli *pcli = (struct pocli *)calloc(1, sizeof(*pcli));
	if (!pcli)
		return NULL;
	memcpy(&pcli->opts, &opts, sizeof(pcli->opts));
	pcli->in = input;
	/* interactive sessions get a prompt; scripts do not */
	pcli->istty = isatty(fileno(pcli->in));
	pcli->cmds = cmds;
	pcli->ncmds = ncmds;
	pcli->ctx.pocli = pcli;
	pcli->ctx.err = stderr;
	pcli->ctx.out = stdout;
	pcli->ctx.pop = pmemobj_open(fname, NULL);
	if (!pcli->ctx.pop) {
		fprintf(stderr, "%s: %s\n", fname, pmemobj_errormsg());
		goto err_free_pcli;
	}
	size_t root_size = pmemobj_root_size(pcli->ctx.pop);
	if (root_size)
		pcli->ctx.root = pmemobj_root(pcli->ctx.pop, root_size);
	pcli->inbuf_len = inbuf_len;
	pcli->inbuf = (char *)malloc(inbuf_len);
	if (!pcli->inbuf)
		goto err_close_pool;
	return pcli;
err_close_pool:
	pmemobj_close(pcli->ctx.pop);
err_free_pcli:
	free(pcli);
	return NULL;
}
/*
 * pocli_free -- free main context
 *
 * Drives any transaction that is still open (possibly nested) to
 * completion before closing the pool: the inner loop advances the
 * current transaction's stages, then pmemobj_tx_end() finishes it; the
 * outer loop repeats until no transaction remains.
 */
static void
pocli_free(struct pocli *pcli)
{
	while (pmemobj_tx_stage() != TX_STAGE_NONE) {
		while (pmemobj_tx_stage() != TX_STAGE_NONE)
			pmemobj_tx_process();
		pmemobj_tx_end();
	}
	VEC_DELETE(&pcli->ctx.free_on_abort);
	pmemobj_close(pcli->ctx.pop);
	free(pcli->inbuf);
	free(pcli);
}
/*
 * pocli_prompt -- print prompt (interactive sessions only)
 */
static void
pocli_prompt(struct pocli *pcli)
{
	if (!pcli->istty)
		return;
	printf(POCLI_CMD_PROMPT);
}
/*
 * pocli_process -- process input commands
 *
 * Reads and executes lines from pcli->in until EOF (returns 0) or an
 * error/quit command (returns non-zero).  Also called recursively by
 * the "jmp" variant of the pmemobj_tx_begin command.
 */
int
pocli_process(struct pocli *pcli)
{
	while (1) {
		pocli_prompt(pcli);
		if (!fgets(pcli->inbuf, (int)pcli->inbuf_len, pcli->in))
			return 0;
		char *nl = strchr(pcli->inbuf, '\n');
		if (!nl)
			return 1; /* no newline read: line too long */
		*nl = '\0';
		char *hash = strchr(pcli->inbuf, '#');
		if (hash) {
			if (pcli->opts.enable_comments)
				*hash = '\0'; /* strip trailing comment */
			else
				return 1;
		}
		if (pcli->inbuf[0] == 0 || pcli->inbuf[0] == '\n') {
			if (pcli->opts.enable_empty_cmds)
				continue;
			else
				return 1;
		}
		if (pcli->opts.echo_mode)
			pocli_printf(&pcli->ctx, "%s\n", pcli->inbuf);
		/* split "<cmd> <args...>" at the first space */
		char *argstr = strchr(pcli->inbuf, ' ');
		if (argstr) {
			*argstr = '\0';
			argstr++;
		}
		char *cmds = pcli->inbuf;
		const struct pocli_cmd *cmd = pocli_get_cmd(pcli, cmds);
		if (!cmd) {
			pocli_err(&pcli->ctx, POCLI_RET_OK, /* XXX */
				"unknown command -- '%s'\n", cmds);
			if (pcli->opts.exit_on_error)
				return 1;
			else
				continue;
		}
		/* no arguments: point argstr just past the command string */
		if (!argstr)
			argstr = cmds + strlen(pcli->inbuf) + 1;
		struct pocli_args *args = pocli_args_alloc(pcli->inbuf,
			argstr, POCLI_CMD_DELIM);
		if (!args)
			return 1;
		/*
		 * Put the args object on the stack, just in case we are
		 * in transaction, cmd->func will abort it and skip free(args).
		 */
		VEC_PUSH_BACK(&pcli->ctx.free_on_abort, args);
		enum pocli_ret ret = cmd->func(&pcli->ctx, args);
		free(args);
		/* Take args off the stack. */
		VEC_POP_BACK(&pcli->ctx.free_on_abort);
		if (ret != POCLI_RET_OK)
			return (int)ret;
	}
}
/*
* pocli_do_process -- process input commands and return value
*/
static int
pocli_do_process(struct pocli *pcli)
{
enum pocli_ret ret = (enum pocli_ret)pocli_process(pcli);
if (ret == POCLI_RET_QUIT || ret == POCLI_RET_OK)
return 0;
else
return 1;
}
/*
 * main -- entry point; usage: pmemobjcli [-s <script>] <file>
 *
 * Commands are read from stdin, or from <script> when -s is given.
 */
int
main(int argc, char *argv[])
{
#ifdef _WIN32
	/* convert wide-character arguments to UTF-8 */
	wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
	for (int i = 0; i < argc; i++) {
		argv[i] = util_toUTF8(wargv[i]);
		if (argv[i] == NULL) {
			for (i--; i >= 0; i--)
				free(argv[i]);
			fprintf(stderr, "Error during arguments conversion\n");
			return 1;
		}
	}
#endif
	int ret = 1;
	const char *fname = NULL;
	FILE *input = stdin;
	if (argc < 2 || argc > 4) {
		printf("usage: %s [-s <script>] <file>\n", argv[0]);
		goto out;
	}
	/* "-s <script> <file>" reads commands from a script, not stdin */
	int is_script = strcmp(argv[1], "-s") == 0;
	if (is_script) {
		if (argc != 4) {
			if (argc == 2) {
				printf("usage: %s -s <script> <file>\n",
					argv[0]);
				goto out;
			} else if (argc == 3) {
				printf("usage: %s -s <script> <file> "
					"or %s <file>\n", argv[0], argv[2]);
				goto out;
			}
		}
		fname = argv[3];
		input = os_fopen(argv[2], "r");
		if (!input) {
			perror(argv[2]);
			goto out;
		}
	} else {
		if (argc != 2) {
			printf("usage: %s <file>\n", argv[0]);
			goto out;
		}
		fname = argv[1];
	}
	struct pocli *pcli = pocli_alloc(input, fname,
		pocli_commands, POCLI_NCOMMANDS, POCLI_INBUF_LEN);
	if (!pcli) {
		perror("pocli_alloc");
		goto out;
	}
	ret = pocli_do_process(pcli);
	pocli_free(pcli);
	fclose(input);
out:
#ifdef _WIN32
	/* free the UTF-8 argv copies created above */
	for (int i = argc; i > 0; i--)
		free(argv[i - 1]);
#endif
	return ret;
}
| 52,680 | 21.125577 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/ddmap/ddmap.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ddmap.c -- simple app for reading and writing data from/to a regular file or
* dax device using mmap instead of file io API
*/
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include "common.h"
#include "output.h"
#include "mmap.h"
#include "file.h"
#include "util.h"
#include "os.h"
/*
 * ddmap_context -- context and arguments
 *
 * Populated from the command line by parse_args(); all offsets are
 * expressed in blocks of 'bytes' bytes (dd-style).
 */
struct ddmap_context {
	char *file_in; /* input file name */
	char *file_out; /* output file name */
	char *str; /* string data to write */
	size_t offset_in; /* offset from beginning of input file for */
	/* read/write operations expressed in blocks */
	size_t offset_out; /* offset from beginning of output file for */
	/* read/write operations expressed in blocks */
	size_t bytes; /* size of blocks to write at the time */
	size_t count; /* number of blocks to read/write */
	int checksum; /* compute checksum */
	int runlen; /* print bytes as runlen/char sequence */
};
/*
 * the default context, with all fields initialized to zero or NULL
 */
static struct ddmap_context ddmap_default;
/*
 * print_usage -- print short description of usage
 */
static void
print_usage(void)
{
	static const char * const usage_lines[] = {
		"Usage: ddmap [option] ...",
		"Valid options:",
		"-i FILE - read from FILE",
		"-o FILE - write to FILE",
		"-d STRING - STRING to be written",
		"-s N - skip N blocks at start of input",
		"-q N - skip N blocks at start of output",
		"-b N - read/write N bytes at a time",
		"-n N - copy N input blocks",
		"-c - compute checksum",
		"-r - print file content as runlen/char pairs",
		"-h - print this usage info",
	};
	for (size_t i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]);
			i++)
		printf("%s\n", usage_lines[i]);
}
/*
 * long_options -- command line options
 *
 * Long aliases for the short options accepted by parse_args(); the last
 * column is the short-option character returned by getopt_long().
 */
static const struct option long_options[] = {
	{"input-file", required_argument, NULL, 'i'},
	{"output-file", required_argument, NULL, 'o'},
	{"string", required_argument, NULL, 'd'},
	{"offset-in", required_argument, NULL, 's'},
	{"offset-out", required_argument, NULL, 'q'},
	{"block-size", required_argument, NULL, 'b'},
	{"count", required_argument, NULL, 'n'},
	{"checksum", no_argument, NULL, 'c'},
	{"runlen", no_argument, NULL, 'r'},
	{"help", no_argument, NULL, 'h'},
	{NULL, 0, NULL, 0 },
};
/*
 * ddmap_print_char -- (internal) print single char
 *
 * Printable ASCII characters are printed normally,
 * NUL character is printed as a little circle (the degree symbol),
 * non-printable ASCII characters are printed as centered dots.
 */
static void
ddmap_print_char(char c)
{
	if (c == '\0') {
		/* degree symbol stands in for NUL */
		printf("\u00B0");
	} else if (c < ' ' || c > '~') {
		/* centered dot stands in for other non-printables */
		printf("\u00B7");
	} else {
		printf("%c", c);
	}
}
/*
 * ddmap_print_runlen -- (internal) print file content as length/char pairs
 *
 * For each sequence of chars of the same value (could be just 1 byte)
 * print length of the sequence and the char value.
 */
static void
ddmap_print_runlen(char *addr, size_t len)
{
	char c = '\0';
	ssize_t cnt = 0;
	for (size_t i = 0; i < len; i++) {
		/* value changed: flush the finished run (never at i == 0) */
		if (i > 0 && c != addr[i] && cnt != 0) {
			printf("%zd ", cnt);
			ddmap_print_char(c);
			printf("\n");
			cnt = 0;
		}
		c = addr[i];
		cnt++;
	}
	/* flush the final run, if any input was consumed */
	if (cnt) {
		printf("%zd ", cnt);
		ddmap_print_char(c);
		printf("\n");
	}
}
/*
 * ddmap_print_bytes -- (internal) print array of bytes
 */
static void
ddmap_print_bytes(const char *data, size_t len)
{
	const char *end = data + len;
	for (const char *p = data; p != end; p++)
		ddmap_print_char(*p);
	printf("\n");
}
/*
 * ddmap_read -- (internal) read a string from the file at the offset and
 * print it to stdout
 *
 * Reads bytes*count bytes starting at block offset_in and prints them
 * either verbatim or as run-length pairs.
 */
static int
ddmap_read(const char *path, size_t offset_in, size_t bytes, size_t count,
	int runlen)
{
	size_t len = bytes * count;
	os_off_t offset = (os_off_t)(bytes * offset_in);
	int rc = -1;
	char *buf = Zalloc(len + 1);
	if (buf == NULL) {
		outv_err("Zalloc(%zu) failed\n", len + 1);
		return -1;
	}
	ssize_t nread = util_file_pread(path, buf, len, offset);
	if (nread < 0) {
		outv_err("pread failed");
		goto out;
	}
	if ((size_t)nread < len)
		outv(1, "read less bytes than requested: %zd vs. %zu\n",
			nread, len);
	if (runlen)
		ddmap_print_runlen(buf, (size_t)nread);
	else
		ddmap_print_bytes(buf, (size_t)nread);
	rc = 0;
out:
	Free(buf);
	return rc;
}
/*
 * ddmap_zero -- (internal) zero a range of data in the file
 *
 * The range is clamped to the file size; a range lying entirely past the
 * end of the file is a no-op.
 */
static int
ddmap_zero(const char *path, size_t offset, size_t len)
{
	void *addr;
	ssize_t filesize = util_file_get_size(path);
	if (filesize < 0) {
		outv_err("invalid file size");
		return -1;
	}
	/*
	 * Bug fix: with offset past EOF the old clamp computed
	 * "filesize - offset" in size_t arithmetic and underflowed to a
	 * huge length; such a range is simply empty.  The rewritten clamp
	 * also avoids the potential "offset + len" overflow.
	 */
	if (offset >= (size_t)filesize)
		return 0;
	if (len > (size_t)filesize - offset)
		len = (size_t)filesize - offset;
	addr = util_file_map_whole(path);
	if (addr == NULL) {
		outv_err("map failed");
		return -1;
	}
	memset((char *)addr + offset, 0, len);
	util_unmap(addr, (size_t)filesize);
	return 0;
}
/*
 * ddmap_write_data -- (internal) write data to a file
 *
 * Thin error-reporting wrapper around util_file_pwrite().
 */
static int
ddmap_write_data(const char *path, const char *data,
		os_off_t offset, size_t len)
{
	if (util_file_pwrite(path, data, len, offset) >= 0)
		return 0;
	outv_err("pwrite for dax device failed: path %s,"
		" len %zu, offset %zd", path, len, offset);
	return -1;
}
/*
 * ddmap_write_from_file -- (internal) write data from file to dax device or
 * file
 *
 * Copies 'count' blocks of 'bytes' bytes from path_in (starting at block
 * offset_in) to path_out (starting at block offset_out), wrapping around
 * to the beginning of the input whenever it is exhausted.
 */
static int
ddmap_write_from_file(const char *path_in, const char *path_out,
		size_t offset_in, size_t offset_out, size_t bytes,
		size_t count)
{
	char *base, *src, *tmp_src;
	os_off_t offset;
	ssize_t file_in_size = util_file_get_size(path_in);
	size_t data_left, len;
	/* bug fix: a negative size was silently used as a huge size_t */
	if (file_in_size < 0) {
		outv_err("invalid file size");
		return -1;
	}
	util_init();
	/* bug fix: the mapping could fail and was never checked */
	base = util_file_map_whole(path_in);
	if (base == NULL) {
		outv_err("map failed");
		return -1;
	}
	src = base + (os_off_t)(offset_in * bytes);
	offset = (os_off_t)(offset_out * bytes);
	data_left = (size_t)file_in_size;
	tmp_src = src;
	do {
		len = MIN(data_left, bytes);
		ddmap_write_data(path_out, tmp_src, offset, len);
		tmp_src += len;
		data_left -= len;
		if (data_left == 0) {
			/* input exhausted -- wrap around to the start */
			data_left = (size_t)file_in_size;
			tmp_src = src;
		}
		offset += (os_off_t)len;
		count--;
	} while (count > 0);
	/*
	 * Bug fix: unmap the base of the mapping, not the pointer that was
	 * advanced by the input offset.
	 */
	util_unmap(base, (size_t)file_in_size);
	return 0;
}
/*
 * ddmap_write -- (internal) write the string to the file
 *
 * Writes up to bytes*count bytes at block offset_in: first the string
 * (including its terminating NUL), then zeros for the rest of the range.
 * A zero-length range means "write the whole string".
 */
static int
ddmap_write(const char *path, const char *str, size_t offset_in, size_t bytes,
	size_t count)
{
	/* calculate how many characters from the string are to be written */
	size_t length;
	size_t str_len = (str != NULL) ? strlen(str) + 1 : 0;
	os_off_t offset = (os_off_t)(bytes * offset_in);
	size_t len = bytes * count;
	if (len == 0)
		length = str_len;
	else
		/*
		 * NOTE(review): lowercase 'min' while the rest of the file
		 * uses MIN -- presumably defined in a project header;
		 * confirm.
		 */
		length = min(len, str_len);
	/* write the string */
	if (length > 0) {
		if (ddmap_write_data(path, str, offset, length))
			return -1;
	}
	/* zero the rest of requested range */
	if (length < len) {
		if (ddmap_zero(path, (size_t)offset + length, len - length))
			return -1;
	}
	return 0;
}
/*
 * ddmap_checksum -- (internal) compute checksum of a slice of an input file
 *
 * Checksums bytes*count bytes starting at block offset_in and prints the
 * result to stdout.
 */
static int
ddmap_checksum(const char *path, size_t bytes, size_t count, size_t offset_in)
{
	char *src;
	uint64_t checksum;
	ssize_t filesize = util_file_get_size(path);
	os_off_t offset = (os_off_t)(bytes * offset_in);
	size_t len = bytes * count;
	/* bug fix: a negative size was silently used as a huge size_t */
	if (filesize < 0) {
		outv_err("invalid file size");
		return -1;
	}
	if ((size_t)filesize < len + (size_t)offset) {
		outv_err("offset with length exceed file size");
		return -1;
	}
	util_init();
	/* bug fix: the mapping could fail and was never checked */
	src = util_file_map_whole(path);
	if (src == NULL) {
		outv_err("map failed");
		return -1;
	}
	util_checksum(src + offset, len, &checksum, 1, 0);
	util_unmap(src, (size_t)filesize);
	printf("%" PRIu64 "\n", checksum);
	return 0;
}
/*
 * parse_args -- (internal) parse command line arguments
 *
 * Fills 'ctx' from the getopt options; returns 0 on success, -1 when a
 * numeric argument is malformed.  -h exits with success, an unknown
 * option exits with failure.
 */
static int
parse_args(struct ddmap_context *ctx, int argc, char *argv[])
{
	int opt;
	char *endptr;
	size_t offset;
	size_t count;
	size_t bytes;
	/* note: -v (verbose) is accepted but not listed in print_usage() */
	while ((opt = getopt_long(argc, argv, "i:o:d:s:q:b:n:crhv",
			long_options, NULL)) != -1) {
		switch (opt) {
		case 'i':
			ctx->file_in = optarg;
			break;
		case 'o':
			ctx->file_out = optarg;
			break;
		case 'd':
			ctx->str = optarg;
			/* default: copy the whole string, one byte blocks */
			if (ctx->count == 0)
				ctx->count = strlen(ctx->str);
			if (ctx->bytes == 0)
				ctx->bytes = 1;
			break;
		case 's':
			errno = 0;
			offset = strtoul(optarg, &endptr, 0);
			if ((endptr && *endptr != '\0') || errno) {
				outv_err("'%s' -- invalid input offset",
					optarg);
				return -1;
			}
			ctx->offset_in = offset;
			break;
		case 'q':
			errno = 0;
			offset = strtoul(optarg, &endptr, 0);
			if ((endptr && *endptr != '\0') || errno) {
				outv_err("'%s' -- invalid output offset",
					optarg);
				return -1;
			}
			ctx->offset_out = offset;
			break;
		case 'b':
			errno = 0;
			bytes = strtoull(optarg, &endptr, 0);
			if ((endptr && *endptr != '\0') || errno) {
				outv_err("'%s' -- invalid block size", optarg);
				return -1;
			}
			ctx->bytes = bytes;
			break;
		case 'n':
			errno = 0;
			count = strtoull(optarg, &endptr, 0);
			if ((endptr && *endptr != '\0') || errno) {
				outv_err("'%s' -- invalid count", optarg);
				return -1;
			}
			ctx->count = count;
			break;
		case 'c':
			ctx->checksum = 1;
			break;
		case 'r':
			ctx->runlen = 1;
			break;
		case 'h':
			print_usage();
			exit(EXIT_SUCCESS);
		case 'v':
			out_set_vlevel(1);
			break;
		default:
			print_usage();
			exit(EXIT_FAILURE);
		}
	}
	return 0;
}
/*
 * validate_args -- (internal) validate arguments
 *
 * Checks that the combination of input/output file, block size, count
 * and string data makes sense for the operation that will be performed.
 */
static int
validate_args(struct ddmap_context *ctx)
{
	int has_in = (ctx->file_in != NULL);
	int has_out = (ctx->file_out != NULL);
	if (!has_in && !has_out) {
		outv_err("an input file and/or an output file must be "
			"provided");
		return -1;
	}
	if (!has_out) {
		/* read-only mode needs a block size */
		if (ctx->bytes == 0) {
			outv_err("number of bytes to read has to be provided");
			return -1;
		}
		return 0;
	}
	if (!has_in) {
		/* ddmap_write requirements */
		if (ctx->str == NULL && (ctx->count * ctx->bytes) == 0) {
			outv_err("when writing, 'data' or 'count' and 'bytes' "
				"have to be provided");
			return -1;
		}
		return 0;
	}
	/* scenarios other than ddmap_write requirement */
	if ((ctx->bytes * ctx->count) == 0) {
		outv_err("number of bytes and count must be provided");
		return -1;
	}
	return 0;
}
/*
 * do_ddmap -- (internal) dispatch to the requested ddmap operation
 *
 * Selection order: file-to-file copy, then checksum, then plain read,
 * and finally write (validate_args() guarantees at least one file is
 * set).  Returns 0 on success, -1 when the chosen operation fails.
 */
static int
do_ddmap(struct ddmap_context *ctx)
{
	int has_in = (ctx->file_in != NULL);
	int has_out = (ctx->file_out != NULL);

	if (has_in && has_out)
		return ddmap_write_from_file(ctx->file_in, ctx->file_out,
				ctx->offset_in, ctx->offset_out, ctx->bytes,
				ctx->count) ? -1 : 0;

	if (ctx->checksum == 1 && has_in)
		return ddmap_checksum(ctx->file_in, ctx->bytes, ctx->count,
				ctx->offset_in) ? -1 : 0;

	if (has_in)
		return ddmap_read(ctx->file_in, ctx->offset_in, ctx->bytes,
				ctx->count, ctx->runlen) ? -1 : 0;

	/* ctx->file_out != NULL */
	return ddmap_write(ctx->file_out, ctx->str, ctx->offset_in,
			ctx->bytes, ctx->count) ? -1 : 0;
}
/*
 * main -- parse, validate and execute the ddmap request
 *
 * On Windows the wide command line is converted to UTF-8 first and the
 * converted strings are freed on every exit path.  Returns 0 on success,
 * non-zero on any parse/validate/execute failure.
 */
int
main(int argc, char *argv[])
{
#ifdef _WIN32
	wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
	for (int i = 0; i < argc; i++) {
		argv[i] = util_toUTF8(wargv[i]);
		if (argv[i] == NULL) {
			for (i--; i >= 0; i--)
				free(argv[i]);
			outv_err("Error during arguments conversion\n");
			return 1;
		}
	}
#endif
	int ret = 0;
	struct ddmap_context ctx = ddmap_default;
	if ((ret = parse_args(&ctx, argc, argv)))
		goto out;
	if ((ret = validate_args(&ctx)))
		goto out;
	if ((ret = do_ddmap(&ctx))) {
		/*
		 * Save errno before printing: outv_err() goes through stdio,
		 * which may overwrite errno and corrupt the report below.
		 */
		int oerrno = errno;
		outv_err("failed to perform ddmap\n");
		if (oerrno)
			outv_err("errno: %s\n", strerror(oerrno));
		ret = -1;
		goto out;
	}
out:
#ifdef _WIN32
	for (int i = argc; i > 0; i--)
		free(argv[i - 1]);
#endif
	return ret;
}
| 13,362 | 23.83829 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/bttcreate/bttcreate.c
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bttcreate.c -- tool for generating BTT layout
*/
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include "set.h"
#include "pool_hdr.h"
#include "btt.h"
#include "btt_layout.h"
#include "pmemcommon.h"
#include "os.h"
#define BTT_CREATE_DEF_SIZE (20 * 1UL << 20) /* 20 MB */
#define BTT_CREATE_DEF_BLK_SIZE 512UL
#define BTT_CREATE_DEF_OFFSET_SIZE (4 * 1UL << 10) /* 4 KB */
struct btt_context {
void *addr;
uint64_t len;
};
struct bbtcreate_options {
const char *fpath;
size_t poolsize;
uint32_t blocksize;
unsigned maxlanes;
uuid_t uuid;
bool trunc;
bool verbose;
bool user_uuid;
};
/*
* nsread -- btt callback for reading
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count,
uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + count > nsc->len) {
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)nsc->addr + off, count);
return 0;
}
/*
* nswrite -- btt callback for writing
*/
static int
nswrite(void *ns, unsigned lane, const void *buf,
size_t count, uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + count > nsc->len) {
errno = EINVAL;
return -1;
}
memcpy((char *)nsc->addr + off, buf, count);
return 0;
}
/*
* nsmap -- btt callback for memory mapping
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len,
uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
assert((ssize_t)len >= 0);
if (off + len >= nsc->len) {
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)nsc->addr + off;
return (ssize_t)len;
}
/*
* nssync -- btt callback for memory synchronization
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
/* do nothing */
}
/*
* nszero -- btt callback for zeroing memory
*/
static int
nszero(void *ns, unsigned lane, size_t len, uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + len >= nsc->len) {
errno = EINVAL;
return -1;
}
memset((char *)nsc->addr + off, 0, len);
return 0;
}
/*
* print_usage -- print usage of program
*/
static void
print_usage(char *name)
{
printf("Usage: %s [-s <pool_file_size>] [-b <block_size>] "
"[-l <max_lanes>] [-u <uuid>] [-t] [-v] "
"<pool_name>\n", name);
}
/*
* file_error -- handle file errors
*/
static int
file_error(const int fd, const char *fpath)
{
if (fd != -1)
(void) os_close(fd);
os_unlink(fpath);
return -1;
}
/*
* print_uuid -- print uuid
*/
static void
print_uuid(uuid_t uuid)
{
char uuidstr[POOL_HDR_UUID_STR_LEN];
if (util_uuid_to_string(uuid, uuidstr) == 0) {
printf("uuid\t\t%s\n", uuidstr);
}
}
/*
* print_result -- print result if verbose option is on
*/
static void
print_result(struct bbtcreate_options *opts)
{
if (opts->verbose) {
printf("BTT successfully created: %s\n", opts->fpath);
printf("poolsize\t%zuB\n", opts->poolsize);
printf("blocksize\t%uB\n", opts->blocksize);
printf("maxlanes\t%u\n", opts->maxlanes);
print_uuid(opts->uuid);
putchar('\n');
}
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
fprintf(stderr, "Error during arguments conversion\n");
return 1;
}
}
#endif
common_init("", "", "", 0, 0);
int opt;
size_t size;
int fd;
int res = 0;
struct bbtcreate_options opts = {
.poolsize = BTT_CREATE_DEF_SIZE,
.blocksize = BTT_CREATE_DEF_BLK_SIZE,
.maxlanes = BTT_DEFAULT_NFREE,
.trunc = false,
.verbose = false,
.user_uuid = false
};
/* parse option */
while ((opt = getopt(argc, argv, "s:b:l:u:tv")) != -1) {
switch (opt) {
case 's':
if (util_parse_size(optarg, &size) == 0) {
opts.poolsize = size;
} else {
fprintf(stderr, "Wrong size format in pool"
" size option\n");
res = 1;
goto out;
}
break;
case 'b':
if (util_parse_size(optarg, &size) == 0) {
opts.blocksize = (uint32_t)size;
} else {
fprintf(stderr, "Wrong size format in block"
" size option\n");
res = 1;
goto out;
}
break;
case 'l':
opts.maxlanes = (unsigned)strtoul(optarg, NULL, 0);
break;
case 'u':
if (util_uuid_from_string(optarg,
(struct uuid *)&opts.uuid) == 0) {
opts.user_uuid = true;
} else {
fprintf(stderr, "Wrong uuid format.");
res = 1;
goto out;
}
break;
case 't':
opts.trunc = true;
break;
case 'v':
opts.verbose = true;
break;
default:
print_usage(argv[0]);
res = 1;
goto out;
}
}
if (optind < argc) {
opts.fpath = argv[optind];
} else {
print_usage(argv[0]);
res = 1;
goto out;
}
/* check sizes */
if (opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE < BTT_MIN_SIZE) {
fprintf(stderr, "Pool size is less then %d MB\n",
BTT_MIN_SIZE >> 20);
res = 1;
goto out;
}
if (opts.blocksize < BTT_MIN_LBA_SIZE) {
fprintf(stderr, "Block size is less then %zu B\n",
BTT_MIN_LBA_SIZE);
res = 1;
goto out;
}
/* open file */
if ((fd = os_open(opts.fpath, O_RDWR|O_CREAT,
S_IRUSR|S_IWUSR)) < 0) {
perror(opts.fpath);
res = 1;
goto out;
}
/* allocate file */
if (!opts.trunc) {
if (os_posix_fallocate(fd, 0,
(os_off_t)opts.poolsize) != 0) {
perror("posix_fallocate");
res = file_error(fd, opts.fpath);
goto error;
}
} else {
if (os_ftruncate(fd, (os_off_t)opts.poolsize) != 0) {
perror("ftruncate");
res = file_error(fd, opts.fpath);
goto error;
}
}
/* map created file */
void *base = util_map(fd, opts.poolsize, MAP_SHARED, 0, 0, NULL);
if (!base) {
perror("util_map");
res = file_error(fd, opts.fpath);
goto error_map;
}
/* setup btt context */
struct btt_context btt_context = {
.addr = (void *)((uint64_t)base + BTT_CREATE_DEF_OFFSET_SIZE),
.len = opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE
};
/* generate uuid */
if (!opts.user_uuid) {
if (util_uuid_generate(opts.uuid) < 0) {
perror("util_uuid_generate");
res = -1;
goto error_map;
}
}
/* init callback structure */
static struct ns_callback btt_ns_callback = {
.nsread = nsread,
.nswrite = nswrite,
.nsmap = nsmap,
.nssync = nssync,
.nszero = nszero,
};
/* init btt in requested area */
struct btt *bttp = btt_init(opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE,
opts.blocksize, opts.uuid, opts.maxlanes,
(void *)&btt_context,
&btt_ns_callback);
if (!bttp) {
printf("Error: Cannot initialize BTT layer\n");
res = -1;
goto error_map;
}
/* initialize metadata */
if (btt_set_error(bttp, 0, 0)) {
perror("btt_set_error");
res = -1;
goto error_btt;
}
if (btt_set_zero(bttp, 0, 0)) {
perror("btt_set_zero");
res = -1;
goto error_btt;
}
/* print results */
print_result(&opts);
error_btt:
btt_fini(bttp);
error_map:
common_fini();
error:
os_close(fd);
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return res;
}
| 8,869 | 21.009926 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/dllview/dllview.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* dllview.c -- a simple utility displaying the list of symbols exported by DLL
*
* usage: dllview filename
*/
#include <windows.h>
#include <stdio.h>
#include <winnt.h>
#include <imagehlp.h>
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s dllname\n", argv[0]);
exit(1);
}
const char *dllname = argv[1];
LOADED_IMAGE img;
if (MapAndLoad(dllname, NULL, &img, 1, 1) == FALSE) {
fprintf(stderr, "cannot load DLL image\n");
exit(2);
}
IMAGE_EXPORT_DIRECTORY *dir;
ULONG dirsize;
dir = (IMAGE_EXPORT_DIRECTORY *)ImageDirectoryEntryToData(
img.MappedAddress, 0 /* mapped as image */,
IMAGE_DIRECTORY_ENTRY_EXPORT, &dirsize);
if (dir == NULL) {
fprintf(stderr, "cannot read image directory\n");
UnMapAndLoad(&img);
exit(3);
}
DWORD *rva;
rva = (DWORD *)ImageRvaToVa(img.FileHeader, img.MappedAddress,
dir->AddressOfNames, NULL);
for (DWORD i = 0; i < dir->NumberOfNames; i++) {
char *name = (char *)ImageRvaToVa(img.FileHeader,
img.MappedAddress, rva[i], NULL);
printf("%s\n", name);
}
UnMapAndLoad(&img);
return 0;
}
| 2,705 | 31.214286 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemalloc/pmemalloc.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemmalloc.c -- simple tool for allocating objects from pmemobj
*
* usage: pmemalloc [-r <size>] [-o <size>] [-t <type_num>]
* [-c <count>] [-e <num>] <file>
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <libpmemobj.h>
#include <util.h>
#define USAGE()\
printf("usage: pmemalloc"\
" [-r <size>] [-o <size>] [-t <type_num>]"\
" [-s] [-f] [-e a|f|s] <file>\n")
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
fprintf(stderr, "Error during arguments conversion\n");
return 1;
}
}
#endif
int opt;
int tmpi;
long long tmpl;
int ret = 0;
size_t size = 0;
size_t root_size = 0;
unsigned type_num = 0;
char exit_at = '\0';
int do_set = 0;
int do_free = 0;
size_t alloc_class_size = 0;
if (argc < 2) {
USAGE();
ret = 1;
goto end;
}
while ((opt = getopt(argc, argv, "r:o:c:t:e:sf")) != -1) {
switch (opt) {
case 'r':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
root_size = (size_t)tmpl;
break;
case 'o':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
size = (size_t)tmpl;
break;
case 'c':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
alloc_class_size = (size_t)tmpl;
break;
case 't':
tmpi = atoi(optarg);
if (tmpi < 0) {
USAGE();
ret = 1;
goto end;
}
type_num = (unsigned)tmpi;
break;
case 'e':
exit_at = optarg[0];
break;
case 's':
do_set = 1;
break;
case 'f':
do_free = 1;
break;
default:
USAGE();
ret = 1;
goto end;
}
}
char *file = argv[optind];
PMEMobjpool *pop;
if ((pop = pmemobj_open(file, NULL)) == NULL) {
fprintf(stderr, "pmemobj_open: %s\n", pmemobj_errormsg());
ret = 1;
goto end;
}
if (root_size) {
PMEMoid oid = pmemobj_root(pop, root_size);
if (OID_IS_NULL(oid)) {
fprintf(stderr, "pmemobj_root: %s\n",
pmemobj_errormsg());
ret = 1;
goto end;
}
}
if (alloc_class_size) {
PMEMoid oid;
struct pobj_alloc_class_desc desc;
desc.alignment = 0;
desc.class_id = 0;
desc.header_type = POBJ_HEADER_COMPACT;
desc.unit_size = alloc_class_size;
desc.units_per_block = 1;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc);
if (ret != 0)
goto end;
ret = pmemobj_xalloc(pop, &oid, 1, type_num,
POBJ_CLASS_ID(desc.class_id), NULL, NULL);
if (ret != 0)
goto end;
}
if (size) {
PMEMoid oid;
TX_BEGIN(pop) {
oid = pmemobj_tx_alloc(size, type_num);
if (exit_at == 'a')
exit(1);
} TX_END
if (OID_IS_NULL(oid)) {
fprintf(stderr, "pmemobj_tx_alloc: %s\n",
pmemobj_errormsg());
ret = 1;
goto end;
}
if (do_set) {
TX_BEGIN(pop) {
pmemobj_tx_add_range(oid, 0, size);
if (exit_at == 's')
exit(1);
} TX_END
}
if (do_free) {
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
if (exit_at == 'f')
exit(1);
} TX_END
}
}
pmemobj_close(pop);
end:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 4,870 | 21.761682 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemdetect/pmemdetect.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemdetect.c -- detect PMEM/Device DAX device or Device DAX alignment
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include "mmap.h"
#include "libpmem.h"
#include "file.h"
#include "os.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#define SIZE 4096
#define DEVDAX_DETECT (1 << 0)
#define DEVDAX_ALIGN (1 << 1)
#define MAP_SYNC_SUPP (1 << 2)
#define DAX_REGION_DETECT (1 << 3)
#define FILE_SIZE (1 << 4)
#define err(fmt, ...) fprintf(stderr, "pmemdetect: " fmt, __VA_ARGS__)
/* arguments */
static int Opts;
static char *Path;
static size_t Align;
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage: pmemdetect [options] <path>\n");
printf("Valid options:\n");
printf("-d, --devdax - check if <path> is Device DAX\n");
printf("-a, --align=N - check Device DAX alignment\n");
printf("-r, --dax-region - check if Dev DAX <path> has region id\n");
printf("-s, --map-sync - check if <path> supports MAP_SYNC\n");
printf("-z, --size - print file/Device DAX size\n");
printf("-h, --help - print this usage info\n");
}
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"devdax", no_argument, NULL, 'd'},
{"align", required_argument, NULL, 'a'},
{"dax-region", no_argument, NULL, 'r'},
{"map-sync", no_argument, NULL, 's'},
{"size", no_argument, NULL, 'z'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* parse_args -- (internal) parse command line arguments
*/
static int
parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt_long(argc, argv, "a:dshrz",
long_options, NULL)) != -1) {
switch (opt) {
case 'd':
Opts |= DEVDAX_DETECT;
break;
case 'r':
Opts |= DAX_REGION_DETECT;
break;
case 'a':
Opts |= DEVDAX_ALIGN;
char *endptr;
errno = 0;
size_t align = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
err("'%s' -- invalid alignment", optarg);
return -1;
}
Align = align;
break;
case 's':
Opts |= MAP_SYNC_SUPP;
break;
case 'z':
Opts |= FILE_SIZE;
break;
case 'h':
print_usage();
exit(EXIT_SUCCESS);
default:
print_usage();
exit(EXIT_FAILURE);
}
}
if (optind < argc) {
Path = argv[optind];
} else {
print_usage();
exit(EXIT_FAILURE);
}
return 0;
}
/*
* get_params -- get parameters for pmem_map_file
*/
static int
get_params(const char *path, int *flags, size_t *size)
{
int ret;
os_stat_t buf;
ret = os_stat(path, &buf);
if (ret && errno != ENOENT) {
/* error other than no such file */
perror(path);
return -1;
}
if (ret) {
/* no such file */
*flags = PMEM_FILE_CREATE;
*size = SIZE;
} else if (S_ISDIR(buf.st_mode)) {
*flags = PMEM_FILE_CREATE | PMEM_FILE_TMPFILE;
*size = SIZE;
} else {
/* file exist */
*size = 0;
*flags = 0;
}
return 0;
}
/*
* is_pmem -- checks if given path points to pmem-aware filesystem
*/
static int
is_pmem(const char *path)
{
int ret;
int flags;
size_t size;
ret = get_params(path, &flags, &size);
if (ret)
return ret;
int is_pmem;
void *addr = pmem_map_file(path, size, flags, 0, &size, &is_pmem);
if (addr == NULL) {
perror("pmem_map_file failed");
return -1;
}
pmem_unmap(addr, size);
return is_pmem;
}
/*
* is_dev_dax -- checks if given path points to Device DAX
*/
static int
is_dev_dax(const char *path)
{
enum file_type type = util_file_get_type(path);
if (type < 0) {
printf("%s -- not accessible\n", path);
return -1;
}
if (os_access(path, W_OK|R_OK)) {
printf("%s -- permission denied\n", path);
return -1;
}
if (type == TYPE_DEVDAX)
return 1;
printf("%s -- not device dax\n", path);
return 0;
}
/*
* is_dev_dax_align -- checks if Device DAX alignment is as specified
*/
static int
is_dev_dax_align(const char *path, size_t req_align)
{
if (is_dev_dax(path) != 1)
return -1;
size_t align = util_file_device_dax_alignment(path);
return (req_align == align) ? 1 : 0;
}
/*
* supports_map_sync -- checks if MAP_SYNC is supported on a filesystem
* from given path
*/
static int
supports_map_sync(const char *path)
{
int ret;
int flags;
size_t size;
ret = get_params(path, &flags, &size);
if (ret)
return ret;
int fd;
if (flags & PMEM_FILE_TMPFILE)
fd = util_tmpfile(path, "/pmemdetect.XXXXXX", 0);
else if (flags & PMEM_FILE_CREATE)
fd = os_open(path, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
else
fd = os_open(path, O_RDWR);
if (fd < 0) {
perror(path);
return -1;
}
if (flags & PMEM_FILE_CREATE) {
ret = os_ftruncate(fd, (off_t)size);
if (ret) {
perror(path);
os_close(fd);
return -1;
}
}
void *addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_SYNC|MAP_SHARED_VALIDATE, fd, 0);
if (addr != MAP_FAILED) {
ret = 1;
} else if (addr == MAP_FAILED &&
(errno == EOPNOTSUPP || errno == EINVAL)) {
ret = 0;
} else {
err("mmap: %s\n", strerror(errno));
ret = -1;
}
os_close(fd);
if (flags & PMEM_FILE_CREATE && !(flags & PMEM_FILE_TMPFILE))
util_unlink(path);
return ret;
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
err("error during arguments conversion\n");
return 2;
}
}
#endif
int ret;
if (parse_args(argc, argv)) {
ret = 2;
goto out;
}
util_init();
util_mmap_init();
if (Opts & DEVDAX_DETECT)
ret = is_dev_dax(Path);
else if (Opts & DAX_REGION_DETECT) {
ret = util_ddax_region_find(Path);
if (ret < 0) {
printf("Sysfs id file for dax_region is not supported:"
" %s\n", Path);
ret = 0;
} else {
ret = 1;
}
} else if (Opts & DEVDAX_ALIGN) {
ret = is_dev_dax_align(Path, Align);
} else if (Opts & FILE_SIZE) {
printf("%zu", (size_t)util_file_get_size(Path));
ret = 1;
} else if (Opts & MAP_SYNC_SUPP) {
ret = supports_map_sync(Path);
} else {
ret = is_pmem(Path);
}
/*
* Return 0 on 'true'. Otherwise return 1.
* If any problem occurred return 2.
*/
switch (ret) {
case 0:
ret = 1;
break;
case 1:
ret = 0;
break;
default:
ret = 2;
break;
}
util_mmap_fini();
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 8,121 | 20.601064 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemspoil/spoil.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* spoil.c -- pmempool spoil command source file
*/
#include <features.h>
#ifndef __FreeBSD__
#define __USE_UNIX98
#endif
#include <unistd.h>
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <inttypes.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <libgen.h>
#include <err.h>
#include <assert.h>
#include <endian.h>
#include <libpmem.h>
#include "common.h"
#include "output.h"
#include "btt.h"
#include "set.h"
#include "util.h"
#define STR(x) #x
/*
* Set of macros for parsing structures and fields.
*
* Example:
*
* PROCESS_BEGIN(psp, pfp) {
* PARSE_FIELD(my_struct, my_field, uint32_t);
* PARSE(struct_name, arg, max_index)
* } PROCESS_END
*
* return PROCESS_RET
*
* The PROCESS_STATE holds the state of processing.
* The PROCESS_INDEX holds the index of current field.
*/
/*
* State of processing fields.
*/
enum process_state {
PROCESS_STATE_NOT_FOUND,
PROCESS_STATE_FOUND,
PROCESS_STATE_FIELD,
PROCESS_STATE_FUNC,
PROCESS_STATE_ERROR_MSG,
PROCESS_STATE_ERROR,
};
#define PROCESS_BEGIN(psp, pfp) \
enum process_state PROCESS_STATE = PROCESS_STATE_NOT_FOUND;\
struct pmemspoil *_psp = (psp);\
struct pmemspoil_list *_pfp = (pfp);\
#define PROCESS_RET ((PROCESS_STATE == PROCESS_STATE_FOUND ||\
PROCESS_STATE == PROCESS_STATE_FIELD ||\
PROCESS_STATE == PROCESS_STATE_FUNC) ? 0 : -1)
#define PROCESS_INDEX (_pfp->cur->index)
#define PROCESS_END \
_process_end:\
switch (PROCESS_STATE) {\
case PROCESS_STATE_NOT_FOUND:\
outv_err("unknown field '%s'\n", _pfp->cur->name);\
break;\
case PROCESS_STATE_FIELD:\
outv(2, "spoil: %s\n", _pfp->str);\
break;\
case PROCESS_STATE_FUNC:\
outv(2, "spoil: %s\n", _pfp->str);\
break;\
case PROCESS_STATE_ERROR_MSG:\
outv_err("processing '%s'\n", _pfp->str);\
PROCESS_STATE = PROCESS_STATE_ERROR;\
break;\
default:\
break;\
}
/* _max - size of _arg if it is array (if not it must be 1) */
#define PROCESS(_name, _arg, _max, _type) do {\
if (pmemspoil_check_field(_pfp, STR(_name))) {\
PROCESS_STATE = PROCESS_STATE_FOUND;\
if (_pfp->cur->index >= (_max)) {\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
} else {\
_type a = _arg;\
pmemspoil_next_field(_pfp);\
if (pmemspoil_process_##_name(_psp, _pfp, a))\
PROCESS_STATE = PROCESS_STATE_ERROR;\
}\
goto _process_end;\
}\
} while (0)
#define PROCESS_FIELD(_ptr, _name, _type) do {\
if (pmemspoil_check_field(_pfp, STR(_name))) {\
pmemspoil_next_field(_pfp);\
if (pmemspoil_process_##_type(_psp, _pfp,\
(_type *)&((_ptr)->_name),\
sizeof((_ptr)->_name), 0))\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
else\
PROCESS_STATE = PROCESS_STATE_FIELD;\
goto _process_end;\
}\
} while (0)
#define PROCESS_FIELD_LE(_ptr, _name, _type) do {\
if (pmemspoil_check_field(_pfp, STR(_name))) {\
pmemspoil_next_field(_pfp);\
if (pmemspoil_process_##_type(_psp, _pfp,\
(_type *)&((_ptr)->_name),\
sizeof((_ptr)->_name), 1))\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
else\
PROCESS_STATE = PROCESS_STATE_FIELD;\
goto _process_end;\
}\
} while (0)
#define PROCESS_FUNC(_name, _func, _arg) do {\
if (pmemspoil_check_field(_pfp, (_name))) {\
PROCESS_STATE = PROCESS_STATE_FOUND;\
if (!_pfp->str) {\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
} else {\
if (pmemspoil_process_##_func(_psp, _pfp, (_arg)))\
PROCESS_STATE = PROCESS_STATE_ERROR;\
else\
PROCESS_STATE = PROCESS_STATE_FUNC;\
}\
goto _process_end;\
}\
} while (0)
#define PROCESS_FIELD_ARRAY(_ptr, _name, _type, _max) do {\
if (pmemspoil_check_field(_pfp, STR(_name))) {\
if (_pfp->cur->index >= (_max)) {\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
} else {\
uint64_t ind = PROCESS_INDEX;\
pmemspoil_next_field(_pfp);\
if (pmemspoil_process_##_type(_psp, _pfp,\
(_type *)&((_ptr)->_name[ind]),\
sizeof((_ptr)->_name), 0))\
PROCESS_STATE = PROCESS_STATE_ERROR_MSG;\
else\
PROCESS_STATE = PROCESS_STATE_FIELD;\
}\
goto _process_end;\
}\
} while (0)
/*
* struct field -- single field with name and id
*/
struct field {
struct field *next;
struct field *prev;
char *name;
uint32_t index;
int is_func;
};
/*
* struct pmemspoil_list -- all fields and value
*/
struct pmemspoil_list {
struct field *head;
struct field *tail;
struct field *cur;
char *value;
char *str;
};
/*
* struct pmemspoil -- context and args
*/
struct pmemspoil {
int verbose;
char *fname;
struct pool_set_file *pfile;
struct pmemspoil_list *args;
unsigned argc;
void *addr;
size_t size;
unsigned replica;
uint64_t arena_offset;
};
typedef enum chunk_type chunk_type_t;
/*
* struct chunk_pair -- chunk header and chunk
*/
struct chunk_pair {
struct chunk_header *hdr;
struct chunk *chunk;
};
/*
* struct list_pair -- list head and entry
*/
struct list_pair {
struct list_head *head;
struct list_entry *entry;
};
/*
* struct checksum_args -- arguments for checksum
*/
struct checksum_args {
void *ptr;
size_t len;
void *checksum;
size_t skip_off;
};
/*
 * pmemspoil_default -- default context and args
 * (remaining members are zero-initialized by the designated initializer)
 */
static const struct pmemspoil pmemspoil_default = {
.verbose = 1,
.fname = NULL,
.args = NULL,
.argc = 0,
.replica = 0,
};
/*
 * help_str -- string for help message
 * (printf format: %s is substituted with the application name)
 */
static const char * const help_str =
"%s common options:\n"
"  -v, --verbose        Increase verbose level\n"
"  -?, --help           Display this help and exit\n"
"  -r, --replica <num>  Replica index\n"
"\n"
;
/*
 * long_options -- command line options
 * (long equivalents of the "v?r:" short-option string)
 */
static const struct option long_options[] = {
{"verbose",	no_argument,		NULL,	'v'},
{"help",	no_argument,		NULL,	'?'},
{"replica",	required_argument,	NULL,	'r'},
{NULL,		0,			NULL,	 0 },
};
/*
 * pmemspoil_persist -- flush stores in [addr, addr + size) to persistence
 *
 * Uses pmem_persist() when the range resides on persistent memory and
 * falls back to msync-based flushing otherwise.
 */
static void
pmemspoil_persist(void *addr, size_t size)
{
	if (!pmem_is_pmem(addr, size)) {
		pmem_msync(addr, size);
		return;
	}
	pmem_persist(addr, size);
}
/*
 * print_usage -- print application usage short description
 */
static void
print_usage(char *appname)
{
printf("Usage: %s <file> <field>=<value>\n", appname);
}
/*
 * print_version -- print application name and source version string
 */
static void
print_version(char *appname)
{
printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmemspoil_help -- print usage, version and common-options help text
 */
static void
pmemspoil_help(char *appname)
{
print_usage(appname);
print_version(appname);
printf(help_str, appname);
}
/*
 * pmemspoil_read -- read nbytes at offset off from the pool into buff;
 * thin wrapper around pool_set_file_read(), returns its status
 */
static int
pmemspoil_read(struct pmemspoil *psp, void *buff, size_t nbytes, uint64_t off)
{
return pool_set_file_read(psp->pfile, buff, nbytes, off);
}
/*
 * pmemspoil_write -- write nbytes from buff at offset off into the pool;
 * thin wrapper around pool_set_file_write(), returns its status
 */
static int
pmemspoil_write(struct pmemspoil *psp, void *buff, size_t nbytes, uint64_t off)
{
return pool_set_file_write(psp->pfile, buff, nbytes, off);
}
/*
 * pmemspoil_parse_field -- parse one field component off the front of str
 *
 * A component is terminated by '.', '=' or a "()" function suffix and may
 * carry an array index: <name>, <name>(<index>), or <name>().  On success
 * fieldp->name points into 'str' (modified in place) and the return value
 * points at the next component (or at the value after '=').  Returns NULL
 * when no separator is found, i.e. nothing is left to parse.
 */
static char *
pmemspoil_parse_field(char *str, struct field *fieldp)
{
	fieldp->is_func = 0;
	if (!str)
		return NULL;
	char *f = strchr(str, '.');
	if (!f)
		f = strchr(str, '=');
	if (!f) {
		f = strchr(str, '(');
		if (f && f[1] == ')')
			fieldp->is_func = 1;
	}
	fieldp->index = 0;
	fieldp->name = NULL;
	if (f) {
		*f = '\0';
		char *secstr = malloc(strlen(str) + 1);
		uint32_t secind;
		/* search for pattern: <field_name>(<index>) */
		if (secstr == NULL)
			err(1, NULL);
		/*
		 * Bug fixes: the index is a uint32_t, so scan it with
		 * SCNu32 -- the old "%d" (int *) was undefined behavior;
		 * and compare the sscanf() result directly -- the old
		 * code assigned the comparison due to misplaced parens.
		 */
		if (sscanf(str, "%[^(](%" SCNu32 ")", secstr, &secind) == 2) {
			size_t len = strlen(secstr);
			str[len] = '\0';	/* cut off "(<index>)" */
			fieldp->index = secind;
		}
		fieldp->name = str;
		free(secstr);
		if (fieldp->is_func)
			return f + 2;	/* skip over "()" */
		return f + 1;	/* skip over '.' or '=' */
	}
	return NULL;
}
/*
 * pmemspoil_free_fields -- release a parsed field list
 *
 * Frees every node on the list and the private copy of the argument
 * string held in fieldp->str.
 */
static void
pmemspoil_free_fields(struct pmemspoil_list *fieldp)
{
	struct field *it = fieldp->head;
	while (it) {
		struct field *tmp = it;
		it = it->next;
		free(tmp);
	}
	free(fieldp->str);
}
/*
 * pmemspoil_insert_field -- append a field node at the tail of the list
 */
static void
pmemspoil_insert_field(struct pmemspoil_list *listp, struct field *fieldp)
{
	fieldp->next = NULL;
	fieldp->prev = listp->tail;	/* NULL when the list is empty */
	if (listp->tail == NULL)
		listp->head = fieldp;
	else
		listp->tail->next = fieldp;
	listp->tail = fieldp;
}
/*
 * pmemspoil_parse_fields -- split "<f1>.<f2>(<i>)=<value>" into a list
 *
 * Keeps a private copy of the argument in listp->str; the field nodes'
 * name pointers reference the original (mutated) 'str' buffer.
 * Returns nonzero when parsing yields no fields or no value.
 */
static int
pmemspoil_parse_fields(char *str, struct pmemspoil_list *listp)
{
struct field f;
char *nstr = NULL;
listp->str = strdup(str);
if (!listp->str)
return -1;
while ((nstr = pmemspoil_parse_field(str, &f)) != NULL) {
struct field *fp = malloc(sizeof(struct field));
if (!fp) {
pmemspoil_free_fields(listp);
err(1, NULL);
}
/* move the stack-parsed node into its heap slot */
memcpy(fp, &f, sizeof(*fp));
pmemspoil_insert_field(listp, fp);
str = nstr;
}
/* whatever remains after the last separator is the value */
listp->value = str;
listp->cur = listp->head;
return (listp->cur == NULL || listp->value == NULL);
}
/*
 * pmemspoil_parse_args -- parse command line args
 *
 * Fills psp with the options (-v, -r <replica>) and splits every
 * remaining "<field>=<value>" argument into psp->args.  Exits the
 * process directly on usage errors or after printing help.
 */
static int
pmemspoil_parse_args(struct pmemspoil *psp, char *appname,
		int argc, char *argv[])
{
	int opt;
	int t;
	while ((opt = getopt_long(argc, argv, "v?r:",
			long_options, NULL)) != -1) {
		switch (opt) {
		case 'v':
			psp->verbose = 2;
			break;
		case '?':
			pmemspoil_help(appname);
			exit(EXIT_SUCCESS);
		case 'r':
			t = atoi(optarg);
			if (t < 0) {
				print_usage(appname);
				exit(EXIT_FAILURE);
			}
			psp->replica = (unsigned)t;
			break;
		default:
			print_usage(appname);
			exit(EXIT_FAILURE);
		}
	}
	if (optind < argc) {
		int ind = optind;
		psp->fname = argv[ind];
		ind++;
		assert(argc >= ind);
		psp->argc = (unsigned)(argc - ind);
		psp->args = calloc(psp->argc, sizeof(struct pmemspoil_list));
		if (!psp->args)
			err(1, NULL);
		unsigned i;
		for (i = 0; i < psp->argc; i++) {
			char *str = argv[ind];
			if (pmemspoil_parse_fields(str, &psp->args[i])) {
				/* bug fix: message used to read "ivalid" */
				outv_err("invalid argument");
				exit(EXIT_FAILURE);
			}
			ind += 1;
		}
	} else {
		print_usage(appname);
		exit(EXIT_FAILURE);
	}
	return 0;
}
/*
 * pmemspoil_get_arena_offset -- get offset to arena of given id
 *
 * Walks the BTT info chain starting at start_offset, following the
 * nextoff links until arena number 'id' is reached.  Returns 0 on a
 * broken chain or a read error.
 */
static uint64_t
pmemspoil_get_arena_offset(struct pmemspoil *psp, uint32_t id,
	uint64_t start_offset)
{
	struct btt_info *infop = calloc(1, sizeof(struct btt_info));
	if (!infop)
		err(1, NULL);
	infop->nextoff = start_offset;
	uint64_t offset = 0;
	for (uint32_t n = id + 1; n > 0; n--) {
		if (infop->nextoff == 0) {
			offset = 0;	/* chain ended too early */
			break;
		}
		offset += infop->nextoff;
		if (pmemspoil_read(psp, infop, sizeof(*infop), offset)) {
			offset = 0;	/* read failure */
			break;
		}
		btt_info_convert2h(infop);
	}
	free(infop);
	return offset;
}
/*
 * pmemspoil_check_field -- return 1 when the current field's name
 * equals fname, 0 otherwise (also 0 when no field remains)
 */
static int
pmemspoil_check_field(struct pmemspoil_list *pfp, const char *fname)
{
	return pfp->cur != NULL && strcmp(pfp->cur->name, fname) == 0;
}
/*
 * pmemspoil_next_field -- advance the field cursor to the next field
 * (caller must ensure pfp->cur is non-NULL)
 */
static void
pmemspoil_next_field(struct pmemspoil_list *pfp)
{
pfp->cur = pfp->cur->next;
}
/*
 * pmemspoil_process_char -- process value as string
 *
 * Copies the value into a fixed-width field without NUL-termination --
 * intentional for on-media character arrays such as signatures; the
 * copy is truncated to the field size.  'psp' and 'le' are unused.
 */
static int
pmemspoil_process_char(struct pmemspoil *psp, struct pmemspoil_list *pfp,
char *str, size_t len, int le)
{
len = min(len, strlen(pfp->value));
memcpy(str, pfp->value, len);
pmemspoil_persist(str, len);
return 0;
}
/*
 * pmemspoil_process_uint8_t -- process value as uint8
 *
 * Accepts "0x<hex>" or decimal input; 'le' is irrelevant for a single
 * byte.  Returns -1 when the value cannot be parsed.
 */
static int
pmemspoil_process_uint8_t(struct pmemspoil *psp, struct pmemspoil_list *pfp,
uint8_t *valp, size_t size, int le)
{
uint8_t v;
if (sscanf(pfp->value, "0x%" SCNx8, &v) != 1 &&
sscanf(pfp->value, "%" SCNu8, &v) != 1)
return -1;
*valp = v;
pmemspoil_persist(valp, sizeof(*valp));
return 0;
}
/*
 * pmemspoil_process_uint16_t -- process value as uint16
 *
 * Accepts "0x<hex>" or decimal input; stores little-endian when 'le'
 * is set (on-media byte order), host order otherwise.
 */
static int
pmemspoil_process_uint16_t(struct pmemspoil *psp, struct pmemspoil_list *pfp,
uint16_t *valp, size_t size, int le)
{
uint16_t v;
if (sscanf(pfp->value, "0x%" SCNx16, &v) != 1 &&
sscanf(pfp->value, "%" SCNu16, &v) != 1)
return -1;
if (le)
*valp = htole16(v);
else
*valp = v;
pmemspoil_persist(valp, sizeof(*valp));
return 0;
}
/*
 * pmemspoil_process_uint32_t -- process value as uint32
 *
 * Accepts "0x<hex>" or decimal input; stores little-endian when 'le'
 * is set (on-media byte order), host order otherwise.
 */
static int
pmemspoil_process_uint32_t(struct pmemspoil *psp, struct pmemspoil_list *pfp,
uint32_t *valp, size_t size, int le)
{
uint32_t v;
if (sscanf(pfp->value, "0x%" SCNx32, &v) != 1 &&
sscanf(pfp->value, "%" SCNu32, &v) != 1)
return -1;
if (le)
*valp = htole32(v);
else
*valp = v;
pmemspoil_persist(valp, sizeof(*valp));
return 0;
}
/*
 * pmemspoil_process_uint64_t -- process value as uint64
 *
 * Accepts "0x<hex>" or decimal input; stores little-endian when 'le'
 * is set (on-media byte order), host order otherwise.
 */
static int
pmemspoil_process_uint64_t(struct pmemspoil *psp, struct pmemspoil_list *pfp,
uint64_t *valp, size_t size, int le)
{
uint64_t v;
if (sscanf(pfp->value, "0x%" SCNx64, &v) != 1 &&
sscanf(pfp->value, "%" SCNu64, &v) != 1)
return -1;
if (le)
*valp = htole64(v);
else
*valp = v;
pmemspoil_persist(valp, sizeof(*valp));
return 0;
}
/*
 * pmemspoil_process_chunk_type_t -- process chunk type
 *
 * Parses a chunk-type name into a bitmask and requires exactly one
 * type to be named (popcount == 1); stores the corresponding enum.
 * NOTE(review): unlike the integer processors, this does not call
 * pmemspoil_persist() on the stored value -- confirm intentional.
 */
static int
pmemspoil_process_chunk_type_t(struct pmemspoil *psp,
struct pmemspoil_list *pfp,
enum chunk_type *valp, size_t size, int le)
{
uint64_t types = 0;
if (util_parse_chunk_types(pfp->value, &types))
return -1;
if (util_popcount64(types) != 1)
return -1;
/* ignore 'le' */
*valp = (enum chunk_type)util_lssb_index64(types);
return 0;
}
/*
 * pmemspoil_process_checksum_gen -- generate checksum
 *
 * Recomputes the checksum over args.ptr/args.len (skipping skip_off)
 * and stores it at args.checksum (insert flag = 1).
 */
static int
pmemspoil_process_checksum_gen(struct pmemspoil *psp,
struct pmemspoil_list *pfp, struct checksum_args args)
{
util_checksum(args.ptr, args.len, (uint64_t *)args.checksum,
1, args.skip_off);
return 0;
}
/*
 * pmemspoil_process_shutdown_state -- process shutdown_state fields
 *
 * 'arg' points at an in-memory shutdown_state (inside the pool header
 * copy); modifications are written back by the caller.
 */
static int
pmemspoil_process_shutdown_state(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
struct shutdown_state *sds = arg;
PROCESS_BEGIN(psp, pfp) {
/* checksum covers the whole struct; nothing is skipped */
struct checksum_args checksum_args = {
.ptr = sds,
.len = sizeof(*sds),
.checksum = &sds->checksum,
.skip_off = 0,
};
PROCESS_FIELD_LE(sds, usc, uint64_t);
PROCESS_FIELD_LE(sds, uuid, uint64_t);
PROCESS_FIELD_LE(sds, dirty, uint64_t);
PROCESS_FIELD(sds, reserved, char);
PROCESS_FIELD_LE(sds, checksum, uint64_t);
PROCESS_FUNC("checksum_gen", checksum_gen, checksum_args);
} PROCESS_END;
return PROCESS_RET;
}
/*
 * pmemspoil_process_features -- process features fields
 *
 * 'arg' points at the features_t embedded in the caller's pool header
 * copy; the caller is responsible for writing the copy back.
 */
static int
pmemspoil_process_features(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
features_t *features = arg;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD_LE(features, compat, uint32_t);
PROCESS_FIELD_LE(features, incompat, uint32_t);
PROCESS_FIELD_LE(features, ro_compat, uint32_t);
} PROCESS_END;
return PROCESS_RET;
}
/*
 * pmemspoil_process_pool_hdr -- process pool_hdr fields
 *
 * Reads the header at pool offset 0 into a local copy, lets the
 * PROCESS machinery modify it, then writes the copy back whenever a
 * field, a function, or a nested structure was touched.
 */
static int
pmemspoil_process_pool_hdr(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
struct pool_hdr pool_hdr;
if (pmemspoil_read(psp, &pool_hdr, sizeof(pool_hdr), 0))
return -1;
PROCESS_BEGIN(psp, pfp) {
/* checksum skips the SDS area at POOL_HDR_CSUM_END_OFF */
struct checksum_args checksum_args = {
.ptr = &pool_hdr,
.len = sizeof(pool_hdr),
.checksum = &pool_hdr.checksum,
.skip_off = POOL_HDR_CSUM_END_OFF(&pool_hdr),
};
PROCESS_FIELD(&pool_hdr, signature, char);
PROCESS_FIELD(&pool_hdr, poolset_uuid, char);
PROCESS_FIELD(&pool_hdr, uuid, char);
PROCESS_FIELD(&pool_hdr, prev_part_uuid, char);
PROCESS_FIELD(&pool_hdr, next_part_uuid, char);
PROCESS_FIELD(&pool_hdr, prev_repl_uuid, char);
PROCESS_FIELD(&pool_hdr, next_repl_uuid, char);
PROCESS_FIELD(&pool_hdr, unused, char);
PROCESS_FIELD(&pool_hdr, unused2, char);
PROCESS_FIELD_LE(&pool_hdr, major, uint32_t);
PROCESS(features, &pool_hdr.features, 1, features_t *);
PROCESS_FIELD_LE(&pool_hdr, crtime, uint64_t);
PROCESS_FIELD(&pool_hdr, arch_flags, char); /* XXX */
PROCESS(shutdown_state, &pool_hdr.sds, 1,
struct shutdown_state *);
PROCESS_FIELD_LE(&pool_hdr, checksum, uint64_t);
PROCESS_FUNC("checksum_gen", checksum_gen, checksum_args);
} PROCESS_END
/* PROCESS_STATE_FOUND covers nested features/sds modifications */
if (PROCESS_STATE == PROCESS_STATE_FIELD ||
PROCESS_STATE == PROCESS_STATE_FUNC ||
PROCESS_STATE == PROCESS_STATE_FOUND) {
if (pmemspoil_write(psp, &pool_hdr, sizeof(pool_hdr), 0))
return -1;
}
return PROCESS_RET;
}
/*
 * pmemspoil_process_btt_info_struct -- process btt_info at given offset
 *
 * Operates on the raw on-media (little-endian) representation: the
 * struct is read, single fields are patched LE, and the whole struct
 * is written back only when a plain field was modified.
 */
static int
pmemspoil_process_btt_info_struct(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint64_t offset)
{
struct btt_info btt_info;
if (pmemspoil_read(psp, &btt_info, sizeof(btt_info), offset))
return -1;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD(&btt_info, sig, char);
PROCESS_FIELD(&btt_info, uuid, char);
PROCESS_FIELD(&btt_info, parent_uuid, char);
PROCESS_FIELD_LE(&btt_info, flags, uint32_t);
PROCESS_FIELD_LE(&btt_info, major, uint16_t);
PROCESS_FIELD_LE(&btt_info, minor, uint16_t);
PROCESS_FIELD_LE(&btt_info, external_lbasize, uint32_t);
PROCESS_FIELD_LE(&btt_info, external_nlba, uint32_t);
PROCESS_FIELD_LE(&btt_info, internal_lbasize, uint32_t);
PROCESS_FIELD_LE(&btt_info, internal_nlba, uint32_t);
PROCESS_FIELD_LE(&btt_info, nfree, uint32_t);
PROCESS_FIELD_LE(&btt_info, infosize, uint32_t);
PROCESS_FIELD_LE(&btt_info, nextoff, uint64_t);
PROCESS_FIELD_LE(&btt_info, dataoff, uint64_t);
PROCESS_FIELD_LE(&btt_info, mapoff, uint64_t);
PROCESS_FIELD_LE(&btt_info, flogoff, uint64_t);
PROCESS_FIELD_LE(&btt_info, infooff, uint64_t);
PROCESS_FIELD(&btt_info, unused, char);
PROCESS_FIELD_LE(&btt_info, checksum, uint64_t);
} PROCESS_END
if (PROCESS_STATE == PROCESS_STATE_FIELD) {
if (pmemspoil_write(psp, &btt_info, sizeof(btt_info), offset))
return -1;
}
return PROCESS_RET;
}
/*
 * pmemspoil_process_btt_info_backup -- process btt_info backup fields
 *
 * Locates the backup info block through the primary info's (little-
 * endian) infooff and processes it like a regular btt_info.
 */
static int
pmemspoil_process_btt_info_backup(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint32_t index)
{
struct btt_info btt_info_backup;
if (pmemspoil_read(psp, &btt_info_backup, sizeof(btt_info_backup),
psp->arena_offset))
return -1;
uint64_t backup_offset = psp->arena_offset +
le64toh(btt_info_backup.infooff);
return pmemspoil_process_btt_info_struct(psp, pfp, backup_offset);
}
/*
 * pmemspoil_process_btt_info -- process the primary btt_info of the
 * arena currently selected via psp->arena_offset ('index' is unused)
 */
static int
pmemspoil_process_btt_info(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint32_t index)
{
return pmemspoil_process_btt_info_struct(psp, pfp, psp->arena_offset);
}
/*
 * pmemspoil_process_btt_map -- process btt map fields
 *
 * Reads the whole (BTT_ALIGNMENT-rounded) map, overwrites the entry at
 * 'index' with the value parsed directly from the argument (hex or
 * decimal) and writes the map back.  Map entries are plain uint32, so
 * no PROCESS machinery is involved here.
 */
static int
pmemspoil_process_btt_map(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint32_t index)
{
struct btt_info btt_info;
if (pmemspoil_read(psp, &btt_info, sizeof(btt_info),
psp->arena_offset))
return -1;
btt_info_convert2h(&btt_info);
uint64_t mapoff = psp->arena_offset + btt_info.mapoff;
uint64_t mapsize = roundup(btt_info.external_nlba * BTT_MAP_ENTRY_SIZE,
BTT_ALIGNMENT);
uint32_t *mapp = malloc(mapsize);
if (!mapp)
err(1, NULL);
int ret = 0;
if (pmemspoil_read(psp, mapp, mapsize, mapoff)) {
ret = -1;
} else {
uint32_t v;
if (sscanf(pfp->value, "0x%x", &v) != 1 &&
sscanf(pfp->value, "%u", &v) != 1) {
ret = -1;
} else {
mapp[index] = v;
if (pmemspoil_write(psp, mapp, mapsize, mapoff))
ret = -1;
}
}
free(mapp);
return ret;
}
/*
 * pmemspoil_process_btt_nflog -- process one entry of a btt flog pair
 *
 * Reads the whole flog area of the arena at 'arena_offset', selects
 * the pair at 'index' and within it the first (off == 0) or second
 * (off == 1) btt_flog entry, applies the field edit and writes the
 * area back when a field was changed.
 */
static int
pmemspoil_process_btt_nflog(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint64_t arena_offset, int off,
uint32_t index)
{
struct btt_info btt_info;
if (pmemspoil_read(psp, &btt_info, sizeof(btt_info), arena_offset))
return -1;
btt_info_convert2h(&btt_info);
uint64_t flogoff = arena_offset + btt_info.flogoff;
uint64_t flogsize = btt_info.nfree *
roundup(2 * sizeof(struct btt_flog), BTT_FLOG_PAIR_ALIGN);
flogsize = roundup(flogsize, BTT_ALIGNMENT);
uint8_t *flogp = malloc(flogsize);
if (!flogp)
err(1, NULL);
int ret = 0;
if (pmemspoil_read(psp, flogp, flogsize, flogoff)) {
ret = -1;
goto error;
}
/* each pair occupies one BTT_FLOG_PAIR_ALIGN-sized slot */
struct btt_flog *flog_entryp = (struct btt_flog *)(flogp +
index * BTT_FLOG_PAIR_ALIGN);
if (off)
flog_entryp++;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD_LE(flog_entryp, lba, uint32_t);
PROCESS_FIELD_LE(flog_entryp, old_map, uint32_t);
PROCESS_FIELD_LE(flog_entryp, new_map, uint32_t);
PROCESS_FIELD_LE(flog_entryp, seq, uint32_t);
} PROCESS_END
if (PROCESS_STATE == PROCESS_STATE_FIELD) {
if (pmemspoil_write(psp, flogp, flogsize, flogoff)) {
ret = -1;
goto error;
}
}
ret = PROCESS_RET;
error:
free(flogp);
return ret;
}
/*
 * pmemspoil_process_btt_flog -- process first btt flog entry of pair
 * number 'index' in the currently selected arena
 */
static int
pmemspoil_process_btt_flog(struct pmemspoil *psp, struct pmemspoil_list *pfp,
uint32_t index)
{
return pmemspoil_process_btt_nflog(psp, pfp,
psp->arena_offset, 0, index);
}
/*
 * pmemspoil_process_btt_flog_prime -- process second btt flog entry of
 * pair number 'index' in the currently selected arena
 */
static int
pmemspoil_process_btt_flog_prime(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint32_t index)
{
return pmemspoil_process_btt_nflog(psp, pfp,
psp->arena_offset, 1, index);
}
/*
 * pmemspoil_process_arena -- process arena fields
 *
 * 'arena_offset' of 0 indicates the arena lookup failed upstream.
 * Stores the offset in psp->arena_offset so the nested btt_* handlers
 * know which arena to operate on.
 */
static int
pmemspoil_process_arena(struct pmemspoil *psp,
struct pmemspoil_list *pfp, uint64_t arena_offset)
{
if (!arena_offset)
return -1;
struct btt_info btt_info;
if (pmemspoil_read(psp, &btt_info, sizeof(btt_info), arena_offset))
return -1;
btt_info_convert2h(&btt_info);
psp->arena_offset = arena_offset;
PROCESS_BEGIN(psp, pfp) {
PROCESS(btt_info, PROCESS_INDEX, 1, uint32_t);
PROCESS(btt_info_backup, PROCESS_INDEX, 1, uint32_t);
PROCESS(btt_map, PROCESS_INDEX, btt_info.external_nlba,
uint32_t);
PROCESS(btt_flog, PROCESS_INDEX, btt_info.nfree, uint32_t);
PROCESS(btt_flog_prime, PROCESS_INDEX, btt_info.nfree,
uint32_t);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_pmemblk -- process pmemblk fields
 *
 * Reads the pmemblk descriptor from pool offset 0; arena lookups start
 * past the two BTT_ALIGNMENT-sized header blocks.  The descriptor copy
 * is written back only when a plain field was modified.
 */
static int
pmemspoil_process_pmemblk(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
struct pmemblk pmemblk;
if (pmemspoil_read(psp, &pmemblk, sizeof(pmemblk), 0))
return -1;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD_LE(&pmemblk, bsize, uint32_t);
PROCESS(arena,
pmemspoil_get_arena_offset(psp, PROCESS_INDEX,
2 * BTT_ALIGNMENT),
UINT32_MAX, uint64_t);
} PROCESS_END
if (PROCESS_STATE == PROCESS_STATE_FIELD) {
if (pmemspoil_write(psp, &pmemblk, sizeof(pmemblk), 0))
return -1;
}
return PROCESS_RET;
}
/*
 * pmemspoil_process_bttdevice -- process btt device fields
 *
 * Like pmemblk but for a raw BTT device: the first arena starts after
 * a single BTT_ALIGNMENT-sized header block.
 */
static int
pmemspoil_process_bttdevice(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
PROCESS_BEGIN(psp, pfp) {
PROCESS(arena,
pmemspoil_get_arena_offset(psp, PROCESS_INDEX,
BTT_ALIGNMENT),
UINT32_MAX, uint64_t);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_pmemlog -- process pmemlog fields
 *
 * Reads the pmemlog descriptor from pool offset 0 and writes the copy
 * back only when one of its offset fields was modified.
 */
static int
pmemspoil_process_pmemlog(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
struct pmemlog pmemlog;
if (pmemspoil_read(psp, &pmemlog, sizeof(pmemlog), 0))
return -1;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD_LE(&pmemlog, start_offset, uint64_t);
PROCESS_FIELD_LE(&pmemlog, end_offset, uint64_t);
PROCESS_FIELD_LE(&pmemlog, write_offset, uint64_t);
} PROCESS_END
if (PROCESS_STATE == PROCESS_STATE_FIELD) {
if (pmemspoil_write(psp, &pmemlog, sizeof(pmemlog), 0))
return -1;
}
return PROCESS_RET;
}
/*
 * pmemspoil_process_run -- process pmemobj chunk as run
 *
 * Valid only for chunks of type CHUNK_TYPE_RUN; edits go directly to
 * the memory-mapped pool (no read/write-back cycle).
 */
static int
pmemspoil_process_run(struct pmemspoil *psp, struct pmemspoil_list *pfp,
struct chunk_pair cpair)
{
struct chunk_header *chdr = cpair.hdr;
struct chunk_run *run = (struct chunk_run *)cpair.chunk;
if (chdr->type != CHUNK_TYPE_RUN) {
outv_err("%s -- specified chunk is not run", pfp->str);
return -1;
}
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD(run, hdr.block_size, uint64_t);
PROCESS_FIELD_ARRAY(run, content, uint8_t, RUN_CONTENT_SIZE);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_chunk -- process pmemobj chunk structures
 *
 * Edits the memory-mapped chunk header in place; "run" descends into
 * the chunk interpreted as a run.
 */
static int
pmemspoil_process_chunk(struct pmemspoil *psp, struct pmemspoil_list *pfp,
struct chunk_pair cpair)
{
struct chunk_header *chdr = cpair.hdr;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD(chdr, type, chunk_type_t);
PROCESS_FIELD(chdr, flags, uint16_t);
PROCESS_FIELD(chdr, size_idx, uint32_t);
PROCESS(run, cpair, 1, struct chunk_pair);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_zone -- process pmemobj zone structures
 *
 * Edits the memory-mapped zone header in place; "chunk(<i>)" selects a
 * chunk-header/chunk pair within the zone (bounded by size_idx).
 */
static int
pmemspoil_process_zone(struct pmemspoil *psp, struct pmemspoil_list *pfp,
struct zone *zone)
{
struct zone_header *zhdr = &zone->header;
PROCESS_BEGIN(psp, pfp) {
struct chunk_pair cpair = {
.hdr = &zone->chunk_headers[PROCESS_INDEX],
.chunk = &zone->chunks[PROCESS_INDEX],
};
PROCESS_FIELD(zhdr, magic, uint32_t);
PROCESS_FIELD(zhdr, size_idx, uint32_t);
PROCESS_FIELD(zhdr, reserved, char);
PROCESS(chunk, cpair, zhdr->size_idx, struct chunk_pair);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_heap -- process pmemobj heap structures
 *
 * Edits the memory-mapped heap header in place; "zone(<i>)" descends
 * into one of the zones (bounded by the pool size).
 */
static int
pmemspoil_process_heap(struct pmemspoil *psp, struct pmemspoil_list *pfp,
struct heap_layout *hlayout)
{
struct heap_header *hdr = &hlayout->header;
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD(hdr, signature, char);
PROCESS_FIELD(hdr, major, uint64_t);
PROCESS_FIELD(hdr, minor, uint64_t);
PROCESS_FIELD(hdr, unused, uint64_t);
PROCESS_FIELD(hdr, chunksize, uint64_t);
PROCESS_FIELD(hdr, chunks_per_zone, uint64_t);
PROCESS_FIELD(hdr, reserved, char);
PROCESS_FIELD(hdr, checksum, uint64_t);
PROCESS(zone, ZID_TO_ZONE(hlayout, PROCESS_INDEX),
util_heap_max_zone(psp->size), struct zone *);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_lane -- process pmemobj lanes
 *
 * Allows byte-level spoiling of the undo, internal-redo and
 * external-redo log areas of a single memory-mapped lane.
 */
static int
pmemspoil_process_lane(struct pmemspoil *psp, struct pmemspoil_list *pfp,
struct lane_layout *lane)
{
PROCESS_BEGIN(psp, pfp) {
PROCESS_FIELD_ARRAY(lane, undo.data,
uint8_t, LANE_UNDO_SIZE);
PROCESS_FIELD_ARRAY(lane, internal.data,
uint8_t, LANE_REDO_INTERNAL_SIZE);
PROCESS_FIELD_ARRAY(lane, external.data,
uint8_t, LANE_REDO_EXTERNAL_SIZE);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process_pmemobj -- process pmemobj data structures
 *
 * Works directly on the memory-mapped pool (psp->addr); heap and lane
 * areas are located via the offsets stored in the pool descriptor.
 */
static int
pmemspoil_process_pmemobj(struct pmemspoil *psp,
struct pmemspoil_list *pfp, void *arg)
{
struct pmemobjpool *pop = psp->addr;
struct heap_layout *hlayout = (void *)((char *)pop + pop->heap_offset);
struct lane_layout *lanes = (void *)((char *)pop + pop->lanes_offset);
PROCESS_BEGIN(psp, pfp) {
/* descriptor checksum covers only the persistent part */
struct checksum_args checksum_args = {
.ptr = pop,
.len = OBJ_DSC_P_SIZE,
.checksum = &pop->checksum,
.skip_off = 0,
};
PROCESS_FIELD(pop, layout, char);
PROCESS_FIELD(pop, lanes_offset, uint64_t);
PROCESS_FIELD(pop, nlanes, uint64_t);
PROCESS_FIELD(pop, heap_offset, uint64_t);
PROCESS_FIELD(pop, unused3, uint64_t);
PROCESS_FIELD(pop, unused, char);
PROCESS_FIELD(pop, checksum, uint64_t);
PROCESS_FIELD(pop, run_id, uint64_t);
PROCESS_FUNC("checksum_gen", checksum_gen, checksum_args);
PROCESS(heap, hlayout, 1, struct heap_layout *);
PROCESS(lane, &lanes[PROCESS_INDEX], pop->nlanes,
struct lane_layout *);
} PROCESS_END
return PROCESS_RET;
}
/*
 * pmemspoil_process -- top-level dispatch for one parsed argument;
 * the first field selects the pool flavor to spoil
 */
static int
pmemspoil_process(struct pmemspoil *psp,
struct pmemspoil_list *pfp)
{
PROCESS_BEGIN(psp, pfp) {
PROCESS(pool_hdr, NULL, 1, void *);
PROCESS(pmemlog, NULL, 1, void *);
PROCESS(pmemblk, NULL, 1, void *);
PROCESS(pmemobj, NULL, 1, void *);
PROCESS(bttdevice, NULL, 1, void *);
} PROCESS_END
return PROCESS_RET;
}
/*
 * main -- parse arguments, open/map the pool set file and apply each
 * <field>=<value> spoil operation in turn
 */
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
outv_err("Error during arguments conversion\n");
return 1;
}
}
#endif
char *appname = basename(argv[0]);
util_init();
int ret = 0;
struct pmemspoil *psp = malloc(sizeof(struct pmemspoil));
if (!psp)
err(1, NULL);
/* initialize command line arguments and context to default values */
memcpy(psp, &pmemspoil_default, sizeof(*psp));
/* parse command line arguments */
ret = pmemspoil_parse_args(psp, appname, argc, argv);
if (ret)
goto error;
/* set verbose level */
out_set_vlevel(psp->verbose);
if (psp->fname == NULL) {
print_usage(appname);
exit(EXIT_FAILURE);
}
psp->pfile = pool_set_file_open(psp->fname, 0, 1);
if (!psp->pfile)
err(1, "%s", psp->fname);
/*
 * NOTE(review): returning 1 here skips the cleanup below (psp and
 * pfile leak); also the "goto error" path above reaches
 * pool_set_file_close() with a NULL pfile -- confirm both are
 * acceptable (process exit reclaims the resources anyway).
 */
if (pool_set_file_set_replica(psp->pfile, psp->replica)) {
outv_err("invalid replica argument max is %u\n",
psp->pfile->poolset ?
psp->pfile->poolset->nreplicas :
0);
return 1;
}
psp->addr = pool_set_file_map(psp->pfile, 0);
psp->size = psp->pfile->size;
out_set_prefix(psp->fname);
for (unsigned i = 0; i < psp->argc; i++) {
ret = pmemspoil_process(psp, &psp->args[i]);
if (ret)
goto error;
}
error:
if (psp->args) {
for (unsigned i = 0; i < psp->argc; i++)
pmemspoil_free_fields(&psp->args[i]);
free(psp->args);
}
pool_set_file_close(psp->pfile);
free(psp);
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 31,231 | 22.18634 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemwrite/write.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* write.c -- simple app for writing data to pool used by pmempool tests
*/
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <libgen.h>
#include <string.h>
#include <inttypes.h>
#include <err.h>
#include "common.h"
#include "output.h"
#include <libpmemlog.h>
#include <libpmemblk.h>
#include "mmap.h"
#include "queue.h"
/*
 * pmemwrite -- context and arguments
 */
struct pmemwrite
{
char *fname;	/* file name */
int nargs;	/* number of arguments */
char **args;	/* list of arguments */
};
/* global context; filled in main() from the command line */
static struct pmemwrite pmemwrite = {
.fname = NULL,
.nargs = 0,
.args = NULL,
};
/*
 * print_usage -- print short description of usage
 */
static void
print_usage(char *appname)
{
	printf("Usage: %s <file> <args>...\n", appname);
	printf("Valid arguments:\n");
	printf("<blockno>:w:<string> - write <string> to <blockno> block\n");
	printf("<blockno>:z - set zero flag on <blockno> block\n");
	/*
	 * Bug fix: this line used to repeat ":z"; the error flag is set
	 * with ":e" (see the 'e' case in pmemwrite_blk()).
	 */
	printf("<blockno>:e - set error flag on <blockno> block\n");
}
/*
 * pmemwrite_log -- append each argument as a record to a pmemlog pool
 *
 * Returns 0 on success, -1 when the pool cannot be opened or an append
 * fails; remaining arguments are not processed after a failure.
 */
static int
pmemwrite_log(struct pmemwrite *pwp)
{
	PMEMlogpool *plp = pmemlog_open(pwp->fname);
	if (plp == NULL) {
		warn("%s", pwp->fname);
		return -1;
	}
	int ret = 0;
	for (int i = 0; i < pwp->nargs; i++) {
		if (pmemlog_append(plp, pwp->args[i],
				strlen(pwp->args[i]))) {
			warn("%s", pwp->fname);
			ret = -1;
			break;
		}
	}
	pmemlog_close(plp);
	return ret;
}
/*
 * pmemwrite_blk -- write data to pmemblk pool file
 *
 * Each argument is either "<blockno>:w:<string>" (write the string,
 * zero-padded, to the block) or "<blockno>:<flag>" where the flag is
 * 'z' (set zero flag) or 'e' (set error flag).
 * Returns 0 on success, -1 on the first failure.
 */
static int
pmemwrite_blk(struct pmemwrite *pwp)
{
PMEMblkpool *pbp = pmemblk_open(pwp->fname, 0);
if (!pbp) {
warn("%s", pwp->fname);
return -1;
}
int i;
int ret = 0;
size_t blksize = pmemblk_bsize(pbp);
char *blk = malloc(blksize);
if (!blk) {
ret = -1;
outv_err("malloc(%lu) failed\n", blksize);
goto nomem;
}
for (i = 0; i < pwp->nargs; i++) {
int64_t blockno;
char *buff;
/* scratch buffer large enough for any %[^:] match */
size_t buffsize = strlen(pwp->args[i]) + 1;
buff = malloc(buffsize);
if (buff == NULL) {
ret = -1;
outv_err("malloc(%lu) failed\n", buffsize);
goto end;
}
char flag;
/* <blockno>:w:<string> - write string to <blockno> */
if (sscanf(pwp->args[i], "%" SCNi64 ":w:%[^:]",
&blockno, buff) == 2) {
memset(blk, 0, blksize);
size_t bufflen = strlen(buff);
/*
 * NOTE(review): an empty string jumps to 'end' with
 * ret still 0, silently skipping any remaining
 * arguments -- confirm this is intended.
 */
if (bufflen == 0) {
free(buff);
goto end;
}
if (bufflen > blksize) {
outv_err("String is longer than block size. "
"Truncating.\n");
bufflen = blksize;
}
memcpy(blk, buff, bufflen);
ret = pmemblk_write(pbp, blk, blockno);
free(buff);
if (ret)
goto end;
/* <blockno>:<flag> - set <flag> flag on <blockno> */
} else if (sscanf(pwp->args[i], "%" SCNi64 ":%c",
&blockno, &flag) == 2) {
free(buff);
switch (flag) {
case 'z':
ret = pmemblk_set_zero(pbp, blockno);
break;
case 'e':
ret = pmemblk_set_error(pbp, blockno);
break;
default:
outv_err("Invalid flag '%c'\n", flag);
ret = -1;
goto end;
}
if (ret) {
warn("%s", pwp->fname);
goto end;
}
} else {
free(buff);
outv_err("Invalid argument '%s'\n", pwp->args[i]);
ret = -1;
goto end;
}
}
end:
free(blk);
nomem:
pmemblk_close(pbp);
return ret;
}
/*
 * main -- parse options, detect the pool type of <file> and dispatch
 * the remaining arguments to the blk or log writer
 */
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
outv_err("Error during arguments conversion\n");
return 1;
}
}
#endif
int opt;
int ret = 0;
util_init();
char *appname = basename(argv[0]);
while ((opt = getopt(argc, argv, "h")) != -1) {
switch (opt) {
case 'h':
print_usage(appname);
ret = 0;
goto end;
default:
print_usage(appname);
ret = 1;
goto end;
}
}
/* need at least <file> and one argument */
if (optind + 1 < argc) {
pmemwrite.fname = argv[optind];
optind++;
pmemwrite.nargs = argc - optind;
pmemwrite.args = &argv[optind];
} else {
print_usage(appname);
ret = 1;
goto end;
}
out_set_vlevel(1);
struct pmem_pool_params params;
/* parse pool type from file */
pmem_pool_parse_params(pmemwrite.fname, &params, 1);
switch (params.type) {
case PMEM_POOL_TYPE_BLK:
ret = pmemwrite_blk(&pmemwrite);
break;
case PMEM_POOL_TYPE_LOG:
ret = pmemwrite_log(&pmemwrite);
break;
default:
/* other pool types are not writable by this tool */
ret = 1;
}
end:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 6,084 | 21.620818 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/extents/extents.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* extents -- extents listing
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include "extent.h"
#define B2SEC(n) ((n) >> 9)	/* convert bytes to sectors */
/* operating modes selected on the command line */
enum modes {
MODE_PRINT_ALL_EXTENTS = 0,	/* default: list all physical extents */
MODE_PRINT_ONE_PHY_OF_LOG,	/* -l: map one logical offset to physical */
};
static const char *usage_str =
"usage: %s "
"[-h] "
"[-l <logical_offset>] "
"<file>\n";
/*
 * main -- print the physical extents of <file> (in 512-byte sectors),
 * or, with -l, the physical sector corresponding to a logical offset
 */
int
main(int argc, char *argv[])
{
	long unsigned offset = 0;
	unsigned extent = 0;
	char *error;
	int ret = -1;
	int opt;
	enum modes mode = MODE_PRINT_ALL_EXTENTS;
	while ((opt = getopt(argc, argv, "hl:")) != -1) {
		switch (opt) {
		case 'h':
			printf(usage_str, argv[0]);
			return 0;
		case 'l':
			mode = MODE_PRINT_ONE_PHY_OF_LOG;
			errno = 0;
			offset = strtoul(optarg, &error, 10 /* base */);
			if (errno || *error != '\0') {
				if (errno)
					perror("strtoul");
				if (*error != '\0') {
					fprintf(stderr,
						"error: invalid character(s) in the given logical offset: %s\n",
						error);
				}
				return -1;
			}
			break;
		default:
			fprintf(stderr, usage_str, argv[0]);
			return -1;
		}
	}
	if (optind + 1 < argc) {
		fprintf(stderr, "error: unknown option: %s\n",
			argv[optind + 1]);
		fprintf(stderr, usage_str, argv[0]);
		return -1;
	}
	if (optind >= argc) {
		fprintf(stderr, usage_str, argv[0]);
		return -1;
	}
	const char *file = argv[optind];
	/*
	 * Bug fix: use calloc so exts->extents is NULL on the early
	 * error paths -- previously a failing os_extents_count() led to
	 * "goto exit_free" freeing an uninitialized pointer (UB).
	 */
	struct extents *exts = calloc(1, sizeof(struct extents));
	if (exts == NULL)
		return -1;
	long count = os_extents_count(file, exts);
	if (count < 0)
		goto exit_free;
	if (count == 0) {
		ret = 0;
		goto exit_free;
	}
	exts->extents = malloc(exts->extents_count * sizeof(struct extent));
	if (exts->extents == NULL)
		goto exit_free;
	ret = os_extents_get(file, exts);
	if (ret)
		goto exit_free;
	switch (mode) {
	case MODE_PRINT_ALL_EXTENTS:
		for (unsigned e = 0; e < exts->extents_count; e++) {
			/* extents are in bytes, convert them to sectors */
			printf("%lu %lu\n",
				B2SEC(exts->extents[e].offset_physical),
				B2SEC(exts->extents[e].length));
		}
		break;
	case MODE_PRINT_ONE_PHY_OF_LOG:
		/* print the physical offset of the given logical one */
		for (unsigned e = 0; e < exts->extents_count; e++) {
			if (B2SEC(exts->extents[e].offset_logical) > offset)
				break;
			extent = e;
		}
		if (extent == exts->extents_count - 1) {
			long unsigned max_log;
			max_log = B2SEC(exts->extents[extent].offset_logical) +
				B2SEC(exts->extents[extent].length);
			if (offset > max_log) {
				fprintf(stderr,
					"error: maximum logical offset is %lu\n",
					max_log);
				ret = -1;
				goto exit_free;
			}
		}
		offset += B2SEC(exts->extents[extent].offset_physical) -
			B2SEC(exts->extents[extent].offset_logical);
		printf("%lu\n", offset);
		break;
	default:
		/* bug fix: used to "return -1" here, leaking exts */
		fprintf(stderr, usage_str, argv[0]);
		ret = -1;
		break;
	}
exit_free:
	free(exts->extents);	/* free(NULL) is a no-op */
	free(exts);
	return ret;
}
| 4,484 | 23.642857 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/fallocate_detect/fallocate_detect.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fallocate_detect -- checks fallocate support on filesystem
*/
#define _GNU_SOURCE
#include "file.h"
#include "os.h"
#ifdef __linux__
#include <errno.h>
#include <fcntl.h>
#include <linux/magic.h>
#include <sys/vfs.h>
#ifndef XFS_SUPER_MAGIC
#define XFS_SUPER_MAGIC 0x58465342
#endif
/*
* posix_fallocate on Linux is implemented using fallocate
* syscall. This syscall requires file system-specific code on
* the kernel side and not all file systems have this code.
* So when posix_fallocate gets 'not supported' error from
* fallocate it falls back to just writing zeroes.
* Detect it and return information to the caller.
*/
/*
 * check_fallocate -- probe whether fallocate() really allocates blocks
 * on the filesystem holding 'file'; returns 0 (supported), 1 (not
 * supported / CoW filesystem) or 2 (unexpected error)
 */
static int
check_fallocate(const char *file)
{
	int rc;

	int fd = os_open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
	if (fd < 0) {
		perror("os_open");
		return 2;
	}

	if (fallocate(fd, 0, 0, 4096) == 0) {
		/*
		 * Allocation succeeded, but on CoW filesystems fallocate
		 * reserves an amount of space without pinning blocks.
		 * Only ext2/3/4 and xfs are of interest (DAX-capable),
		 * so report "unsupported" everywhere else.
		 */
		struct statfs fs;

		rc = 0;
		if (fstatfs(fd, &fs) == 0 &&
		    fs.f_type != EXT4_SUPER_MAGIC && /* also ext2, ext3 */
		    fs.f_type != XFS_SUPER_MAGIC)
			rc = 1;
	} else if (errno == EOPNOTSUPP) {
		/* kernel has no fallocate support for this filesystem */
		rc = 1;
	} else {
		perror("fallocate");
		rc = 2;
	}

	os_close(fd);
	os_unlink(file);
	return rc;
}
#else
/* no support for fallocate in FreeBSD -- always report "not supported" */
static int
check_fallocate(const char *file)
{
	/* intentionally unused -- silences -Wunused-parameter */
	(void) file;

	return 1;
}
#endif
/*
 * main -- probe fallocate support using the file given as the only
 * argument; the exit code is check_fallocate()'s result
 */
int
main(int argc, char *argv[])
{
	/* exactly one argument is required: the probe-file path */
	if (argc != 2) {
		fprintf(stderr, "usage: %s filename\n", argv[0]);
		return 1;
	}

	const char *path = argv[1];
	return check_fallocate(path);
}
| 3,308 | 26.575 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/cmpmap/cmpmap.c
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cmpmap -- a tool for comparing files using mmap
*/
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "file.h"
#include "fcntl.h"
#include "mmap.h"
#include "os.h"
#define CMPMAP_ZERO (1<<0)	/* -z: compare file1 against NUL bytes */

/* pointer-plus-offset arithmetic on a void * */
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))

/* arguments */
static char *File1 = NULL; /* file1 name */
static char *File2 = NULL; /* file2 name (optional; NULL with -z) */
static size_t Length = 0; /* number of bytes to read */
static os_off_t Offset = 0; /* offset from beginning of file */
static int Opts = 0; /* options flag */
/*
 * print_usage -- print short description of usage to stdout
 */
static void
print_usage(void)
{
	/* one entry per output line; puts() appends the newline */
	static const char * const usage_lines[] = {
		"Usage: cmpmap [options] file1 [file2]",
		"Valid options:",
		"-l, --length=N - compare up to N bytes",
		"-o, --offset=N - skip N bytes at start of the files",
		"-z, --zero - compare bytes of the file1 to NUL",
		"-h, --help - print this usage info",
	};

	for (size_t i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]);
			i++)
		puts(usage_lines[i]);
}
/*
 * long_options -- command line options (getopt_long() table; the short
 * names mirror the "l:o:zh" optstring used in parse_args())
 */
static const struct option long_options[] = {
	{"length", required_argument, NULL, 'l'},
	{"offset", required_argument, NULL, 'o'},
	{"zero", no_argument, NULL, 'z'},
	{"help", no_argument, NULL, 'h'},
	{NULL, 0, NULL, 0 },	/* terminator */
};
/*
 * parse_args -- (internal) parse command line arguments into the
 * File1/File2/Length/Offset/Opts globals; returns 0 on success,
 * -1 on a malformed numeric argument, exits on -h or unknown option
 */
static int
parse_args(int argc, char *argv[])
{
	int c;

	while ((c = getopt_long(argc, argv, "l:o:zh",
			long_options, NULL)) != -1) {
		char *end;

		switch (c) {
		case 'z':
			Opts |= CMPMAP_ZERO;
			break;

		case 'l': {
			errno = 0;
			long long nbytes = strtoll(optarg, &end, 0);
			if ((end && *end != '\0') || errno || nbytes < 0) {
				fprintf(stderr, "'%s' -- invalid length",
					optarg);
				return -1;
			}
			Length = (size_t)nbytes;
			break;
		}

		case 'o': {
			errno = 0;
			long skip = strtol(optarg, &end, 0);
			if ((end && *end != '\0') || errno || skip < 0) {
				fprintf(stderr, "'%s' -- invalid offset",
					optarg);
				return -1;
			}
			Offset = skip;
			break;
		}

		case 'h':
			print_usage();
			exit(EXIT_SUCCESS);

		default:
			print_usage();
			exit(EXIT_FAILURE);
		}
	}

	/* at least one positional argument (file1) is mandatory */
	if (optind >= argc) {
		print_usage();
		exit(EXIT_FAILURE);
	}

	File1 = argv[optind];
	if (optind + 1 < argc)
		File2 = argv[optind + 1];

	return 0;
}
/*
 * validate_args -- (internal) sanity-check the parsed globals;
 * returns 0 when they form a valid request, -1 otherwise
 */
static int
validate_args(void)
{
	if (File1 == NULL) {
		fprintf(stderr, "no file provided");
		return -1;
	}

	/* with a single file, an explicit length is required (-z mode) */
	if (File2 == NULL && Length == 0) {
		fprintf(stderr, "length of the file has to be provided");
		return -1;
	}

	return 0;
}
/*
 * do_cmpmap -- (internal) mmap both inputs and memcmp() them
 *
 * Compares Length bytes starting at Offset, either between File1 and
 * File2, or (with -z) between File1 and an anonymous zero-filled
 * mapping.  Never returns: terminates the process via exit() with
 * EXIT_SUCCESS when the ranges match, EXIT_FAILURE otherwise.
 */
static int
do_cmpmap(void)
{
	int ret = EXIT_SUCCESS;
	int fd1;
	int fd2;
	size_t size1;
	size_t size2;
	/* open the first file */
	if ((fd1 = os_open(File1, O_RDONLY)) < 0) {
		fprintf(stderr, "opening %s failed, errno %d\n", File1, errno);
		exit(EXIT_FAILURE);
	}
	ssize_t size_tmp = util_file_get_size(File1);
	if (size_tmp < 0) {
		fprintf(stderr, "getting size of %s failed, errno %d\n", File1,
			errno);
		ret = EXIT_FAILURE;
		goto out_close1;
	}
	size1 = (size_t)size_tmp;
	int flag = MAP_SHARED;
	if (Opts & CMPMAP_ZERO) {
		/* when checking if bytes are zeroed */
		/* no second file: an anonymous mapping provides the zeros */
		fd2 = -1;
		size2 = (size_t)Offset + Length;
		flag |= MAP_ANONYMOUS;
	} else if (File2 != NULL) {
		/* when comparing two files */
		/* open the second file */
		if ((fd2 = os_open(File2, O_RDONLY)) < 0) {
			fprintf(stderr, "opening %s failed, errno %d\n",
				File2, errno);
			ret = EXIT_FAILURE;
			goto out_close1;
		}
		size_tmp = util_file_get_size(File2);
		if (size_tmp < 0) {
			fprintf(stderr, "getting size of %s failed, errno %d\n",
				File2, errno);
			ret = EXIT_FAILURE;
			goto out_close2;
		}
		size2 = (size_t)size_tmp;
		/* basic check */
		/* clamp the requested range, or fail if the sizes differ */
		size_t min_size = (size1 < size2) ? size1 : size2;
		if ((size_t)Offset + Length > min_size) {
			if (size1 != size2) {
				fprintf(stdout, "%s %s differ in size: %zu"
					" %zu\n", File1, File2, size1, size2);
				ret = EXIT_FAILURE;
				goto out_close2;
			} else {
				Length = min_size - (size_t)Offset;
			}
		}
	} else {
		/* validate_args() guarantees one of the two modes above */
		assert(0);
	}
	/* initialize utils */
	util_init();
	/* map the first file */
	if ((addr1 = util_map(fd1, size1, MAP_SHARED,
			1, 0, NULL)) == MAP_FAILED) {
		fprintf(stderr, "mmap failed, file %s, length %zu, offset 0,"
			" errno %d\n", File1, size1, errno);
		ret = EXIT_FAILURE;
		goto out_close2;
	}
	/* map the second file, or do anonymous mapping to get zeroed bytes */
	void *addr2;
	if ((addr2 = util_map(fd2, size2, flag, 1, 0, NULL)) == MAP_FAILED) {
		fprintf(stderr, "mmap failed, file %s, length %zu, errno %d\n",
			File2 ? File2 : "(anonymous)", size2, errno);
		ret = EXIT_FAILURE;
		goto out_unmap1;
	}
	/* compare bytes of memory */
	if ((ret = memcmp(ADDR_SUM(addr1, Offset), ADDR_SUM(addr2, Offset),
			Length))) {
		if (Opts & CMPMAP_ZERO)
			fprintf(stderr, "%s is not zeroed\n", File1);
		else
			fprintf(stderr, "%s %s differ\n", File1, File2);
		ret = EXIT_FAILURE;
	}
	munmap(addr2, size2);
out_unmap1:
	munmap(addr1, size1);
out_close2:
	if (File2 != NULL)
		(void) os_close(fd2);
out_close1:
	(void) os_close(fd1);
	/* never returns -- the process exit status carries the result */
	exit(ret);
}
/*
 * main -- parse and validate arguments, then run the comparison;
 * do_cmpmap() terminates the process with the compare result
 */
int
main(int argc, char *argv[])
{
	/* both helpers return nonzero (or exit) on invalid input */
	if (parse_args(argc, argv) || validate_args())
		exit(EXIT_FAILURE);

	do_cmpmap();
}
| 7,071 | 23.989399 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/fip/fip.c
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fip.c -- simple application which helps detecting libfabric providers
*
* usage: fip <addr> [<provider>]
*
* If no <provider> argument is specified returns 0 if any supported provider
* from libfabric is available. Otherwise returns 1;
*
* If <provider> argument is specified returns 0 if <provider> is supported
* by libfabric. Otherwise returns 1;
*
* On error returns -1.
*/
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
/*
 * main -- parse the target address, probe libfabric providers on the
 * remote node and report availability
 *
 * Exit codes: 0 -- provider(s) available, 1 -- not available,
 * -1 -- usage/parse/probe error.
 */
int
main(int argc, char *argv[])
{
	struct rpmem_fip_probe probe;
	int ret;
	if (argc > 3 || argc < 2) {
		fprintf(stderr, "usage: %s <addr> [<provider>]\n", argv[0]);
		return -1;
	}
	char *addr = argv[1];
	char *prov_str = NULL;
	if (argc == 3)
		prov_str = argv[2];
	struct rpmem_target_info *info;
	info = rpmem_target_parse(addr);
	if (!info) {
		fprintf(stderr, "error: cannot parse address -- '%s'", addr);
		return -1;
	}
	ret = rpmem_fip_probe_get(info->node, &probe);
	if (ret) {
		fprintf(stderr, "error: probing on '%s' failed\n", info->node);
		/* BUGFIX: 'info' was leaked on this early-return path */
		ret = -1;
		goto out;
	}
	if (!prov_str) {
		/* no provider given -- succeed if any provider is usable */
		if (!rpmem_fip_probe_any(probe)) {
			printf("no providers found\n");
			ret = 1;
			goto out;
		}
		ret = 0;
		goto out;
	}
	enum rpmem_provider prov = rpmem_provider_from_str(prov_str);
	if (prov == RPMEM_PROV_UNKNOWN) {
		fprintf(stderr, "error: unsupported provider '%s'\n",
			prov_str);
		ret = -1;
		goto out;
	}
	if (!rpmem_fip_probe(probe, prov)) {
		printf("'%s' provider not available at '%s'\n",
			prov_str, info->node);
		ret = 1;
		goto out;
	}
	/* requested provider is available */
	ret = 0;
out:
	rpmem_target_free(info);
	return ret;
}
| 3,240 | 27.182609 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/sparsefile/sparsefile.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sparsefile.c -- a simple utility to create sparse files on Windows
*
* usage: sparsefile [options] filename len
* where options can be:
* -v - verbose output
* -s - do not create file if sparse files are not supported
* -f - overwrite file if already exists
*/
#include <windows.h>
#include <stdio.h>
#define MAXPRINT 8192
static int Opt_verbose;
static int Opt_sparse;
static int Opt_force;
/*
 * out_err_vargs -- print error message to stderr, appending the
 * human-readable description of GetLastError() when one is set;
 * clears the last-error code afterwards
 */
static void
out_err_vargs(const wchar_t *fmt, va_list ap)
{
	wchar_t errmsg[MAXPRINT];
	DWORD lasterr = GetLastError();

	vfwprintf(stderr, fmt, ap);

	if (lasterr) {
		/*
		 * BUGFIX: the FormatMessageW() result was stored in an
		 * unused variable and never checked, so on failure an
		 * uninitialized buffer was printed.
		 */
		DWORD nchars = FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM,
			NULL, lasterr,
			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
			errmsg, MAXPRINT, NULL);
		if (nchars)
			fwprintf(stderr, L": %s", errmsg);
		else
			fwprintf(stderr, L": error code %lu\n", lasterr);
	} else {
		fwprintf(stderr, L"\n");
	}

	SetLastError(0);
}
/*
 * out_err -- variadic front end for out_err_vargs()
 */
static void
out_err(const wchar_t *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	out_err_vargs(fmt, args);
	va_end(args);
}
/*
 * print_file_size -- prints file size and its size on disk
 *
 * Values below 64 KiB are shown in bytes, bigger ones in KiB.
 */
static void
print_file_size(const wchar_t *filename)
{
	LARGE_INTEGER filesize;
	FILE_COMPRESSION_INFO fci;

	HANDLE fh = CreateFileW(filename, GENERIC_READ,
		FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
		OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
	if (fh == INVALID_HANDLE_VALUE) {
		out_err(L"CreateFile");
		return;
	}

	BOOL ret = GetFileSizeEx(fh, &filesize);
	if (ret == FALSE) {
		out_err(L"GetFileSizeEx");
		goto err;
	}

	/* CompressedFileSize is the actual on-disk allocation */
	ret = GetFileInformationByHandleEx(fh, FileCompressionInfo,
		&fci, sizeof(fci));
	if (ret == FALSE) {
		out_err(L"GetFileInformationByHandleEx");
		goto err;
	}

	if (filesize.QuadPart < 65536)
		fwprintf(stderr, L"\ntotal size: %lluB",
			filesize.QuadPart);
	else
		fwprintf(stderr, L"\ntotal size: %lluKB",
			filesize.QuadPart / 1024);

	/*
	 * BUGFIX: the small-size branch printed the raw byte count with
	 * a "KB" label (without dividing by 1024); label it "B" to match
	 * the total-size branch above.
	 */
	if (fci.CompressedFileSize.QuadPart < 65536)
		fwprintf(stderr, L", actual size on disk: %lluB\n",
			fci.CompressedFileSize.QuadPart);
	else
		fwprintf(stderr, L", actual size on disk: %lluKB\n",
			fci.CompressedFileSize.QuadPart / 1024);

err:
	CloseHandle(fh);
}
/*
 * create_sparse_file -- creates sparse file of given size
 *
 * Returns 0 on success; on failure returns -1 and deletes the file.
 * With Opt_sparse set, lack of sparse-file support is treated as
 * failure; otherwise a regular (dense) file is created.
 */
static int
create_sparse_file(const wchar_t *filename, size_t len)
{
	/* create zero-length file (overwrite existing only with -f) */
	DWORD create = Opt_force ? CREATE_ALWAYS : CREATE_NEW;
	HANDLE fh = CreateFileW(filename, GENERIC_READ | GENERIC_WRITE,
		FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
		create, FILE_ATTRIBUTE_NORMAL, NULL);
	if (fh == INVALID_HANDLE_VALUE) {
		out_err(L"CreateFile");
		return -1;
	}
	SetLastError(0);

	/* check if sparse files are supported */
	DWORD flags = 0;
	BOOL ret = GetVolumeInformationByHandleW(fh, NULL, 0, NULL, NULL,
		&flags, NULL, 0);
	if (ret == FALSE) {
		if (Opt_verbose || Opt_sparse)
			out_err(L"GetVolumeInformationByHandle");
	} else if ((flags & FILE_SUPPORTS_SPARSE_FILES) == 0) {
		if (Opt_verbose || Opt_sparse)
			out_err(L"Volume does not support sparse files.");
		if (Opt_sparse)
			goto err;
	}

	/* mark file as sparse */
	if (flags & FILE_SUPPORTS_SPARSE_FILES) {
		DWORD nbytes;
		ret = DeviceIoControl(fh, FSCTL_SET_SPARSE, NULL, 0, NULL,
			0, &nbytes, NULL);
		if (ret == FALSE) {
			if (Opt_verbose || Opt_sparse)
				out_err(L"DeviceIoControl");
			if (Opt_sparse)
				goto err;
		}
	}

	/* set file length */
	LARGE_INTEGER llen;
	llen.QuadPart = len;

	/*
	 * BUGFIX: SetFilePointerEx() returns a BOOL, not a file offset,
	 * so comparing the result against INVALID_SET_FILE_POINTER could
	 * never detect a failure (FALSE == 0).  Test the BOOL directly.
	 */
	if (!SetFilePointerEx(fh, llen, NULL, FILE_BEGIN)) {
		out_err(L"SetFilePointerEx");
		goto err;
	}

	ret = SetEndOfFile(fh);
	if (ret == FALSE) {
		out_err(L"SetEndOfFile");
		goto err;
	}

	CloseHandle(fh);
	return 0;

err:
	CloseHandle(fh);
	DeleteFileW(filename);
	return -1;
}
/*
 * wmain -- parse options, then create the sparse file
 *
 * Exit codes: 0 -- ok, 1 -- usage, 2 -- bad option, 3 -- bad length,
 * 4 -- file creation failed.
 */
int
wmain(int argc, const wchar_t *argv[])
{
	if (argc < 2) {
		fwprintf(stderr, L"Usage: %s filename len\n", argv[0]);
		exit(1);
	}

	int i = 1;
	while (i < argc && argv[i][0] == '-') {
		switch (argv[i][1]) {
		case 'v':
			Opt_verbose = 1;
			break;
		case 's':
			Opt_sparse = 1;
			break;
		case 'f':
			Opt_force = 1;
			break;
		default:
			out_err(L"Unknown option: \'%c\'.", argv[i][1]);
			exit(2);
		}
		++i;
	}

	/*
	 * BUGFIX: both <filename> and <len> must remain after the options;
	 * the original only checked argc < 2 up front, so e.g.
	 * "sparsefile -v file" read past the end of the argument list.
	 */
	if (i + 1 >= argc) {
		fwprintf(stderr, L"Usage: %s filename len\n", argv[0]);
		exit(1);
	}

	const wchar_t *filename = argv[i];
	long long len = _wtoll(argv[i + 1]);
	if (len < 0) {
		out_err(L"Invalid file length: %lld.\n", len);
		exit(3);
	}

	if (create_sparse_file(filename, len) < 0) {
		out_err(L"File creation failed.");
		exit(4);
	}

	if (Opt_verbose)
		print_file_size(filename);

	return 0;
}
| 6,110 | 23.542169 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/obj_verify/obj_verify.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_verify.c -- tool for creating and verifying a pmemobj pool
*/
#include <fcntl.h>
#include <inttypes.h>
#include <stddef.h>
#include <sys/stat.h>

#include "libpmemobj.h"
#include "set.h"
#define SIGNATURE_LEN 10
#define NUMBER_LEN 10
#define FILL_SIZE 245 /* so that size of one record is 1024 bytes */
/* the checksum field itself is excluded from checksum computation */
#define SKIP_OFFSET offsetof(struct data_s, checksum)
static const char *Signature = "OBJ_VERIFY";
POBJ_LAYOUT_BEGIN(obj_verify);
POBJ_LAYOUT_ROOT(obj_verify, struct root_s);
/*
 * NOTE(review): data_s is registered with POBJ_LAYOUT_ROOT as well --
 * presumably POBJ_LAYOUT_TOID was intended; confirm against libpmemobj
 * layout-declaration documentation before changing.
 */
POBJ_LAYOUT_ROOT(obj_verify, struct data_s);
POBJ_LAYOUT_END(obj_verify);
/* a single 1024-byte record; checksummed over all fields but 'checksum' */
struct data_s {
	char signature[SIGNATURE_LEN];	/* always "OBJ_VERIFY" */
	char number_str[NUMBER_LEN];	/* 'number' rendered as 9 digits */
	uint64_t number;		/* sequence number of the record */
	uint32_t fill[FILL_SIZE];	/* random payload */
	uint64_t checksum;		/* util_checksum() of the record */
};
/* root object: number of records allocated so far */
struct root_s {
	uint64_t count;
};
/*
 * record_constructor -- constructor of a list element
 *
 * Fills the freshly allocated record with its signature, sequence
 * number, random payload and checksum, persists it, then bumps and
 * persists the shared counter passed via 'arg'.  Always returns 0.
 */
static int
record_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct data_s *rec = (struct data_s *)ptr;
	uint64_t *count = arg;

	memcpy(rec->signature, Signature, sizeof(rec->signature));

	/*
	 * BUGFIX: "%lu" is the wrong conversion for uint64_t on LLP64
	 * targets (long is 32-bit there); use the PRIu64 macro instead.
	 */
	snprintf(rec->number_str, NUMBER_LEN, "%09" PRIu64, *count);

	rec->number = *count;

	for (int i = 0; i < FILL_SIZE; i++)
		rec->fill[i] = (uint32_t)rand();

	util_checksum(rec, sizeof(*rec), &rec->checksum,
		1 /* insert */, SKIP_OFFSET);

	pmemobj_persist(pop, rec, sizeof(*rec));

	(*count)++;
	pmemobj_persist(pop, count, sizeof(*count));

	return 0;
}
/*
 * do_create -- (internal) create a pool to be verified
 *
 * Creates the pool (or opens it, if it already exists) and keeps
 * allocating checksummed records from a custom allocation class until
 * the pool is full.  Exits the process on any error.
 */
static void
do_create(const char *path, const char *layout)
{
	struct pobj_alloc_class_desc class;
	PMEMobjpool *pop;
	PMEMoid oid;
	uint64_t count;
	/* seed for the random record payload */
	srand((unsigned int)time(NULL));
	if ((pop = pmemobj_create(path, layout, 0,
		S_IWUSR | S_IRUSR)) == NULL) {
		if (errno != EEXIST) {
			out("!%s: pmemobj_create: %s",
				path, pmemobj_errormsg());
			exit(-1);
		}
		/* pool already exists -- open it and append more records */
		if ((pop = pmemobj_open(path, layout)) == NULL) {
			out("!%s: pmemobj_open: %s",
				path, pmemobj_errormsg());
			exit(-1);
		}
	}
	TOID(struct root_s) root = POBJ_ROOT(pop, struct root_s);
	/* one record per allocation unit, no per-object header */
	class.header_type = POBJ_HEADER_NONE;
	class.unit_size = sizeof(struct data_s);
	class.alignment = 0;
	class.units_per_block = 1000;
	/*
	 * NOTE(review): on success the ctl presumably fills in
	 * class.class_id (used below) -- confirm against the libpmemobj
	 * "heap.alloc_class.new.desc" documentation.
	 */
	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &class) != 0) {
		pmemobj_close(pop);
		out("!pmemobj_ctl_set: %s", path);
		exit(-1);
	}
	out("create(%s): allocating records in the pool ...", path);
	count = D_RO(root)->count;
	/* allocate until the pool is exhausted (xalloc returns nonzero) */
	while (pmemobj_xalloc(pop, &oid, class.unit_size, 0,
		POBJ_CLASS_ID(class.class_id),
		record_constructor, &D_RW(root)->count) == 0)
	;
	/* number of records added in this run */
	count = D_RO(root)->count - count;
	if (count) {
		out("create(%s): allocated %lu records (of size %zu)",
			path, count, sizeof(struct data_s));
	} else {
		out("create(%s): pool is full", path);
	}
	pmemobj_close(pop);
}
/*
 * do_verify -- (internal) verify a poolset
 *
 * Walks all objects in the pool, verifies each record's checksum and
 * checks that the total count matches the counter stored in the root
 * object.  Exits the process with -1 when the pool is corrupted.
 */
static void
do_verify(const char *path, const char *layout)
{
	PMEMobjpool *pop;
	PMEMoid oid;
	uint64_t count = 0;
	int error = 0;
	if ((pop = pmemobj_open(path, layout)) == NULL) {
		out("!%s: pmemobj_open: %s",
			path, pmemobj_errormsg());
		exit(-1);
	}
	TOID(struct root_s) root = POBJ_ROOT(pop, struct root_s);
	TOID(struct data_s) rec;
	POBJ_FOREACH(pop, oid) {
		TOID_ASSIGN(rec, oid);
		/* 0 == verify mode; the checksum field itself is skipped */
		if (!util_checksum(D_RW(rec), sizeof(*D_RW(rec)),
			&D_RW(rec)->checksum,
			0 /* verify */, SKIP_OFFSET)) {
			out("verify(%s): incorrect record: %s (#%lu)",
				path, D_RW(rec)->signature, count);
			error = 1;
			break;
		}
		count++;
	}
	/* the root counter must agree with the number of objects found */
	if (D_RO(root)->count != count) {
		out(
		"verify(%s): incorrect number of records (is: %lu, should be: %lu)",
			path, count, D_RO(root)->count);
		error = 1;
	}
	pmemobj_close(pop);
	if (error) {
		out("verify(%s): pool file contains error", path);
		exit(-1);
	}
	out(
	"verify(%s): pool file successfully verified (%lu records of size %zu)",
		path, count, sizeof(struct data_s));
}
/*
 * main -- run create ('c') and/or verify ('v') operations on the pool
 * given as the first argument; each trailing argument is one operation
 */
int
main(int argc, char *argv[])
{
	util_init();
	out_init("OBJ_VERIFY", "OBJ_VERIFY", "", 1, 0);

	if (argc < 4) {
		out("Usage: %s <obj_pool> <layout> <op:c|v>\n"
			"Options:\n"
			" c - create\n"
			" v - verify\n",
			argv[0]);
		exit(-1);
	}

	const char *path = argv[1];
	const char *layout = argv[2];

	/* each remaining argument must be a single-letter operation */
	for (int i = 3; i < argc; i++) {
		const char *op = argv[i];

		if (op[0] == 'c' && op[1] == '\0') {
			do_create(path, layout);
		} else if (op[0] == 'v' && op[1] == '\0') {
			do_verify(path, layout);
		} else {
			out("op must be c or v (c=create, v=verify)");
			exit(-1);
		}
	}

	out_fini();

	return 0;
}
| 6,251 | 23.232558 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/ctrld/signals_linux.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* signals_linux.h - Signal definitions for Linux
*/
#ifndef _SIGNALS_LINUX_H
#define _SIGNALS_LINUX_H 1
/* expands to a designated initializer: [SIGHUP] = "SIGHUP", ... */
#define SIGNAL_2_STR(sig) [sig] = #sig
/* maps a signal number to its symbolic name (index == signal value) */
static const char *signal2str[] = {
	SIGNAL_2_STR(SIGHUP), /* 1 */
	SIGNAL_2_STR(SIGINT), /* 2 */
	SIGNAL_2_STR(SIGQUIT), /* 3 */
	SIGNAL_2_STR(SIGILL), /* 4 */
	SIGNAL_2_STR(SIGTRAP), /* 5 */
	SIGNAL_2_STR(SIGABRT), /* 6 */
	SIGNAL_2_STR(SIGBUS), /* 7 */
	SIGNAL_2_STR(SIGFPE), /* 8 */
	SIGNAL_2_STR(SIGKILL), /* 9 */
	SIGNAL_2_STR(SIGUSR1), /* 10 */
	SIGNAL_2_STR(SIGSEGV), /* 11 */
	SIGNAL_2_STR(SIGUSR2), /* 12 */
	SIGNAL_2_STR(SIGPIPE), /* 13 */
	SIGNAL_2_STR(SIGALRM), /* 14 */
	SIGNAL_2_STR(SIGTERM), /* 15 */
	SIGNAL_2_STR(SIGSTKFLT), /* 16 */
	SIGNAL_2_STR(SIGCHLD), /* 17 */
	SIGNAL_2_STR(SIGCONT), /* 18 */
	SIGNAL_2_STR(SIGSTOP), /* 19 */
	SIGNAL_2_STR(SIGTSTP), /* 20 */
	SIGNAL_2_STR(SIGTTIN), /* 21 */
	SIGNAL_2_STR(SIGTTOU), /* 22 */
	SIGNAL_2_STR(SIGURG), /* 23 */
	SIGNAL_2_STR(SIGXCPU), /* 24 */
	SIGNAL_2_STR(SIGXFSZ), /* 25 */
	SIGNAL_2_STR(SIGVTALRM), /* 26 */
	SIGNAL_2_STR(SIGPROF), /* 27 */
	SIGNAL_2_STR(SIGWINCH), /* 28 */
	SIGNAL_2_STR(SIGPOLL), /* 29 */
	SIGNAL_2_STR(SIGPWR), /* 30 */
	SIGNAL_2_STR(SIGSYS) /* 31 */
};
/* highest signal number present in the table above */
#define SIGNALMAX SIGSYS
#endif
| 2,837 | 36.342105 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/ctrld/signals_freebsd.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* signals_fbsd.h - Signal definitions for FreeBSD
*/
#ifndef _SIGNALS_FBSD_H
#define _SIGNALS_FBSD_H 1
/* expands to a designated initializer: [SIGHUP] = "SIGHUP", ... */
#define SIGNAL_2_STR(sig) [sig] = #sig
/* maps a signal number to its symbolic name (FreeBSD numbering) */
static const char *signal2str[] = {
	SIGNAL_2_STR(SIGHUP), /* 1 */
	SIGNAL_2_STR(SIGINT), /* 2 */
	SIGNAL_2_STR(SIGQUIT), /* 3 */
	SIGNAL_2_STR(SIGILL), /* 4 */
	SIGNAL_2_STR(SIGTRAP), /* 5 */
	SIGNAL_2_STR(SIGABRT), /* 6 */
	SIGNAL_2_STR(SIGEMT), /* 7 */
	SIGNAL_2_STR(SIGFPE), /* 8 */
	SIGNAL_2_STR(SIGKILL), /* 9 */
	SIGNAL_2_STR(SIGBUS), /* 10 */
	SIGNAL_2_STR(SIGSEGV), /* 11 */
	SIGNAL_2_STR(SIGSYS), /* 12 */
	SIGNAL_2_STR(SIGPIPE), /* 13 */
	SIGNAL_2_STR(SIGALRM), /* 14 */
	SIGNAL_2_STR(SIGTERM), /* 15 */
	SIGNAL_2_STR(SIGURG), /* 16 */
	SIGNAL_2_STR(SIGSTOP), /* 17 */
	SIGNAL_2_STR(SIGTSTP), /* 18 */
	SIGNAL_2_STR(SIGCONT), /* 19 */
	SIGNAL_2_STR(SIGCHLD), /* 20 */
	SIGNAL_2_STR(SIGTTIN), /* 21 */
	SIGNAL_2_STR(SIGTTOU), /* 22 */
	SIGNAL_2_STR(SIGIO), /* 23 */
	SIGNAL_2_STR(SIGXCPU), /* 24 */
	SIGNAL_2_STR(SIGXFSZ), /* 25 */
	SIGNAL_2_STR(SIGVTALRM), /* 26 */
	SIGNAL_2_STR(SIGPROF), /* 27 */
	SIGNAL_2_STR(SIGWINCH), /* 28 */
	SIGNAL_2_STR(SIGINFO), /* 29 */
	SIGNAL_2_STR(SIGUSR1), /* 30 */
	SIGNAL_2_STR(SIGUSR2), /* 31 */
	SIGNAL_2_STR(SIGTHR), /* 32 */
	SIGNAL_2_STR(SIGLIBRT) /* 33 */
};
/* highest signal number present in the table above */
#define SIGNALMAX SIGLIBRT
#endif
| 2,901 | 35.734177 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/ctrld/ctrld.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctrld.c -- simple application which helps running tests on remote node.
*
* XXX - wait_port is not supported on FreeBSD because there are currently
* no test cases that require it.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <dirent.h>
#include <signal.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <stdarg.h>
#include "os.h"
#ifdef __FreeBSD__
#include "signals_freebsd.h"
#else
#include "signals_linux.h"
#endif
#define APP_NAME "ctrld"
#define BUFF_SIZE 4096
#define S_MINUTE (60) /* seconds in one minute */
#define S_HOUR (60 * 60) /* seconds in one hour */
#define S_DAY (60 * 60 * 24) /* seconds in one day */
static FILE *log_fh;

/*
 * log_err -- print a message prefixed with the source location to the log
 * file (or to stderr until main() has opened the per-command log file)
 *
 * A leading '!' in fmt appends ": <strerror(errno)>" to the message,
 * following the convention used throughout the test framework.
 */
static void
log_err(const char *file, int lineno, const char *fmt, ...)
{
	FILE *fh = log_fh ? log_fh : stderr;
	va_list ap;
	fprintf(fh, "[%s:%d] ", file, lineno);
	char *prefix = "";
	char *errstr = "";
	if (*fmt == '!') {
		fmt++;
		prefix = ": ";
		/* grab errno text before any further library call clobbers it */
		errstr = strerror(errno);
	}
	va_start(ap, fmt);
	vfprintf(fh, fmt, ap);
	va_end(ap);
	fprintf(fh, "%s%s\n", prefix, errstr);
	/* flush immediately -- the log is read while ctrld is still running */
	fflush(fh);
}
#define CTRLD_LOG(...) log_err(__FILE__, __LINE__, __VA_ARGS__)
/* one socket inode found in /proc/<pid>/fd */
struct inode_item {
	LIST_ENTRY(inode_item) next;
	unsigned long inode;
};

/* list of socket inodes owned by the watched process */
struct inodes {
	LIST_HEAD(inode_items, inode_item) head;
};
/*
 * usage -- print usage and exit with failure code
 */
static void
usage(void)
{
	CTRLD_LOG("usage: %s <pid file> <cmd> [<arg>]", APP_NAME);
	CTRLD_LOG("commands:");
	CTRLD_LOG(" exe <command> [<args...>] -- "
			"run specified command");
	CTRLD_LOG(" run <timeout> <command> [<args...>] -- "
			"run specified command with given timeout");
	CTRLD_LOG(" wait [<timeout>] -- "
			"wait for command");
/*
 * bug fix: the guard used to read "#ifndef __FreeBSD_" (single trailing
 * underscore), which is never defined, so the wait_port help line was
 * printed even on FreeBSD where the command is not available.
 */
#ifndef __FreeBSD__
	CTRLD_LOG(" wait_port <port> -- "
			"wait until a port is opened");
#endif
	CTRLD_LOG(" kill <signal> -- "
			"send a signal to command");
	exit(EXIT_FAILURE);
}
/*
 * alloc_argv -- build a NULL-terminated argument vector that aliases
 * the entries of argv starting at index 'off'
 *
 * Returns a malloc'ed array (caller frees) or NULL on bad offset /
 * allocation failure. The strings themselves are not copied.
 */
static char **
alloc_argv(unsigned argc, char *argv[], unsigned off)
{
	if (argc < off)
		return NULL;

	unsigned count = argc - off;
	char **out = malloc((count + 1) * sizeof *out);
	if (out == NULL)
		return NULL;

	/* copy the pointers, then terminate the vector */
	char **dst = out;
	for (char **src = argv + off; src < argv + argc; ++src)
		*dst++ = *src;
	*dst = NULL;

	return out;
}
/*
 * do_run_or_exe -- execute the 'run' or the 'exe' command
 *
 * if timeout is equal to 0 cmd will be just executed (the 'exe' command)
 * otherwise it will be run and wait with timeout (the 'run' command)
 */
static int
do_run_or_exe(const char *pid_file, char *cmd, char *argv[], unsigned timeout)
{
	int rv = -1;

	/* the PID file doubles as a lock file and as the result store */
	FILE *fh = os_fopen(pid_file, "w+");
	if (!fh) {
		CTRLD_LOG("!%s", pid_file);
		return -1;
	}
	int fd = fileno(fh);
	if (fd == -1) {
		CTRLD_LOG("!fileno");
		goto err;
	}
	/* held until ctrld exits -- do_wait() blocks on this lock */
	if (os_flock(fd, LOCK_EX | LOCK_NB)) {
		CTRLD_LOG("!flock");
		goto err;
	}
	if (timeout != 0) {
		/* 'run': detach so the invoking shell is not blocked */
		if (daemon(1, 0)) {
			CTRLD_LOG("!daemon");
			goto err;
		}
	}
	int child = fork();
	switch (child) {
	case -1:
		CTRLD_LOG("!fork");
		/* record "-1r<errno>" so readers see the failure */
		fprintf(fh, "-1r%d", errno);
		goto err;
	case 0:
		/* child: replace the image with the requested command */
		execvp(cmd, argv);
		/* reached only when execvp() itself failed */
		CTRLD_LOG("!execvp(%s)", cmd);
		goto err;
	default:
		break;
	}
	/* publish the PID so wait/kill/wait_port can find the process */
	if (fprintf(fh, "%d", child) < 0) {
		CTRLD_LOG("!fprintf");
		goto err;
	}
	if (fflush(fh)) {
		CTRLD_LOG("!fflush");
		goto err;
	}
	int child_timeout = -1;
	if (timeout != 0) {
		/* second child is a sleep(timeout) timer; first to exit wins */
		child_timeout = fork();
		switch (child_timeout) {
		case -1:
			CTRLD_LOG("!fork");
			fprintf(fh, "-1r%d", errno);
			goto err;
		case 0:
			fclose(fh);
			sleep(timeout);
			return 0;
		default:
			break;
		}
	}
	int ret = 0;
	int pid = wait(&ret);
	if (pid == child) {
		/* the command finished before the timer */
		if (timeout != 0) {
			/* kill the timeout child */
			kill(child_timeout, SIGTERM);
		}
		/* encode signal death as 128+signo, like the shell does */
		if (WIFSIGNALED(ret)) {
			ret = 128 + WTERMSIG(ret);
		} else {
			ret = WEXITSTATUS(ret);
		}
		/* rewrite the PID file as "<pid>r<exit-status>" */
		if (fseek(fh, 0, SEEK_SET)) {
			CTRLD_LOG("!fseek");
			goto err;
		}
		if (os_ftruncate(fileno(fh), 0)) {
			CTRLD_LOG("!ftruncate");
			goto err;
		}
		fprintf(fh, "%dr%d", child, ret);
	} else if (timeout != 0 && pid == child_timeout) {
		/* the timer fired first -- terminate the command */
		CTRLD_LOG("run: timeout");
		if (kill(child, SIGTERM) && errno != ESRCH) {
			CTRLD_LOG("!kill");
			goto err;
		}
		CTRLD_LOG("run: process '%s' killed (PID %i)", cmd, child);
	} else {
		CTRLD_LOG("!wait");
		goto err;
	}
	rv = 0;
err:
	fclose(fh);
	return rv;
}
/*
 * do_wait -- execute the 'wait' command
 *
 * Blocks (up to 'timeout' seconds, or forever when timeout == -1) until
 * the ctrld instance that ran the command releases the lock on the PID
 * file, then returns the recorded exit status. Returns 1 on internal
 * error and -1 when the wait timed out before a result was stored.
 */
static int
do_wait(char *pid_file, int timeout)
{
	int fd = os_open(pid_file, O_RDONLY);
	if (fd < 0) {
		perror(pid_file);
		return 1;
	}

	int ret;

	/* poll the advisory lock once per second until it is released */
	int t = 0;
	while ((timeout == -1 || t < timeout) &&
			os_flock(fd, LOCK_EX | LOCK_NB)) {
		sleep(1);
		t++;
	}

	FILE *fh = os_fdopen(fd, "r");
	if (!fh) {
		CTRLD_LOG("!fdopen");
		/* bug fix: fh is NULL here -- the old code jumped to a
		 * cleanup label that called fclose(NULL) */
		os_close(fd);
		return 1;
	}

	/* PID file format: "<pid>" while running, "<pid>r<status>" when done */
	pid_t pid;
	char r;
	int n = fscanf(fh, "%d%c%d", &pid, &r, &ret);
	if (n < 0) {
		CTRLD_LOG("!fscanf");
		ret = 1;
		goto err;
	}

	if (n == 2 || (n == 3 && r != 'r')) {
		CTRLD_LOG("invalid format of PID file");
		ret = 1;
		goto err;
	}

	if (n == 1) {
		if (timeout >= 0) {
			/* timed out before the return value was stored */
			ret = -1;
		} else {
			CTRLD_LOG("missing return value");
			ret = 1;
		}
	}
	/* n == 3 with r == 'r': ret already holds the exit status */
err:
	/*
	 * bug fix: fdopen() transferred ownership of fd to the stream, so
	 * fclose() alone releases it; the old os_close(fd) + fclose(fh)
	 * pair closed the same descriptor twice.
	 */
	fclose(fh);
	return ret;
}
/*
 * do_kill -- execute the 'kill' command
 *
 * Reads the PID from the PID file and sends it the given signal.
 * An empty or unreadable PID entry is treated as "nothing to kill".
 * Returns 0 on success, 1 on failure.
 */
static int
do_kill(char *pid_file, int signo)
{
	FILE *fh = os_fopen(pid_file, "r");
	if (!fh) {
		CTRLD_LOG("!%s", pid_file);
		return 1;
	}

	int ret = 0;
	pid_t pid;
	/*
	 * bug fix: the old code only skipped the kill() when fscanf()
	 * returned 0; on EOF (-1) it went on to use an uninitialized pid.
	 * Only proceed when a PID was actually parsed.
	 */
	if (fscanf(fh, "%d", &pid) == 1) {
		/* do not fail if such process already does not exist */
		if (kill(pid, signo) && errno != ESRCH) {
			CTRLD_LOG("!kill");
			ret = 1;
		}
	}

	fclose(fh);
	return ret;
}
#ifndef __FreeBSD__ /* XXX wait_port support */
/*
 * contains_inode -- return 1 when the given inode number is already on
 * the list, 0 otherwise
 */
static int
contains_inode(struct inodes *inodes, unsigned long inode)
{
	struct inode_item *it;

	LIST_FOREACH(it, &inodes->head, next)
		if (it->inode == inode)
			return 1;

	return 0;
}
/*
 * has_port_inode -- check if /proc/net/tcp has an entry with specified
 * port and inode
 *
 * Returns 1 when some socket on the inode list is bound to 'port',
 * 0 when none is, -1 on parse/IO error.
 */
static int
has_port_inode(unsigned short port, struct inodes *inodes)
{
	/* format of /proc/net/tcp entries */
	const char * const tcp_fmt =
		"%*d: "
		"%*64[0-9A-Fa-f]:%X "
		"%*64[0-9A-Fa-f]:%*X "
		"%*X %*X:%*X %*X:%*X "
		"%*X %*d %*d %lu %*s\n";

	char buff[BUFF_SIZE];

	FILE *fh = os_fopen("/proc/net/tcp", "r");
	if (!fh) {
		CTRLD_LOG("!%s", "/proc/net/tcp");
		return -1;
	}

	int ret;
	/* read heading */
	char *s = fgets(buff, 4096, fh);
	if (!s) {
		ret = -1;
		goto out;
	}

	while (1) {
		s = fgets(buff, 4096, fh);
		if (!s)
			break;

		/* read port number and inode number */
		unsigned p;
		unsigned long inode;
		if (sscanf(s, tcp_fmt, &p, &inode) != 2) {
			ret = -1;
			goto out;
		}

		/*
		 * if port matches and inode is on a list
		 * the process has this port opened
		 */
		if (p == port && contains_inode(inodes, inode)) {
			ret = 1;
			goto out;
		}
	}

	ret = 0;
out:
	fclose(fh);
	return ret;
}
/*
 * get_inodes -- collect the socket inodes opened by the given process
 *
 * Scans /proc/<pid>/fd, follows every symlink and records the inode
 * number of each "socket:[<inode>]" entry on the list.
 * Returns 0 on success, -1 on failure.
 */
static int
get_inodes(pid_t pid, struct inodes *inodes)
{
	char path[PATH_MAX];
	char link[PATH_MAX];
	int ret;

	/* set a path to opened files of specified process */
	if ((ret = snprintf(path, PATH_MAX, "/proc/%d/fd", pid)) < 0) {
		CTRLD_LOG("snprintf: %d", ret);
		return -1;
	}

	/* open dir with all opened files */
	DIR *d = opendir(path);
	if (!d) {
		/*
		 * bug fix: the old code jumped to the cleanup label here,
		 * calling closedir(NULL) -- undefined behavior.
		 */
		CTRLD_LOG("!%s", path);
		return -1;
	}

	/* read all directory entries */
	struct dirent *dent;
	while ((dent = readdir(d)) != NULL) {
		/* create a full path to file */
		if ((ret = snprintf(path, PATH_MAX,
			"/proc/%d/fd/%s", pid, dent->d_name)) < 0) {
			CTRLD_LOG("snprintf: %d", ret);
			ret = -1;
			goto out_dir;
		}

		/* read symbolic link; readlink does not NUL-terminate */
		ssize_t sret = readlink(path, link, PATH_MAX - 1);
		if (sret <= 0)
			continue;
		link[sret] = '\0';

		/* check if this is a socket, read inode number if so */
		unsigned long inode;
		if (sscanf(link, "socket:[%lu]", &inode) != 1)
			continue;

		/* add inode to a list */
		struct inode_item *inode_item = malloc(sizeof(*inode_item));
		if (!inode_item) {
			CTRLD_LOG("!malloc inode item");
			exit(1);
		}

		inode_item->inode = inode;
		LIST_INSERT_HEAD(&inodes->head, inode_item, next);
	}
	ret = 0;
out_dir:
	closedir(d);
	return ret;
}
/*
 * clear_inodes -- free every element of the inode list
 */
static void
clear_inodes(struct inodes *inodes)
{
	struct inode_item *it;

	while ((it = LIST_FIRST(&inodes->head)) != NULL) {
		LIST_REMOVE(it, next);
		free(it);
	}
}
/*
 * has_port -- check if process has the specified tcp port opened
 *
 * Returns 1 when the port is open, 0 when it is not, -1 on failure.
 */
static int
has_port(pid_t pid, unsigned short port)
{
	struct inodes inodes;
	memset(&inodes, 0, sizeof(inodes));

	/* gather the process's socket inodes ... */
	int ret = get_inodes(pid, &inodes);
	if (ret < 0)
		return -1;

	/* ... and look for one of them bound to 'port' in /proc/net/tcp */
	if (!LIST_EMPTY(&inodes.head)) {
		ret = has_port_inode(port, &inodes);
		clear_inodes(&inodes);
	}

	return ret;
}
/*
 * do_wait_port -- wait until process opens a specified tcp port
 *
 * Spins on /proc until the PID recorded in the PID file has the port
 * open. Returns 0 on success, 1 when the PID file cannot be opened and
 * -1 on any other failure (keeping the original return convention).
 */
static int
do_wait_port(char *pid_file, unsigned short port)
{
	FILE *fh = os_fopen(pid_file, "r");
	if (!fh) {
		CTRLD_LOG("!%s", pid_file);
		return 1;
	}

	int ret;

	/* PID file format: "<pid>" while running, "<pid>r<status>" when done */
	pid_t pid;
	char r;
	int n = fscanf(fh, "%d%c%d", &pid, &r, &ret);
	if (n < 0) {
		CTRLD_LOG("!fscanf");
		goto err;
	}

	if (n == 2 || (n == 3 && r != 'r')) {
		CTRLD_LOG("invalid format of PID file");
		goto err;
	}

	if (n == 3) {
		/* the process already stored its exit status */
		CTRLD_LOG("process already terminated");
		goto err;
	}

	/* busy-poll until the port shows up among the process's sockets */
	int hp;
	do {
		hp = has_port(pid, port);
		if (hp < 0)
			goto err;
	} while (!hp);

	/* bug fix: the success path used to return without closing fh */
	fclose(fh);
	return 0;
err:
	fclose(fh);
	return -1;
}
#endif /* __FreeBSD__ wait_port support */
/*
 * convert_signal_name -- convert a signal name to a signal number
 *
 * Looks the name up in the platform signal2str table; returns -1 when
 * the name is unknown.
 */
static int
convert_signal_name(const char *signal_name)
{
	for (int sig = SIGHUP; sig <= SIGNALMAX; sig++)
		if (strcmp(signal_name, signal2str[sig]) == 0)
			return sig;
	return -1;
}
/*
 * log_run -- print run command with arguments
 *
 * Joins all of argv into one space-separated line (truncated to
 * BUFF_SIZE) and logs it together with the PID file name.
 * Note: 'cmd' is unused -- argv[0] already carries the command name.
 */
static void
log_run(const char *pid_file, char *cmd, char *argv[])
{
	char buff[BUFF_SIZE];
	buff[0] = '\0';

	size_t cnt = 0;
	for (size_t i = 0; argv[i] != NULL; i++) {
		int ret = snprintf(&buff[cnt], BUFF_SIZE - cnt,
				" %s", argv[i]);
		if (ret < 0) {
			CTRLD_LOG("snprintf: %d", ret);
			exit(EXIT_FAILURE);
		}
		/*
		 * bug fix: on truncation snprintf() returns the would-be
		 * length, so blindly advancing cnt could push it past
		 * BUFF_SIZE; the unsigned "BUFF_SIZE - cnt" size argument
		 * would then wrap around and the next iteration would
		 * write out of bounds. Stop at the truncation point.
		 */
		if ((size_t)ret >= BUFF_SIZE - cnt) {
			cnt = BUFF_SIZE - 1;
			break;
		}
		cnt += (size_t)ret;
	}

	CTRLD_LOG("run %s%s", pid_file, buff);
}
/*
 * convert_timeout -- convert a floating point number with an optional
 * suffix to an unsigned number of seconds: 's' for seconds (the
 * default), 'm' for minutes, 'h' for hours or 'd' for days.
 */
static unsigned
convert_timeout(char *str)
{
	char *suffix;
	float seconds = strtof(str, &suffix);

	/* scale by the unit the suffix selects; anything else means seconds */
	if (*suffix == 'm')
		seconds *= S_MINUTE;
	else if (*suffix == 'h')
		seconds *= S_HOUR;
	else if (*suffix == 'd')
		seconds *= S_DAY;

	return (unsigned)seconds;
}
/*
 * main -- parse "<pid file> <cmd> [args...]" and dispatch to the
 * matching do_* handler; all diagnostics go to a per-command log file
 * named "<pid file>.<cmd>.ctrld.log".
 */
int
main(int argc, char *argv[])
{
	if (argc < 3)
		usage();

	int ret = 0;
	char *pid_file = argv[1];
	char *cmd = argv[2];
	char buff[BUFF_SIZE];
	/* open the log file before anything else so CTRLD_LOG lands there */
	if (snprintf(buff, BUFF_SIZE, "%s.%s.%s.log",
			pid_file, cmd, APP_NAME) < 0) {
		perror("snprintf");
		return -1;
	}
	log_fh = os_fopen(buff, "a");
	if (!log_fh) {
		perror(buff);
		return -1;
	}

	if (strcmp(cmd, "exe") == 0) {
		/* exe <command> [args...] -- run without a timeout */
		if (argc < 4)
			usage();
		char *command = argv[3];
		char **nargv = alloc_argv((unsigned)argc, argv, 3);
		if (!nargv) {
			CTRLD_LOG("!get_argv");
			return 1;
		}

		log_run(pid_file, command, nargv);
		ret = do_run_or_exe(pid_file, command, nargv, 0 /* timeout */);

		free(nargv);
	} else if (strcmp(cmd, "run") == 0) {
		/* run <timeout> <command> [args...] */
		if (argc < 5)
			usage();
		unsigned timeout = convert_timeout(argv[3]);
		char *command = argv[4];
		char **nargv = alloc_argv((unsigned)argc, argv, 4);
		if (!nargv) {
			CTRLD_LOG("!get_argv");
			return 1;
		}

		log_run(pid_file, command, nargv);
		ret = do_run_or_exe(pid_file, command, nargv, timeout);

		free(nargv);
	} else if (strcmp(cmd, "wait") == 0) {
		/* wait [<timeout>] -- default -1 means wait forever */
		if (argc != 3 && argc != 4)
			usage();

		int timeout = -1;
		if (argc == 4)
			timeout = atoi(argv[3]);

		CTRLD_LOG("wait %s %d", pid_file, timeout);
		ret = do_wait(pid_file, timeout);
	} else if (strcmp(cmd, "kill") == 0) {
		/* kill <signal> -- accepts a number or a symbolic name */
		if (argc != 4)
			usage();

		int signo = atoi(argv[3]);
		if (signo == 0) {
			signo = convert_signal_name(argv[3]);
			if (signo == -1) {
				CTRLD_LOG("Invalid signal name or number"
						" (%s)", argv[3]);
				return 1;
			}
		}

		CTRLD_LOG("kill %s %s", pid_file, argv[3]);
		ret = do_kill(pid_file, signo);
#ifndef __FreeBSD__
	} else if (strcmp(cmd, "wait_port") == 0) {
		/* wait_port <port> -- Linux only */
		if (argc != 4)
			usage();

		unsigned short port = (unsigned short)atoi(argv[3]);

		CTRLD_LOG("wait_port %s %u", pid_file, port);
		ret = do_wait_port(pid_file, port);
#endif
	} else {
		usage();
	}

	return ret;
}
| 14,734 | 18.568393 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/vmem_calloc/vmem_calloc.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem_calloc.c -- unit test for vmem_calloc
*
* usage: vmem_calloc [directory]
*/
#include "unittest.h"
/*
 * main -- exercise vmem_calloc(): the returned memory must be zeroed,
 * writable, and (for the in-region variant) located inside the pool.
 */
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_calloc");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		UT_FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create");
	}

	int *test = vmem_calloc(vmp, 1, sizeof(int));
	UT_ASSERTne(test, NULL);

	/* pool_calloc should return zeroed memory */
	UT_ASSERTeq(*test, 0);

	/* the allocation must be usable for ordinary reads and writes */
	*test = test_value;
	UT_ASSERTeq(*test, test_value);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		UT_ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, test);
	vmem_delete(vmp);

	DONE(NULL);
}
| 2,717 | 29.2 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_locks/obj_locks.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_locks.c -- unit test for PMEMmutex, PMEMrwlock and PMEMcond
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "obj_locks"
#define NUM_THREADS 16
#define MAX_FUNC 5
TOID_DECLARE(struct locks, 0);

/* root object: one instance of every lock type plus a shared counter */
struct locks {
	PMEMobjpool *pop;
	PMEMmutex mtx;
	PMEMrwlock rwlk;
	PMEMcond cond;
	int data;	/* incremented under the locks by worker threads */
};

/* per-thread bookkeeping handed to each worker */
struct thread_args {
	os_thread_t t;
	TOID(struct locks) lock;
	int t_id;
};

/* signature shared by all lock-scenario worker functions */
typedef void *(*fn_lock)(void *arg);

static struct thread_args threads[NUM_THREADS];
/*
 * do_mutex_lock -- increment the shared counter under the PMEMmutex
 */
static void *
do_mutex_lock(void *arg)
{
	struct thread_args *targs = (struct thread_args *)arg;
	struct locks *l = D_RW(targs->lock);

	pmemobj_mutex_lock(l->pop, &l->mtx);
	l->data++;
	pmemobj_persist(l->pop, &l->data, sizeof(l->data));
	pmemobj_mutex_unlock(l->pop, &l->mtx);

	return NULL;
}
/*
 * do_rwlock_wrlock -- increment the shared counter under the write lock
 */
static void *
do_rwlock_wrlock(void *arg)
{
	struct thread_args *targs = (struct thread_args *)arg;
	struct locks *l = D_RW(targs->lock);

	pmemobj_rwlock_wrlock(l->pop, &l->rwlk);
	l->data++;
	pmemobj_persist(l->pop, &l->data, sizeof(l->data));
	pmemobj_rwlock_unlock(l->pop, &l->rwlk);

	return NULL;
}
/*
 * do_rwlock_rdlock -- take and release the read lock (no data change)
 */
static void *
do_rwlock_rdlock(void *arg)
{
	struct thread_args *targs = (struct thread_args *)arg;
	struct locks *l = D_RW(targs->lock);

	pmemobj_rwlock_rdlock(l->pop, &l->rwlk);
	pmemobj_rwlock_unlock(l->pop, &l->rwlk);

	return NULL;
}
/*
 * do_cond_signal -- lock block on a condition variables,
 * and unlock them by signal
 *
 * Thread 0 waits until all other threads have incremented the counter;
 * each of the others increments it and signals the condition variable.
 */
static void *
do_cond_signal(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);

	if (t->t_id == 0) {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		/* re-check the predicate after every wakeup */
		while (lock->data < (NUM_THREADS - 1))
			pmemobj_cond_wait(lock->pop, &lock->cond,
					&lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	} else {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_cond_signal(lock->pop, &lock->cond);
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	}

	return NULL;
}
/*
 * do_cond_broadcast -- lock block on a condition variables and unlock
 * by broadcasting
 *
 * The first half of the threads waits until the second half has
 * incremented the counter; each incrementing thread broadcasts.
 */
static void *
do_cond_broadcast(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);

	if (t->t_id < (NUM_THREADS / 2)) {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		/* re-check the predicate after every wakeup */
		while (lock->data < (NUM_THREADS / 2))
			pmemobj_cond_wait(lock->pop, &lock->cond,
					&lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	} else {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_cond_broadcast(lock->pop, &lock->cond);
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	}

	return NULL;
}
/* lock scenarios executed by do_lock_mt(), one entry per test phase */
static fn_lock do_lock[MAX_FUNC] = {do_mutex_lock, do_rwlock_wrlock,
				do_rwlock_rdlock, do_cond_signal,
				do_cond_broadcast};
/*
* do_lock_init -- initialize all types of locks
*/
static void
do_lock_init(struct locks *lock)
{
pmemobj_mutex_zero(lock->pop, &lock->mtx);
pmemobj_rwlock_zero(lock->pop, &lock->rwlk);
pmemobj_cond_zero(lock->pop, &lock->cond);
}
/*
 * do_lock_mt -- perform multithread lock operations
 *
 * Runs the selected scenario (index into do_lock[]) on NUM_THREADS
 * threads and verifies the shared counter afterwards.
 */
static void
do_lock_mt(TOID(struct locks) lock, unsigned f_num)
{
	D_RW(lock)->data = 0;
	for (int i = 0; i < NUM_THREADS; ++i) {
		threads[i].lock = lock;
		threads[i].t_id = i;
		PTHREAD_CREATE(&threads[i].t, NULL, do_lock[f_num],
				&threads[i]);
	}

	for (int i = 0; i < NUM_THREADS; ++i)
		PTHREAD_JOIN(&threads[i].t, NULL);

	/*
	 * If all threads passed function properly and used every lock, there
	 * should be every element in data array incremented exactly one time
	 * by every thread.
	 */
	/* (0 is allowed -- the read-lock scenario does not touch data) */
	UT_ASSERT((D_RO(lock)->data == NUM_THREADS) ||
			(D_RO(lock)->data == 0));
}
/*
 * main -- create a pool, allocate the shared lock object and run every
 * lock scenario in turn
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_locks");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	TOID(struct locks) lock;
	POBJ_ALLOC(pop, &lock, struct locks, sizeof(struct locks), NULL, NULL);
	D_RW(lock)->pop = pop;

	do_lock_init(D_RW(lock));

	/* run each scenario from the do_lock dispatch table */
	for (unsigned i = 0; i < MAX_FUNC; i++)
		do_lock_mt(lock, i);

	POBJ_FREE(&lock);

	pmemobj_close(pop);
	DONE(NULL);
}
| 6,338 | 26.56087 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/log_basic/log_basic.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* log_basic.c -- unit test for pmemlog_*
*
* usage: log_basic file operation:...
*
* operations are 'n' or 'a' or 'v' or 't' or 'r' or 'w'
*
*/
#include "unittest.h"
/*
 * do_nbyte -- query the usable log size via pmemlog_nbyte() and print it
 */
static void
do_nbyte(PMEMlogpool *plp)
{
	UT_OUT("usable size: %zu", pmemlog_nbyte(plp));
}
/*
 * do_append -- call pmemlog_append() & print result
 *
 * Appends six fixed strings and reports success/failure for each;
 * pmemlog_append() is documented to return 0 or -1, anything else is
 * flagged as a wrong return value.
 */
static void
do_append(PMEMlogpool *plp)
{
	const char *str[6] = {
		"1st test string\n",
		"2nd test string\n",
		"3rd test string\n",
		"4th test string\n",
		"5th test string\n",
		"6th test string\n"
	};

	for (int i = 0; i < 6; ++i) {
		int rv = pmemlog_append(plp, str[i], strlen(str[i]));
		switch (rv) {
		case 0:
			UT_OUT("append   str[%i] %s", i, str[i]);
			break;
		case -1:
			UT_OUT("!append   str[%i] %s", i, str[i]);
			break;
		default:
			UT_OUT("!append: wrong return value");
			break;
		}
	}
}
/*
 * do_appendv -- call pmemlog_appendv() & print result
 *
 * Exercises a normal 9-element vector append, a zero-length vector
 * (must succeed as a no-op) and a negative iovcnt (must fail with
 * EINVAL).
 */
static void
do_appendv(PMEMlogpool *plp)
{
	struct iovec iov[9] = {
		{
			.iov_base = "1st test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "2nd test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "3rd test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "4th test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "5th test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "6th test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "7th test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "8th test string\n",
			.iov_len = 16
		},
		{
			.iov_base = "9th test string\n",
			.iov_len = 16
		}
	};

	int rv = pmemlog_appendv(plp, iov, 9);
	switch (rv) {
	case 0:
		UT_OUT("appendv");
		break;
	case -1:
		UT_OUT("!appendv");
		break;
	default:
		UT_OUT("!appendv: wrong return value");
		break;
	}

	/* zero-length vector: nothing to write, must succeed */
	rv = pmemlog_appendv(plp, iov, 0);
	UT_ASSERTeq(rv, 0);

	/* negative count: must fail and set EINVAL */
	errno = 0;
	rv = pmemlog_appendv(plp, iov, -3);
	UT_ASSERTeq(errno, EINVAL);
	UT_ASSERTeq(rv, -1);
}
/*
 * do_tell -- print the current write offset reported by pmemlog_tell()
 */
static void
do_tell(PMEMlogpool *plp)
{
	os_off_t offset = pmemlog_tell(plp);

	UT_OUT("tell %zu", offset);
}
/*
 * do_rewind -- reset the log write offset back to zero and report it
 */
static void
do_rewind(PMEMlogpool *plp)
{
	pmemlog_rewind(plp);

	UT_OUT("rewind");
}
/*
 * printit -- print out the 'buf' of length 'len'.
 *
 * It is a walker function for pmemlog_walk; returning 1 asks the
 * walker to continue.
 */
static int
printit(const void *buf, size_t len, void *arg)
{
	/* make a NUL-terminated copy -- 'buf' is not terminated */
	char *copy = MALLOC(len + 1);
	strncpy(copy, buf, len);
	copy[len] = '\0';

	UT_OUT("%s", copy);

	FREE(copy);
	return 1;
}
/*
 * do_walk -- call pmemlog_walk() & print result
 *
 * First pass delivers the whole log in one callback (chunk size 0),
 * second pass delivers it in 16-byte chunks.
 */
static void
do_walk(PMEMlogpool *plp)
{
	pmemlog_walk(plp, 0, printit, NULL);
	UT_OUT("walk all at once");

	pmemlog_walk(plp, 16, printit, NULL);
	UT_OUT("walk by 16");
}
/*
 * main -- create a log pool and execute the operations named by the
 * remaining arguments ('n'byte, 'a'ppend, append'v', 't'ell, 'r'ewind,
 * 'w'alk), then verify pool consistency with pmemlog_check()
 */
int
main(int argc, char *argv[])
{
	PMEMlogpool *plp;
	int result;

	START(argc, argv, "log_basic");

	if (argc < 3)
		UT_FATAL("usage: %s file-name op:n|a|v|t|r|w", argv[0]);

	const char *path = argv[1];

	if ((plp = pmemlog_create(path, 0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemlog_create: %s", path);

	/* go through all arguments one by one */
	for (int arg = 2; arg < argc; arg++) {
		/* Scan the character of each argument. */
		if (strchr("navtrw", argv[arg][0]) == NULL ||
				argv[arg][1] != '\0')
			UT_FATAL("op must be n or a or v or t or r or w");

		switch (argv[arg][0]) {
		case 'n':
			do_nbyte(plp);
			break;

		case 'a':
			do_append(plp);
			break;

		case 'v':
			do_appendv(plp);
			break;

		case 't':
			do_tell(plp);
			break;

		case 'r':
			do_rewind(plp);
			break;

		case 'w':
			do_walk(plp);
			break;
		}
	}

	pmemlog_close(plp);

	/* check consistency again */
	result = pmemlog_check(path);
	if (result < 0)
		UT_OUT("!%s: pmemlog_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemlog_check: not consistent", path);

	DONE(NULL);
}
| 5,552 | 19.87594 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/libpmempool_feature/common.sh
|
#!/usr/bin/env bash
#
# Copyright 2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# src/test/libpmempool_feature/common.sh -- common part of libpmempool_feature tests
#
OUT=out${UNITTEST_NUM}.log
LOG=grep${UNITTEST_NUM}.log
QUERY_PATTERN="query"
ERROR_PATTERN="<1> \\[feature.c:.*\\]"
exit_func=expect_normal_exit
# libpmempool_feature_query -- query feature
#
# Runs the query binary, cross-checks the result against "pmempool info"
# output and finally verifies the pool is still consistent.
#
# usage: libpmempool_feature_query <enum-pmempool_feature>
function libpmempool_feature_query() {
	# query feature
	expect_normal_exit ./libpmempool_feature$EXESUFFIX $DIR/pool.obj q $1
	cat $OUT | grep "$QUERY_PATTERN" >> $LOG

	# verify query with pmempool info
	# (grep -c exits non-zero on a zero count, hence set +e/-e)
	set +e
	count=$(expect_normal_exit $PMEMPOOL$EXESUFFIX info $DIR/pool.obj | grep -c "$1")
	set -e
	if [ "$count" = "0" ]; then
		echo "pmempool info: $1 is NOT set" >> $LOG
	else
		echo "pmempool info: $1 is set" >> $LOG
	fi

	# check if pool is still valid
	expect_normal_exit $PMEMPOOL$EXESUFFIX check $DIR/pool.obj >> /dev/null
}
# libpmempool_feature_enable -- enable feature
#
# When $exit_func is expect_abnormal_exit, the expected error message is
# extracted from the pmempool log instead. Pass "no-query" to skip the
# follow-up query.
#
# usage: libpmempool_feature_enable <enum-pmempool_feature> [no-query]
function libpmempool_feature_enable() {
	$exit_func ./libpmempool_feature$EXESUFFIX $DIR/pool.obj e $1 &>> $LOG
	if [ "$exit_func" == "expect_abnormal_exit" ]; then
		if [ -f "$PMEMPOOL_LOG_FILE" ]; then
			cat $PMEMPOOL_LOG_FILE | grep "$ERROR_PATTERN" >> $LOG
		fi
	fi
	if [ "x$2" != "xno-query" ]; then
		libpmempool_feature_query $1
	fi
}
# libpmempool_feature_disable -- disable feature
#
# Mirror image of libpmempool_feature_enable: runs the disable command,
# captures the expected error on abnormal exit, and optionally re-queries.
#
# usage: libpmempool_feature_disable <enum-pmempool_feature> [no-query]
function libpmempool_feature_disable() {
	$exit_func ./libpmempool_feature$EXESUFFIX $DIR/pool.obj d $1 &>> $LOG
	if [ "$exit_func" == "expect_abnormal_exit" ]; then
		if [ -f "$PMEMPOOL_LOG_FILE" ]; then
			cat $PMEMPOOL_LOG_FILE | grep "$ERROR_PATTERN" >> $LOG
		fi
	fi
	if [ "x$2" != "xno-query" ]; then
		libpmempool_feature_query $1
	fi
}
| 3,400 | 34.061856 | 84 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/libpmempool_feature/libpmempool_feature.c
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmempool_feature -- pmempool_feature_(enable|disable|query) test
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include "libpmempool.h"
#include "pool_hdr.h"
#include "unittest.h"
#define EMPTY_FLAGS 0
/*
 * print_usage -- print the expected command-line syntax of this test binary
 */
static void
print_usage(const char *app)
{
	UT_OUT("usage: %s <pool_path> (e|d|q) <feature-name>", app);
	UT_OUT("feature-name: SINGLEHDR, CKSUM_2K, SHUTDOWN_STATE");
}
/*
 * str2pmempool_feature -- map a feature name string onto the
 * pmempool_feature enum; aborts the test on an unrecognized name
 */
static enum pmempool_feature
str2pmempool_feature(const char *app, const char *str)
{
	uint32_t val = util_str2pmempool_feature(str);
	if (val == UINT32_MAX) {
		/* not a known feature -- print help and bail out */
		print_usage(app);
		UT_FATAL("unknown feature: %s", str);
	}
	return (enum pmempool_feature)val;
}
/*
 * main -- dispatch enable/disable/query of a pool feature based on argv:
 * argv[1] = pool path, argv[2][0] = command (e|d|q), argv[3] = feature name
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_feature");
	if (argc < 4) {
		print_usage(argv[0]);
		UT_FATAL("insufficient number of arguments: %d", argc - 1);
	}
	const char *path = argv[1];
	char cmd = argv[2][0];
	enum pmempool_feature feature = str2pmempool_feature(argv[0], argv[3]);
	if (cmd == 'e')
		return pmempool_feature_enable(path, feature, EMPTY_FLAGS);
	if (cmd == 'd')
		return pmempool_feature_disable(path, feature, EMPTY_FLAGS);
	if (cmd == 'q') {
		int ret = pmempool_feature_query(path, feature, EMPTY_FLAGS);
		if (ret < 0)
			return 1;
		UT_OUT("query %s result is %d", argv[3], ret);
		return 0;
	}
	/* unrecognized command letter */
	print_usage(argv[0]);
	UT_FATAL("unknown command: %c", cmd);
	DONE(NULL);
}
| 3,137 | 28.603774 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_tx_flow/obj_tx_flow.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_tx_flow.c -- unit test for transaction flow
*/
#include "unittest.h"
#define LAYOUT_NAME "direct"
#define TEST_VALUE_A 5
#define TEST_VALUE_B 10
#define TEST_VALUE_C 15
#define OPS_NUM 8
/* persistent object with three int fields, one per transaction stage check */
TOID_DECLARE(struct test_obj, 1);
struct test_obj {
	int a;
	int b;
	int c;
};
/*
 * do_tx_macro_commit -- committing transaction via the TX_* macros:
 * WORK, ONCOMMIT and FINALLY must run; ONABORT must not.
 */
static void
do_tx_macro_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	TX_BEGIN(pop) {
		D_RW(*obj)->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		/* the WORK-stage store must be visible after commit */
		UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
		D_RW(*obj)->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		D_RW(*obj)->a = TEST_VALUE_B;
	} TX_FINALLY {
		/* FINALLY runs last, regardless of commit/abort */
		UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
		D_RW(*obj)->c = TEST_VALUE_C;
	} TX_END
}
/*
 * do_tx_macro_abort -- aborted transaction via the TX_* macros:
 * the snapshotted fields must be rolled back, ONABORT and FINALLY run,
 * ONCOMMIT does not.
 */
static void
do_tx_macro_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	D_RW(*obj)->a = TEST_VALUE_A;
	D_RW(*obj)->b = TEST_VALUE_B;
	TX_BEGIN(pop) {
		/* snapshot taken here -- abort restores a=A, b=B */
		TX_ADD(*obj);
		D_RW(*obj)->a = TEST_VALUE_B;
		pmemobj_tx_abort(EINVAL);
		/* unreachable: abort jumps straight to ONABORT */
		D_RW(*obj)->b = TEST_VALUE_A;
	} TX_ONCOMMIT { /* not called */
		D_RW(*obj)->a = TEST_VALUE_B;
	} TX_ONABORT {
		/* rollback must have undone the WORK-stage store to a */
		UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
		UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
		D_RW(*obj)->b = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
		D_RW(*obj)->c = TEST_VALUE_C;
	} TX_END
}
/*
 * do_tx_macro_commit_nested -- nested committing transactions via macros;
 * inner ONCOMMIT runs on inner commit, outer ONCOMMIT after outer commit.
 */
static void
do_tx_macro_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	TX_BEGIN(pop) {
		TX_BEGIN(pop) {
			D_RW(*obj)->a = TEST_VALUE_A;
		} TX_ONCOMMIT {
			UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
			D_RW(*obj)->b = TEST_VALUE_B;
		} TX_END
	} TX_ONCOMMIT {
		D_RW(*obj)->c = TEST_VALUE_C;
	} TX_END
}
/*
 * do_tx_macro_abort_nested -- abort inside a nested transaction must
 * propagate to the outer one: both ONABORT handlers run, neither
 * ONCOMMIT does, and both FINALLY handlers run.
 *
 * NOTE(review): the locals are volatile, presumably so their values
 * survive the non-local jump the TX macros perform on abort -- confirm
 * against the libpmemobj transaction documentation.
 */
static void
do_tx_macro_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	volatile int a = 0;
	volatile int b = 0;
	volatile int c = 0;
	D_RW(*obj)->a = TEST_VALUE_A;
	D_RW(*obj)->b = TEST_VALUE_B;
	TX_BEGIN(pop) {
		TX_ADD(*obj);
		D_RW(*obj)->a = TEST_VALUE_B;
		a = TEST_VALUE_C;
		TX_BEGIN(pop) {
			D_RW(*obj)->b = TEST_VALUE_C;
			a = TEST_VALUE_A;
			pmemobj_tx_abort(EINVAL);
			/* unreachable after abort */
			a = TEST_VALUE_B;
		} TX_ONCOMMIT { /* not called */
			a = TEST_VALUE_C;
		} TX_ONABORT {
			UT_ASSERT(a == TEST_VALUE_A);
			b = TEST_VALUE_B;
		} TX_FINALLY {
			UT_ASSERT(b == TEST_VALUE_B);
			c = TEST_VALUE_C;
		} TX_END
		/* unreachable: the abort also terminates the outer WORK block */
		a = TEST_VALUE_B;
	} TX_ONCOMMIT { /* not called */
		UT_ASSERT(a == TEST_VALUE_A);
		c = TEST_VALUE_C;
	} TX_ONABORT {
		/* values set by the inner handlers must be visible here */
		UT_ASSERT(a == TEST_VALUE_A);
		UT_ASSERT(b == TEST_VALUE_B);
		UT_ASSERT(c == TEST_VALUE_C);
		b = TEST_VALUE_A;
	} TX_FINALLY {
		UT_ASSERT(b == TEST_VALUE_A);
		D_RW(*obj)->c = TEST_VALUE_C;
		a = TEST_VALUE_B;
	} TX_END
	UT_ASSERT(a == TEST_VALUE_B);
}
/*
 * do_tx_commit -- committing transaction via the explicit function API;
 * after pmemobj_tx_commit() the stage must be TX_STAGE_ONCOMMIT.
 */
static void
do_tx_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	D_RW(*obj)->a = TEST_VALUE_A;
	TX_ADD(*obj);
	D_RW(*obj)->b = TEST_VALUE_B;
	pmemobj_tx_commit();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
	/* store made in the ONCOMMIT stage, outside the committed work */
	D_RW(*obj)->c = TEST_VALUE_C;
	pmemobj_tx_end();
}
/*
 * do_tx_commit_nested -- nested commit via the explicit API; ending the
 * committed inner transaction must return the outer one to TX_STAGE_WORK.
 */
static void
do_tx_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	TX_ADD(*obj);
	D_RW(*obj)->a = TEST_VALUE_A;
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	TX_ADD(*obj);
	D_RW(*obj)->b = TEST_VALUE_B;
	pmemobj_tx_commit();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
	pmemobj_tx_end();
	/* inner tx finished -- back in the outer WORK stage */
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
	pmemobj_tx_commit();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
	D_RW(*obj)->c = TEST_VALUE_C;
	pmemobj_tx_end();
}
/*
 * do_tx_abort -- aborted transaction via the explicit API; the abort
 * rolls a back to the value it had at TX_ADD (snapshot) time.
 */
static void
do_tx_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	D_RW(*obj)->a = TEST_VALUE_A;
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	D_RW(*obj)->b = TEST_VALUE_B;
	/* snapshot: a == TEST_VALUE_A, b == TEST_VALUE_B at this point */
	TX_ADD(*obj);
	D_RW(*obj)->a = 0;
	pmemobj_tx_abort(EINVAL);
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
	/* store made in the ONABORT stage, outside the aborted work */
	D_RW(*obj)->c = TEST_VALUE_C;
	pmemobj_tx_end();
}
/*
 * do_tx_abort_nested -- abort in the inner transaction (explicit API)
 * must leave the outer transaction in TX_STAGE_ONABORT as well.
 */
static void
do_tx_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
	D_RW(*obj)->a = TEST_VALUE_A;
	D_RW(*obj)->b = TEST_VALUE_B;
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	TX_ADD(*obj);
	D_RW(*obj)->a = 0;
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	TX_ADD(*obj);
	D_RW(*obj)->b = 0;
	pmemobj_tx_abort(EINVAL);
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
	pmemobj_tx_end();
	/* the abort propagated: outer tx is aborted too */
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
	D_RW(*obj)->c = TEST_VALUE_C;
	pmemobj_tx_end();
}
/* common signature of all transaction-flow scenarios above */
typedef void (*fn_op)(PMEMobjpool *pop, TOID(struct test_obj) *obj);
/* all OPS_NUM scenarios; main() runs each against a freshly zeroed object */
static fn_op tx_op[OPS_NUM] = {do_tx_macro_commit, do_tx_macro_abort,
	do_tx_macro_commit_nested, do_tx_macro_abort_nested,
	do_tx_commit, do_tx_commit_nested, do_tx_abort,
	do_tx_abort_nested};
/*
 * do_tx_process -- pmemobj_tx_process() must advance the stage machine one
 * step at a time: WORK -> ONCOMMIT -> FINALLY -> NONE.
 */
static void
do_tx_process(PMEMobjpool *pop)
{
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
	pmemobj_tx_end();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
}
/*
 * do_tx_process_nested -- pmemobj_tx_process() inside a nested transaction:
 * the inner tx is walked to FINALLY and ended, returning the outer to WORK;
 * the outer is then aborted and walked ONABORT -> FINALLY -> NONE.
 */
static void
do_tx_process_nested(PMEMobjpool *pop)
{
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
	pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
	pmemobj_tx_end();
	/* inner tx done -- outer is back in WORK */
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
	pmemobj_tx_abort(EINVAL);
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
	pmemobj_tx_process();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
	pmemobj_tx_end();
	UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
}
/*
 * main -- create a pool, then run every transaction-flow scenario against a
 * zeroed test object and verify all three fields reached their final values;
 * finally exercise the pmemobj_tx_process() stage machine.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_flow");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");
	TOID(struct test_obj) obj;
	POBJ_ZNEW(pop, &obj, struct test_obj);
	for (int op = 0; op < OPS_NUM; op++) {
		/* reset, run one scenario, check the expected end state */
		D_RW(obj)->a = 0;
		D_RW(obj)->b = 0;
		D_RW(obj)->c = 0;
		tx_op[op](pop, &obj);
		UT_ASSERT(D_RO(obj)->a == TEST_VALUE_A);
		UT_ASSERT(D_RO(obj)->b == TEST_VALUE_B);
		UT_ASSERT(D_RO(obj)->c == TEST_VALUE_C);
	}
	do_tx_process(pop);
	do_tx_process_nested(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 8,006 | 26.515464 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/util_pool_hdr/util_pool_hdr.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util_pool_hdr.c -- unit test for pool_hdr layout and default values
*
* This test should be modified after every layout change. It's here to prevent
* any accidental layout changes.
*/
#include "util.h"
#include "unittest.h"
#include "pool_hdr.h"
#define POOL_HDR_SIG_LEN_V1 (8)
#define POOL_HDR_UNUSED_LEN_V1 (1904)
#define POOL_HDR_UNUSED2_LEN_V1 (1976)
#define POOL_HDR_2K_CHECKPOINT (2048UL)
#define FEATURES_T_SIZE_V1 (12)
#define ARCH_FLAGS_SIZE_V1 (16)
#define ARCH_FLAGS_RESERVED_LEN_V1 (4)
#define SHUTDOWN_STATE_SIZE_V1 (64)
#define SHUTDOWN_STATE_RESERVED_LEN_V1 (39)
/*
* test_layout -- test pool_hdr layout
*/
static void
test_layout()
{
	/*
	 * struct pool_hdr -- field order, alignment and sizes are part of
	 * the on-media pool format; any drift here is a layout break.
	 */
	ASSERT_ALIGNED_BEGIN(struct pool_hdr);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, signature);
	ASSERT_FIELD_SIZE(signature, POOL_HDR_SIG_LEN_V1);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, major);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, features);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, poolset_uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_part_uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, next_part_uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_repl_uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, next_repl_uuid);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, crtime);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, arch_flags);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, unused);
	ASSERT_FIELD_SIZE(unused, POOL_HDR_UNUSED_LEN_V1);
	/* the checksummed part of the header must end exactly at 2K */
	ASSERT_OFFSET_CHECKPOINT(struct pool_hdr, POOL_HDR_2K_CHECKPOINT);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, unused2);
	ASSERT_FIELD_SIZE(unused2, POOL_HDR_UNUSED2_LEN_V1);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, sds);
	ASSERT_ALIGNED_FIELD(struct pool_hdr, checksum);
	ASSERT_ALIGNED_CHECK(struct pool_hdr);
	/* features_t -- compat/incompat/ro_compat triple, 12 bytes total */
	ASSERT_ALIGNED_BEGIN(features_t);
	ASSERT_ALIGNED_FIELD(features_t, compat);
	ASSERT_ALIGNED_FIELD(features_t, incompat);
	ASSERT_ALIGNED_FIELD(features_t, ro_compat);
	ASSERT_ALIGNED_CHECK(features_t);
	UT_COMPILE_ERROR_ON(sizeof(features_t) != FEATURES_T_SIZE_V1);
	/* struct arch_flags -- architecture identification, 16 bytes total */
	ASSERT_ALIGNED_BEGIN(struct arch_flags);
	ASSERT_ALIGNED_FIELD(struct arch_flags, alignment_desc);
	ASSERT_ALIGNED_FIELD(struct arch_flags, machine_class);
	ASSERT_ALIGNED_FIELD(struct arch_flags, data);
	ASSERT_ALIGNED_FIELD(struct arch_flags, reserved);
	ASSERT_FIELD_SIZE(reserved, ARCH_FLAGS_RESERVED_LEN_V1);
	ASSERT_ALIGNED_FIELD(struct arch_flags, machine);
	ASSERT_ALIGNED_CHECK(struct arch_flags);
	UT_COMPILE_ERROR_ON(sizeof(struct arch_flags) != ARCH_FLAGS_SIZE_V1);
	/* struct shutdown_state -- SDS record, 64 bytes total */
	ASSERT_ALIGNED_BEGIN(struct shutdown_state);
	ASSERT_ALIGNED_FIELD(struct shutdown_state, usc);
	ASSERT_ALIGNED_FIELD(struct shutdown_state, uuid);
	ASSERT_ALIGNED_FIELD(struct shutdown_state, dirty);
	ASSERT_ALIGNED_FIELD(struct shutdown_state, reserved);
	ASSERT_FIELD_SIZE(reserved, SHUTDOWN_STATE_RESERVED_LEN_V1);
	ASSERT_ALIGNED_FIELD(struct shutdown_state, checksum);
	ASSERT_ALIGNED_CHECK(struct shutdown_state);
	UT_COMPILE_ERROR_ON(sizeof(struct shutdown_state) !=
		SHUTDOWN_STATE_SIZE_V1);
}
/* incompat features - final values */
#define POOL_FEAT_SINGLEHDR_FINAL 0x0001U
#define POOL_FEAT_CKSUM_2K_FINAL 0x0002U
#define POOL_FEAT_SDS_FINAL 0x0004U
/* incompat features effective values */
#ifdef _WIN32
#ifdef SDS_ENABLED
#define POOL_E_FEAT_SDS_FINAL POOL_FEAT_SDS_FINAL
#else
#define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */
#endif
#endif
#ifdef _WIN32
#define POOL_FEAT_INCOMPAT_DEFAULT_V1 \
(POOL_FEAT_CKSUM_2K_FINAL | POOL_E_FEAT_SDS_FINAL)
#else
/*
* shutdown state support on Linux requires root access
* so it is disabled by default
*/
#define POOL_FEAT_INCOMPAT_DEFAULT_V1 \
(POOL_FEAT_CKSUM_2K_FINAL)
#endif
/*
* test_default_values -- test default values
*/
static void
test_default_values()
{
	/* feature flag bit values are frozen; compare against the V1 finals */
	UT_COMPILE_ERROR_ON(POOL_FEAT_SINGLEHDR != POOL_FEAT_SINGLEHDR_FINAL);
	UT_COMPILE_ERROR_ON(POOL_FEAT_CKSUM_2K != POOL_FEAT_CKSUM_2K_FINAL);
	UT_COMPILE_ERROR_ON(POOL_FEAT_SDS != POOL_FEAT_SDS_FINAL);
	/* the default incompat feature set must not change accidentally */
	UT_COMPILE_ERROR_ON(POOL_FEAT_INCOMPAT_DEFAULT !=
		POOL_FEAT_INCOMPAT_DEFAULT_V1);
}
/* run both compile-time check groups; failures surface at build time */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_pool_hdr");
	test_layout();
	test_default_values();
	DONE(NULL);
}
| 5,743 | 34.02439 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/util_map_proc/util_map_proc.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util_map_proc.c -- unit test for util_map() /proc parsing
*
* usage: util_map_proc maps_file len [len]...
*/
#define _GNU_SOURCE
#include <dlfcn.h>
#include "unittest.h"
#include "util.h"
#include "mmap.h"
#define GIGABYTE ((uintptr_t)1 << 30)
#define TERABYTE ((uintptr_t)1 << 40)
int
main(int argc, char *argv[])
{
START(argc, argv, "util_map_proc");
util_init();
util_mmap_init();
if (argc < 3)
UT_FATAL("usage: %s maps_file len [len]...", argv[0]);
Mmap_mapfile = argv[1];
UT_OUT("redirecting " OS_MAPFILE " to %s", Mmap_mapfile);
for (int arg = 2; arg < argc; arg++) {
size_t len = (size_t)strtoull(argv[arg], NULL, 0);
size_t align = 2 * MEGABYTE;
if (len >= 2 * GIGABYTE)
align = GIGABYTE;
void *h1 =
util_map_hint_unused((void *)TERABYTE, len, GIGABYTE);
void *h2 = util_map_hint(len, 0);
if (h1 != MAP_FAILED && h1 != NULL)
UT_ASSERTeq((uintptr_t)h1 & (GIGABYTE - 1), 0);
if (h2 != MAP_FAILED && h2 != NULL)
UT_ASSERTeq((uintptr_t)h2 & (align - 1), 0);
if (h1 == NULL) /* XXX portability */
UT_OUT("len %zu: (nil) %p", len, h2);
else if (h2 == NULL)
UT_OUT("len %zu: %p (nil)", len, h1);
else
UT_OUT("len %zu: %p %p", len, h1, h2);
}
util_mmap_fini();
DONE(NULL);
}
| 2,850 | 31.397727 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/win_lists/win_lists.c
|
/*
* Copyright 2015-2017, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_lists.c -- test list routines used in windows implementation
*/
#include "unittest.h"
#include "queue.h"
/* singly-linked-list test node; dummy carries the value printed to the log */
typedef struct TEST_LIST_NODE {
	LIST_ENTRY(TEST_LIST_NODE) ListEntry;
	int dummy;
} *PTEST_LIST_NODE;
LIST_HEAD(TestList, TEST_LIST_NODE);
static void
dump_list(struct TestList *head)
{
PTEST_LIST_NODE pNode = NULL;
pNode = (PTEST_LIST_NODE)LIST_FIRST(head);
while (pNode != NULL) {
UT_OUT("Node value: %d", pNode->dummy);
pNode = (PTEST_LIST_NODE)LIST_NEXT(pNode, ListEntry);
}
}
static int
get_list_count(struct TestList *head)
{
PTEST_LIST_NODE pNode = NULL;
int listCount = 0;
pNode = (PTEST_LIST_NODE)LIST_FIRST(head);
while (pNode != NULL) {
listCount++;
pNode = (PTEST_LIST_NODE)LIST_NEXT(pNode, ListEntry);
}
return listCount;
}
/*
 * test_list - Do some basic list manipulations and output to log for
 * script comparison. Only testing the macros we use.
 */
static void
test_list(void)
{
	struct TestList head = LIST_HEAD_INITIALIZER(head);
	LIST_INIT(&head);
	UT_ASSERT_rt(LIST_EMPTY(&head));
	/* insert a single node and verify it is reachable */
	PTEST_LIST_NODE node = MALLOC(sizeof(struct TEST_LIST_NODE));
	node->dummy = 0;
	LIST_INSERT_HEAD(&head, node, ListEntry);
	UT_ASSERTeq_rt(1, get_list_count(&head));
	dump_list(&head);
	/* remove that one node again */
	LIST_REMOVE(node, ListEntry);
	UT_ASSERTeq_rt(0, get_list_count(&head));
	dump_list(&head);
	free(node);
	/* add a bunch of nodes, always at the head */
	for (int i = 1; i < 10; i++) {
		node = MALLOC(sizeof(struct TEST_LIST_NODE));
		node->dummy = i;
		LIST_INSERT_HEAD(&head, node, ListEntry);
	}
	UT_ASSERTeq_rt(9, get_list_count(&head));
	dump_list(&head);
	/* drain the list completely */
	while (!LIST_EMPTY(&head)) {
		node = (PTEST_LIST_NODE)LIST_FIRST(&head);
		LIST_REMOVE(node, ListEntry);
		free(node);
	}
	UT_ASSERTeq_rt(0, get_list_count(&head));
	dump_list(&head);
}
/* sorted-queue test node; ordering is by the dummy field */
typedef struct TEST_SORTEDQ_NODE {
	SORTEDQ_ENTRY(TEST_SORTEDQ_NODE) queue_link;
	int dummy;
} TEST_SORTEDQ_NODE, *PTEST_SORTEDQ_NODE;
SORTEDQ_HEAD(TEST_SORTEDQ, TEST_SORTEDQ_NODE);
/*
 * sortedq_node_comparer -- order nodes by their dummy value
 *
 * Returns negative/zero/positive like strcmp. Written as the two-comparison
 * sign idiom instead of "a->dummy - b->dummy" so the result cannot overflow
 * for extreme int operands (e.g. INT_MIN vs INT_MAX); the sign, which is all
 * SORTEDQ_INSERT consumes, is unchanged.
 */
static int
sortedq_node_comparer(TEST_SORTEDQ_NODE *a, TEST_SORTEDQ_NODE *b)
{
	return (a->dummy > b->dummy) - (a->dummy < b->dummy);
}
/* one insertion scenario: count valid entries in data, inserted in order */
struct TEST_DATA_SORTEDQ {
	int count;
	int data[10];
};
/*
 * test_sortedq - Do some basic operations on SORTEDQ and make sure that the
 * queue is sorted for different input sequences.
 */
void
test_sortedq(void)
{
	PTEST_SORTEDQ_NODE node = NULL;
	struct TEST_SORTEDQ head = SORTEDQ_HEAD_INITIALIZER(head);
	struct TEST_DATA_SORTEDQ test_data[] = {
		{5, {5, 7, 9, 100, 101}},
		{7, {1, 2, 3, 4, 5, 6, 7}},
		{5, {100, 90, 80, 70, 40}},
		{6, {10, 9, 8, 7, 6, 5}},
		{5, {23, 13, 27, 4, 15}},
		{5, {2, 2, 2, 2, 2}}
	};
	SORTEDQ_INIT(&head);
	UT_ASSERT_rt(SORTEDQ_EMPTY(&head));
	for (int i = 0; i < _countof(test_data); i++) {
		/* insert one data set in the given (unsorted) order */
		for (int j = 0; j < test_data[i].count; j++) {
			node = MALLOC(sizeof(TEST_SORTEDQ_NODE));
			node->dummy = test_data[i].data[j];
			SORTEDQ_INSERT(&head, node, queue_link,
				TEST_SORTEDQ_NODE, sortedq_node_comparer);
		}
		/* verify the queue is non-decreasing and complete */
		int prev = MININT;
		int num_entries = 0;
		SORTEDQ_FOREACH(node, &head, queue_link) {
			UT_ASSERT(prev <= node->dummy);
			/*
			 * BUGFIX: track the previous value; without this
			 * update prev stayed MININT and the sortedness
			 * assertion above was vacuously true.
			 */
			prev = node->dummy;
			num_entries++;
		}
		UT_ASSERT(num_entries == test_data[i].count);
		/* drain the queue before the next data set */
		while (!SORTEDQ_EMPTY(&head)) {
			node = SORTEDQ_FIRST(&head);
			SORTEDQ_REMOVE(&head, node, queue_link);
			FREE(node);
		}
	}
}
/*
 * main -- run the list test (default, or "list" argument) and/or the
 * sorted-queue test ("sortedq" argument)
 */
int
main(int argc, char *argv[])
{
	const char *mode = (argc > 1) ? argv[1] : "list";
	START(argc, argv, "win_lists - testing %s", mode);
	if (argc == 1 || (stricmp(argv[1], "list") == 0))
		test_list();
	if (argc > 1 && (stricmp(argv[1], "sortedq") == 0))
		test_sortedq();
	DONE(NULL);
}
| 5,267 | 26.295337 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/cto_pool_win/cto_pool_win.c
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cto_pool.c -- unit test for pmemcto_create() and pmemcto_open()
*
* usage: cto_pool op path layout [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- create a cto pool, report the resulting file size and mode,
 * and verify consistency with pmemcto_check; messages use the UTF-8 path
 */
static void
pool_create(const wchar_t *path, const wchar_t *layout, size_t poolsize,
	unsigned mode)
{
	char *upath = ut_toUTF8(path);
	PMEMctopool *pcp = pmemcto_createW(path, layout, poolsize, mode);
	if (pcp != NULL) {
		os_stat_t stbuf;
		STATW(path, &stbuf);
		UT_OUT("%s: file size %zu mode 0%o",
			upath, stbuf.st_size,
			stbuf.st_mode & 0777);
		pmemcto_close(pcp);
		int result = pmemcto_checkW(path, layout);
		if (result < 0)
			UT_OUT("!%s: pmemcto_check", upath);
		else if (result == 0)
			UT_OUT("%s: pmemcto_check: not consistent", upath);
	} else {
		UT_OUT("!%s: pmemcto_create", upath);
	}
	free(upath);
}
/*
 * pool_open -- open an existing cto pool and report success or failure
 */
static void
pool_open(const wchar_t *path, const wchar_t *layout)
{
	char *upath = ut_toUTF8(path);
	PMEMctopool *pcp = pmemcto_openW(path, layout);
	if (pcp != NULL) {
		UT_OUT("%s: pmemcto_open: Success", upath);
		pmemcto_close(pcp);
	} else {
		UT_OUT("!%s: pmemcto_open", upath);
	}
	free(upath);
}
/*
 * wmain -- dispatch create/open based on argv[1]; argv[2] is the pool path,
 * argv[3] the layout ("EMPTY" -> "", "NULL" -> NULL), and for create
 * argv[4]/argv[5] are poolsize (in MB) and mode (octal)
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "cto_pool_win");
	if (argc < 4)
		UT_FATAL("usage: %s op path layout [poolsize mode]",
			ut_toUTF8(argv[0]));
	wchar_t *layout = NULL;
	size_t poolsize;
	unsigned mode;
	if (wcscmp(argv[3], L"EMPTY") == 0)
		layout = L"";
	else if (wcscmp(argv[3], L"NULL") != 0)
		layout = argv[3];
	switch (argv[1][0]) {
	case 'c':
		/*
		 * BUGFIX: "create" also needs poolsize and mode; without
		 * this check argv[4]/argv[5] were read out of bounds when
		 * the operation was 'c' but only 4 arguments were given.
		 */
		if (argc < 6)
			UT_FATAL("usage: %s c path layout poolsize mode",
				ut_toUTF8(argv[0]));
		poolsize = wcstoul(argv[4], NULL, 0) * MB; /* in megabytes */
		mode = wcstoul(argv[5], NULL, 8);
		pool_create(argv[2], layout, poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2], layout);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONEW(NULL);
}
| 3,475 | 26.15625 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_pool/obj_pool.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pool.c -- unit test for pmemobj_create() and pmemobj_open()
*
* usage: obj_pool op path layout [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- create an obj pool, report the resulting file size and
 * mode, and verify consistency with pmemobj_check
 */
static void
pool_create(const char *path, const char *layout, size_t poolsize,
	unsigned mode)
{
	PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode);
	if (pop != NULL) {
		os_stat_t stbuf;
		STAT(path, &stbuf);
		UT_OUT("%s: file size %zu mode 0%o",
			path, stbuf.st_size,
			stbuf.st_mode & 0777);
		pmemobj_close(pop);
		int result = pmemobj_check(path, layout);
		if (result < 0)
			UT_OUT("!%s: pmemobj_check", path);
		else if (result == 0)
			UT_OUT("%s: pmemobj_check: not consistent", path);
	} else {
		UT_OUT("!%s: pmemobj_create: %s", path, pmemobj_errormsg());
	}
}
/*
 * pool_open -- open an existing obj pool and report success or failure
 */
static void
pool_open(const char *path, const char *layout)
{
	PMEMobjpool *pop = pmemobj_open(path, layout);
	if (pop == NULL) {
		UT_OUT("!%s: pmemobj_open: %s", path, pmemobj_errormsg());
		return;
	}
	UT_OUT("%s: pmemobj_open: Success", path);
	pmemobj_close(pop);
}
/*
 * main -- dispatch on argv[1]: 'c' creates (argv[4]=poolsize MB,
 * argv[5]=octal mode), 'o' opens, 'f' opens twice around an invalid
 * PMEMOBJ_CONF setting; argv[3] is the layout ("EMPTY" -> "",
 * "NULL" -> NULL)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pool");
	if (argc < 4)
		UT_FATAL("usage: %s op path layout [poolsize mode]", argv[0]);
	char *layout = NULL;
	size_t poolsize;
	unsigned mode;
	if (strcmp(argv[3], "EMPTY") == 0)
		layout = "";
	else if (strcmp(argv[3], "NULL") != 0)
		layout = argv[3];
	switch (argv[1][0]) {
	case 'c':
		/*
		 * BUGFIX: "create" also needs poolsize and mode; without
		 * this check argv[4]/argv[5] were read out of bounds when
		 * the operation was 'c' but only 4 arguments were given.
		 */
		if (argc < 6)
			UT_FATAL("usage: %s c path layout poolsize mode",
				argv[0]);
		poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */
		mode = strtoul(argv[5], NULL, 8);
		pool_create(argv[2], layout, poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2], layout);
		break;
	case 'f':
		/* opening with an invalid config must fail, then succeed */
		os_setenv("PMEMOBJ_CONF", "invalid-query", 1);
		pool_open(argv[2], layout);
		os_unsetenv("PMEMOBJ_CONF");
		pool_open(argv[2], layout);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONE(NULL);
}
| 3,540 | 26.88189 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/vmem_create/vmem_create.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem_create.c -- unit test for vmem_create
*
* usage: vmem_create directory
*/
#include "unittest.h"
static VMEM *Vmp;
/*
 * signal_handler -- called on SIGSEGV; receiving the signal is the
 * expected outcome of this test, so clean up the pool and finish
 */
static void
signal_handler(int sig)
{
	UT_OUT("signal: %s", os_strsignal(sig));
	vmem_delete(Vmp);
	DONEW(NULL);
}
/*
 * main -- create a vmem pool in the given directory, then deliberately
 * dereference the opaque pool handle. The dereference is expected to
 * trigger SIGSEGV, which signal_handler() turns into a clean DONEW;
 * falling through to the final UT_FATAL means no signal was delivered.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "vmem_create");
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s directory", argv[0]);
	Vmp = vmem_create(argv[1], VMEM_MIN_POOL);
	if (Vmp == NULL) {
		UT_OUT("!vmem_create");
	} else {
		struct sigaction v;
		sigemptyset(&v.sa_mask);
		v.sa_flags = 0;
		v.sa_handler = signal_handler;
		if (SIGACTION(SIGSEGV, &v, NULL) != 0)
			UT_FATAL("!sigaction");
		/* try to dereference the opaque handle */
		char x = *(char *)Vmp;
		/* only reached if the handle was unexpectedly readable */
		UT_OUT("x = %c", x);
	}
	UT_FATAL("no signal received");
}
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/sync-remotes/copy-to-remote-nodes.sh
|
#!/usr/bin/env bash
#
# Copyright 2016-2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# copy-to-remote-nodes.sh -- helper script used to sync remote nodes
#
set -e

if [ ! -f ../testconfig.sh ]; then
	echo "SKIP: testconfig.sh does not exist"
	exit 0
fi

# defined only to be able to source unittest.sh
UNITTEST_NAME=sync-remotes
UNITTEST_NUM=0

# Override default FS (any).
# This is not a real test, so it should not depend on whether
# PMEM_FS_DIR/NON_PMEM_FS_DIR are set.
FS=none

. ../unittest/unittest.sh

COPY_TYPE=$1
shift

# BUG FIX: the remaining arguments were forwarded as unquoted $*, which
# re-splits any path containing whitespace; "$@" preserves them verbatim.
case "$COPY_TYPE" in
common)
	copy_common_to_remote_nodes "$@" > /dev/null
	exit 0
	;;
test)
	copy_test_to_remote_nodes "$@" > /dev/null
	exit 0
	;;
esac

echo "Error: unknown copy type: $COPY_TYPE"
exit 1
| 2,276 | 31.070423 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_root/obj_root.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_root.c -- unit tests for pmemobj_root
*/
#include "unittest.h"
#define FILE_SIZE ((size_t)0x440000000) /* 17 GB */
/*
 * main -- usage: obj_root <file> [l]
 *
 * Exercises pmemobj_root() corner cases on a pre-created 17 GB file:
 * size 0 on a fresh pool (EINVAL), growing and re-reading the root
 * object, and oversized requests (ENOMEM). The optional 'l' argument
 * enables the long PMEMOBJ_MAX_ALLOC_SIZE case.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_root");
	if (argc < 2)
		UT_FATAL("usage: obj_root <file> [l]");

	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	os_stat_t st;
	int long_test = 0;

	if (argc >= 3 && argv[2][0] == 'l')
		long_test = 1;

	/*
	 * BUG FIX: the os_stat() return value was ignored; on failure the
	 * following assert would read an uninitialized 'st'.
	 */
	if (os_stat(path, &st) < 0)
		UT_FATAL("!stat: %s", path);
	UT_ASSERTeq(st.st_size, FILE_SIZE);

	if ((pop = pmemobj_create(path, NULL, 0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* size 0 on a pool with no root object yet is invalid */
	errno = 0;
	PMEMoid oid = pmemobj_root(pop, 0);
	UT_ASSERT(OID_EQUALS(oid, OID_NULL));
	UT_ASSERTeq(errno, EINVAL);

	if (long_test) {
		oid = pmemobj_root(pop, PMEMOBJ_MAX_ALLOC_SIZE);
		UT_ASSERT(!OID_EQUALS(oid, OID_NULL));
	}

	oid = pmemobj_root(pop, 1);
	UT_ASSERT(!OID_EQUALS(oid, OID_NULL));

	/* size 0 now returns the existing root object */
	oid = pmemobj_root(pop, 0);
	UT_ASSERT(!OID_EQUALS(oid, OID_NULL));

	/* requests larger than the pool must fail with ENOMEM */
	errno = 0;
	oid = pmemobj_root(pop, FILE_SIZE);
	UT_ASSERT(OID_EQUALS(oid, OID_NULL));
	UT_ASSERTeq(errno, ENOMEM);

	errno = 0;
	oid = pmemobj_root(pop, SIZE_MAX);
	UT_ASSERT(OID_EQUALS(oid, OID_NULL));
	UT_ASSERTeq(errno, ENOMEM);

	pmemobj_close(pop);

	DONE(NULL);
}
| 2,813 | 29.586957 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_pmalloc_basic/obj_pmalloc_basic.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pmalloc_basic.c -- unit test for pmalloc interface
*/
#include <stdint.h>
#include "heap.h"
#include "obj.h"
#include "pmalloc.h"
#include "unittest.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE (PMEMOBJ_MIN_POOL * 3)
#define TEST_MEGA_ALLOC_SIZE (10 * 1024 * 1024)
#define TEST_HUGE_ALLOC_SIZE (4 * 255 * 1024)
#define TEST_SMALL_ALLOC_SIZE (1000)
#define TEST_MEDIUM_ALLOC_SIZE (1024 * 200)
#define TEST_TINY_ALLOC_SIZE (64)
#define TEST_RUNS 2
#define MAX_MALLOC_FREE_LOOP 1000
#define MALLOC_FREE_SIZE 8000
/*
 * mock_pop -- mock pool layout: pool header, a single lane, then padding
 * so that 'ptr' (where the heap begins) lands on a page boundary
 */
struct mock_pop {
PMEMobjpool p;
char lanes[LANE_TOTAL_SIZE];
char padding[1024]; /* to page boundary */
/* first heap byte; also used as the destination of test allocations */
uint64_t ptr;
};
/* the anonymous mapping backing the mock pool */
static struct mock_pop *addr;
/* convenience alias for &addr->p */
static PMEMobjpool *mock_pop;
/*
 * drain_empty -- (internal) empty function for drain on non-pmem memory
 */
static void
drain_empty(void)
{
/* do nothing */
}
/*
 * obj_persist -- pmemobj version of pmem_persist w/o replication
 * ('flags' is accepted for interface compatibility and ignored)
 */
static int
obj_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
pop->persist_local(addr, len);
return 0;
}
/*
 * obj_flush -- pmemobj version of pmem_flush w/o replication
 */
static int
obj_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
pop->flush_local(addr, len);
return 0;
}
/*
 * obj_drain -- pmemobj version of pmem_drain w/o replication
 */
static void
obj_drain(void *ctx)
{
PMEMobjpool *pop = ctx;
pop->drain_local();
}
/*
 * obj_msync_nofail -- msync wrapper that aborts the test on failure
 */
static void
obj_msync_nofail(const void *addr, size_t size)
{
if (pmem_msync(addr, size))
UT_FATAL("!pmem_msync");
}
/*
 * obj_memcpy -- pmemobj version of memcpy w/o replication
 */
static void *
obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
pmem_memcpy(dest, src, len, flags);
return dest;
}
/*
 * obj_memset -- pmemobj version of memset w/o replication
 */
static void *
obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
pmem_memset(ptr, c, sz, flags);
return ptr;
}
/*
 * test_oom_allocs -- allocate objects of the given size until the heap
 * is exhausted, then free them all; returns the number of successful
 * allocations (used by the caller to check OOM repeatability)
 */
static size_t
test_oom_allocs(size_t size)
{
	uint64_t max_allocs = MOCK_POOL_SIZE / size;
	uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs));

	size_t count = 0;
	for (;;) {
		/* pmalloc returns non-zero once the heap is exhausted */
		if (pmalloc(mock_pop, &addr->ptr, size, 0, 0))
			break;

		UT_ASSERT(addr->ptr != 0);
		allocs[count++] = addr->ptr;
	}

	/*
	 * BUG FIX: the index was 'int' while 'count' is size_t -- a
	 * signed/unsigned comparison that could truncate on huge counts.
	 */
	for (size_t i = 0; i < count; ++i) {
		addr->ptr = allocs[i];
		pfree(mock_pop, &addr->ptr);
		UT_ASSERT(addr->ptr == 0);
	}
	UT_ASSERT(count != 0);
	FREE(allocs);

	return count;
}
/*
 * test_oom_resrv -- reserve objects of the given size until OOM, publish
 * the reservations in batches of up to 10, then free everything;
 * returns the number of successful reservations
 */
static size_t
test_oom_resrv(size_t size)
{
	uint64_t max_allocs = MOCK_POOL_SIZE / size;
	uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs));
	struct pobj_action *resvs = CALLOC(max_allocs, sizeof(*resvs));

	size_t count = 0;
	for (;;) {
		if (palloc_reserve(&mock_pop->heap, size, NULL, NULL, 0, 0, 0,
			&resvs[count]) != 0)
			break;

		allocs[count] = resvs[count].heap.offset;
		UT_ASSERT(allocs[count] != 0);
		count++;
	}

	/* publish in batches of at most 10 reservations per operation */
	for (size_t i = 0; i < count; ) {
		size_t nresv = MIN(count - i, 10);
		struct operation_context *ctx =
			pmalloc_operation_hold(mock_pop);
		palloc_publish(&mock_pop->heap, &resvs[i], nresv, ctx);
		pmalloc_operation_release(mock_pop);

		i += nresv;
	}

	/*
	 * BUG FIX: the index was 'int' while 'count' is size_t -- a
	 * signed/unsigned comparison that could truncate on huge counts.
	 */
	for (size_t i = 0; i < count; ++i) {
		addr->ptr = allocs[i];
		pfree(mock_pop, &addr->ptr);
		UT_ASSERT(addr->ptr == 0);
	}
	UT_ASSERT(count != 0);
	FREE(allocs);
	FREE(resvs);

	return count;
}
/*
 * test_malloc_free_loop -- repeatedly allocate and free a single object
 * of the given size to exercise the alloc/free fast path
 */
static void
test_malloc_free_loop(size_t size)
{
	for (int iter = 0; iter < MAX_MALLOC_FREE_LOOP; ++iter) {
		int ret = pmalloc(mock_pop, &addr->ptr, size, 0, 0);
		UT_ASSERTeq(ret, 0);
		pfree(mock_pop, &addr->ptr);
	}
}
/*
 * test_realloc -- allocate 'org' bytes, reallocate to 'dest' bytes and
 * verify the usable size covers the request at each step
 */
static void
test_realloc(size_t org, size_t dest)
{
	struct palloc_heap *heap = &mock_pop->heap;

	int ret = pmalloc(mock_pop, &addr->ptr, org, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= org);

	ret = prealloc(mock_pop, &addr->ptr, dest, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= dest);

	pfree(mock_pop, &addr->ptr);
}
#define PMALLOC_EXTRA 20
#define PALLOC_FLAG (1 << 15)
#define FIRST_SIZE 1 /* use the first allocation class */
#define FIRST_USIZE 112 /* the usable size is 128 - 16 */
/*
 * test_pmalloc_extras -- verify that the extra field and flags passed to
 * pmalloc are stored and readable back through the palloc accessors
 */
static void
test_pmalloc_extras(PMEMobjpool *pop)
{
	uint64_t off;
	int ret = pmalloc(pop, &off, FIRST_SIZE, PMALLOC_EXTRA, PALLOC_FLAG);
	UT_ASSERTeq(ret, 0);

	UT_ASSERTeq(palloc_extra(&pop->heap, off), PMALLOC_EXTRA);
	UT_ASSERT((palloc_flags(&pop->heap, off) & PALLOC_FLAG) == PALLOC_FLAG);
	UT_ASSERT(palloc_usable_size(&pop->heap, off) == FIRST_USIZE);

	pfree(pop, &off);
}
#define PMALLOC_ELEMENTS 20
/*
 * test_pmalloc_first_next -- verify object iteration: palloc_first()/
 * palloc_next() must walk the allocations in creation order and preserve
 * each object's extra/flags/usable-size metadata
 */
static void
test_pmalloc_first_next(PMEMobjpool *pop)
{
	uint64_t offs[PMALLOC_ELEMENTS];
	for (unsigned i = 0; i < PMALLOC_ELEMENTS; ++i) {
		int ret = pmalloc(pop, &offs[i], FIRST_SIZE, i, i);
		UT_ASSERTeq(ret, 0);
	}

	uint64_t off = palloc_first(&pop->heap);
	UT_ASSERTne(off, 0);

	int seen = 0;
	do {
		UT_ASSERTeq(offs[seen], off);
		UT_ASSERTeq(palloc_extra(&pop->heap, off), seen);
		UT_ASSERTeq(palloc_flags(&pop->heap, off), seen);
		UT_ASSERT(palloc_usable_size(&pop->heap, off) == FIRST_USIZE);
		seen++;
	} while ((off = palloc_next(&pop->heap, off)) != 0);
	UT_ASSERTeq(seen, PMALLOC_ELEMENTS);

	for (int i = 0; i < PMALLOC_ELEMENTS; ++i)
		pfree(pop, &offs[i]);
}
/*
 * test_mock_pool_allocs -- builds a heap inside an anonymous mapping,
 * boots lanes and buckets, runs all allocation scenarios, verifies that
 * allocating-to-OOM is repeatable (same object count every round), and
 * tears everything down
 */
static void
test_mock_pool_allocs(void)
{
/* set up the mock pool header by hand -- there is no real pool file */
addr = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE, Ut_mmap_align);
mock_pop = &addr->p;
mock_pop->addr = addr;
mock_pop->rdonly = 0;
mock_pop->is_pmem = 0;
mock_pop->heap_offset = offsetof(struct mock_pop, ptr);
UT_ASSERTeq(mock_pop->heap_offset % Ut_pagesize, 0);
mock_pop->nlanes = 1;
mock_pop->lanes_offset = sizeof(PMEMobjpool);
mock_pop->is_master_replica = 1;
/* non-pmem backing: persist/flush fall back to msync, drain is a no-op */
mock_pop->persist_local = obj_msync_nofail;
mock_pop->flush_local = obj_msync_nofail;
mock_pop->drain_local = drain_empty;
mock_pop->p_ops.persist = obj_persist;
mock_pop->p_ops.flush = obj_flush;
mock_pop->p_ops.drain = obj_drain;
mock_pop->p_ops.memcpy = obj_memcpy;
mock_pop->p_ops.memset = obj_memset;
mock_pop->p_ops.base = mock_pop;
mock_pop->set = MALLOC(sizeof(*(mock_pop->set)));
mock_pop->set->options = 0;
mock_pop->set->directory_based = 0;
/* the heap occupies everything past the pool/lane header */
void *heap_start = (char *)mock_pop + mock_pop->heap_offset;
uint64_t heap_size = MOCK_POOL_SIZE - mock_pop->heap_offset;
struct stats *s = stats_new(mock_pop);
UT_ASSERTne(s, NULL);
heap_init(heap_start, heap_size, &mock_pop->heap_size,
&mock_pop->p_ops);
heap_boot(&mock_pop->heap, heap_start, heap_size, &mock_pop->heap_size,
mock_pop, &mock_pop->p_ops, s, mock_pop->set);
heap_buckets_init(&mock_pop->heap);
/* initialize runtime lanes structure */
mock_pop->lanes_desc.runtime_nlanes = (unsigned)mock_pop->nlanes;
lane_boot(mock_pop);
UT_ASSERTne(mock_pop->heap.rt, NULL);
test_pmalloc_extras(mock_pop);
test_pmalloc_first_next(mock_pop);
test_malloc_free_loop(MALLOC_FREE_SIZE);
size_t medium_resv = test_oom_resrv(TEST_MEDIUM_ALLOC_SIZE);
/*
 * Allocating till OOM and freeing the objects in a loop for different
 * buckets covers basically all code paths except error cases.
 */
size_t medium0 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE);
size_t mega0 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE);
size_t huge0 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
size_t small0 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE);
size_t tiny0 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
size_t huge1 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
size_t small1 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE);
size_t mega1 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE);
size_t tiny1 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
size_t medium1 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE);
/* a full alloc/free cycle must not change how many objects fit */
UT_ASSERTeq(mega0, mega1);
UT_ASSERTeq(huge0, huge1);
UT_ASSERTeq(small0, small1);
UT_ASSERTeq(tiny0, tiny1);
UT_ASSERTeq(medium0, medium1);
UT_ASSERTeq(medium0, medium_resv);
/* realloc to the same size shouldn't affect anything */
for (size_t i = 0; i < tiny1; ++i)
test_realloc(TEST_TINY_ALLOC_SIZE, TEST_TINY_ALLOC_SIZE);
size_t tiny2 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
UT_ASSERTeq(tiny1, tiny2);
test_realloc(TEST_SMALL_ALLOC_SIZE, TEST_MEDIUM_ALLOC_SIZE);
test_realloc(TEST_HUGE_ALLOC_SIZE, TEST_MEGA_ALLOC_SIZE);
/* teardown in reverse boot order */
stats_delete(mock_pop, s);
lane_cleanup(mock_pop);
heap_cleanup(&mock_pop->heap);
FREE(mock_pop->set);
MUNMAP_ANON_ALIGNED(addr, MOCK_POOL_SIZE);
}
/*
 * test_spec_compliance -- the largest allocation that fits in a memory
 * block after the legacy allocation header must equal the
 * PMEMOBJ_MAX_ALLOC_SIZE value promised by the public API
 */
static void
test_spec_compliance(void)
{
uint64_t max_alloc = MAX_MEMORY_BLOCK_SIZE -
sizeof(struct allocation_header_legacy);
UT_ASSERTeq(max_alloc, PMEMOBJ_MAX_ALLOC_SIZE);
}
/*
 * main -- run the mock-pool allocation suite TEST_RUNS times (to catch
 * state leaking between rounds), then the spec-compliance check
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmalloc_basic");

	for (int run = 0; run < TEST_RUNS; ++run)
		test_mock_pool_allocs();

	test_spec_compliance();

	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 10,417 | 25.110276 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/vmem_check/vmem_check.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem_check.c -- unit test for vmem_check
*
* usage: vmem_check [directory]
*/
#include "unittest.h"
/*
 * main -- usage: vmem_check [directory]
 *
 * With no directory the pool lives in an anonymous mapping
 * (vmem_create_in_region); with a directory it is file-backed
 * (vmem_create). A healthy pool must pass vmem_check(); overlapping
 * and deleted pools must not.
 */
int
main(int argc, char *argv[])
{
char *dir = NULL;
void *mem_pool = NULL;
VMEM *vmp;
START(argc, argv, "vmem_check");
if (argc == 2) {
dir = argv[1];
} else if (argc > 2) {
UT_FATAL("usage: %s [directory]", argv[0]);
}
if (dir == NULL) {
/* allocate memory for function vmem_create_in_region() */
mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL * 2, 4 << 20);
vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
if (vmp == NULL)
UT_FATAL("!vmem_create_in_region");
} else {
vmp = vmem_create(dir, VMEM_MIN_POOL);
if (vmp == NULL)
UT_FATAL("!vmem_create");
}
UT_ASSERTeq(1, vmem_check(vmp));
/* create pool in this same memory region */
if (dir == NULL) {
/* second pool deliberately overlaps the first one */
void *mem_pool2 = (void *)(((uintptr_t)mem_pool +
VMEM_MIN_POOL / 2) & ~(Ut_mmap_align - 1));
VMEM *vmp2 = vmem_create_in_region(mem_pool2,
VMEM_MIN_POOL);
if (vmp2 == NULL)
UT_FATAL("!vmem_create_in_region");
/* detect memory range collision */
UT_ASSERTne(1, vmem_check(vmp));
UT_ASSERTne(1, vmem_check(vmp2));
vmem_delete(vmp2);
/*
 * intentional use of a deleted handle: the backing mapping is
 * still owned by the test, and vmem_check must report failure
 */
UT_ASSERTne(1, vmem_check(vmp2));
}
vmem_delete(vmp);
/* for vmem_create() memory unmapped after delete pool */
if (!dir)
UT_ASSERTne(1, vmem_check(vmp));
DONE(NULL);
}
| 2,949 | 28.79798 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/pmempool_check/config.sh
|
#!/usr/bin/env bash
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# pmempool_check/config.sh -- test configuration
#
# Extend timeout for TEST5, as it may take a few minutes
# when run on a non-pmem file system.
CONF_TIMEOUT[5]='10m'
| 1,757 | 40.857143 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/pmempool_check/common.sh
|
#!/usr/bin/env bash
#
# Copyright 2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# pmempool_check/common.sh -- checking pools helpers
#
# per-test output log, recreated from scratch on every run
LOG=out${UNITTEST_NUM}.log
rm -f $LOG && touch $LOG
# obj pool layout name and the poolset file shared by all helpers below
LAYOUT=OBJ_LAYOUT$SUFFIX
POOLSET=$DIR/poolset
# pmemspoil_corrupt_replica_sds -- corrupt shutdown state
#
# usage: pmemspoil_corrupt_replica_sds <replica>
#
# Poisons the unsafe-shutdown counter and marks the state dirty, then
# regenerates the checksum so the header itself still validates.
function pmemspoil_corrupt_replica_sds() {
local replica=$1
expect_normal_exit $PMEMSPOIL --replica $replica $POOLSET \
pool_hdr.shutdown_state.usc=999 \
pool_hdr.shutdown_state.dirty=1 \
"pool_hdr.shutdown_state.checksum_gen\(\)"
}
# pmempool_check_sds_init -- shutdown state unittest init
#
# usage: pmempool_check_sds_init [enable-sds]
#
# Creates a two-replica obj poolset (one 8M part each) and optionally
# enables the SHUTDOWN_STATE feature on it.
function pmempool_check_sds_init() {
# initialize poolset
create_poolset $POOLSET \
8M:$DIR/part00:x \
r 8M:$DIR/part10:x
expect_normal_exit $PMEMPOOL$EXESUFFIX create --layout=$LAYOUT obj $POOLSET
# enable SHUTDOWN_STATE feature
if [ "x$1" == "xenable-sds" ]; then
expect_normal_exit $PMEMPOOL$EXESUFFIX feature \
--enable "SHUTDOWN_STATE" $POOLSET
fi
}
# pmempool_check_sds -- perform shutdown state unittest
#
# usage: pmempool_check_sds <scenario>
#
# Corrupts the shutdown state of both replicas, confirms the poolset
# fails "pmempool check", then answers the interactive repair prompts
# according to <scenario> and verifies the resulting pool state.
function pmempool_check_sds() {
# corrupt poolset replicas
pmemspoil_corrupt_replica_sds 0
pmemspoil_corrupt_replica_sds 1
# verify it is corrupted
expect_abnormal_exit $PMEMPOOL$EXESUFFIX check $POOLSET >> $LOG
exit_func=expect_normal_exit
# perform fixes; the piped y/n answers drive the interactive -r prompts
case "$1" in
fix_second_replica_only)
echo -e "n\ny\n" | expect_normal_exit $PMEMPOOL$EXESUFFIX check -vr $POOLSET >> $LOG
;;
fix_first_replica)
echo -e "y\n" | expect_normal_exit $PMEMPOOL$EXESUFFIX check -vr $POOLSET >> $LOG
;;
fix_no_replicas)
# declining both repairs must leave the poolset broken
echo -e "n\nn\n" | expect_abnormal_exit $PMEMPOOL$EXESUFFIX check -vr $POOLSET >> $LOG
exit_func=expect_abnormal_exit
;;
*)
fatal "unittest_sds: undefined scenario '$1'"
;;
esac
#verify result
$exit_func $PMEMPOOL$EXESUFFIX check $POOLSET >> $LOG
}
| 3,452 | 32.524272 | 88 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/blk_pool_win/blk_pool_win.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_pool_win.c -- unit test for pmemblk_create() and pmemblk_open()
*
* usage: blk_pool_win op path bsize [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- create a blk pool, report its size/blocks/mode, then
 * close it and verify consistency with pmemblk_check (a check with a
 * doubled block size must fail)
 */
static void
pool_create(const wchar_t *path, size_t bsize, size_t poolsize, unsigned mode)
{
	char *upath = ut_toUTF8(path);
	UT_ASSERTne(upath, NULL);

	PMEMblkpool *pbp = pmemblk_createW(path, bsize, poolsize, mode);
	if (pbp == NULL) {
		UT_OUT("!%s: pmemblk_create", upath);
	} else {
		os_stat_t stbuf;
		STATW(path, &stbuf);

		UT_OUT("%s: file size %zu usable blocks %zu mode 0%o",
			upath, stbuf.st_size,
			pmemblk_nblock(pbp),
			stbuf.st_mode & 0777);

		pmemblk_close(pbp);

		int result = pmemblk_checkW(path, bsize);
		if (result < 0)
			UT_OUT("!%s: pmemblk_check", upath);
		else if (result == 0)
			UT_OUT("%s: pmemblk_check: not consistent", upath);
		else
			UT_ASSERTeq(pmemblk_checkW(path, bsize * 2), -1);
	}

	/*
	 * BUG FIX: 'upath' was freed only on the success branch, leaking
	 * the UTF-8 conversion whenever pmemblk_createW() failed.
	 */
	free(upath);
}
/*
 * pool_open -- open an existing blk pool and report the outcome
 */
static void
pool_open(const wchar_t *path, size_t bsize)
{
	char *upath = ut_toUTF8(path);
	UT_ASSERTne(upath, NULL);

	PMEMblkpool *handle = pmemblk_openW(path, bsize);
	if (handle != NULL) {
		UT_OUT("%s: pmemblk_open: Success", upath);
		pmemblk_close(handle);
	} else {
		UT_OUT("!%s: pmemblk_open", upath);
	}

	free(upath);
}
/*
 * wmain -- usage: blk_pool_win op path bsize [poolsize mode]
 *
 * op 'c' creates a pool (poolsize in megabytes, mode in octal);
 * op 'o' opens an existing one.
 *
 * NOTE(review): the 'c' path reads argv[4] and argv[5] without
 * verifying argc >= 6 -- a malformed invocation reads past argv.
 */
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "blk_pool_win");
if (argc < 4)
UT_FATAL("usage: %s op path bsize [poolsize mode]",
ut_toUTF8(argv[0]));
size_t bsize = wcstoul(argv[3], NULL, 0);
size_t poolsize;
unsigned mode;
switch (argv[1][0]) {
case 'c':
poolsize = wcstoul(argv[4], NULL, 0) * MB; /* in megabytes */
mode = wcstoul(argv[5], NULL, 8);
pool_create(argv[2], bsize, poolsize, mode);
break;
case 'o':
pool_open(argv[2], bsize);
break;
default:
UT_FATAL("unknown operation");
}
DONEW(NULL);
}
| 3,522 | 26.310078 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/win_common/win_common.c
|
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_common.c -- test common POSIX or Linux API that were implemented
* for Windows by our library.
*/
#include "unittest.h"
/*
 * test_setunsetenv -- verify os_setenv()/os_unsetenv() semantics: the
 * overwrite flag must be honored and unsetting must remove the variable
 */
static void
test_setunsetenv(void)
{
	const char *name = "TEST_SETUNSETENV_ONE";

	os_unsetenv(name);

	/* fresh variable, no overwrite: the new value must be stored */
	UT_ASSERT(os_setenv(name, "test_setunsetenv_one", 0) == 0);
	UT_ASSERT(strcmp(os_getenv(name), "test_setunsetenv_one") == 0);

	/* existing variable, no overwrite: the old value must survive */
	UT_ASSERT(os_setenv(name, "test_setunsetenv_two", 0) == 0);
	UT_ASSERT(strcmp(os_getenv(name), "test_setunsetenv_one") == 0);

	/* existing variable, with overwrite: the new value must win */
	UT_ASSERT(os_setenv(name, "test_setunsetenv_two", 1) == 0);
	UT_ASSERT(strcmp(os_getenv(name), "test_setunsetenv_two") == 0);

	/* after unset the lookup must fail */
	UT_ASSERT(os_unsetenv(name) == 0);
	UT_ASSERT(os_getenv(name) == NULL);
}
/*
 * main -- usage: win_common [suite]
 *
 * With no argument (or "setunsetenv") runs the setenv/unsetenv suite.
 * NOTE(review): stricmp is MSVC-specific -- fine for this Windows-only
 * test, but not portable.
 */
int
main(int argc, char *argv[])
{
START(argc, argv, "win_common - testing %s",
(argc > 1) ? argv[1] : "setunsetenv");
if (argc == 1 || (stricmp(argv[1], "setunsetenv") == 0))
test_setunsetenv();
DONE(NULL);
}
| 3,036 | 35.590361 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_realloc/obj_realloc.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_realloc.c -- unit test for pmemobj_realloc and pmemobj_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "heap.h"
#include "alloc_class.h"
#include "obj.h"
#include "util.h"
#define MAX_ALLOC_MUL 8
#define MAX_ALLOC_CLASS 5
POBJ_LAYOUT_BEGIN(realloc);
POBJ_LAYOUT_ROOT(realloc, struct root);
POBJ_LAYOUT_TOID(realloc, struct object);
POBJ_LAYOUT_END(realloc);
struct object {
size_t value;
char data[];
};
struct root {
TOID(struct object) obj;
char data[CHUNKSIZE - sizeof(TOID(struct object))];
};
static struct alloc_class_collection *alloc_classes;
/*
 * test_alloc -- verify that pmemobj_realloc on a NULL oid acts as an alloc
 */
static void
test_alloc(PMEMobjpool *pop, size_t size)
{
	TOID(struct root) root = POBJ_ROOT(pop, struct root);
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));

	/* reallocating a NULL oid is equivalent to a fresh allocation */
	int err = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size,
			TOID_TYPE_NUM(struct object));
	UT_ASSERTeq(err, 0);
	UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));

	/* the usable size must cover at least the requested size */
	size_t usable = pmemobj_alloc_usable_size(D_RO(root)->obj.oid);
	UT_ASSERT(usable >= size);
}
/*
 * test_free -- verify that pmemobj_realloc to size 0 acts as a free
 */
static void
test_free(PMEMobjpool *pop)
{
	TOID(struct root) root = POBJ_ROOT(pop, struct root);
	UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));

	/* shrinking to zero frees the object and nulls the stored oid */
	int err = pmemobj_realloc(pop, &D_RW(root)->obj.oid, 0,
			TOID_TYPE_NUM(struct object));
	UT_ASSERTeq(err, 0);
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
/* when non-zero, test_realloc verifies data survives the reallocation */
static int check_integrity = 1;
/*
 * fill_buffer -- fill buffer with random data and return its checksum
 *
 * NOTE(review): rand() % 255 yields values 0..254 (255 is never used);
 * presumably harmless for a checksum test, but % 256 may have been intended.
 */
static uint16_t
fill_buffer(unsigned char *buf, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		buf[i] = rand() % 255;

	/* make the pattern durable before it is checksummed */
	pmem_persist(buf, size);

	return ut_checksum(buf, size);
}
/*
 * test_realloc -- test single reallocation
 *
 * Allocates an object of size_from (zeroed if zrealloc), reallocates it to
 * size_to (optionally changing the type number), and verifies:
 *  - zrealloc: the object content is zeroed both before and after;
 *  - otherwise (when check_integrity): a random pattern written before the
 *    realloc has the same checksum afterwards, over the preserved prefix.
 * The object is freed at the end so the root's oid is NULL on return.
 */
static void
test_realloc(PMEMobjpool *pop, size_t size_from, size_t size_to,
	unsigned type_from, unsigned type_to, int zrealloc)
{
	TOID(struct root) root = POBJ_ROOT(pop, struct root);
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
	int ret;
	if (zrealloc)
		ret = pmemobj_zalloc(pop, &D_RW(root)->obj.oid,
			size_from, type_from);
	else
		ret = pmemobj_alloc(pop, &D_RW(root)->obj.oid,
			size_from, type_from, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
	size_t usable_size_from =
		pmemobj_alloc_usable_size(D_RO(root)->obj.oid);
	UT_ASSERT(usable_size_from >= size_from);
	/*
	 * check_size/checksum are set only on the !zrealloc path and read
	 * only on that same path below, so they are never used uninitialized.
	 */
	size_t check_size;
	uint16_t checksum;
	if (zrealloc) {
		UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj),
			size_from));
	} else if (check_integrity) {
		/* only the part that realloc must preserve is checksummed */
		check_size = size_to >= usable_size_from ?
			usable_size_from : size_to;
		checksum = fill_buffer((unsigned char *)D_RW(D_RW(root)->obj),
			check_size);
	}
	if (zrealloc) {
		ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid,
			size_to, type_to);
	} else {
		ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid,
			size_to, type_to);
	}
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
	size_t usable_size_to =
		pmemobj_alloc_usable_size(D_RO(root)->obj.oid);
	UT_ASSERT(usable_size_to >= size_to);
	if (size_to < size_from) {
		/* shrinking must never grow the usable size */
		UT_ASSERT(usable_size_to <= usable_size_from);
	}
	if (zrealloc) {
		/* zrealloc guarantees the whole new size is zeroed */
		UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_to));
	} else if (check_integrity) {
		uint16_t checksum2 = ut_checksum(
			(uint8_t *)D_RW(D_RW(root)->obj), check_size);
		if (checksum2 != checksum)
			UT_ASSERTinfo(0, "memory corruption");
	}
	/* leave the pool in the initial state for the next iteration */
	pmemobj_free(&D_RW(root)->obj.oid);
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
/*
 * test_realloc_sizes -- test reallocations from/to specified sizes
 *
 * For every registered allocation class, allocates an object that exactly
 * fills one unit (minus size_diff) and reallocates it to larger multiples,
 * smaller fractions, and to the unit size of other allocation classes.
 */
static void
test_realloc_sizes(PMEMobjpool *pop, unsigned type_from,
		unsigned type_to, int zrealloc, unsigned size_diff)
{
	for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		struct alloc_class *c = alloc_class_by_id(alloc_classes, i);
		if (c == NULL)
			continue;
		size_t header_size = header_type_to_size[c->header_type];
		size_t size_from = c->unit_size - header_size - size_diff;
		for (unsigned j = 2; j <= MAX_ALLOC_MUL; j++) {
			/* grow to a j-multiple of the class unit size */
			size_t inc_size_to = c->unit_size * j - header_size;
			test_realloc(pop, size_from, inc_size_to,
				type_from, type_to, zrealloc);
			/* shrink to a 1/j fraction of the unit size */
			size_t dec_size_to = c->unit_size / j;
			if (dec_size_to <= header_size)
				dec_size_to = header_size;
			else
				dec_size_to -= header_size;
			test_realloc(pop, size_from, dec_size_to,
				type_from, type_to, zrealloc);
			/* realloc to sizes matching other alloc classes */
			for (int k = 0; k < MAX_ALLOC_CLASS; k++) {
				struct alloc_class *ck = alloc_class_by_id(
					alloc_classes, k);
				/*
				 * BUGFIX: the original tested 'c' (always
				 * non-NULL here), so a NULL 'ck' would have
				 * been dereferenced below.
				 */
				if (ck == NULL)
					continue;
				/* BUGFIX: use ck's header type, not c's */
				size_t header_sizek =
					header_type_to_size[ck->header_type];
				size_t prev_size = ck->unit_size - header_sizek;
				test_realloc(pop, size_from, prev_size,
					type_from, type_to, zrealloc);
			}
		}
	}
}
/*
 * main -- usage: obj_realloc file [check_integrity]
 *
 * Opens an existing pool and exercises realloc/zrealloc across all
 * allocation classes, with and without a type-number change.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_realloc");
	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(realloc) != 1);
	if (argc < 2)
		UT_FATAL("usage: %s file [check_integrity]", argv[0]);
	PMEMobjpool *pop = pmemobj_open(argv[1], POBJ_LAYOUT_NAME(realloc));
	if (!pop)
		UT_FATAL("!pmemobj_open");
	if (argc >= 3)
		check_integrity = atoi(argv[2]);
	alloc_classes = alloc_class_collection_new();
	/* BUGFIX: the original dereferenced this without checking for NULL */
	if (alloc_classes == NULL)
		UT_FATAL("!alloc_class_collection_new");
	/* test alloc and free */
	test_alloc(pop, 16);
	test_free(pop);
	/* test realloc without changing type number */
	test_realloc_sizes(pop, 0, 0, 0, 0);
	/* test realloc with changing type number */
	test_realloc_sizes(pop, 0, 1, 0, 0);
	/* test zrealloc without changing type number... */
	test_realloc_sizes(pop, 0, 0, 1, 8);
	test_realloc_sizes(pop, 0, 0, 1, 0);
	/* test zrealloc with changing type number... */
	test_realloc_sizes(pop, 0, 1, 1, 8);
	test_realloc_sizes(pop, 0, 1, 1, 0);
	alloc_class_collection_delete(alloc_classes);
	pmemobj_close(pop);
	DONE(NULL);
}
#ifdef _MSC_VER
/*
 * NOTE(review): extern "C" in a .c file implies this path is compiled as
 * C++ under MSVC -- confirm against the build files.
 */
extern "C" {
	/*
	 * Since libpmemobj is linked statically,
	 * we need to invoke its ctor/dtor.
	 */
	MSVC_CONSTR(libpmemobj_init)
	MSVC_DESTR(libpmemobj_fini)
}
#endif
| 7,547 | 26.249097 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/pmem_deep_persist/pmem_deep_persist.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_deep_persist.c -- unit test for pmem_deep_persist()
*
* usage: pmem_deep_persist file type deep_persist_size offset
*
* type is one of:
* p - call pmem_map_file()
* m - call mmap()
* o - call pmemobj_create()
*/
#include <string.h>
#include "unittest.h"
#include "file.h"
#include "os.h"
#include "file.h"
#include "set.h"
#include "obj.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "deep_persist"
/*
 * main -- usage: pmem_deep_persist file type deep_persist_size offset
 *
 * Maps the file with the method selected by 'type' ('p' pmem_map_file,
 * 'm' mmap, 'o' pmemobj_create) and deep-persists [offset, offset+size).
 * A deep_persist_size of -1 means "the whole mapping".
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_deep_persist");

	if (argc != 5)
		UT_FATAL("usage: %s file type deep_persist_size offset",
			argv[0]);

	char *addr;
	size_t mapped_len;
	size_t persist_size;
	size_t offset;
	const char *path;
	int is_pmem;
	int ret = -1;

	path = argv[1];
	/* atoi(-1) wraps to SIZE_MAX; compared against (size_t)-1 below */
	persist_size = (size_t)atoi(argv[3]);
	offset = (size_t)atoi(argv[4]);

	switch (*argv[2]) {
	case 'p':
		if ((addr = pmem_map_file(path, 0, 0,
				0, &mapped_len, &is_pmem)) == NULL) {
			UT_FATAL("!pmem_map_file");
		}

		if (persist_size == -1)
			persist_size = mapped_len;
		ret = pmem_deep_persist(addr + offset, persist_size);

		break;
	case 'm':
	{
		int fd = OPEN(path, O_RDWR);
		ssize_t size = util_file_get_size(path);
		if (size < 0)
			UT_FATAL("!util_file_get_size: %s", path);
		size_t file_size = (size_t)size;
		/* XXX: add MAP_SYNC flag */
		addr = MMAP(NULL, file_size, PROT_READ|PROT_WRITE,
				MAP_SHARED, fd, 0);
		UT_ASSERTne(addr, MAP_FAILED);
		CLOSE(fd);

		if (persist_size == -1)
			persist_size = file_size;
		ret = pmem_deep_persist(addr + offset, persist_size);

		break;
	}
	case 'o':
	{
		PMEMobjpool *pop = NULL;

		if ((pop = pmemobj_create(path, LAYOUT_NAME,
				0, S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemobj_create: %s", path);

		void *start = (void *)((uintptr_t)pop + offset);
		int flush = 1;
		VALGRIND_DO_MAKE_MEM_DEFINED(start, persist_size);
		ret = util_replica_deep_common(start, persist_size,
			pop->set, 0, flush);
		pmemobj_close(pop);
		break;
	}
	default:
		/* BUGFIX: an unknown type used to silently print "-1" */
		UT_FATAL("unknown type: %c", *argv[2]);
	}

	UT_OUT("deep_persist %d", ret);

	DONE(NULL);
}
/*
 * open -- open mock because of Dev DAX without deep_flush
 * sysfs file, eg. DAX on emulated pmem
 *
 * If the deep_flush sysfs file does not exist (not readable), return a fake
 * descriptor (999) that the write() mock below recognizes; otherwise defer
 * to the real os_open.
 */
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	if (strstr(path, "/sys/bus/nd/devices/region") &&
		strstr(path, "/deep_flush")) {
		UT_OUT("mocked open, path %s", path);
		if (access(path, R_OK))
			return 999;
	}
	/*
	 * NOTE(review): the mode va_arg is read even when the caller passed
	 * no third argument (no O_CREAT) -- relies on the callee tolerating
	 * a garbage mode; confirm this is intended.
	 */
	va_list ap;
	va_start(ap, flags);
	int mode = va_arg(ap, int);
	va_end(ap);
	return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END
/*
 * write -- write mock
 *
 * Pretends that writing to the fake deep_flush descriptor (999, produced by
 * the os_open mock above) succeeded by reporting one byte written; all other
 * descriptors go to the real write.
 */
FUNC_MOCK(write, int, int fd, const void *buffer, size_t count)
FUNC_MOCK_RUN_DEFAULT {
	if (fd == 999) {
		UT_OUT("mocked write, path %d", fd);
		return 1;
	}
	return _FUNC_REAL(write)(fd, buffer, count);
}
FUNC_MOCK_END
| 4,376 | 26.018519 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_tx_free/obj_tx_free.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_tx_free.c -- unit test for pmemobj_tx_free
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_free"
#define OBJ_SIZE (200 * 1024)
enum type_number {
TYPE_FREE_NO_TX,
TYPE_FREE_WRONG_UUID,
TYPE_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_FREE_COMMIT_NESTED1,
TYPE_FREE_COMMIT_NESTED2,
TYPE_FREE_ABORT_NESTED1,
TYPE_FREE_ABORT_NESTED2,
TYPE_FREE_ABORT_AFTER_NESTED1,
TYPE_FREE_ABORT_AFTER_NESTED2,
TYPE_FREE_OOM,
TYPE_FREE_ALLOC,
TYPE_FREE_AFTER_ABORT,
TYPE_FREE_MANY_TIMES,
};
TOID_DECLARE(struct object, 0);
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
/*
 * do_tx_alloc -- transactionally allocate one struct object with the
 * requested type number; returns OID_NULL when the transaction aborts
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num)
{
	PMEMoid oid = OID_NULL;

	TX_BEGIN(pop) {
		oid = pmemobj_tx_alloc(sizeof(struct object), type_num);
	} TX_END

	return oid;
}
/*
 * do_tx_free_wrong_uuid -- try to free object with invalid uuid
 */
static void
do_tx_free_wrong_uuid(PMEMobjpool *pop)
{
	/* volatile: must survive the setjmp/longjmp inside the TX_ macros */
	volatile int ret = 0;
	PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID);
	/* corrupt the pool uuid so the oid no longer belongs to this pool */
	oid.pool_uuid_lo = ~oid.pool_uuid_lo;
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid);
		UT_ASSERTeq(ret, 0);
	} TX_ONABORT {
		ret = -1;
	} TX_END
	/* the transaction must have aborted... */
	UT_ASSERTeq(ret, -1);
	/* ...and the object must still be reachable */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID));
	UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
 * do_tx_free_null_oid -- call pmemobj_tx_free with OID_NULL
 *
 * Freeing OID_NULL is a no-op and must not abort the transaction.
 */
static void
do_tx_free_null_oid(PMEMobjpool *pop)
{
	/* volatile: must survive the setjmp/longjmp inside the TX_ macros */
	volatile int ret = 0;
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(OID_NULL);
	} TX_ONABORT {
		ret = -1;
	} TX_END
	UT_ASSERTeq(ret, 0);
}
/*
 * do_tx_free_commit -- do the basic transactional deallocation of object
 */
static void
do_tx_free_commit(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_COMMIT);
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid);
		UT_ASSERTeq(ret, 0);
	} TX_ONABORT {
		UT_ASSERT(0); /* freeing a valid object must not abort */
	} TX_END
	/* after commit the object must be gone */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort -- abort deallocation of object
 *
 * An aborted transaction must roll the free back, leaving the object alive.
 */
static void
do_tx_free_abort(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_ABORT);
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid);
		UT_ASSERTeq(ret, 0);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0); /* the explicit abort must prevent commit */
	} TX_END
	/* the free was rolled back */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
	UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
 * do_tx_free_commit_nested -- do allocation in nested transaction
 *
 * Frees one object in the outer and one in a nested transaction; both
 * frees must take effect when the outer transaction commits.
 */
static void
do_tx_free_commit_nested(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED1);
	PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED2);
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid1);
		UT_ASSERTeq(ret, 0);
		TX_BEGIN(pop) {
			ret = pmemobj_tx_free(oid2);
			UT_ASSERTeq(ret, 0);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* both objects must be gone after the outer commit */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED1));
	UT_ASSERT(TOID_IS_NULL(obj));
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED2));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort_nested -- abort allocation in nested transaction
 *
 * Aborting the nested transaction aborts the whole transaction tree, so
 * neither free may take effect.
 */
static void
do_tx_free_abort_nested(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED1);
	PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED2);
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid1);
		UT_ASSERTeq(ret, 0);
		TX_BEGIN(pop) {
			ret = pmemobj_tx_free(oid2);
			UT_ASSERTeq(ret, 0);
			pmemobj_tx_abort(-1);
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		UT_ASSERT(0); /* the inner abort propagates to the outer tx */
	} TX_END
	/* both frees were rolled back */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED1));
	UT_ASSERT(!TOID_IS_NULL(obj));
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED2));
	UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort_after_nested -- abort transaction after nested
 * pmemobj_tx_free
 *
 * The nested transaction commits, but the outer abort must still roll
 * back both frees.
 */
static void
do_tx_free_abort_after_nested(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED1);
	PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED2);
	TX_BEGIN(pop) {
		ret = pmemobj_tx_free(oid1);
		UT_ASSERTeq(ret, 0);
		TX_BEGIN(pop) {
			ret = pmemobj_tx_free(oid2);
			UT_ASSERTeq(ret, 0);
		} TX_END
		/* abort after the inner transaction has already committed */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* neither free may have taken effect */
	TOID(struct object) obj;
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_FREE_ABORT_AFTER_NESTED1));
	UT_ASSERT(!TOID_IS_NULL(obj));
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_FREE_ABORT_AFTER_NESTED2));
	UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
 * do_tx_free_alloc_abort -- free object allocated in the same transaction
 * and abort transaction
 *
 * An object both allocated and freed inside an aborted transaction must
 * not exist afterwards.
 */
static void
do_tx_free_alloc_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_alloc(
				sizeof(struct object), TYPE_FREE_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		ret = pmemobj_tx_free(obj.oid);
		UT_ASSERTeq(ret, 0);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_alloc_commit -- free object allocated in the same transaction
 * and commit transaction
 *
 * (Header comment fixed: it was a copy-paste of do_tx_free_alloc_abort.)
 */
static void
do_tx_free_alloc_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_alloc(
				sizeof(struct object), TYPE_FREE_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		ret = pmemobj_tx_free(obj.oid);
		UT_ASSERTeq(ret, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* alloc+free in one committed transaction nets to no object */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort_free - allocate a new object, perform a transactional free
 * in an aborted transaction and then to actually free the object.
 *
 * This can expose any issues with not properly handled free undo log.
 */
static void
do_tx_free_abort_free(PMEMobjpool *pop)
{
	PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_AFTER_ABORT);
	/* first free is rolled back by the abort... */
	TX_BEGIN(pop) {
		pmemobj_tx_free(oid);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* ...so the second free must succeed on a still-valid object */
	TX_BEGIN(pop) {
		pmemobj_tx_free(oid);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_free_many_times -- free enough objects to trigger vector array alloc
 *
 * (1 << 3) + 1 frees in a single transaction exceeds the embedded capacity
 * of the free undo log, forcing it to grow.
 */
static void
do_tx_free_many_times(PMEMobjpool *pop)
{
#define TX_FREE_COUNT ((1 << 3) + 1)
	PMEMoid oids[TX_FREE_COUNT];
	for (int i = 0; i < TX_FREE_COUNT; ++i)
		oids[i] = do_tx_alloc(pop, TYPE_FREE_MANY_TIMES);
	TX_BEGIN(pop) {
		for (int i = 0; i < TX_FREE_COUNT; ++i)
			pmemobj_tx_free(oids[i]);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
#undef TX_FREE_COUNT
}
/*
 * main -- usage: obj_tx_free file
 *
 * Creates a pool and runs every transactional-free scenario; stats are
 * flushed after each one so Valgrind/pmemcheck output maps to scenarios.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_free");
	util_init();
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_free_wrong_uuid(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_null_oid(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_commit_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_abort_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_abort_after_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_alloc_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_alloc_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_abort_free(pop);
	VALGRIND_WRITE_STATS;
	do_tx_free_many_times(pop);
	VALGRIND_WRITE_STATS;
	pmemobj_close(pop);
	DONE(NULL);
}
| 9,431 | 21.782609 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/vmmalloc_malloc/vmmalloc_malloc.c
|
/*
* Copyright 2014-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmmalloc_malloc.c -- unit test for libvmmalloc malloc
*
* usage: vmmalloc_malloc
*/
#include "unittest.h"
#define MIN_SIZE (sizeof(int))
#define SIZE 20
#define MAX_SIZE (MIN_SIZE << SIZE)
int
main(int argc, char *argv[])
{
	const int test_value = 12345;
	size_t size;
	int *ptr[SIZE];
	int i = 0;
	size_t sum_alloc = 0;
	START(argc, argv, "vmmalloc_malloc");
	/*
	 * Test with multiple allocation sizes, halving from MAX_SIZE (4MB)
	 * down to (but excluding) MIN_SIZE (sizeof(int)); failed allocations
	 * are tolerated -- each successful one is written to and verified.
	 */
	for (size = MAX_SIZE; size > MIN_SIZE; size /= 2) {
		ptr[i] = malloc(size);
		if (ptr[i] == NULL)
			continue;
		*ptr[i] = test_value;
		UT_ASSERTeq(*ptr[i], test_value);
		sum_alloc += size;
		i++;
	}
	/*
	 * at least one allocation for each size must succeed
	 *
	 * NOTE(review): this only asserts the loop ran to completion (size
	 * halves regardless of malloc outcome) -- it does not actually prove
	 * any allocation succeeded; the sum_alloc check below is the real
	 * success criterion.
	 */
	UT_ASSERTeq(size, MIN_SIZE);
	/* allocate more than half of pool size */
	UT_ASSERT(sum_alloc * 2 > VMEM_MIN_POOL);
	while (i > 0)
		free(ptr[--i]);
	DONE(NULL);
}
| 2,480 | 29.62963 | 74 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.