Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/out_err_win/out_err_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* out_err_win.c -- unit test for error messages
*/
#define LOG_PREFIX "trace"
#define LOG_LEVEL_VAR "TRACE_LOG_LEVEL"
#define LOG_FILE_VAR "TRACE_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#include <sys/types.h>
#include <stdarg.h>
#include "unittest.h"
#include "pmemcommon.h"
/*
 * wmain -- (Windows) drives the error-message unit test.
 *
 * Emits a fixed sequence of ERR()/out_err() calls and prints the
 * resulting wide-char error message after each one; the test harness
 * compares the output against an expected .out file, so the order and
 * text of every call matter.
 */
int
wmain(int argc, wchar_t *argv[])
{
char buff[UT_MAX_ERR_MSG];
STARTW(argc, argv, "out_err_win");
/* Execute test */
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
/* plain message, no errno decoration */
errno = 0;
ERR("ERR #%d", 1);
UT_OUT("%S", out_get_errormsgW());
/* leading '!' requests errno text; errno == 0 here */
errno = 0;
ERR("!ERR #%d", 2);
UT_OUT("%S", out_get_errormsgW());
/* '!' with a real errno value appended */
errno = EINVAL;
ERR("!ERR #%d", 3);
UT_OUT("%S", out_get_errormsgW());
/* call out_err() directly with explicit file/line/function */
errno = EBADF;
ut_strerror(errno, buff, UT_MAX_ERR_MSG);
out_err(__FILE__, 100, __func__,
"ERR1: %s:%d", buff, 1234);
UT_OUT("%S", out_get_errormsgW());
/* out_err() with no source location info */
errno = EBADF;
ut_strerror(errno, buff, UT_MAX_ERR_MSG);
out_err(NULL, 0, NULL,
"ERR2: %s:%d", buff, 1234);
UT_OUT("%S", out_get_errormsgW());
/* Cleanup */
common_fini();
DONEW(NULL);
}
| 1,174 | 18.915254 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_include/pmem_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* pmem_include.c -- include test for libpmem
*
* this is only a compilation test - do not run this program
*/
#include <libpmem.h>
/*
 * main -- intentionally empty; this translation unit only verifies
 * that <libpmem.h> compiles cleanly on its own (see file header:
 * compilation test, never executed).
 */
int
main(int argc, char *argv[])
{
return 0;
}
| 275 | 15.235294 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ulog_size/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
import testframework as t
@t.require_build(['debug', 'release'])
class BASE(t.Test):
    """Base case for obj_ulog_size: two 16 MiB holey test files.

    NOTE(review): the extracted copy of this file had all indentation
    stripped, which is invalid Python; structure restored here.
    """
    test_type = t.Medium

    def run(self, ctx):
        # Create both pool files expected by the test binary, then run it.
        filepath = ctx.create_holey_file(16 * t.MiB, 'testfile')
        filepath1 = ctx.create_holey_file(16 * t.MiB, 'testfile1')
        ctx.exec('obj_ulog_size', filepath, filepath1)
@t.require_valgrind_disabled('memcheck', 'pmemcheck')
class TEST0(BASE):
    # Plain run without valgrind instrumentation (indentation restored).
    pass
@t.require_valgrind_enabled('memcheck')
class TEST1(BASE):
    # Same scenario under valgrind memcheck (indentation restored).
    pass
@t.require_valgrind_enabled('pmemcheck')
class TEST2(BASE):
    # Same scenario under valgrind pmemcheck (indentation restored).
    pass
| 644 | 19.15625 | 66 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ulog_size/obj_ulog_size.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_ulog_size.c -- unit tests for pmemobj_action API and
* redo, undo logs
*/
#include <sys/param.h>
#include <string.h>
#include <stddef.h>
#include "unittest.h"
#include "util.h"
/*
* tx.h -- needed for TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT,
* TX_SNAPSHOT_LOG_BUFFER_OVERHEAD, TX_SNAPSHOT_LOG_ENTRY_OVERHEAD,
* TX_INTENT_LOG_BUFFER_ALIGNMENT, TX_INTENT_LOG_BUFFER_OVERHEAD,
* TX_INTENT_LOG_ENTRY_OVERHEAD
*/
#include "tx.h"
/* needed for LANE_REDO_EXTERNAL_SIZE and LANE_UNDO_SIZE */
#include "lane.h"
#define LAYOUT_NAME "obj_ulog_size"
#define MIN_ALLOC 64
#define MAX_ALLOC (1024 * 1024)
#define HALF_OF_DEFAULT_UNDO_SIZE (LANE_UNDO_SIZE / 2)
#define ARRAY_SIZE_COMMON 3
/* the ranges of indices are describing the use of some allocations */
#define LOG_BUFFER 0
#define LOG_BUFFER_NUM 6
#define RANGE (LOG_BUFFER + LOG_BUFFER_NUM)
#define RANGE_NUM 6
#define MIN_NOIDS (RANGE + RANGE_NUM)
/*
* REDO_OVERFLOW -- size for trigger out of memory
* during redo log extension
*/
#define REDO_OVERFLOW ((size_t)((LANE_REDO_EXTERNAL_SIZE\
/ TX_INTENT_LOG_ENTRY_OVERHEAD) + 1))
#define APPEND_SIZE SIZEOF_ALIGNED_ULOG(CACHELINE_SIZE)
/*
 * free_pool -- releases every object recorded in the oids array and
 * then frees the array itself (ownership of oids transfers here).
 */
static void
free_pool(PMEMoid *oids, size_t noids)
{
	size_t idx = 0;
	while (idx < noids) {
		pmemobj_free(&oids[idx]);
		/* pmemobj_free is expected to null out the oid */
		UT_ASSERT(OID_IS_NULL(oids[idx]));
		++idx;
	}
	FREE(oids);
}
/*
 * fill_pool -- exhausts the pool with allocations of decreasing size.
 *
 * Returns a heap array of the PMEMoids that were successfully allocated;
 * the count of valid entries is written to *noids. The caller owns the
 * returned array (see free_pool).
 */
static PMEMoid *
fill_pool(PMEMobjpool *pop, size_t *noids)
{
	size_t capacity = 2048; /* let's start with something big enough */
	PMEMoid *oids = (PMEMoid *)MALLOC(capacity * sizeof(PMEMoid));
	*noids = 0;
	/* halve the allocation size each time the pool rejects it */
	for (size_t size = MAX_ALLOC; size >= MIN_ALLOC; size /= 2) {
		int ret = 0;
		while (ret == 0) {
			ret = pmemobj_alloc(pop, &oids[*noids], size,
					0, NULL, NULL);
			if (ret == 0)
				(*noids)++;
			/* grow the array before the next slot is needed */
			if (*noids == capacity) {
				capacity *= 2;
				oids = (PMEMoid *)REALLOC(oids,
						capacity * sizeof(PMEMoid));
			}
		}
	}
	return oids;
}
/*
 * do_tx_max_alloc_tx_publish_abort -- fills the pool and then tries
 * to overfill redo log - transaction abort expected.
 *
 * Exercises four publish variants: plain publish (aborts), xpublish
 * with NO_ABORT, and both publish flavors under POBJ_TX_FAILURE_RETURN
 * (all three fail with ENOMEM but do not abort).
 */
static void
do_tx_max_alloc_tx_publish_abort(PMEMobjpool *pop)
{
	UT_OUT("do_tx_max_alloc_tx_publish_abort");
	PMEMoid *allocated = NULL;
	PMEMoid reservations[REDO_OVERFLOW];
	size_t nallocated = 0;
	struct pobj_action act[REDO_OVERFLOW];
	/*
	 * size_t index: REDO_OVERFLOW is a size_t, so a signed int index
	 * triggered a signed/unsigned comparison (-Wsign-compare).
	 */
	for (size_t i = 0; i < REDO_OVERFLOW; i++) {
		reservations[i] = pmemobj_reserve(pop, &act[i], MIN_ALLOC, 0);
		UT_ASSERT(!OID_IS_NULL(reservations[i]));
	}
	allocated = fill_pool(pop, &nallocated);
	/*
	 * number of allocated buffers is not important
	 * they are not used anyway
	 */
	/* it should abort - cannot extend redo log */
	TX_BEGIN(pop) {
		pmemobj_tx_publish(act, REDO_OVERFLOW);
	} TX_ONABORT {
		UT_OUT("!Cannot extend redo log - the pool is full");
	} TX_ONCOMMIT {
		UT_FATAL("Can extend redo log despite the pool is full");
	} TX_END
	/* it should fail without abort transaction */
	TX_BEGIN(pop) {
		pmemobj_tx_xpublish(act, REDO_OVERFLOW, POBJ_XPUBLISH_NO_ABORT);
	} TX_ONABORT {
		/* UT_ASSERT for consistency with the other tests in this file */
		UT_ASSERT(0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, ENOMEM);
		UT_OUT("!Cannot extend redo log - the pool is full");
	} TX_END
	/* it should fail without abort transaction */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_publish(act, REDO_OVERFLOW);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, ENOMEM);
		UT_OUT("!Cannot extend redo log - the pool is full");
	} TX_END
	/* it should fail without abort transaction */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_xpublish(act, REDO_OVERFLOW, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, ENOMEM);
		UT_OUT("!Cannot extend redo log - the pool is full");
	} TX_END
	free_pool(allocated, nallocated);
	/* none of the publishes succeeded, so cancel the reservations */
	pmemobj_cancel(pop, act, REDO_OVERFLOW);
}
/*
 * do_tx_max_alloc_no_user_alloc_snap -- fills the pool and tries to do
 * a snapshot which is bigger than the ulog size; with no user-supplied
 * log buffer and no free space, the transaction must abort.
 */
static void
do_tx_max_alloc_no_user_alloc_snap(PMEMobjpool *pop)
{
UT_OUT("do_tx_max_alloc_no_user_alloc_snap");
size_t nallocated = 0;
PMEMoid *allocated = fill_pool(pop, &nallocated);
UT_ASSERT(nallocated >= MIN_NOIDS);
/* the snapshot range must exceed the default undo log capacity */
size_t range_size = pmemobj_alloc_usable_size(allocated[LOG_BUFFER]);
UT_ASSERT(range_size > LANE_UNDO_SIZE);
void *range_addr = pmemobj_direct(allocated[LOG_BUFFER]);
pmemobj_memset(pop, range_addr, 0, range_size, 0);
TX_BEGIN(pop) {
/* it should abort - cannot extend undo log */
pmemobj_tx_add_range(allocated[LOG_BUFFER], 0, range_size);
} TX_ONABORT {
UT_OUT("!Cannot extend undo log - the pool is full");
} TX_ONCOMMIT {
UT_FATAL("Can extend undo log despite the pool is full");
} TX_END
free_pool(allocated, nallocated);
}
/*
 * do_tx_max_alloc_user_alloc_snap -- fills the pool, appends a
 * user-allocated buffer as the undo log and snapshots a range bigger
 * than the default ulog size; with the extra buffer this must succeed.
 */
static void
do_tx_max_alloc_user_alloc_snap(PMEMobjpool *pop)
{
UT_OUT("do_tx_max_alloc_user_alloc_snap");
size_t nallocated = 0;
PMEMoid *allocated = fill_pool(pop, &nallocated);
UT_ASSERT(nallocated >= MIN_NOIDS);
/* first allocation serves as the user-supplied undo log buffer */
size_t buff_size = pmemobj_alloc_usable_size(allocated[LOG_BUFFER]);
void *buff_addr = pmemobj_direct(allocated[LOG_BUFFER]);
/* the snapshotted range must not fit the default undo log */
size_t range_size = pmemobj_alloc_usable_size(allocated[RANGE]);
UT_ASSERT(range_size > LANE_UNDO_SIZE);
void *range_addr = pmemobj_direct(allocated[RANGE]);
pmemobj_memset(pop, range_addr, 0, range_size, 0);
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_SNAPSHOT, buff_addr, buff_size);
pmemobj_tx_add_range(allocated[RANGE], 0, range_size);
} TX_ONABORT {
UT_FATAL("!Cannot use the user appended undo log buffer");
} TX_ONCOMMIT {
UT_OUT("Can use the user appended undo log buffer");
} TX_END
free_pool(allocated, nallocated);
}
/*
 * do_tx_max_alloc_user_alloc_nested -- example of appending a buffer
 * allocated by the user inside a nested transaction; the appended
 * undo log must be usable by the inner transaction.
 */
static void
do_tx_max_alloc_user_alloc_nested(PMEMobjpool *pop)
{
UT_OUT("do_tx_max_alloc_user_alloc_nested");
size_t nallocated = 0;
PMEMoid *allocated = fill_pool(pop, &nallocated);
UT_ASSERT(nallocated >= MIN_NOIDS);
size_t buff_size = pmemobj_alloc_usable_size(allocated[LOG_BUFFER]);
void *buff_addr = pmemobj_direct(allocated[LOG_BUFFER]);
size_t range_size = pmemobj_alloc_usable_size(allocated[RANGE]);
void *range_addr = pmemobj_direct(allocated[RANGE]);
pmemobj_memset(pop, range_addr, 0, range_size, 0);
TX_BEGIN(pop) {
/* inner transaction appends the buffer and does the snapshot */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_SNAPSHOT, buff_addr, buff_size);
pmemobj_tx_add_range(allocated[RANGE], 0, range_size);
} TX_ONABORT {
UT_FATAL(
"Cannot use the undo log appended by the user in a nested transaction");
} TX_ONCOMMIT {
UT_OUT(
"Can use the undo log appended by the user in a nested transaction");
} TX_END
} TX_END
free_pool(allocated, nallocated);
}
/*
 * do_tx_max_alloc_user_alloc_snap_multi -- appending of many buffers
 * in one transaction.
 *
 * First transaction: all three user buffers appended, all three ranges
 * snapshotted -- must commit. Second transaction: only two buffers
 * appended for the same three ranges -- must abort, proving that every
 * appended buffer was actually needed.
 */
static void
do_tx_max_alloc_user_alloc_snap_multi(PMEMobjpool *pop)
{
	UT_OUT("do_tx_max_alloc_user_alloc_snap_multi");
	size_t nallocated = 0;
	PMEMoid *allocated = fill_pool(pop, &nallocated);
	UT_ASSERT(nallocated >= MIN_NOIDS);
	size_t buff_sizes[ARRAY_SIZE_COMMON];
	void *buff_addrs[ARRAY_SIZE_COMMON];
	size_t range_sizes[ARRAY_SIZE_COMMON];
	void *range_addrs[ARRAY_SIZE_COMMON];
	/*
	 * The maximum value of offset used in the for-loop below is
	 * i_max == (ARRAY_SIZE_COMMON - 1) * 2.
	 * It will cause using LOG_BUFFER + i_max and RANGE + i_max indices so
	 * i_max has to be less than LOG_BUFFER_NUM and
	 * i_max has to be less than RANGE_NUM.
	 */
	UT_COMPILE_ERROR_ON((ARRAY_SIZE_COMMON - 1) * 2 >= LOG_BUFFER_NUM);
	UT_COMPILE_ERROR_ON((ARRAY_SIZE_COMMON - 1) * 2 >= RANGE_NUM);
	for (unsigned long i = 0; i < ARRAY_SIZE_COMMON; i++) {
		/* we multiply the value to not use continuous memory blocks */
		buff_sizes[i] = pmemobj_alloc_usable_size(
				allocated[LOG_BUFFER + (i * 2)]);
		buff_addrs[i] = pmemobj_direct(
				allocated[LOG_BUFFER + (i * 2)]);
		range_sizes[i] = pmemobj_alloc_usable_size(
				allocated[RANGE + (i * 2)]);
		range_addrs[i] = pmemobj_direct(allocated[RANGE + (i * 2)]);
		pmemobj_memset(pop, range_addrs[i], 0, range_sizes[i], 0);
	}
	errno = 0;
	TX_BEGIN(pop) {
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_SNAPSHOT, buff_addrs[0], buff_sizes[0]);
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_SNAPSHOT, buff_addrs[1], buff_sizes[1]);
		/*
		 * bugfix: the third buffer was appended with buff_sizes[1];
		 * each buffer must be registered with its own usable size
		 */
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_SNAPSHOT, buff_addrs[2], buff_sizes[2]);
		for (unsigned long i = 0; i < ARRAY_SIZE_COMMON; i++) {
			pmemobj_tx_add_range(allocated[RANGE + (i * 2)], 0,
					range_sizes[i]);
		}
	} TX_ONABORT {
		UT_FATAL("!Cannot use multiple user appended undo log buffers");
	} TX_ONCOMMIT {
		UT_OUT("Can use multiple user appended undo log buffers");
	} TX_END
	/* check if all user allocated buffers are used */
	errno = 0;
	TX_BEGIN(pop) {
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_SNAPSHOT, buff_addrs[0], buff_sizes[0]);
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_SNAPSHOT, buff_addrs[1], buff_sizes[1]);
		/*
		 * do not append last buffer to make sure it is needed for this
		 * transaction to succeed
		 */
		pmemobj_tx_add_range(allocated[RANGE + 0], 0, range_sizes[0]);
		pmemobj_tx_add_range(allocated[RANGE + 2], 0, range_sizes[1]);
		pmemobj_tx_add_range(allocated[RANGE + 4], 0, range_sizes[2]);
	} TX_ONABORT {
		UT_OUT("!All user appended undo log buffers are used");
	} TX_ONCOMMIT {
		UT_FATAL(
		"Not all user appended undo log buffers are required - too small ranges");
	} TX_END
	free_pool(allocated, nallocated);
}
/*
 * do_tx_auto_alloc_disabled -- blocking of automatic expansion
 * of ulog. When auto expansion of ulog is off, snapshot with size
 * of default undo log is going to fail, because of buffer overhead
 * (size of internal undo log and header size).
 */
static void
do_tx_auto_alloc_disabled(PMEMobjpool *pop)
{
UT_OUT("do_tx_auto_alloc_disabled");
PMEMoid oid0, oid1;
/* two halves: together they overflow the default undo log */
int ret = pmemobj_zalloc(pop, &oid0, HALF_OF_DEFAULT_UNDO_SIZE, 0);
UT_ASSERTeq(ret, 0);
ret = pmemobj_zalloc(pop, &oid1, HALF_OF_DEFAULT_UNDO_SIZE, 0);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
/* turn off automatic snapshot-log allocation for this tx */
pmemobj_tx_log_auto_alloc(TX_LOG_TYPE_SNAPSHOT, 0);
pmemobj_tx_add_range(oid0, 0, HALF_OF_DEFAULT_UNDO_SIZE);
/* it should abort - cannot extend ulog (first entry is full) */
pmemobj_tx_add_range(oid1, 0, HALF_OF_DEFAULT_UNDO_SIZE);
} TX_ONABORT {
UT_OUT("!Disabled auto alloc prevented the undo log grow");
} TX_ONCOMMIT {
UT_FATAL(
"Disabled auto alloc did not prevent the undo log grow");
} TX_END
pmemobj_free(&oid0);
pmemobj_free(&oid1);
}
/*
 * do_tx_max_alloc_wrong_pop_addr -- allocates two pools and tries to
 * do a transaction on the first pool with a log buffer taken from the
 * second pool. All four variants must reject it with EINVAL; only the
 * plain append aborts, the NO_ABORT/FAILURE_RETURN variants return.
 */
static void
do_tx_max_alloc_wrong_pop_addr(PMEMobjpool *pop, PMEMobjpool *pop2)
{
UT_OUT("do_tx_max_alloc_wrong_pop_addr");
size_t nallocated = 0;
PMEMoid *allocated = fill_pool(pop, &nallocated);
/*
* number of allocated buffers is not important
* they are not used anyway
*/
PMEMoid oid2;
int ret = pmemobj_alloc(pop2, &oid2, MAX_ALLOC, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
/* pools are allocated now, let's try to get address from wrong pool */
size_t buff2_size = pmemobj_alloc_usable_size(oid2);
void *buff2_addr = pmemobj_direct(oid2);
/* abort expected - cannot allocate from different pool */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_SNAPSHOT, buff2_addr, buff2_size);
} TX_ONABORT {
UT_OUT(
"!Cannot append an undo log buffer from a different memory pool");
} TX_ONCOMMIT {
UT_FATAL(
"Can append an undo log buffer from a different memory pool");
} TX_END
/* it should fail without abort transaction */
TX_BEGIN(pop) {
pmemobj_tx_xlog_append_buffer(TX_LOG_TYPE_SNAPSHOT, buff2_addr,
buff2_size, POBJ_XLOG_APPEND_BUFFER_NO_ABORT);
} TX_ONABORT {
UT_ASSERT(0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_OUT(
"!Cannot append an undo log buffer from a different memory pool");
} TX_END
/* it should fail without abort transaction */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_SNAPSHOT, buff2_addr,
buff2_size);
} TX_ONABORT {
UT_ASSERT(0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_OUT(
"!Cannot append an undo log buffer from a different memory pool");
} TX_END
/* it should fail without abort transaction */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_xlog_append_buffer(TX_LOG_TYPE_SNAPSHOT, buff2_addr,
buff2_size, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_OUT(
"!Cannot append an undo log buffer from a different memory pool");
} TX_END
free_pool(allocated, nallocated);
pmemobj_free(&oid2);
}
/*
 * do_tx_buffer_currently_used -- the same buffer cannot be used
 * twice in the same time (only detected when the
 * tx.debug.verify_user_buffers ctl knob is enabled).
 */
static void
do_tx_buffer_currently_used(PMEMobjpool *pop)
{
UT_OUT("do_tx_buffer_currently_used");
PMEMoid oid_buff;
int verify_user_buffers = 1;
/* by default verify_user_buffers should be 0 */
int ret = pmemobj_ctl_get(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(verify_user_buffers, 0);
int err = pmemobj_alloc(pop, &oid_buff, MAX_ALLOC, 0, NULL, NULL);
UT_ASSERTeq(err, 0);
/* this buffer we will try to use twice */
size_t buff_size = pmemobj_alloc_usable_size(oid_buff);
void *buff_addr = pmemobj_direct(oid_buff);
/* changes verify_user_buffers value */
verify_user_buffers = 1;
ret = pmemobj_ctl_set(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
/* 99 is a sentinel to prove ctl_get really overwrites the value */
verify_user_buffers = 99;
/* check if verify_user_buffers has changed */
ret = pmemobj_ctl_get(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(verify_user_buffers, 1);
/* if verify_user_buffers is set we should abort tx */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_SNAPSHOT, buff_addr, buff_size);
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_SNAPSHOT, buff_addr, buff_size);
} TX_ONABORT {
UT_OUT("!User cannot append the same undo log buffer twice");
} TX_ONCOMMIT {
UT_FATAL("User can append the same undo log buffer twice");
} TX_END
pmemobj_free(&oid_buff);
/* restore the default and verify */
verify_user_buffers = 0;
ret = pmemobj_ctl_set(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
verify_user_buffers = 99;
ret = pmemobj_ctl_get(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(verify_user_buffers, 0);
}
/*
 * do_tx_max_alloc_tx_publish -- fills the pool and then tries
 * to overfill redo log with appended buffer; with the user-supplied
 * intent log buffer the publish must succeed.
 */
static void
do_tx_max_alloc_tx_publish(PMEMobjpool *pop)
{
	UT_OUT("do_tx_max_alloc_tx_publish");
	PMEMoid *allocated = NULL;
	PMEMoid reservations[REDO_OVERFLOW];
	size_t nallocated = 0;
	struct pobj_action act[REDO_OVERFLOW];
	/*
	 * size_t indices below: REDO_OVERFLOW is a size_t, so the original
	 * int loop counters caused signed/unsigned comparisons.
	 */
	for (size_t i = 0; i < REDO_OVERFLOW; i++) {
		reservations[i] = pmemobj_reserve(pop, &act[i], MIN_ALLOC, 0);
		UT_ASSERT(!OID_IS_NULL(reservations[i]));
	}
	allocated = fill_pool(pop, &nallocated);
	UT_ASSERT(nallocated >= MIN_NOIDS);
	/* first allocation becomes the user intent log buffer */
	size_t buff_size = pmemobj_alloc_usable_size(allocated[LOG_BUFFER]);
	void *buff_addr = pmemobj_direct(allocated[LOG_BUFFER]);
	TX_BEGIN(pop) {
		pmemobj_tx_log_append_buffer(
			TX_LOG_TYPE_INTENT, buff_addr, buff_size);
		pmemobj_tx_publish(act, REDO_OVERFLOW);
	} TX_ONABORT {
		UT_FATAL("!Cannot extend redo log despite appended buffer");
	} TX_ONCOMMIT {
		UT_OUT("Can extend redo log with appended buffer");
	} TX_END
	free_pool(allocated, nallocated);
	/* the publish succeeded, so the reservations are real objects now */
	for (size_t i = 0; i < REDO_OVERFLOW; ++i) {
		pmemobj_free(&reservations[i]);
	}
}
/*
 * do_tx_user_buffer_atomic_alloc -- checks if finish of atomic
 * allocation inside transaction will not break state of the ulog
 * with appended user buffer
 */
static void
do_tx_user_buffer_atomic_alloc(PMEMobjpool *pop)
{
	UT_OUT("do_tx_user_buffer_atomic_alloc");
	PMEMoid user_buffer_oid;
	PMEMoid atomic_alloc_oid;
	PMEMoid reservations[REDO_OVERFLOW];
	struct pobj_action act[REDO_OVERFLOW];
	/*
	 * we have to fill out first ulog in the redo log
	 * to make sure that the user buffer will be needed
	 * to proceed
	 */
	for (size_t i = 0; i < REDO_OVERFLOW; i++) {
		reservations[i] = pmemobj_reserve(pop, &act[i], MIN_ALLOC, 0);
		UT_ASSERT(!OID_IS_NULL(reservations[i]));
	}
	/* allocs some space for intent user buffer */
	int ret = pmemobj_alloc(pop, &user_buffer_oid, MAX_ALLOC,
			0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	size_t buff_size = pmemobj_alloc_usable_size(user_buffer_oid);
	void *buff_addr = pmemobj_direct(user_buffer_oid);
	TX_BEGIN(pop) {
		/* disable automatic ulog reservation and add user buffer */
		pmemobj_tx_log_auto_alloc(TX_LOG_TYPE_INTENT, 0);
		pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
				buff_addr, buff_size);
		/* perform atomic allocation in the middle of transaction */
		pmemobj_alloc(pop, &atomic_alloc_oid, MAX_ALLOC,
				0, NULL, NULL);
		/* user buffer should be still valid, so we try to use it */
		pmemobj_tx_publish(act, REDO_OVERFLOW);
	} TX_ONCOMMIT {
		UT_OUT(
		"The transaction state is consistent after atomic allocation");
	} TX_ONABORT {
		/*
		 * bugfix: the abort message was a copy-paste of the commit
		 * message and wrongly claimed the state "is consistent"
		 */
		UT_FATAL(
		"!The transaction state is inconsistent after atomic allocation");
	} TX_END
	pmemobj_free(&user_buffer_oid);
}
/*
 * do_tx_buffer_overlapping -- checks if user buffer overlap detection works.
 * Five cases: adjacent buffers in either order (no overlap), the same
 * buffer twice, and partially overlapping buffers in either order
 * (overlap detected, transaction aborts).
 */
static void
do_tx_buffer_overlapping(PMEMobjpool *pop)
{
UT_OUT("do_tx_buffer_overlapping");
/* changes verify_user_buffers value */
int verify_user_buffers = 1;
int ret = pmemobj_ctl_set(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
PMEMoid oid = OID_NULL;
pmemobj_alloc(pop, &oid, MAX_ALLOC, 0, NULL, NULL);
UT_ASSERT(!OID_IS_NULL(oid));
/* align up so each APPEND_SIZE chunk starts on a cacheline */
char *ptr = (char *)pmemobj_direct(oid);
ptr = (char *)ALIGN_UP((size_t)ptr, CACHELINE_SIZE);
/* case 1: second buffer placed before the first - disjoint, commits */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr + APPEND_SIZE, APPEND_SIZE);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_ONCOMMIT {
UT_OUT("Overlap not detected");
} TX_END
/* case 2: same two buffers, ascending order - disjoint, commits */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr + APPEND_SIZE, APPEND_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_ONCOMMIT {
UT_OUT("Overlap not detected");
} TX_END
/* case 3: identical buffer appended twice - must abort */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
} TX_ONABORT {
UT_OUT("Overlap detected");
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/* case 4: second buffer starts 128B into the first - must abort */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr + 128, APPEND_SIZE);
} TX_ONABORT {
UT_OUT("Overlap detected");
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/* case 5: same partial overlap, reversed order - must abort */
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr + 128, APPEND_SIZE);
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT,
ptr, APPEND_SIZE);
} TX_ONABORT {
UT_OUT("Overlap detected");
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
pmemobj_free(&oid);
/* restore the ctl default */
verify_user_buffers = 0;
ret = pmemobj_ctl_set(pop, "tx.debug.verify_user_buffers",
&verify_user_buffers);
UT_ASSERTeq(ret, 0);
}
/*
 * do_log_intents_max_size_limits -- test the pmemobj_tx_log_intents_max_size
 * function argument processing: a sane value for 0 intents and SIZE_MAX
 * plus ERANGE for arguments that would overflow the size computation.
 */
static void
do_log_intents_max_size_limits(void)
{
UT_OUT("do_log_intent_max_size_limits");
size_t size = 0;
/* 1st case: zero intents still needs a non-zero, finite buffer */
size = pmemobj_tx_log_intents_max_size(0);
UT_ASSERT(size > 0);
UT_ASSERTne(size, SIZE_MAX);
/* 2nd case: nintents * entry overhead overflows -> SIZE_MAX/ERANGE */
size = pmemobj_tx_log_intents_max_size(
SIZE_MAX / TX_INTENT_LOG_ENTRY_OVERHEAD);
UT_ASSERTeq(size, SIZE_MAX);
UT_ASSERTeq(errno, ERANGE);
/* 3rd case: one past the largest representable intent count */
const size_t toobign =
(SIZE_MAX - TX_INTENT_LOG_BUFFER_OVERHEAD)
/ TX_INTENT_LOG_ENTRY_OVERHEAD + 1;
size = pmemobj_tx_log_intents_max_size(toobign);
UT_ASSERTeq(size, SIZE_MAX);
UT_ASSERTeq(errno, ERANGE);
}
/*
 * do_log_intents_max_size -- verify pmemobj_tx_log_intents_max_size reported
 * size is sufficient: with auto-alloc disabled, a buffer of exactly the
 * reported size must be enough to publish that many intents.
 */
static void
do_log_intents_max_size(PMEMobjpool *pop)
{
UT_OUT("do_log_intent_max_size");
const size_t nintents = 15; /* an arbitrarily picked number */
/* query a required log size */
size_t req_buff_size = pmemobj_tx_log_intents_max_size(nintents);
UT_ASSERTne(req_buff_size, SIZE_MAX);
/* alloc the intent buffer */
PMEMoid buff_oid = OID_NULL;
int ret = pmemobj_alloc(pop, &buff_oid, req_buff_size, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
void *buff_addr = pmemobj_direct(buff_oid);
size_t buff_size = pmemobj_alloc_usable_size(buff_oid);
UT_ASSERT(buff_size >= req_buff_size);
/* make an assumed number of reservations */
PMEMoid reservations[nintents];
struct pobj_action act[nintents];
for (size_t i = 0; i < nintents; i++) {
reservations[i] = pmemobj_reserve(pop, &act[i], MIN_ALLOC, 0);
UT_ASSERT(!OID_IS_NULL(reservations[i]));
}
/* auto alloc off: publishing must fit in the user buffer alone */
TX_BEGIN(pop) {
pmemobj_tx_log_auto_alloc(TX_LOG_TYPE_INTENT, 0);
pmemobj_tx_log_append_buffer(
TX_LOG_TYPE_INTENT, buff_addr, buff_size);
pmemobj_tx_publish(act, nintents);
} TX_ONABORT {
UT_FATAL("!Estimated intent log buffer size is too small");
} TX_ONCOMMIT {
UT_OUT("Estimated intent log buffer size is sufficient");
} TX_END
/* release all allocated resources */
for (size_t i = 0; i < nintents; ++i) {
pmemobj_free(&reservations[i]);
UT_ASSERT(OID_IS_NULL(reservations[i]));
}
pmemobj_free(&buff_oid);
UT_ASSERT(OID_IS_NULL(buff_oid));
}
/*
 * do_log_snapshots_max_size_limits -- test the
 * pmemobj_tx_log_snapshots_max_size function argument processing:
 * a list of huge snapshot sizes must make it report SIZE_MAX + ERANGE.
 */
static void
do_log_snapshots_max_size_limits(void)
{
	UT_OUT("do_log_snapshot_max_size_limits");
	const size_t nsizes = 1024; /* an arbitrarily picked number */
	/* build a geometric sequence of sizes, capped near SIZE_MAX */
	size_t *sizes = (size_t *)MALLOC(sizeof(size_t) * nsizes);
	size_t cur = MAX_ALLOC;
	for (size_t i = 0; i < nsizes; ++i) {
		sizes[i] = cur;
		if (cur < SIZE_MAX / 2)
			cur *= 2;
	}
	/* the summed overhead must overflow -> SIZE_MAX with ERANGE */
	size_t reported = pmemobj_tx_log_snapshots_max_size(sizes, nsizes);
	UT_ASSERTeq(reported, SIZE_MAX);
	UT_ASSERTeq(errno, ERANGE);
	/* release allocated resources */
	FREE(sizes);
}
/*
 * do_log_snapshots_max_size -- verify pmemobj_tx_log_snapshots_max_size
 * reported size is sufficient: greedily picks as many ranges as the
 * first allocation (used as the log buffer) can hold according to the
 * estimate, then snapshots them all in one transaction.
 */
static void
do_log_snapshots_max_size(PMEMobjpool *pop)
{
UT_OUT("do_log_snapshot_max_size");
size_t nsizes_max = 15; /* an arbitrarily picked number */
size_t *sizes = (size_t *)MALLOC(nsizes_max * sizeof(size_t));
/* fill up the pool */
size_t nallocated = 0;
PMEMoid *allocated = fill_pool(pop, &nallocated);
UT_ASSERT(nallocated > LOG_BUFFER);
/* the first allocation will be used for as a snapshot log buffer */
void *buff_addr = pmemobj_direct(allocated[LOG_BUFFER]);
size_t max_buff_size = pmemobj_alloc_usable_size(allocated[LOG_BUFFER]);
size_t req_buff_size = 0;
/* how many ranges fit into the buffer */
size_t nsizes_valid = 0;
/* walk allocations from the smallest (last) towards the buffer */
for (size_t i = nallocated - 1; i > LOG_BUFFER; --i) {
/* initialize the range */
size_t range_size = pmemobj_alloc_usable_size(allocated[i]);
void *range_addr = pmemobj_direct(allocated[i]);
pmemobj_memset(pop, range_addr, 0, range_size, 0);
/* append to the list of sizes */
sizes[nsizes_valid] = range_size;
++nsizes_valid;
/* grow the sizes array on demand */
if (nsizes_valid == nsizes_max) {
nsizes_max *= 2;
sizes = (size_t *)REALLOC(sizes,
nsizes_max * sizeof(size_t));
}
/* estimate a required buffer size for snapshots */
req_buff_size = pmemobj_tx_log_snapshots_max_size(
sizes, nsizes_valid);
UT_ASSERTne(req_buff_size, SIZE_MAX);
if (req_buff_size > max_buff_size) {
/* if it is too much we have to use one less */
--nsizes_valid;
UT_ASSERTne(nsizes_valid, 0);
req_buff_size = pmemobj_tx_log_snapshots_max_size(
sizes, nsizes_valid);
break;
}
}
TX_BEGIN(pop) {
pmemobj_tx_log_append_buffer(TX_LOG_TYPE_SNAPSHOT, buff_addr,
req_buff_size);
/* sizes[i] matches allocated[nallocated - i - 1] from the loop above */
for (size_t i = 0; i < nsizes_valid; i++) {
pmemobj_tx_add_range(allocated[nallocated - i - 1], 0,
sizes[i]);
}
} TX_ONABORT {
UT_FATAL("!Estimated snapshot log buffer size is too small");
} TX_ONCOMMIT {
UT_OUT("Estimated snapshot log buffer size is sufficient");
} TX_END
/* release all allocated resources */
free_pool(allocated, nallocated);
FREE(sizes);
}
/*
 * main -- creates two pools (the second is needed only by the
 * wrong-pool-address case) and runs every ulog-size scenario in a
 * fixed order matched against the expected output file.
 */
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ulog_size");
if (argc != 3)
UT_FATAL("usage: %s [file] [file1]", argv[0]);
PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create");
PMEMobjpool *pop2 = pmemobj_create(argv[2], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR);
if (pop2 == NULL)
UT_FATAL("!pmemobj_create");
do_tx_max_alloc_no_user_alloc_snap(pop);
do_tx_max_alloc_user_alloc_snap(pop);
do_tx_max_alloc_user_alloc_nested(pop);
do_tx_max_alloc_user_alloc_snap_multi(pop);
do_tx_auto_alloc_disabled(pop);
do_tx_max_alloc_wrong_pop_addr(pop, pop2);
do_tx_max_alloc_tx_publish_abort(pop);
do_tx_buffer_currently_used(pop);
do_tx_max_alloc_tx_publish(pop);
do_tx_user_buffer_atomic_alloc(pop);
do_tx_buffer_overlapping(pop);
do_log_intents_max_size_limits();
do_log_intents_max_size(pop);
do_log_snapshots_max_size_limits();
do_log_snapshots_max_size(pop);
pmemobj_close(pop);
pmemobj_close(pop2);
DONE(NULL);
}
| 26,253 | 27.321467 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memset/pmem_memset.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memset.c -- unit test for doing a memset
*
* usage: pmem_memset file offset length
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memset_common.h"
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
/*
 * pmem_memset_persist_wrapper -- adapts pmem_memset_persist (which
 * takes no flags) to the pmem_memset_fn signature used by do_memset.
 */
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags; /* not supported by the persist variant */
	void *result = pmem_memset_persist(pmemdest, c, len);
	return result;
}
/*
 * pmem_memset_nodrain_wrapper -- adapts pmem_memset_nodrain (which
 * takes no flags) to the pmem_memset_fn signature used by do_memset.
 */
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags; /* not supported by the nodrain variant */
	void *result = pmem_memset_nodrain(pmemdest, c, len);
	return result;
}
/*
 * do_memset_variants -- runs the common memset check against the
 * persist and nodrain wrappers and then against pmem_memset with every
 * flag combination from the shared Flags table; NOFLUSH variants are
 * explicitly persisted afterwards so the verification reads real data.
 */
static void
do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off,
	size_t bytes, persist_fn p)
{
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_persist_wrapper, 0, p);
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_nodrain_wrapper, 0, p);
	/* size_t index: ARRAY_SIZE yields size_t; int caused sign-compare */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memset(fd, dest, file_name, dest_off, bytes,
				pmem_memset, Flags[i], p);
		if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH)
			pmem_persist(dest, bytes);
	}
}
/* do_persist_ddax -- persist callback for Device DAX mappings (is_pmem=1) */
static void
do_persist_ddax(const void *ptr, size_t size)
{
util_persist_auto(1, ptr, size);
}
/* do_persist -- persist callback for regular file mappings (is_pmem=0) */
static void
do_persist(const void *ptr, size_t size)
{
util_persist_auto(0, ptr, size);
}
/*
 * main -- maps the given file with pmem_map_file and runs all memset
 * variants at the requested offset/length; environment variables
 * (PMEM_MOVNT_THRESHOLD, PMEM_AVX, PMEM_AVX512F) are echoed into the
 * test name so each configuration gets its own match file.
 */
int
main(int argc, char *argv[])
{
int fd;
size_t mapped_len;
char *dest;
if (argc != 4)
UT_FATAL("usage: %s file offset length", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_memset %s %s %s %savx %savx512f",
argv[2], argv[3],
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
/* open a pmem file and memory map it */
if ((dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL)) == NULL)
UT_FATAL("!Could not mmap %s\n", argv[1]);
size_t dest_off = strtoul(argv[2], NULL, 0);
size_t bytes = strtoul(argv[3], NULL, 0);
/* Device DAX needs a different persist routine than a plain file */
enum file_type type = util_fd_get_type(fd);
if (type < 0)
UT_FATAL("cannot check type of file with fd %d", fd);
persist_fn p;
p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
do_memset_variants(fd, dest, argv[1], dest_off, bytes, p);
UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);
CLOSE(fd);
DONE(NULL);
}
| 2,428 | 22.355769 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pool_win/obj_pool_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_pool.c -- unit test for pmemobj_create() and pmemobj_open()
*
* usage: obj_pool op path layout [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- (Windows) creates a pool with pmemobj_createW, reports
 * the resulting file size/mode, then closes and consistency-checks it.
 * The wide path is converted to UTF-8 only for log messages.
 */
static void
pool_create(const wchar_t *path, const wchar_t *layout, size_t poolsize,
unsigned mode)
{
char *upath = ut_toUTF8(path);
PMEMobjpool *pop = pmemobj_createW(path, layout, poolsize, mode);
if (pop == NULL)
UT_OUT("!%s: pmemobj_create", upath);
else {
os_stat_t stbuf;
STATW(path, &stbuf);
UT_OUT("%s: file size %zu mode 0%o",
upath, stbuf.st_size,
stbuf.st_mode & 0777);
pmemobj_close(pop);
/* <0: check failed, 0: inconsistent, >0: consistent (silent) */
int result = pmemobj_checkW(path, layout);
if (result < 0)
UT_OUT("!%s: pmemobj_check", upath);
else if (result == 0)
UT_OUT("%s: pmemobj_check: not consistent", upath);
}
free(upath);
}
/*
 * pool_open -- (Windows) opens an existing pool with pmemobj_openW and
 * reports success or failure; an opened pool is closed immediately.
 */
static void
pool_open(const wchar_t *path, const wchar_t *layout)
{
	char *upath = ut_toUTF8(path);
	PMEMobjpool *pop = pmemobj_openW(path, layout);
	if (pop != NULL) {
		UT_OUT("%s: pmemobj_open: Success", upath);
		pmemobj_close(pop);
	} else {
		UT_OUT("!%s: pmemobj_open", upath);
	}
	free(upath);
}
/*
 * wmain -- (Windows) dispatches on the op argument:
 *   c - create a pool (requires poolsize and mode arguments)
 *   o - open an existing pool
 * The layout argument may be the literal "EMPTY" (empty string layout)
 * or "NULL" (no layout).
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "obj_pool_win");
	if (argc < 4)
		UT_FATAL("usage: %s op path layout [poolsize mode]",
			ut_toUTF8(argv[0]));
	wchar_t *layout = NULL;
	size_t poolsize;
	unsigned mode;
	if (wcscmp(argv[3], L"EMPTY") == 0)
		layout = L"";
	else if (wcscmp(argv[3], L"NULL") != 0)
		layout = argv[3];
	switch (argv[1][0]) {
	case 'c':
		/*
		 * bugfix: the create path reads argv[4] and argv[5] but only
		 * argc >= 4 was verified, allowing an out-of-bounds read
		 */
		if (argc < 6)
			UT_FATAL("usage: %s c path layout poolsize mode",
				ut_toUTF8(argv[0]));
		poolsize = wcstoul(argv[4], NULL, 0) * MB; /* in megabytes */
		mode = wcstoul(argv[5], NULL, 8);
		pool_create(argv[2], layout, poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2], layout);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONEW(NULL);
}
| 1,965 | 18.858586 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/checksum/checksum.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* checksum.c -- unit test for library internal checksum routine
*
* usage: checksum files...
*/
#include <endian.h>
#include "unittest.h"
#include "util.h"
#include <inttypes.h>
/*
* fletcher64 -- compute a Fletcher64 checksum
*
* Gold standard implementation used to compare to the
* util_checksum() being unit tested.
*/
static uint64_t
fletcher64(void *addr, size_t len)
{
	UT_ASSERT(len % 4 == 0);

	/* fold the buffer as little-endian 32-bit words */
	uint32_t *word = addr;
	size_t nwords = len / 4;
	uint32_t lo = 0;	/* running sum of words (mod 2^32) */
	uint32_t hi = 0;	/* running sum of the running sums */

	for (size_t i = 0; i < nwords; i++) {
		lo += le32toh(word[i]);
		hi += lo;
	}

	return htole64((uint64_t)hi << 32 | lo);
}
/*
 * main -- for every input file, exercise util_checksum() at each possible
 * checksum location and compare against the local fletcher64() gold standard
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "checksum");

	if (argc < 2)
		UT_FATAL("usage: %s files...", argv[0]);

	for (int arg = 1; arg < argc; arg++) {
		int fd = OPEN(argv[arg], O_RDONLY);

		os_stat_t stbuf;
		FSTAT(fd, &stbuf);

		size_t size = (size_t)stbuf.st_size;

		/* MAP_PRIVATE: scribbling on the mapping never hits the file */
		void *addr =
			MMAP(NULL, size, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, fd, 0);

		uint64_t *ptr = addr;

		/*
		 * Loop through, selecting successive locations
		 * where the checksum lives in this block, and
		 * let util_checksum() insert it so it can be
		 * verified against the gold standard fletcher64
		 * routine in this file.
		 */
		while ((char *)(ptr + 1) < (char *)addr + size) {
			/* save whatever was at *ptr */
			uint64_t oldval = *ptr;

			/* mess with it */
			*ptr = htole64(0x123);

			/*
			 * calculate a checksum and have it installed
			 */
			util_checksum(addr, size, ptr, 1, 0);

			uint64_t csum = *ptr;

			/*
			 * verify inserted checksum checks out
			 */
			UT_ASSERT(util_checksum(addr, size, ptr, 0, 0));

			/* put a zero where the checksum was installed */
			*ptr = 0;

			/* calculate a checksum */
			uint64_t gold_csum = fletcher64(addr, size);

			/* put the old value back */
			*ptr = oldval;

			/*
			 * verify checksum now fails
			 */
			UT_ASSERT(!util_checksum(addr, size, ptr,
				0, 0));

			/*
			 * verify the checksum matched the gold version
			 */
			UT_ASSERTeq(csum, gold_csum);
			UT_OUT("%s:%" PRIu64 " 0x%" PRIx64, argv[arg],
				(char *)ptr - (char *)addr, csum);

			ptr++;
		}

		/* second mapping used to model the "skip" feature below */
		uint64_t *addr2 =
			MMAP(NULL, size, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, fd, 0);

		uint64_t *csum = (uint64_t *)addr;

		/*
		 * put a zero where the checksum will be installed
		 * in the second map
		 */
		*addr2 = 0;

		/*
		 * NOTE(review): this loop assumes size >= 8; for a smaller
		 * file size / 8 - 1 would wrap around -- confirm the test
		 * inputs always satisfy this.
		 */
		for (size_t i = size / 8 - 1; i > 0; i -= 1) {
			/* calculate a checksum and have it installed */
			util_checksum(addr, size, csum, 1, i * 8);

			/*
			 * put a zero in the second map where an ignored part is
			 */
			*(addr2 + i) = 0;

			/* calculate a checksum */
			uint64_t gold_csum = fletcher64(addr2, size);

			/*
			 * verify the checksum matched the gold version
			 */
			UT_ASSERTeq(*csum, gold_csum);
		}

		CLOSE(fd);
		MUNMAP(addr, size);
		MUNMAP(addr2, size);
	}

	DONE(NULL);
}
| 3,014 | 19.510204 | 64 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_fragmentation/obj_fragmentation.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_fragmentation.c -- measures average heap fragmentation
*
* A pretty simplistic test that measures internal fragmentation of the
* allocator for the given size.
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_fragmentation"
#define OBJECT_OVERHEAD 64 /* account for the header added to each object */
#define MAX_OVERALL_OVERHEAD 0.10f
/*
* For the best accuracy fragmentation should be measured for one full zone
* because the metadata is preallocated. For reasonable test duration a smaller
* size must be used.
*/
#define DEFAULT_FILE_SIZE ((size_t)(1ULL << 28)) /* 256 megabytes */
/*
 * main -- allocate objects of the given size until the pool is exhausted,
 * then assert the overall space overhead stays below MAX_OVERALL_OVERHEAD
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_fragmentation");

	if (argc < 3)
		UT_FATAL("usage: %s allocsize filename [filesize]", argv[0]);

	size_t file_size;
	if (argc == 4)
		file_size = ATOUL(argv[3]);
	else
		file_size = DEFAULT_FILE_SIZE;

	size_t alloc_size = ATOUL(argv[1]);
	const char *path = argv[2];

	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, file_size,
		S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* fill the pool; stop on the first failed allocation (pool full) */
	size_t allocated = 0;
	int err = 0;
	do {
		PMEMoid oid;
		err = pmemobj_alloc(pop, &oid, alloc_size, 0, NULL, NULL);
		if (err == 0)
			/* count usable size plus the per-object header */
			allocated += pmemobj_alloc_usable_size(oid) +
				OBJECT_OVERHEAD;
	} while (err == 0);

	float allocated_pct = ((float)allocated / file_size);
	float overhead_pct = 1.f - allocated_pct;
	UT_ASSERT(overhead_pct <= MAX_OVERALL_OVERHEAD);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,607 | 23.738462 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/traces/traces.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* traces.c -- unit test for traces
*/
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "UT_LOG_LEVEL"
#define LOG_FILE_VAR "UT_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#include <sys/types.h>
#include <stdarg.h>
#include "pmemcommon.h"
#include "unittest.h"
/*
 * main -- emit one message at every log level (0..4) so the test can
 * verify which levels are actually printed for a given LOG_LEVEL setting
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "traces");

	/* Execute test */
	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
		MAJOR_VERSION, MINOR_VERSION);

	LOG(0, "Log level NONE");
	LOG(1, "Log level ERROR");
	LOG(2, "Log level WARNING");
	LOG(3, "Log level INFO");
	LOG(4, "Log level DEBUG");

	/* Cleanup */
	common_fini();

	DONE(NULL);
}
| 728 | 18.184211 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_first_next/obj_first_next.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_first_next.c -- unit tests for POBJ_FIRST macro
*/
#include <stddef.h>
#include "libpmemobj.h"
#include "unittest.h"
#define LAYOUT_NAME "obj_first_next"
TOID_DECLARE(struct type, 0);
TOID_DECLARE(struct type_sec, 1);
struct type {
int id;
};
struct type_sec {
int id;
};
static PMEMobjpool *pop;
typedef void (*fn_op)(int id);
typedef void (*fn_void)();
#define FATAL_USAGE()\
UT_FATAL("usage: obj_first_next <file> [Parfn]")
/*
* get_item_type -- get nth item from list
*/
/* walk the 'struct type' collection and return its n-th element */
static TOID(struct type)
get_item_type(int n)
{
	TOID(struct type) it;

	POBJ_FOREACH_TYPE(pop, it) {
		if (n-- == 0)
			return it;
	}

	return TOID_NULL(struct type);
}
/*
* get_item_type_sec -- get nth item from list
*/
/* walk the 'struct type_sec' collection and return its n-th element */
static TOID(struct type_sec)
get_item_type_sec(int n)
{
	TOID(struct type_sec) it;

	POBJ_FOREACH_TYPE(pop, it) {
		if (n-- == 0)
			return it;
	}

	return TOID_NULL(struct type_sec);
}
/*
* do_print_type -- print list elements from type collection
*/
static void
do_print_type(void)
{
	TOID(struct type) item;

	/* output is matched against the expected test log */
	UT_OUT("type:");
	POBJ_FOREACH_TYPE(pop, item) {
		UT_OUT("id = %d", D_RO(item)->id);
	}
}
/*
* do_print_type_sec -- print list elements from type_sec collection
*/
static void
do_print_type_sec(void)
{
	TOID(struct type_sec) item;

	/* output is matched against the expected test log */
	UT_OUT("type_sec:");
	POBJ_FOREACH_TYPE(pop, item) {
		UT_OUT("id = %d", D_RO(item)->id);
	}
}
static fn_void do_print[] = {do_print_type, do_print_type_sec};
/*
* type_constructor -- constructor which sets the item's id to
* new value
*/
/* arg points at the int id to assign; returns 0 == success */
static int
type_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	int id = *(int *)arg;
	struct type *item = (struct type *)ptr;
	item->id = id;
	UT_OUT("constructor(id = %d)", id);
	/* make the id durable before the allocation becomes visible */
	pmemobj_persist(pop, item, sizeof(*item));
	return 0;
}
/*
* type_sec_constructor -- constructor which sets the item's id to
* new value
*/
/* arg points at the int id to assign; returns 0 == success */
static int
type_sec_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	int id = *(int *)arg;
	struct type_sec *item = (struct type_sec *)ptr;
	item->id = id;
	UT_OUT("constructor(id = %d)", id);
	/* make the id durable before the allocation becomes visible */
	pmemobj_persist(pop, item, sizeof(*item));
	return 0;
}
/*
* do_alloc_type -- allocates new element to type collection
*/
static void
do_alloc_type(int id)
{
	TOID(struct type) item;
	/* constructor receives &id and initializes/persists the object */
	POBJ_NEW(pop, &item, struct type, type_constructor, &id);
	if (TOID_IS_NULL(item))
		UT_FATAL("POBJ_NEW");
}
/*
* do_alloc_type_sec -- allocates new element to type_sec collection
*/
static void
do_alloc_type_sec(int id)
{
	TOID(struct type_sec) item;
	/* constructor receives &id and initializes/persists the object */
	POBJ_NEW(pop, &item, struct type_sec, type_sec_constructor, &id);
	if (TOID_IS_NULL(item))
		UT_FATAL("POBJ_NEW");
}
static fn_op do_alloc[] = {do_alloc_type, do_alloc_type_sec};
/*
* do_free_type -- remove and free element from type collection
*/
static void
do_free_type(int n)
{
	TOID(struct type) item;
	/* nothing to free when the collection is empty */
	if (TOID_IS_NULL(POBJ_FIRST(pop, struct type)))
		return;
	item = get_item_type(n);
	/* n is expected to be in range; out-of-range would return NULL */
	UT_ASSERT(!TOID_IS_NULL(item));
	POBJ_FREE(&item);
}
/*
* do_free_type_sec -- remove and free element from type_sec collection
*/
static void
do_free_type_sec(int n)
{
	TOID(struct type_sec) item;
	/* nothing to free when the collection is empty */
	if (TOID_IS_NULL(POBJ_FIRST(pop, struct type_sec)))
		return;
	item = get_item_type_sec(n);
	/* n is expected to be in range; out-of-range would return NULL */
	UT_ASSERT(!TOID_IS_NULL(item));
	POBJ_FREE(&item);
}
static fn_op do_free[] = {do_free_type, do_free_type_sec};
/*
* do_first_type -- prints id of first object in type collection
*/
static void
do_first_type(void)
{
	TOID(struct type) first = POBJ_FIRST(pop, struct type);
	/*
	 * NOTE(review): no NULL check -- D_RO on an empty collection would
	 * dereference NULL; presumably the test scripts only call 'f' after
	 * at least one allocation. Confirm against the expected inputs.
	 */
	UT_OUT("first id = %d", D_RO(first)->id);
}
/*
* do_first_type_sec -- prints id of first object in type_sec collection
*/
static void
do_first_type_sec(void)
{
	TOID(struct type_sec) first = POBJ_FIRST(pop, struct type_sec);
	/*
	 * NOTE(review): no NULL check -- D_RO on an empty collection would
	 * dereference NULL; presumably only called after an allocation.
	 */
	UT_OUT("first id = %d", D_RO(first)->id);
}
static fn_void do_first[] = {do_first_type, do_first_type_sec};
/*
* do_next_type -- finds next element from type collection
*/
static void
do_next_type(int n)
{
	TOID(struct type) item;
	if (TOID_IS_NULL(POBJ_FIRST(pop, struct type)))
		return;
	item = get_item_type(n);
	UT_ASSERT(!TOID_IS_NULL(item));
	item = POBJ_NEXT(item);
	/*
	 * NOTE(review): if item was the last element, POBJ_NEXT yields NULL
	 * and D_RO would dereference it -- confirm test inputs never ask for
	 * the successor of the last element.
	 */
	UT_OUT("next id = %d", D_RO(item)->id);
}
/*
* do_next_type_sec -- finds next element from type_sec collection
*/
static void
do_next_type_sec(int n)
{
	TOID(struct type_sec) item;
	if (TOID_IS_NULL(POBJ_FIRST(pop, struct type_sec)))
		return;
	item = get_item_type_sec(n);
	UT_ASSERT(!TOID_IS_NULL(item));
	item = POBJ_NEXT(item);
	/* NOTE(review): same last-element caveat as do_next_type */
	UT_OUT("next id = %d", D_RO(item)->id);
}
static fn_op do_next[] = {do_next_type, do_next_type_sec};
/*
* do_cleanup -- de-initialization function
*/
/* free every object in the pool; the SAFE variant tolerates the free */
static void
do_cleanup(void)
{
	PMEMoid cur;
	PMEMoid next;

	POBJ_FOREACH_SAFE(pop, cur, next)
		pmemobj_free(&cur);
}
/*
 * test_internal_object_mask -- verify that internal objects (the root
 * object and the tx range cache) are hidden from pmemobj_first/next
 */
static void
test_internal_object_mask(PMEMobjpool *pop)
{
	/* allocate root object */
	PMEMoid root = pmemobj_root(pop, sizeof(struct type));

	TX_BEGIN(pop) {
		/* trigger creation of a range cache */
		pmemobj_tx_add_range(root, 0, 8);
	} TX_END

	PMEMoid oid;
	pmemobj_alloc(pop, &oid, sizeof(struct type), 0, NULL, NULL);
	UT_ASSERT(!OID_IS_NULL(oid));

	/* verify that there's no root object nor range cache anywhere */
	for (PMEMoid iter = pmemobj_first(pop); !OID_IS_NULL(iter);
		iter = pmemobj_next(iter)) {
		/* the only visible object must be the one just allocated */
		UT_ASSERT(OID_EQUALS(iter, oid));
	}
}
/*
 * main -- parse "op:list:id" commands and dispatch to the handlers
 *
 * op: P=print, a=alloc, r=free, f=first, n=next
 * list: 0 selects 'struct type', 1 selects 'struct type_sec'
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_first_next");
	if (argc < 2)
		FATAL_USAGE();

	const char *path = argv[1];
	if ((pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	for (int i = 2; i < argc; i++) {
		int list_num;
		int id;
		char type;
		/*
		 * Require all three conversions to succeed; the previous
		 * "== EOF" check accepted partially-matched arguments and
		 * then read uninitialized variables.
		 */
		if (sscanf(argv[i], "%c:%d:%d", &type, &list_num, &id) != 3)
			UT_FATAL("!sscanf");
		/* guard the dispatch-table index */
		if (list_num < 0 || list_num > 1)
			FATAL_USAGE();
		switch (type) {
		case 'P':
			do_print[list_num]();
			break;
		case 'a':
			do_alloc[list_num](id);
			break;
		case 'r':
			do_free[list_num](id);
			break;
		case 'f':
			do_first[list_num]();
			break;
		case 'n':
			do_next[list_num](id);
			break;
		default:
			FATAL_USAGE();
		}
	}

	do_cleanup();
	test_internal_object_mask(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 6,154 | 18.664537 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_transform_win/libpmempool_transform_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool_transform_win -- a unittest for libpmempool transform.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
/*
 * wmain -- call pmempool_transformW(poolset_in, poolset_out, flags)
 * and log the result; errno is reported only on failure
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "libpmempool_transform_win");

	if (argc != 4)
		UT_FATAL("usage: %s poolset_in poolset_out flags",
			ut_toUTF8(argv[0]));

	int ret = pmempool_transformW(argv[1], argv[2],
		(unsigned)wcstoul(argv[3], NULL, 0));
	if (ret)
		UT_OUT("result: %d, errno: %d", ret, errno);
	else
		UT_OUT("result: 0");

	DONEW(NULL);
}
| 647 | 18.636364 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_recovery/obj_recovery.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_recovery.c -- unit test for pool recovery
*/
#include "unittest.h"
#include "valgrind_internal.h"
#if VG_PMEMCHECK_ENABLED
#define VALGRIND_PMEMCHECK_END_TX VALGRIND_PMC_END_TX
#else
#define VALGRIND_PMEMCHECK_END_TX
#endif
POBJ_LAYOUT_BEGIN(recovery);
POBJ_LAYOUT_ROOT(recovery, struct root);
POBJ_LAYOUT_TOID(recovery, struct foo);
POBJ_LAYOUT_END(recovery);
#define MB (1 << 20)
struct foo {
int bar;
};
struct root {
PMEMmutex lock;
TOID(struct foo) foo;
char large_data[MB];
};
#define BAR_VALUE 5
/*
 * main -- pool recovery test driver
 *
 * First invocation ('c') creates the pool, starts a transaction and
 * simulates a crash (exit(0) inside TX_BEGIN, after cleaning up the
 * pmemcheck transaction state).  Second invocation ('o') reopens the
 * pool and asserts the interrupted transaction was rolled back.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_recovery");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recovery) != 1);

	if (argc != 5)
		UT_FATAL("usage: %s [file] [lock: y/n] "
			"[cmd: c/o] [type: n/f/s/l]",
			argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = NULL;

	/* 'o' == open an existing pool, anything else == create it */
	int exists = argv[3][0] == 'o';

	enum { TEST_NEW, TEST_FREE, TEST_SET, TEST_LARGE } type;

	if (argv[4][0] == 'n')
		type = TEST_NEW;
	else if (argv[4][0] == 'f')
		type = TEST_FREE;
	else if (argv[4][0] == 's')
		type = TEST_SET;
	else if (argv[4][0] == 'l')
		type = TEST_LARGE;
	else
		UT_FATAL("invalid type");

	if (!exists) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(recovery),
			0, S_IWUSR | S_IRUSR)) == NULL) {
			UT_FATAL("failed to create pool\n");
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(recovery)))
				== NULL) {
			UT_FATAL("failed to open pool\n");
		}
	}

	TOID(struct root) root = POBJ_ROOT(pop, struct root);

	/* optionally run every transaction under the root's PMEMmutex */
	int lock_type = TX_PARAM_NONE;
	void *lock = NULL;

	if (argv[2][0] == 'y') {
		lock_type = TX_PARAM_MUTEX;
		lock = &D_RW(root)->lock;
	}

	if (type == TEST_SET) {
		if (!exists) {
			/* committed setup tx: bar == BAR_VALUE */
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);

				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END

			/* interrupted tx: the new value must be rolled back */
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD_FIELD(D_RW(root)->foo, bar);
				D_RW(D_RW(root)->foo)->bar = BAR_VALUE * 2;
				/*
				 * Even though flushes are not required inside
				 * of a transaction, this is done here to
				 * suppress irrelevant pmemcheck issues, because
				 * we exit the program before the data is
				 * flushed, while preserving any real ones.
				 */
				pmemobj_persist(pop,
					&D_RW(D_RW(root)->foo)->bar,
					sizeof(int));
				/*
				 * We also need to cleanup the transaction state
				 * of pmemcheck.
				 */
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			UT_ASSERT(D_RW(D_RW(root)->foo)->bar == BAR_VALUE);
		}
	} else if (type == TEST_LARGE) {
		if (!exists) {
			/* interrupted memset of a 1 MB range */
			TX_BEGIN(pop) {
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				exit(0);
			} TX_END
		} else {
			/* the aborted memset must have been undone */
			UT_ASSERT(util_is_zeroed(D_RW(root)->large_data, MB));
			TX_BEGIN(pop) { /* we should be able to start TX */
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				pmemobj_tx_abort(0);
			} TX_END
		}
	} else if (type == TEST_NEW) {
		if (!exists) {
			/* interrupted allocation: foo must stay NULL */
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TOID(struct foo) f = TX_NEW(struct foo);
				TX_SET(root, foo, f);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			UT_ASSERT(TOID_IS_NULL(D_RW(root)->foo));
		}
	} else { /* TEST_FREE */
		if (!exists) {
			/* committed setup tx, then an interrupted free */
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);

				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END

			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TX_FREE(D_RW(root)->foo);
				D_RW(root)->foo = TOID_NULL(struct foo);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			/* the free was rolled back, object still present */
			UT_ASSERT(!TOID_IS_NULL(D_RW(root)->foo));
		}
	}

	UT_ASSERT(pmemobj_check(path, POBJ_LAYOUT_NAME(recovery)));

	pmemobj_close(pop);

	DONE(NULL);
}
| 4,244 | 20.994819 | 61 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_perror/pmem2_perror.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_perror.c -- pmem2_perror unittests
*/
#include "libpmem2.h"
#include "unittest.h"
#include "out.h"
#include "config.h"
#include "source.h"
/*
* test_fail_pmem2_func_simple - simply check print message when func
* from pmem2 API fails
*/
static int
test_fail_pmem2_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;

	/* an offset that cannot be represented -- forces the call to fail */
	size_t too_big = (size_t)INT64_MAX + 1;

	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, too_big);
	UT_ASSERTne(ret, 0);

	pmem2_perror("pmem2_config_set_offset");

	return 0;
}
/*
* test_fail_pmem2_func_format - check print message when func
* from pmem2 API fails and ellipsis operator is used
*/
static int
test_fail_pmem2_func_format(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;

	/* an offset that cannot be represented -- forces the call to fail */
	size_t too_big = (size_t)INT64_MAX + 1;

	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, too_big);
	UT_ASSERTne(ret, 0);

	/* exercise the printf-style variadic path of pmem2_perror */
	pmem2_perror("pmem2_config_set_offset %d", 123);

	return 0;
}
/*
* test_fail_system_func_simple - check print message when directly called
* system func fails
*/
static int
test_fail_system_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	/* opening a nonexistent path must fail and set errno */
	int fd = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(fd, -1);
	ERR("!open");

	pmem2_perror("test");

	return 0;
}
/*
* test_fail_system_func_format - check print message when directly called
* system func fails and ellipsis operator is used
*/
static int
test_fail_system_func_format(const struct test_case *tc, int argc, char *argv[])
{
	/* opening a nonexistent path must fail and set errno */
	int fd = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(fd, -1);
	ERR("!open");

	/* exercise the printf-style variadic path of pmem2_perror */
	pmem2_perror("test %d", 123);

	return 0;
}
/*
* test_fail_pmem2_syscall_simple - check print message when system func
* fails through pmem2_source_size func
*/
static int
test_fail_pmem2_syscall_simple(const struct test_case *tc,
		int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;

	/* an invalid handle/descriptor guarantees pmem2_source_size fails */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif

	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/*
	 * Use UT_ASSERTne (always compiled in, like the rest of this file)
	 * instead of out.h's ASSERTne, which is a no-op in non-debug builds
	 * and would silently skip this check.
	 */
	UT_ASSERTne(ret, 0);

	pmem2_perror("test");

	return 0;
}
/*
* test_fail_pmem2_syscall_simple - check print message when system func
* fails through pmem2_source_size func and ellipsis operator is used
*/
static int
test_fail_pmem2_syscall_format(const struct test_case *tc,
		int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;

	/* an invalid handle/descriptor guarantees pmem2_source_size fails */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif

	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/*
	 * Use UT_ASSERTne (always compiled in, like the rest of this file)
	 * instead of out.h's ASSERTne, which is a no-op in non-debug builds
	 * and would silently skip this check.
	 */
	UT_ASSERTne(ret, 0);

	pmem2_perror("test %d", 123);

	return 0;
}
/*
* test_simple_err_to_errno_check - check if conversion
* from pmem2 err value to errno works fine
*/
static int
test_simple_err_to_errno_check(const struct test_case *tc,
		int argc, char *argv[])
{
	/* pairs of (pmem2 error code, expected errno translation) */
	static const struct {
		int err;
		int expected;
	} cases[] = {
		{ PMEM2_E_NOSUPP, ENOTSUP },
		{ PMEM2_E_UNKNOWN, EINVAL },
		{ -ENOTSUP, ENOTSUP },
	};

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		int ret_errno = pmem2_err_to_errno(cases[i].err);
		UT_ASSERTeq(ret_errno, cases[i].expected);
	}

	return 0;
}
/*
* test_cases -- available test cases
*/
/* registry consumed by TEST_CASE_PROCESS in main(); names match TESTS.py */
static struct test_case test_cases[] = {
	TEST_CASE(test_fail_pmem2_func_simple),
	TEST_CASE(test_fail_pmem2_func_format),
	TEST_CASE(test_fail_system_func_simple),
	TEST_CASE(test_fail_system_func_format),
	TEST_CASE(test_fail_pmem2_syscall_simple),
	TEST_CASE(test_fail_pmem2_syscall_format),
	TEST_CASE(test_simple_err_to_errno_check),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize the out/err machinery (pmem2_perror writes through
 * it) and run the test case named on the command line
 */
int
main(int argc, char **argv)
{
	START(argc, argv, "pmem2_perror");

	util_init();
	out_init("pmem2_perror", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);

	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);

	out_fini();

	DONE(NULL);
}
| 4,205 | 21.253968 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_perror/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
@g.require_granularity(g.ANY)
class Pmem2Perror(t.Test):
    """Base class: runs one pmem2_perror test case, capturing stderr.

    Subclasses set ``test_case`` (the C test-case name) and
    ``stderr_file`` (expected-output log, or None to skip the check).
    """
    test_type = t.Short

    def run(self, ctx):
        ctx.exec('pmem2_perror', self.test_case, stderr_file=self.stderr_file)
# One subclass per C test case; stderr of each run is compared against
# the named log file (None disables the comparison).
class TEST0(Pmem2Perror):
    test_case = 'test_fail_pmem2_func_simple'
    stderr_file = 'test0.log'


class TEST1(Pmem2Perror):
    test_case = 'test_fail_pmem2_func_format'
    stderr_file = 'test1.log'


class TEST2(Pmem2Perror):
    test_case = 'test_fail_system_func_simple'
    stderr_file = 'test2.log'


class TEST3(Pmem2Perror):
    test_case = 'test_fail_system_func_format'
    stderr_file = 'test3.log'


class TEST4(Pmem2Perror):
    test_case = 'test_fail_pmem2_syscall_simple'
    stderr_file = 'test4.log'


class TEST5(Pmem2Perror):
    test_case = 'test_fail_pmem2_syscall_format'
    stderr_file = 'test5.log'


class TEST6(Pmem2Perror):
    test_case = 'test_simple_err_to_errno_check'
    stderr_file = None
| 1,079 | 20.176471 | 78 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_mem_ext/pmem2_mem_ext.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_mem_ext.c -- test for low level functions from libpmem2
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "valgrind_internal.h"
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_fn)(void *pmemdest, int c, size_t len,
unsigned flags);
static unsigned Flags[] = {
0,
PMEM_F_MEM_NONTEMPORAL,
PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_WC,
PMEM_F_MEM_WB,
PMEM_F_MEM_NOFLUSH,
PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
/*
* do_memcpy_with_flag -- pmem2 memcpy with specified flag amd size
*/
/* copy the first data_size bytes into the region right after them */
static void
do_memcpy_with_flag(char *addr, size_t data_size, memcpy_fn cpy_fn, int flag)
{
	cpy_fn(addr + data_size, addr, data_size, Flags[flag]);
}
/*
* do_memmove_with_flag -- pmem2 memmove with specified flag and size
*/
/* move the first data_size bytes into the region right after them */
static void
do_memmove_with_flag(char *addr, size_t data_size, memmove_fn mov_fn, int flag)
{
	mov_fn(addr + data_size, addr, data_size, Flags[flag]);
}
/*
* do_memset_with_flag -- pmem2 memset with specified flag and size
*/
static void
do_memset_with_flag(char *addr, size_t data_size, memset_fn set_fn, int flag)
{
	set_fn(addr, 1, data_size, Flags[flag]);
	/*
	 * NOTE(review): the table uses PMEM_F_MEM_NOFLUSH while the check
	 * uses PMEM2_F_MEM_NOFLUSH -- presumably aliases of the same bit;
	 * confirm against libpmem2.h. When no flush is requested, tell
	 * valgrind/pmemcheck explicitly that the range is persisted.
	 */
	if (Flags[flag] & PMEM2_F_MEM_NOFLUSH)
		VALGRIND_DO_PERSIST(addr, data_size);
}
/*
 * main -- map the given file with libpmem2 and run one memcpy ('C'),
 * memset ('S') or memmove ('M') operation of the given size with the
 * flag set selected by index into Flags[]
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *addr;
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (argc != 5)
		UT_FATAL("usage: %s file type size flag", argv[0]);

	/* echoed in START so the log records the environment knobs */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_mem_ext %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	util_init();

	char type = argv[2][0];
	size_t data_size = strtoul(argv[3], NULL, 0);
	int flag = atoi(argv[4]);
	/* a negative flag converts to a huge size_t and also fails here */
	UT_ASSERT(flag < ARRAY_SIZE(Flags));

	fd = OPEN(argv[1], O_RDWR);
	UT_ASSERT(fd != -1);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);

	/* the ops touch [0, 2*data_size), so the mapping must be larger */
	mapped_len = pmem2_map_get_size(map);
	UT_ASSERT(data_size * 2 < mapped_len);

	addr = pmem2_map_get_address(map);
	if (addr == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);

	switch (type) {
		case 'C':
		{
			pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
			do_memcpy_with_flag(addr, data_size, memcpy_fn, flag);
			break;
		}
		case 'S':
		{
			pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
			do_memset_with_flag(addr, data_size, memset_fn, flag);
			break;
		}
		case 'M':
		{
			pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
			do_memmove_with_flag(addr, data_size, memmove_fn, flag);
			break;
		}
		default:
			UT_FATAL("!wrong type of test %c", type);
			break;
	}

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 3,349 | 22.426573 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_mem_ext/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
from testframework import tools
from testframework import futils
import os
import sys
NO_FLAGS = 0
PMEM_F_MEM_NONTEMPORAL = 1
PMEM_F_MEM_TEMPORAL = 2
PMEM_F_MEM_NONTEMPORAL_v_PMEM_F_MEM_TEMPORAL = 3
PMEM_F_MEM_WC = 4
PMEM_F_MEM_WB = 5
PMEM_F_MEM_NOFLUSH = 6
'''
ALL_FLAGS = PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB
'''
ALL_FLAGS = 7
# This match is valid for all cases with a BYTE
# granularity and small (<256) data size.
MATCH_BYTE_SMALL = \
(
(NO_FLAGS, 128, "t"),
(PMEM_F_MEM_NONTEMPORAL, 128, "nt"),
(PMEM_F_MEM_TEMPORAL, 128, "t"),
(PMEM_F_MEM_NONTEMPORAL_v_PMEM_F_MEM_TEMPORAL, 128, "nt"),
(PMEM_F_MEM_WC, 128, "t"),
(PMEM_F_MEM_WB, 128, "t"),
(PMEM_F_MEM_NOFLUSH, 128, "t"),
(ALL_FLAGS, 128, "t")
)
# This match is valid for all cases with a BYTE
# granularity and big (>256) data size.
MATCH_BYTE_BIG = \
(
(NO_FLAGS, 1024, "t"),
(PMEM_F_MEM_NONTEMPORAL, 1024, "nt"),
(PMEM_F_MEM_TEMPORAL, 1024, "t"),
(PMEM_F_MEM_NONTEMPORAL_v_PMEM_F_MEM_TEMPORAL, 1024, "nt"),
(PMEM_F_MEM_WC, 1024, "t"),
(PMEM_F_MEM_WB, 1024, "t"),
(PMEM_F_MEM_NOFLUSH, 1024, "t"),
(ALL_FLAGS, 1024, "t")
)
# This match is valid for all cases with a PAGE/CACHELINE
# granularity and small (<256) data size.
MATCH_PAGE_CACHELINE_SMALL = \
(
(NO_FLAGS, 128, "t"),
(PMEM_F_MEM_NONTEMPORAL, 128, "nt"),
(PMEM_F_MEM_TEMPORAL, 128, "t"),
(PMEM_F_MEM_NONTEMPORAL_v_PMEM_F_MEM_TEMPORAL, 128, "nt"),
(PMEM_F_MEM_WC, 128, "nt"),
(PMEM_F_MEM_WB, 128, "t"),
(PMEM_F_MEM_NOFLUSH, 128, "t"),
(ALL_FLAGS, 128, "t")
)
# This match is valid for all cases with a PAGE/CACHELINE
# granularity and big (>256) data size.
MATCH_PAGE_CACHELINE_BIG = \
(
(NO_FLAGS, 1024, "nt"),
(PMEM_F_MEM_NONTEMPORAL, 1024, "nt"),
(PMEM_F_MEM_TEMPORAL, 1024, "t"),
(PMEM_F_MEM_NONTEMPORAL_v_PMEM_F_MEM_TEMPORAL, 1024, "nt"),
(PMEM_F_MEM_WC, 1024, "nt"),
(PMEM_F_MEM_WB, 1024, "t"),
(PMEM_F_MEM_NOFLUSH, 1024, "t"),
(ALL_FLAGS, 1024, "t")
)
SSE2 = 1
AVX = 2
AVX512 = 3
VARIANT_LIBC = 'libc'
VARIANT_GENERIC = 'generic'
VARIANT_SSE2 = 'sse2'
VARIANT_AVX = 'avx'
VARIANT_AVX512F = 'avx512f'
@t.require_build('debug')
@t.require_architectures('x86_64')
class Pmem2MemExt(t.Test):
    """Base class: run pmem2_mem_ext and verify, via the pmem2 debug log,
    that the expected memcpy/memmove/memset implementation was selected
    for each (flag, size) combination in ``test_case``.
    """
    test_type = t.Short

    filesize = 4 * t.MiB
    available_arch = SSE2
    variant = VARIANT_SSE2

    # By default data size is 128 - this is smaller than the threshold
    # value (256) to predict usage of temporal stores. This value is
    # overridden in some tests to a value bigger than 256.
    data_size = 128
    pmem2_log = ""
    oper = ("C", "M", "S")  # memcpy, memmove, memset

    def setup(self, ctx):
        # cpufd reports the best instruction set available on this CPU
        ret = tools.Tools(ctx.env, ctx.build).cpufd()
        self.check_arch(ctx.variant(), ret.returncode)

    def check_arch(self, variant, available_arch):
        # skip when the requested variant is not usable on this machine/build
        if variant == VARIANT_AVX512F:
            if available_arch < AVX512:
                raise futils.Skip("SKIP: AVX512F unavailable")
            # remove this when the MSVC we use will support AVX512F
            if sys.platform.startswith('win32'):
                raise futils.Skip("SKIP: AVX512F not supported by MSVC")
            is_avx512f_enabled = tools.envconfig['PMEM2_AVX512F_ENABLED']
            if is_avx512f_enabled == "0":
                raise futils.Skip("SKIP: AVX512F disabled at build time")
        if variant == VARIANT_AVX and available_arch < AVX:
            raise futils.Skip("SKIP: AVX unavailable")

    def check_log(self, ctx, match, type, flag):
        # the selected implementation's name must appear exactly once
        # in the pmem2 debug log produced by the run
        with open(os.path.join(self.cwd, self.pmem2_log), 'r') as f:
            str_val = f.read()

            # count function match, only one log entry should occur at a time
            count = str_val.count(match)
            if count != 1:
                raise futils.Fail(
                    "Pattern: {} occurrs {} times. One expected. "
                    "Type: {} Flag id: {}"
                    .format(match, count, type, flag))

    def create_match(self, variant, oper, store_type):
        # build the implementation-function name expected in the log,
        # e.g. "memmove_movnt_avx" / "memset_nodrain_libc"
        match = ""
        if variant == VARIANT_LIBC:
            if oper == "C" or oper == "M":
                match = "memmove_nodrain_libc"
            elif oper == "S":
                match = "memset_nodrain_libc"
            return match
        if variant == VARIANT_GENERIC:
            if oper == "C" or oper == "M":
                match = "memmove_nodrain_generic"
            elif oper == "S":
                match = "memset_nodrain_generic"
            return match

        if oper in ("C", "M"):
            match += "memmove_mov"
        elif oper == "S":
            match += "memset_mov"
        else:
            raise futils.Fail(
                "Operation: {} not supported.".format(oper))

        if store_type == "nt":
            match += store_type

        if variant == VARIANT_SSE2:
            match += "_sse2"
        elif variant == VARIANT_AVX:
            match += "_avx"
        else:
            match += "_avx512f"

        return match

    def run(self, ctx):
        self.pmem2_log = 'pmem2_' + str(self.testnum) + '.log'

        # XXX: add support in the python framework
        # enable pmem2 low level logging
        ctx.env['PMEM2_LOG_FILE'] = self.pmem2_log
        ctx.env['PMEM2_LOG_LEVEL'] = '15'

        if ctx.wc_workaround() == 'on':
            ctx.env['PMEM_WC_WORKAROUND'] = '1'
        elif ctx.wc_workaround() == 'off':
            ctx.env['PMEM_WC_WORKAROUND'] = '0'

        # force the implementation under test via environment knobs
        if ctx.variant() == VARIANT_LIBC:
            ctx.env['PMEM_NO_MOVNT'] = '1'
            ctx.env['PMEM_NO_GENERIC_MEMCPY'] = '1'
        elif ctx.variant() == VARIANT_GENERIC:
            ctx.env['PMEM_NO_MOVNT'] = '1'
        elif ctx.variant() == VARIANT_SSE2:
            ctx.env['PMEM_AVX'] = '0'
            ctx.env['PMEM_AVX512F'] = '0'
        elif ctx.variant() == VARIANT_AVX:
            ctx.env['PMEM_AVX'] = '1'
            ctx.env['PMEM_AVX512F'] = '0'
        elif ctx.variant() == VARIANT_AVX512F:
            ctx.env['PMEM_AVX'] = '0'
            ctx.env['PMEM_AVX512F'] = '1'

        filepath = ctx.create_holey_file(self.filesize, 'testfile',)

        # each tc is (flag index, data size, expected store type "t"/"nt")
        for tc in self.test_case:
            for o in self.oper:
                flag_id = tc[0]
                size = tc[1]
                store_type = tc[2]
                match = self.create_match(ctx.variant(), o, store_type)
                ctx.exec('pmem2_mem_ext', filepath, o, size, flag_id)
                self.check_log(ctx, match, o, flag_id)
# Concrete tests: each picks a match table (small/big data size,
# byte vs page/cacheline granularity) and the implementation variants.
@t.add_params('variant', [VARIANT_LIBC, VARIANT_GENERIC])
@t.add_params('wc_workaround', ['default'])
class TEST0(Pmem2MemExt):
    test_case = [(NO_FLAGS, 1024, "")]


@g.require_granularity(g.PAGE, g.CACHELINE)
@t.add_params('variant', [VARIANT_SSE2, VARIANT_AVX, VARIANT_AVX512F])
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST1(Pmem2MemExt):
    test_case = MATCH_PAGE_CACHELINE_SMALL


@g.require_granularity(g.BYTE)
@t.add_params('variant', [VARIANT_SSE2, VARIANT_AVX, VARIANT_AVX512F])
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST2(Pmem2MemExt):
    test_case = MATCH_BYTE_SMALL


@g.require_granularity(g.PAGE, g.CACHELINE)
@t.add_params('variant', [VARIANT_SSE2, VARIANT_AVX, VARIANT_AVX512F])
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST3(Pmem2MemExt):
    test_case = MATCH_PAGE_CACHELINE_BIG


@g.require_granularity(g.BYTE)
@t.add_params('variant', [VARIANT_SSE2, VARIANT_AVX, VARIANT_AVX512F])
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST4(Pmem2MemExt):
    test_case = MATCH_BYTE_BIG
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_is_pmem_posix/pmem_is_pmem_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pmem_is_pmem_posix.c -- Posix specific unit test for pmem_is_pmem()
*
* usage: pmem_is_pmem_posix op addr len [op addr len ...]
* where op can be: 'a' (add), 'r' (remove), 't' (test),
* 'f' (fault_injection for util_range_register),
* 's' (fault_injection for util_range_split)
*/
#include <stdlib.h>
#include "unittest.h"
#include "mmap.h"
#include "../libpmem/pmem.h"
static enum pmem_map_type
str2type(char *str)
{
if (strcmp(str, "DEV_DAX") == 0)
return PMEM_DEV_DAX;
if (strcmp(str, "MAP_SYNC") == 0)
return PMEM_MAP_SYNC;
FATAL("unknown type '%s'", str);
}
/*
 * do_fault_injection_register -- inject an allocation failure into
 * util_range_register() and verify it fails with ENOMEM; returns the
 * number of command-line arguments the 'f' op consumes
 */
static int
do_fault_injection_register(void *addr, size_t len, enum pmem_map_type type)
{
	/* no-op when the library was built without fault injection */
	if (!pmem_fault_injection_enabled())
		goto end;

	pmem_inject_fault_at(PMEM_MALLOC, 1, "util_range_register");

	int ret = util_range_register(addr, len, "", type);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOMEM);
end:
	return 4;
}
/*
 * do_fault_injection_split -- inject an allocation failure into
 * util_range_split() (reached via util_range_unregister) and verify it
 * fails with ENOMEM; returns the number of arguments the 's' op consumes
 */
static int
do_fault_injection_split(void *addr, size_t len)
{
	/* no-op when the library was built without fault injection */
	if (!pmem_fault_injection_enabled())
		goto end;

	pmem_inject_fault_at(PMEM_MALLOC, 1, "util_range_split");

	int ret = util_range_unregister(addr, len);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOMEM);
end:
	return 3;
}
/*
 * range_add -- register a memory range; on failure only print the
 * library error message (failures here are expected by some test cases)
 */
static int
range_add(void *addr, size_t len, const char *path, enum pmem_map_type t)
{
	if (util_range_register(addr, len, path, t) != 0)
		UT_OUT("%s", pmem_errormsg());

	/* the 'a' op consumes four command-line arguments */
	return 4;
}
/*
 * range_add_ddax -- register a Device DAX range; same as range_add, but
 * an extra path argument is consumed from the command line
 */
static int
range_add_ddax(void *addr, size_t len, const char *path, enum pmem_map_type t)
{
	(void) range_add(addr, len, path, t);

	return 5;
}
/*
 * range_rm -- unregister a previously added range; must always succeed
 */
static int
range_rm(void *addr, size_t len)
{
	int rv = util_range_unregister(addr, len);
	UT_ASSERTeq(rv, 0);

	/* the 'r' op consumes three command-line arguments */
	return 3;
}
/*
 * range_test -- query pmem_is_pmem() for a range and log the answer
 */
static int
range_test(void *addr, size_t len)
{
	int is_pmem = pmem_is_pmem(addr, len);
	UT_OUT("addr %p len %zu is_pmem %d", addr, len, is_pmem);

	/* the 't' op consumes three command-line arguments */
	return 3;
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_is_pmem_posix");

	if (argc < 4)
		UT_FATAL("usage: %s op addr len type [op addr len type file]",
			argv[0]);

	/* insert memory regions to the list */
	int i;
	/* each op handler returns how many argv slots it consumed */
	for (i = 1; i < argc; ) {
		UT_ASSERT(i + 2 < argc);

		/* addr and len are parsed in any base (0 = auto-detect) */
		errno = 0;
		void *addr = (void *)strtoull(argv[i + 1], NULL, 0);
		UT_ASSERTeq(errno, 0);

		size_t len = strtoull(argv[i + 2], NULL, 0);
		UT_ASSERTeq(errno, 0);

		switch (argv[i][0]) {
		case 'a':
		{
			enum pmem_map_type t = str2type(argv[i + 3]);
			/*
			 * If type is DEV_DAX we expect path to ddax
			 * as a third arg. Functions return number of
			 * consumed arguments.
			 */
			if (t == PMEM_DEV_DAX)
				i += range_add_ddax(addr, len, argv[i + 4], t);
			else
				i += range_add(addr, len, "", t);
			break;
		}
		case 'r':
			i += range_rm(addr, len);
			break;
		case 't':
			i += range_test(addr, len);
			break;
		case 'f':
			i += do_fault_injection_register(addr, len,
				str2type(argv[i + 3]));
			break;
		case 's':
			i += do_fault_injection_split(addr, len);
			break;
		default:
			FATAL("invalid op '%c'", argv[i][0]);
		}
	}
	DONE(NULL);
}
| 3,051 | 18.818182 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct_volatile/obj_direct_volatile.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_direct_volatile.c -- unit test for pmemobj_direct_volatile()
*/
#include "unittest.h"
static PMEMobjpool *pop;
struct test {
PMEMvlt(int) count;
};
#define TEST_OBJECTS 100
#define TEST_WORKERS 10
static struct test *tests[TEST_OBJECTS];
/*
 * test_constructor -- constructor for the volatile counter; atomically
 * increments it so concurrent construction attempts would be detected
 */
static int
test_constructor(void *ptr, void *arg)
{
	util_fetch_and_add32((int *)ptr, 1);

	return 0;
}
/*
 * test_worker -- thread body: resolve every object's volatile counter;
 * the constructor must have run exactly once per object regardless of
 * how many threads race on pmemobj_volatile()
 */
static void *
test_worker(void *arg)
{
	for (int i = 0; i < TEST_OBJECTS; ++i) {
		int *count = pmemobj_volatile(pop, &tests[i]->count.vlt,
			&tests[i]->count.value, sizeof(tests[i]->count.value),
			test_constructor, NULL);
		UT_ASSERTne(count, NULL);
		/* exactly one constructor run per object */
		UT_ASSERTeq(*count, 1);
	}
	return NULL;
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct_volatile");
	if (argc != 2)
		UT_FATAL("usage: %s file", argv[0]);

	char *path = argv[1];
	pop = pmemobj_create(path, "obj_direct_volatile",
		PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	/* allocate the shared objects all workers will touch */
	for (int i = 0; i < TEST_OBJECTS; ++i) {
		PMEMoid oid;
		pmemobj_zalloc(pop, &oid, sizeof(struct test), 1);
		UT_ASSERT(!OID_IS_NULL(oid));
		tests[i] = pmemobj_direct(oid);
	}

	/* race TEST_WORKERS threads on the volatile-state constructors */
	os_thread_t t[TEST_WORKERS];
	for (int i = 0; i < TEST_WORKERS; ++i) {
		THREAD_CREATE(&t[i], NULL, test_worker, NULL);
	}
	for (int i = 0; i < TEST_WORKERS; ++i) {
		THREAD_JOIN(&t[i], NULL);
	}

	/* despite the race, each constructor must have run exactly once */
	for (int i = 0; i < TEST_OBJECTS; ++i) {
		UT_ASSERTeq(tests[i]->count.value, 1);
	}

	pmemobj_close(pop);
	DONE(NULL);
}
| 1,563 | 18.55 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_parse_size/util_parse_size.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* util_parse_size.c -- unit test for parsing a size
*/
#include "unittest.h"
#include "util.h"
#include <inttypes.h>
/*
 * main -- feed every command-line argument to util_parse_size() and
 * report whether it parsed, along with the resulting value
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_parse_size");

	for (int i = 1; i < argc; ++i) {
		uint64_t size = 0;

		if (util_parse_size(argv[i], &size) == 0)
			UT_OUT("%s - correct %"PRIu64, argv[i], size);
		else
			UT_OUT("%s - incorrect", argv[i]);
	}

	DONE(NULL);
}
| 543 | 16.548387 | 52 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_unmap/pmem_unmap.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmem_unmap.c -- unit tests for pmem_unmap
*/
#include "unittest.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
#define PAGE_4K (4 * KILOBYTE)
#define PAGE_2M (2 * MEGABYTE)
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_unmap");

	const char *path;
	unsigned long long len;
	int flags;
	mode_t mode;
	size_t mlenp;
	size_t size;
	int is_pmem;
	char *ret;
	os_stat_t stbuf;

	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);

	path = argv[1];
	len = 0;		/* 0 = map the whole file */
	flags = 0;
	mode = S_IWUSR | S_IRUSR;

	/* the test fixture must be exactly 20 MB */
	STAT(path, &stbuf);
	size = (size_t)stbuf.st_size;
	UT_ASSERTeq(size, 20 * MEGABYTE);

	ret = pmem_map_file(path, len, flags, mode, &mlenp, &is_pmem);

	/* unmap pieces of the mapping at increasing offsets */
	UT_ASSERTeq(pmem_unmap(ret, PAGE_4K), 0);
	ret += PAGE_2M;
	UT_ASSERTeq(pmem_unmap(ret, PAGE_2M), 0);
	ret += PAGE_2M;
	UT_ASSERTeq(pmem_unmap(ret, PAGE_2M - 1), 0);
	ret += PAGE_2M;
	/* zero-length unmap must fail */
	UT_ASSERTne(pmem_unmap(ret, 0), 0);
	ret += PAGE_2M - 1;
	/* unaligned address must fail */
	UT_ASSERTne(pmem_unmap(ret, PAGE_4K), 0);
	DONE(NULL);
}
| 1,069 | 17.135593 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_util/rpmemd_util_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util_test.c -- unit tests for rpmemd_util module
*/
#include "unittest.h"
#include "rpmem_common.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "util.h"
/* structure to store results */
struct result {
int ret;
enum rpmem_persist_method persist_method;
int (*persist)(const void *addr, size_t len);
void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
};
/* all values to test */
static const enum rpmem_persist_method pms[] =
{RPMEM_PM_GPSPM, RPMEM_PM_APM, MAX_RPMEM_PM};
static const int is_pmems[] = {0, 1};
enum mode {
MODE_VALID,
MODE_INVALID,
MODE_MAX
};
static const int ranges[2][2][2] = {
[MODE_VALID] = {
{0, ARRAY_SIZE(pms) - 1},
{0, ARRAY_SIZE(is_pmems)}
},
[MODE_INVALID] = {
{ARRAY_SIZE(pms) - 1, ARRAY_SIZE(pms)},
{0, ARRAY_SIZE(is_pmems)}
}
};
/* expected results */
static const struct result exp_results[3][2] = {
{
/* GPSPM and is_pmem == false */
{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
/* GPSPM and is_pmem == true */
{0, RPMEM_PM_GPSPM, rpmemd_pmem_persist,
pmem_memcpy_persist}
}, {
/* APM and is_pmem == false */
{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
/* APM and is_pmem == true */
{0, RPMEM_PM_APM, rpmemd_flush_fatal,
pmem_memcpy_persist}
}, {
/* persistency method outside of the range */
{1, 0, 0, 0},
{1, 0, 0, 0}
}
};
/*
 * test_apply_pm_policy -- call rpmemd_apply_pm_policy() and record in
 * result->ret whether it succeeded (0) or failed (1)
 */
static void
test_apply_pm_policy(struct result *result, int is_pmem)
{
	int failed = rpmemd_apply_pm_policy(&result->persist_method,
			&result->persist, &result->memcpy_persist, is_pmem);

	result->ret = failed ? 1 : 0;
}
#define USAGE() do {\
UT_ERR("usage: %s valid|invalid", argv[0]);\
} while (0)
/*
 * test -- iterate over the given ranges of persistency methods and
 * is_pmem flags and compare the outcome of rpmemd_apply_pm_policy()
 * against the exp_results table
 */
static void
test(const int pm_range[2], const int is_pmem_range[2])
{
	rpmemd_log_level = RPD_LOG_NOTICE;
	int ret = rpmemd_log_init("rpmemd_log", NULL, 0);
	UT_ASSERTeq(ret, 0);

	struct result result;
	const struct result *exp_result;
	for (int pm_ind = pm_range[0]; pm_ind < pm_range[1]; ++pm_ind) {
		for (int is_pmem_ind = is_pmem_range[0];
			is_pmem_ind < is_pmem_range[1]; ++is_pmem_ind) {
			result.persist_method = pms[pm_ind];
			exp_result = &exp_results[pm_ind][is_pmem_ind];
			test_apply_pm_policy(&result, is_pmems[is_pmem_ind]);
			UT_ASSERTeq(result.ret, exp_result->ret);
			/* on success the chosen method/persist must match */
			if (exp_result->ret == 0) {
				UT_ASSERTeq(result.persist_method,
					exp_result->persist_method);
				UT_ASSERTeq(result.persist,
					exp_result->persist);
			}
		}
	}
	rpmemd_log_close();
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmemd_util");

	if (argc < 2) {
		USAGE();
		return 1;
	}

	/* "valid" runs in-range inputs; "invalid" runs out-of-range ones */
	const char *mode_str = argv[1];
	enum mode mode = MODE_MAX;
	if (strcmp(mode_str, "valid") == 0) {
		mode = MODE_VALID;
	} else if (strcmp(mode_str, "invalid") == 0) {
		mode = MODE_INVALID;
	} else {
		USAGE();
		return 1;
	}
	UT_ASSERTne(mode, MODE_MAX);
	test(ranges[mode][0], ranges[mode][1]);
	DONE(NULL);
}
| 3,027 | 20.027778 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
from os import path
import testframework as t
class BASE(t.BaseTest):
    """common base: run the obj_defrag binary on a file in the test dir"""
    test_type = t.Medium

    def run(self, ctx):
        # the test binary creates the pool at this path itself
        testfile = path.join(ctx.testdir, 'testfile0')
        ctx.exec('obj_defrag', testfile)


class TEST0(BASE):
    "defrag test"
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag/obj_defrag.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_defrag.c -- unit test for pmemobj_defrag
*/
#include "unittest.h"
#include <limits.h>
#define OBJECT_SIZE 100
/*
 * defrag_basic -- allocate three objects, free the lowest one, then
 * defragment and verify that the highest object moved into the hole,
 * that duplicated oid pointers were updated consistently, and that the
 * object's contents survived the move
 */
static void
defrag_basic(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);

	/* fill the third object with a known pattern */
	char *buff = (char *)MALLOC(OBJECT_SIZE);
	memset(buff, 0xc, OBJECT_SIZE);
	char *foop = (char *)pmemobj_direct(oid3);
	pmemobj_memcpy_persist(pop, foop, buff, OBJECT_SIZE);
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);

	/* free the lowest object to open a hole for defrag to fill */
	pmemobj_free(&oid1);
	/* oid4 is a duplicate reference to the same object as oid3 */
	PMEMoid oid4 = oid3;
	PMEMoid *oids[] = {&oid2, &oid3, &oid4};
	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, 3, &result);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(result.total, 2);
	UT_ASSERTeq(result.relocated, 2);
	/* the object at higher location should move into the freed oid1 pos */
	foop = (char *)pmemobj_direct(oid3);
	UT_ASSERT(oid3.off < oid2.off);
	UT_ASSERTeq(oid3.off, oid4.off);
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);
	FREE(buff);
}
struct test_object
{
PMEMoid a;
PMEMoid b;
PMEMoid c;
};
/*
 * defrag_nested_pointers -- defragment three cross-linked objects whose
 * fields hold PMEMoids pointing at each other; after defrag every
 * registered pointer (including duplicates and the embedded fields)
 * must reference the objects' new locations
 */
static void
defrag_nested_pointers(PMEMobjpool *pop)
{
	int ret;
	/*
	 * This is done so that the oids below aren't allocated literally in the
	 * ideal position in the heap (chunk 0, offset 0).
	 */
#define EXTRA_ALLOCS 100
	for (int i = 0; i < EXTRA_ALLOCS; ++i) {
		PMEMoid extra;
		ret = pmemobj_zalloc(pop, &extra, OBJECT_SIZE, 0);
		UT_ASSERTeq(ret, 0);
		pmemobj_free(&extra);
	}
#undef EXTRA_ALLOCS
	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	struct test_object *oid1p = (struct test_object *)pmemobj_direct(oid1);
	struct test_object *oid2p = (struct test_object *)pmemobj_direct(oid2);
	struct test_object *oid3p = (struct test_object *)pmemobj_direct(oid3);
	/* cross-link the three objects through their a/b/c fields */
	oid1p->a = OID_NULL;
	oid1p->b = oid2;
	oid1p->c = oid1;
	pmemobj_persist(pop, oid1p, sizeof(*oid1p));
	oid2p->a = oid1;
	oid2p->b = OID_NULL;
	oid2p->c = oid3;
	pmemobj_persist(pop, oid2p, sizeof(*oid2p));
	oid3p->a = oid2;
	oid3p->b = oid2;
	oid3p->c = oid1;
	pmemobj_persist(pop, oid3p, sizeof(*oid3p));
#define OID_PTRS 12
#define EXTRA_OID_PTRS 60
#define OIDS_ALL (EXTRA_OID_PTRS + OID_PTRS)
	/* many duplicate pointers to oid3 plus every embedded oid field */
	PMEMoid **oids = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * OIDS_ALL);
	PMEMoid *oid3pprs = (PMEMoid *)MALLOC(sizeof(PMEMoid) * EXTRA_OID_PTRS);
	int i;
	for (i = 0; i < EXTRA_OID_PTRS; ++i) {
		oid3pprs[i] = oid3;
		oids[i] = &oid3pprs[i];
	}
	oids[i + 0] = &oid1;
	oids[i + 1] = &oid2;
	oids[i + 2] = &oid3;
	oids[i + 3] = &oid1p->a;
	oids[i + 4] = &oid1p->b;
	oids[i + 5] = &oid1p->c;
	oids[i + 6] = &oid2p->a;
	oids[i + 7] = &oid2p->b;
	oids[i + 8] = &oid2p->c;
	oids[i + 9] = &oid3p->a;
	oids[i + 10] = &oid3p->b;
	oids[i + 11] = &oid3p->c;
	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, OIDS_ALL, &result);
	UT_ASSERTeq(result.total, 3);
	UT_ASSERTeq(result.relocated, 3);
	UT_ASSERTeq(ret, 0);
	/* re-resolve the direct pointers after relocation */
	oid1p = (struct test_object *)pmemobj_direct(oid1);
	oid2p = (struct test_object *)pmemobj_direct(oid2);
	oid3p = (struct test_object *)pmemobj_direct(oid3);
	/* every duplicate and embedded pointer must track the new offsets */
	for (int i = 0; i < EXTRA_OID_PTRS; ++i) {
		UT_ASSERTeq(oid3pprs[i].off, oid3.off);
	}
	UT_ASSERTeq(oid1p->a.off, 0);
	UT_ASSERTeq(oid1p->b.off, oid2.off);
	UT_ASSERTeq(oid1p->c.off, oid1.off);
	UT_ASSERTeq(oid2p->a.off, oid1.off);
	UT_ASSERTeq(oid2p->b.off, 0);
	UT_ASSERTeq(oid2p->c.off, oid3.off);
	UT_ASSERTeq(oid3p->a.off, oid2.off);
	UT_ASSERTeq(oid3p->b.off, oid2.off);
	UT_ASSERTeq(oid3p->c.off, oid1.off);
	pmemobj_free(&oid1);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);
	FREE(oids);
	FREE(oid3pprs);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_defrag");

	/*
	 * Validate the argument count before touching argv[1]; previously
	 * running the binary without a path dereferenced a NULL argv entry.
	 */
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
		PMEMOBJ_MIN_POOL * 2, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	defrag_basic(pop);
	defrag_nested_pointers(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 4,429 | 22.817204 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_deep_flush/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
@g.no_testdir()
class PMEM2_DEEP_FLUSH(t.Test):
    """common base: run one named test case of the pmem2_deep_flush binary"""
    test_type = t.Short

    def run(self, ctx):
        ctx.exec('pmem2_deep_flush', self.test_case)


class TEST0(PMEM2_DEEP_FLUSH):
    """test pmem2_deep_flush"""
    test_case = "test_deep_flush_func"


@t.linux_only
class TEST1(PMEM2_DEEP_FLUSH):
    """test pmem2_deep_flush with mocked DAX devices"""
    test_case = "test_deep_flush_func_devdax"


class TEST2(PMEM2_DEEP_FLUSH):
    """test pmem2_deep_flush with range beyond mapping"""
    test_case = "test_deep_flush_range_beyond_mapping"
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_deep_flush/pmem2_deep_flush.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_deep_flush.c -- unit test for pmem_deep_flush()
*
* usage: pmem2_deep_flush file deep_persist_size offset
*
* pmem2_deep_flush depending on the mapping granularity is performed using one
* of the following paths:
* - page: NOP
* - cache: pmem2_deep_flush_dax
* - byte: pmem2_persist_cpu_cache + pmem2_deep_flush_dax
*
* Where pmem2_deep_flush_dax:
* - pmem2_get_type_from_stat is used to determine a file type
* - for regular files performs pmem2_flush_file_buffers_os OR
* - for Device DAX:
* - is looking for Device DAX region (pmem2_get_region_id)
* - is constructing the region deep flush file paths
* - opens deep_flush file (os_open)
* - reads deep_flush file (read)
* - performs a write to it (write)
*
* Where pmem2_persist_cpu_cache performs:
* - flush (replaced by mock_flush) AND
* - drain (replaced by mock_drain)
*
* Additionally, for the sake of this test, the following functions are
* replaced:
* - pmem2_get_type_from_stat (to control perceived file type)
* - pmem2_flush_file_buffers_os (for counting calls)
* - pmem2_get_region_id (to prevent reading sysfs in search for non
* existing Device DAXes)
* or mocked:
* - os_open (to prevent opening non existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
* - write (for counting writes to non-existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
*
* NOTE: In normal usage the persist function precedes any call to
* pmem2_deep_flush. This test aims to validate the pmem2_deep_flush
* function and so the persist function is omitted.
*/
#include "source.h"
#ifndef _WIN32
#include <sys/sysmacros.h>
#endif
#include "mmap.h"
#include "persist.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "unittest.h"
static int n_file_buffs_flushes = 0;
static int n_fences = 0;
static int n_flushes = 0;
static int n_writes = 0;
static int n_reads = 0;
static enum pmem2_file_type *ftype_value;
static int read_invalid = 0;
static int deep_flush_not_needed = 0;
#ifndef _WIN32
#define MOCK_FD 999
#define MOCK_REG_ID 888
#define MOCK_BUS_DEVICE_PATH "/sys/bus/nd/devices/region888/deep_flush"
#define MOCK_DEV_ID 777UL
/*
 * pmem2_get_region_id -- redefine libpmem2 function; always reports the
 * mocked region id so the test never reads real sysfs entries
 */
int
pmem2_get_region_id(const struct pmem2_source *src,
	unsigned *region_id)
{
	*region_id = MOCK_REG_ID;

	return 0;
}
/*
 * os_open -- os_open mock; returns a fake descriptor for the mocked
 * region's deep_flush sysfs path, delegates everything else
 */
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	if (strcmp(path, MOCK_BUS_DEVICE_PATH) == 0)
		return MOCK_FD;

	va_list ap;
	va_start(ap, flags);
	int mode = va_arg(ap, int);
	va_end(ap);
	return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END

/*
 * write -- write mock; counts '1' writes to the mocked deep_flush file
 */
FUNC_MOCK(write, int, int fd, const void *buffer, size_t count)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(*(char *)buffer, '1');
	UT_ASSERTeq(count, 1);
	UT_ASSERTeq(fd, MOCK_FD);
	++n_writes;
	return 1;
}
FUNC_MOCK_END

/*
 * read -- read mock; serves the deep_flush file contents ("1\n" by
 * default, "0\n" when deep_flush_not_needed, short read when
 * read_invalid) and counts every read
 */
FUNC_MOCK(read, int, int fd, void *buffer, size_t nbytes)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(nbytes, 2);
	UT_ASSERTeq(fd, MOCK_FD);
	UT_OUT("mocked read, fd %d", fd);
	char pattern[2] = {'1', '\n'};
	int ret = sizeof(pattern);
	if (deep_flush_not_needed)
		pattern[0] = '0';
	if (read_invalid) {
		ret = 0;
		goto end;
	}
	memcpy(buffer, pattern, sizeof(pattern));
end:
	++n_reads;
	return ret;
}
FUNC_MOCK_END
#endif /* not _WIN32 */
/*
 * mock_flush -- count flush calls in the test
 */
static void
mock_flush(const void *addr, size_t len)
{
	++n_flushes;
}

/*
 * mock_drain -- count drain (fence) calls in the test
 */
static void
mock_drain(void)
{
	++n_fences;
}

/*
 * pmem2_arch_init -- attach flush and drain functions replacements;
 * called by libpmem2 init so the counters above observe every call
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	info->flush = mock_flush;
	info->fence = mock_drain;
}
/*
 * pmem2_map_find -- redefine libpmem2 function, redefinition is needed
 * for a proper compilation of the test. NOTE: this function is not used
 * in the test, so reaching it is a test failure.
 */
struct pmem2_map *
pmem2_map_find(const void *addr, size_t len)
{
	UT_ASSERT(0);

	return NULL;
}

/*
 * pmem2_flush_file_buffers_os -- redefine libpmem2 function; only
 * counts invocations instead of issuing a real msync/flush
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
	int autorestart)
{
	++n_file_buffs_flushes;

	return 0;
}
/*
 * map_init -- fill pmem2_map in minimal scope; also stashes a pointer
 * to the source's file-type field so test cases can switch between
 * regular-file and Device-DAX behavior
 */
static void
map_init(struct pmem2_map *map)
{
	const size_t length = 8 * MEGABYTE;
	map->content_length = length;
	/*
	 * The test needs to allocate more memory because some test cases
	 * validate behavior with address beyond mapping.
	 */
	map->addr = MALLOC(2 * length);
#ifndef _WIN32
	map->source.type = PMEM2_SOURCE_FD;
	/* mocked device ID for device DAX */
	map->source.value.st_rdev = MOCK_DEV_ID;
#else
	map->source.type = PMEM2_SOURCE_HANDLE;
#endif
	ftype_value = &map->source.value.ftype;
}
/*
 * counters_check_n_reset -- check numbers of uses of deep-flushing elements
 * and reset them (also clears the read_invalid/deep_flush_not_needed
 * scenario flags for the next case)
 */
static void
counters_check_n_reset(int msynces, int flushes, int fences,
	int writes, int reads)
{
	UT_ASSERTeq(n_file_buffs_flushes, msynces);
	UT_ASSERTeq(n_flushes, flushes);
	UT_ASSERTeq(n_fences, fences);
	UT_ASSERTeq(n_writes, writes);
	UT_ASSERTeq(n_reads, reads);

	n_file_buffs_flushes = 0;
	n_flushes = 0;
	n_fences = 0;
	n_writes = 0;
	n_reads = 0;
	read_invalid = 0;
	deep_flush_not_needed = 0;
}
/*
 * test_deep_flush_func -- test pmem2_deep_flush for all granularity options
 * on a regular file: PAGE is a no-op, CACHE_LINE and BYTE both go
 * through one file-buffers flush
 */
static int
test_deep_flush_func(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	*ftype_value = PMEM2_FTYPE_REG;
	void *addr = map.addr;
	size_t len = map.content_length;

	/* page granularity: nothing should happen */
	map.effective_granularity = PMEM2_GRANULARITY_PAGE;
	pmem2_set_flush_fns(&map);
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 0, 0, 0, 0);

	/* cache-line granularity: one file-buffers flush */
	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);

	/* byte granularity: one file-buffers flush */
	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);
	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_func_devdax -- test pmem2_deep_flush with mocked DAX
 * devices; each scenario is run for CACHE_LINE and BYTE granularity:
 * deep_flush file reports "1" (write expected), reports "0" (write
 * skipped), and an invalid/short read (write skipped, no error)
 */
static int
test_deep_flush_func_devdax(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	void *addr = map.addr;
	size_t len = map.content_length;
	*ftype_value = PMEM2_FTYPE_DEVDAX;

	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);
	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);

	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);
	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_range_beyond_mapping -- test pmem2_deep_flush with
 * the address that goes beyond mapping; both a fully and a partially
 * out-of-range request must fail with PMEM2_E_DEEP_FLUSH_RANGE
 */
static int
test_deep_flush_range_beyond_mapping(const struct test_case *tc, int argc,
	char *argv[])
{
	struct pmem2_map map;
	map_init(&map);

	/* set address completely beyond mapping */
	void *addr = (void *)((uintptr_t)map.addr + map.content_length);
	size_t len = map.content_length;
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	/*
	 * set address in the middle of mapping, which makes range partially
	 * beyond mapping
	 */
	addr = (void *)((uintptr_t)map.addr + map.content_length / 2);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);
	FREE(map.addr);
	return 0;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_deep_flush_func),
	TEST_CASE(test_deep_flush_func_devdax),
	TEST_CASE(test_deep_flush_range_beyond_mapping),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))

int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_deep_flush");
	/* initialize the (mocked) flush/drain function table */
	pmem2_persist_init();
	util_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
| 8,865 | 22.270341 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct/obj_direct_non_inline.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_direct_inline.c -- unit test for direct
*/
#define PMEMOBJ_DIRECT_NON_INLINE
#include "unittest.h"
#include "obj_direct.h"
/*
 * obj_direct_non_inline -- resolve an oid with the non-inlined
 * pmemobj_direct() (PMEMOBJ_DIRECT_NON_INLINE is defined above)
 */
void *
obj_direct_non_inline(PMEMoid oid)
{
	UT_OUT("pmemobj_direct non-inlined");

	void *direct = pmemobj_direct(oid);
	return direct;
}
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct/obj_direct.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_direct.c -- unit test for pmemobj_direct()
*/
#include "obj.h"
#include "obj_direct.h"
#include "sys_util.h"
#include "unittest.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock1;
static os_mutex_t lock2;
static os_cond_t sync_cond1;
static os_cond_t sync_cond2;
static int cond1;
static int cond2;
static PMEMoid thread_oid;
/*
 * obj_direct -- resolve an oid with both the inlined and non-inlined
 * pmemobj_direct() variants and make sure they agree
 */
static void *
obj_direct(PMEMoid oid)
{
	void *inlined = obj_direct_inline(oid);
	void *non_inlined = obj_direct_non_inline(oid);

	UT_ASSERTeq(inlined, non_inlined);

	return inlined;
}
/*
 * test_worker -- verify that pmemobj_direct() resolves thread_oid while
 * the pool is open, then wait for main to free the object and close the
 * pool, after which the translation must yield NULL
 */
static void *
test_worker(void *arg)
{
	/* check before pool is closed, then let main continue */
	UT_ASSERTne(obj_direct(thread_oid), NULL);
	util_mutex_lock(&lock1);
	cond1 = 1;
	os_cond_signal(&sync_cond1);
	util_mutex_unlock(&lock1);

	/* wait for main thread to free & close, then check */
	util_mutex_lock(&lock2);
	while (!cond2)
		os_cond_wait(&sync_cond2, &lock2);
	util_mutex_unlock(&lock2);
	UT_ASSERTeq(obj_direct(thread_oid), NULL);
	return NULL;
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct");

	if (argc != 3)
		UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);

	unsigned npools = ATOU(argv[2]);
	const char *dir = argv[1];
	int r;

	util_mutex_init(&lock1);
	util_mutex_init(&lock2);
	util_cond_init(&sync_cond1);
	util_cond_init(&sync_cond2);
	cond1 = cond2 = 0;

	PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
	UT_ASSERTne(pops, NULL);

	/* create one pool per requested slot */
	size_t length = strlen(dir) + MAX_PATH_LEN;
	char *path = MALLOC(length);
	for (unsigned i = 0; i < npools; ++i) {
		int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
			dir, i);
		if (ret < 0 || ret >= length)
			UT_FATAL("snprintf: %d", ret);
		pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);

		if (pops[i] == NULL)
			UT_FATAL("!pmemobj_create");
	}

	PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(oids, NULL);
	PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(tmpoids, NULL);

	/* a NULL oid translates to NULL */
	oids[0] = OID_NULL;
	UT_ASSERTeq(obj_direct(oids[0]), NULL);

	for (unsigned i = 0; i < npools; ++i) {
		/* offset 0 is not a valid object and must yield NULL */
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, 0};
		UT_ASSERTeq(obj_direct(oids[i]), NULL);

		/* a valid offset must translate relative to the pool base */
		uint64_t off = pops[i]->heap_offset;
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
		UT_ASSERTeq((char *)obj_direct(oids[i]) - off,
			(char *)pops[i]);

		r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
		UT_ASSERTeq(r, 0);
	}

	r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
	UT_ASSERTeq(r, 0);
	UT_ASSERTne(obj_direct(thread_oid), NULL);

	os_thread_t t;
	THREAD_CREATE(&t, NULL, test_worker, NULL);

	/* wait for the worker thread to perform the first check */
	util_mutex_lock(&lock1);
	while (!cond1)
		os_cond_wait(&sync_cond1, &lock1);
	util_mutex_unlock(&lock1);

	/* freed objects and closed pools must stop translating */
	for (unsigned i = 0; i < npools; ++i) {
		UT_ASSERTne(obj_direct(tmpoids[i]), NULL);

		pmemobj_free(&tmpoids[i]);

		UT_ASSERTeq(obj_direct(tmpoids[i]), NULL);
		pmemobj_close(pops[i]);
		UT_ASSERTeq(obj_direct(oids[i]), NULL);
	}

	/* signal the worker that we're free and closed */
	util_mutex_lock(&lock2);
	cond2 = 1;
	os_cond_signal(&sync_cond2);
	util_mutex_unlock(&lock2);

	THREAD_JOIN(&t, NULL);
	util_cond_destroy(&sync_cond1);
	util_cond_destroy(&sync_cond2);
	util_mutex_destroy(&lock1);
	util_mutex_destroy(&lock2);
	/* fix: 'path' was leaked before; release it with the other buffers */
	FREE(path);
	FREE(pops);
	FREE(tmpoids);
	FREE(oids);

	DONE(NULL);
}
| 3,476 | 22.653061 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct/obj_direct_inline.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_direct_inline.c -- unit test for direct
*/
#include "unittest.h"
#include "obj_direct.h"
/*
 * obj_direct_inline -- resolve an oid with the inlined pmemobj_direct()
 */
void *
obj_direct_inline(PMEMoid oid)
{
	UT_OUT("pmemobj_direct inlined");

	void *direct = pmemobj_direct(oid);
	return direct;
}
| 289 | 17.125 | 46 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct/obj_direct.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_direct.h -- unit test for pmemobj_direct()
*/
#ifndef OBJ_DIRECT_H
#define OBJ_DIRECT_H 1
#include "libpmemobj.h"
void *obj_direct_inline(PMEMoid oid);
void *obj_direct_non_inline(PMEMoid oid);
#endif /* OBJ_DIRECT_H */
| 316 | 18.8125 | 49 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memcheck/obj_memcheck.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(mc);
POBJ_LAYOUT_ROOT(mc, struct root);
POBJ_LAYOUT_TOID(mc, struct struct1);
POBJ_LAYOUT_END(mc);
struct struct1 {
int fld;
int dyn[];
};
struct root {
TOID(struct struct1) s1;
TOID(struct struct1) s2;
};
/*
 * test_memcheck_bug -- reproduce a Valgrind memcheck mempool corner
 * case: after re-allocating a previously freed mempool chunk and
 * marking the first 8 bytes no-access, the write to tmp[7] should be
 * reported as invalid
 */
static void
test_memcheck_bug(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[100];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 8);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MAKE_MEM_NOACCESS(tmp, 8);
	/* deliberately invalid write into the no-access prefix */
	tmp[7] = 0x66;
#endif
}
/*
 * test_memcheck_bug2 -- reproduce a Valgrind memcheck reporting quirk
 * for writes into freed mempool chunks (see the comment inside)
 */
static void
test_memcheck_bug2(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[1000];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 128, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 128);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 256, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 256);
	/*
	 * This should produce warning:
	 * Address ... is 0 bytes inside a block of size 128 bytes freed.
	 * instead, it produces a warning:
	 * Address ... is 0 bytes after a block of size 128 freed
	 */
	int *data = (int *)(tmp + 256);
	*data = 0x66;
#endif
}
/*
 * test_everything -- exercise memcheck instrumentation of libpmemobj:
 * every access below marked with a comment is a DELIBERATE error that
 * the matching .err file expects Valgrind to report; do not "fix" them
 */
static void
test_everything(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(mc),
		PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	struct root *rt = D_RW(POBJ_ROOT(pop, struct root));
	POBJ_ALLOC(pop, &rt->s1, struct struct1, sizeof(struct struct1),
		NULL, NULL);
	struct struct1 *s1 = D_RW(rt->s1);
	struct struct1 *s2;
	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
		NULL, NULL);
	s2 = D_RW(rt->s2);
	POBJ_FREE(&rt->s2);

	/* read of uninitialized variable */
	if (s1->fld)
		UT_OUT("%d", 1);

	/* write to freed object */
	s2->fld = 7;
	pmemobj_persist(pop, s2, sizeof(*s2));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
		NULL, NULL);
	s2 = D_RW(rt->s2);
	memset(s2, 0, pmemobj_alloc_usable_size(rt->s2.oid));
	s2->fld = 12; /* ok */

	/* invalid write */
	s2->dyn[100000] = 9;
	/* invalid write */
	s2->dyn[1000] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1));

	POBJ_REALLOC(pop, &rt->s2, struct struct1,
		sizeof(struct struct1) + 100 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 9; /* ok */
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* invalid write to REALLOCated and FREEd object */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
		NULL, NULL);
	POBJ_REALLOC(pop, &rt->s2, struct struct1,
		sizeof(struct struct1) + 30 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 0;
	s2->dyn[29] = 29;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* another deliberate write to a freed object */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));
	pmemobj_close(pop);
}
/*
 * usage -- print the expected invocation and abort
 */
static void
usage(const char *progname)
{
	UT_FATAL("usage: %s [m|t] file-name", progname);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memcheck");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(mc) != 1);

	if (argc < 2)
		usage(argv[0]);

	/* "m" runs only the raw mempool repro; "t" runs the pool tests */
	if (strcmp(argv[1], "m") == 0)
		test_memcheck_bug();
	else if (strcmp(argv[1], "t") == 0) {
		if (argc < 3)
			usage(argv[0]);
		test_everything(argv[2]);
	} else
		usage(argv[0]);

	test_memcheck_bug2();
	DONE(NULL);
}
}
| 3,591 | 20.769697 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_extend/obj_extend.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* obj_extend.c -- pool extension tests
*
*/
#include <stddef.h>
#include "unittest.h"
#define ALLOC_SIZE (((1 << 20) * 2) - 16) /* 2 megabytes - 16 bytes (hdr) */
#define RESV_SIZE ((1 << 29) + ((1 << 20) * 8)) /* 512 + 8 megabytes */
#define FRAG 0.9
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_extend");

	if (argc < 2)
		UT_FATAL("usage: %s file-name [alloc-size] [opath]", argv[0]);

	const char *path = argv[1];
	PMEMobjpool *pop = NULL;

	/* size 0 = growable pool; failure here just ends the test early */
	if ((pop = pmemobj_create(path, "obj_extend",
		0, S_IWUSR | S_IRUSR)) == NULL) {
		UT_ERR("pmemobj_create: %s", pmemobj_errormsg());
		exit(0);
	}

	size_t alloc_size;
	if (argc > 2)
		alloc_size = ATOUL(argv[2]);
	else
		alloc_size = ALLOC_SIZE;

	/* opath may point at a replica/other pool file to reopen */
	const char *opath = path;
	if (argc > 3)
		opath = argv[3];

	/* allocate until the pool (after extensions) is exhausted */
	size_t allocated = 0;
	PMEMoid oid;
	while (pmemobj_alloc(pop, &oid, alloc_size, 0, NULL, NULL) == 0) {
		allocated += pmemobj_alloc_usable_size(oid);
	}

	/* the extended pool must have served most of the reservation */
	UT_ASSERT(allocated > (RESV_SIZE * FRAG));

	pmemobj_close(pop);

	/* the extended pool must reopen and pass a consistency check */
	if ((pop = pmemobj_open(opath, "obj_extend")) != NULL) {
		pmemobj_close(pop);

		int result = pmemobj_check(opath, "obj_extend");
		UT_ASSERTeq(result, 1);
	} else {
		UT_ERR("pmemobj_open: %s", pmemobj_errormsg());
	}

	DONE(NULL);
}
| 1,330 | 19.166667 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/obj_defrag_advanced.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* obj_defrag_advanced.c -- test for libpmemobj defragmentation feature
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include "rand.h"
#include "vgraph.h"
#include "pgraph.h"
#include "os_thread.h"
#include "unittest.h"
/* parameters controlling graph creation (volatile and persistent stages) */
struct create_params_t {
    uint64_t seed;                  /* RNG seed (per thread in MT tests) */
    rng_t rng;                      /* per-thread RNG state */
    struct vgraph_params vparams;   /* volatile graph shape parameters */
    struct pgraph_params pparams;   /* persistent graph layout parameters */
};
/*
 * graph_create -- create a graph
 *
 * A volatile graph is generated first and then used as a blueprint to
 * allocate the persistent one; the blueprint is discarded afterwards.
 */
static void
graph_create(struct create_params_t *task, PMEMobjpool *pop, PMEMoid *oidp,
    rng_t *rngp)
{
    struct vgraph_t *blueprint = vgraph_new(&task->vparams, rngp);
    pgraph_new(pop, oidp, blueprint, &task->pparams, rngp);
    vgraph_delete(blueprint);
}
/*
 * graph_defrag -- defragment the pool
 * - collect pointers to all PMEMoids (every node OID stored in the graph
 *   object plus every edge OID stored inside each node)
 * - do sanity checks (all OIDs resolvable, no OID pointer listed twice)
 * - call pmemobj_defrag
 * - return # of relocated objects
 */
static size_t
graph_defrag(PMEMobjpool *pop, PMEMoid oid)
{
    struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(oid);

    /* count number of oids: one per node + one per edge */
    unsigned oidcnt = pgraph->nodes_num;
    for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
        struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
            (pgraph->nodes[i]);
        oidcnt += pnode->edges_num;
    }

    /* create array of oid pointers */
    PMEMoid **oidv = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * oidcnt);
    unsigned oidi = 0;
    for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
        oidv[oidi++] = &pgraph->nodes[i];

        struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
            (pgraph->nodes[i]);
        for (unsigned j = 0; j < pnode->edges_num; ++j) {
            oidv[oidi++] = &pnode->edges[j];
        }
    }
    /* the two counting passes above must agree */
    UT_ASSERTeq(oidi, oidcnt);

    /* check if all oids are valid */
    for (unsigned i = 0; i < oidcnt; ++i) {
        void *ptr = pmemobj_direct(*oidv[i]);
        UT_ASSERTne(ptr, NULL);
    }

    /* check if all oids appear only once (O(n^2), fine for test sizes) */
    for (unsigned i = 0; i < oidcnt - 1; ++i) {
        for (unsigned j = i + 1; j < oidcnt; ++j) {
            UT_ASSERTne(oidv[i], oidv[j]);
        }
    }

    struct pobj_defrag_result result;
    int ret = pmemobj_defrag(pop, oidv, oidcnt, &result);
    UT_ASSERTeq(ret, 0);
    /* the test expects the processed total to equal the node count */
    UT_ASSERTeq(result.total, pgraph->nodes_num);

    FREE(oidv);

    return result.relocated;
}
/*
 * graph_defrag_ntimes -- defragment the graph up to max_rounds times,
 * stopping early as soon as a round relocates nothing
 */
static void
graph_defrag_ntimes(PMEMobjpool *pop, PMEMoid oid, unsigned max_rounds)
{
    unsigned round = 0;
    size_t moved;
    do {
        moved = graph_defrag(pop, oid);
    } while (++round < max_rounds && moved > 0);
}
#define HAS_TO_EXIST (1)
/*
 * graph_dump -- dump a graph from the pool to a text file
 *
 * With has_exist set, a missing graph is a test failure; otherwise a
 * NULL graph is silently skipped.
 */
static void
graph_dump(PMEMoid oid, const char *path, int has_exist)
{
    struct pgraph_t *g = (struct pgraph_t *)pmemobj_direct(oid);
    if (has_exist)
        UT_ASSERTne(g, NULL);
    if (g != NULL)
        pgraph_print(g, path);
}
#define FGETS_BUFF_LEN 1024
/*
 * dump_compare -- compare two graph dumps line by line
 *
 * The test fails if the contents of the dumps do not match or one file
 * ends before the other.
 */
static void
dump_compare(const char *path1, const char *path2)
{
    FILE *f1 = FOPEN(path1, "r");
    FILE *f2 = FOPEN(path2, "r");

    char line1[FGETS_BUFF_LEN];
    char line2[FGETS_BUFF_LEN];

    for (;;) {
        char *r1 = fgets(line1, FGETS_BUFF_LEN, f1);
        char *r2 = fgets(line2, FGETS_BUFF_LEN, f2);

        if (r1 == NULL) {
            /* both files have to end at the same time */
            UT_ASSERTeq(r2, NULL);
            FCLOSE(f1);
            FCLOSE(f2);
            return;
        }

        UT_ASSERTeq(r1, line1);
        UT_ASSERTeq(r2, line2);
        UT_ASSERTeq(strcmp(line1, line2), 0);
    }
}
/*
 * create_params_init -- initialize create params with default values
 */
static void
create_params_init(struct create_params_t *params)
{
    params->seed = 1;
    /* good enough defaults - no magic here */
    params->vparams.max_nodes = 50;
    params->vparams.max_edges = 10;
    params->vparams.range_nodes = 10;   /* nodes drawn from [40, 50) */
    params->vparams.range_edges = 10;
    params->vparams.min_pattern_size = 8;
    params->vparams.max_pattern_size = 1024;
    params->pparams.graph_copies = 10;
}
/* global state */
static struct global_t {
    PMEMobjpool *pop;   /* pool shared by all ops (and worker threads) */
} global;

/*
 * PMEMobj root object structure
 */
struct root_t {
    unsigned graphs_num;    /* # of valid entries in graphs[] */
    PMEMoid graphs[];       /* one OID per persistent graph */
};
/*
 * root_size -- compute the root object size for graph_num graph slots,
 * never less than min_root_size
 */
static inline size_t
root_size(unsigned graph_num, size_t min_root_size)
{
    size_t required = sizeof(struct root_t) + graph_num * sizeof(PMEMoid);
    return MAX(required, min_root_size);
}
#define QUERY_GRAPHS_NUM UINT_MAX
/*
 * get_root -- (re)allocate the root object big enough for graphs_num OIDs
 *
 * Passing QUERY_GRAPHS_NUM reads the current graph count from the pool
 * first (via a minimal root allocation) instead of taking it from the
 * caller.
 */
static struct root_t *
get_root(unsigned graphs_num, size_t min_root_size)
{
    PMEMoid roid;
    struct root_t *root;

    if (graphs_num == QUERY_GRAPHS_NUM) {
        /* allocate a root object without graphs */
        roid = pmemobj_root(global.pop, root_size(0, 0));
        if (OID_IS_NULL(roid))
            UT_FATAL("!pmemobj_root:");
        root = (struct root_t *)pmemobj_direct(roid);
        UT_ASSERTne(root, NULL);
        graphs_num = root->graphs_num;
    }
    UT_ASSERT(graphs_num > 0);

    /* reallocate a root object with all known graphs */
    roid = pmemobj_root(global.pop, root_size(graphs_num, min_root_size));
    if (OID_IS_NULL(roid))
        UT_FATAL("!pmemobj_root:");
    root = (struct root_t *)pmemobj_direct(roid);
    UT_ASSERTne(root, NULL);

    return root;
}
/*
 * parse_nonzero -- parse a non-zero unsigned integer from a string
 *
 * Values of 0 and >= UINT_MAX abort the test (the upper bound also guards
 * against strtoul() overflow saturation on 32-bit unsigned long).
 */
static void
parse_nonzero(unsigned *var, const char *arg)
{
    unsigned long v = STRTOUL(arg, NULL, 10);
    UT_ASSERTne(v, 0);
    UT_ASSERT(v < UINT_MAX);
    /* explicit narrowing cast -- safe after the range check above */
    *var = (unsigned)v;
}
#define GRAPH_LAYOUT POBJ_LAYOUT_NAME(graph)
/*
 * op_pool_create -- create the pool the subsequent ops will operate on
 */
static int
op_pool_create(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 1)
        UT_FATAL("usage: %s <path>", tc->name);

    const char *path = argv[0];

    global.pop = pmemobj_create(path, GRAPH_LAYOUT, 0, S_IWUSR | S_IRUSR);
    if (global.pop == NULL)
        UT_FATAL("!pmemobj_create: %s", path);

    return 1;
}
/*
 * op_pool_close -- close the pool and reset the global handle
 */
static int
op_pool_close(const struct test_case *tc, int argc, char *argv[])
{
    pmemobj_close(global.pop);
    global.pop = NULL;
    return 0;
}
/*
 * op_graph_create -- create a single graph in slot 0 of the root object
 *
 * args: <max-nodes> <max-edges> <graph-copies> <min-root-size>
 */
static int
op_graph_create(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 4)
        UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
            " <min-root-size>", tc->name);

    /* parse arguments */
    struct create_params_t cparams;
    create_params_init(&cparams);
    parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
    parse_nonzero(&cparams.vparams.max_edges, argv[1]);
    parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
    size_t min_root_size = STRTOULL(argv[3], NULL, 10);

    struct root_t *root = get_root(1, min_root_size);

    /* seed the global RNG; NULL rngp below makes rand_range() use it */
    randomize(cparams.seed);

    /* generate a single graph */
    graph_create(&cparams, global.pop, &root->graphs[0], NULL);
    root->graphs_num = 1;
    pmemobj_persist(global.pop, root, root_size(1, min_root_size));

    return 4;
}
/*
 * op_graph_dump -- dump the (single) graph to a text file
 */
static int
op_graph_dump(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 1)
        UT_FATAL("usage: %s <dump>", tc->name);

    const char *dump = argv[0];

    /* exactly one graph is expected in the pool at this point */
    struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
    UT_ASSERTeq(root->graphs_num, 1);

    graph_dump(root->graphs[0], dump, HAS_TO_EXIST);

    return 1;
}
/*
 * op_graph_defrag -- defragment the (single) graph
 */
static int
op_graph_defrag(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 1)
        UT_FATAL("usage: %s <max-rounds>", tc->name);

    unsigned max_rounds;
    parse_nonzero(&max_rounds, argv[0]);

    /* exactly one graph is expected in the pool at this point */
    struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
    UT_ASSERTeq(root->graphs_num, 1);

    graph_defrag_ntimes(global.pop, root->graphs[0], max_rounds);

    return 1;
}
/*
 * op_dump_compare -- fail the test unless the two dump files match
 */
static int
op_dump_compare(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 2)
        UT_FATAL("usage: %s <dump1> <dump2>", tc->name);

    dump_compare(argv[0], argv[1]);

    return 2;
}
/* per-thread parameters for the create & defrag worker */
struct create_n_defrag_params_t {
    char dump1[PATH_MAX];       /* dump path before defrag */
    char dump2[PATH_MAX];       /* dump path after defrag */
    struct create_params_t cparams; /* thread-private creation params */
    PMEMobjpool *pop;
    PMEMoid *oidp;              /* this thread's slot in root->graphs[] */
    unsigned max_rounds;        /* defrag rounds cap per cycle */
    unsigned ncycles;           /* # of create/defrag/compare cycles */
};
/*
 * create_n_defrag_thread -- create and defrag graphs multiple times
 *
 * Each cycle: build a graph, dump it, defragment it, dump it again,
 * assert both dumps are identical, and free the graph.
 */
static void *
create_n_defrag_thread(void *arg)
{
    struct create_n_defrag_params_t *params =
        (struct create_n_defrag_params_t *)arg;

    struct create_params_t *cparams = &params->cparams;

    for (unsigned i = 0; i < params->ncycles; ++i) {
        graph_create(cparams, global.pop, params->oidp, &cparams->rng);
        graph_dump(*params->oidp, params->dump1, HAS_TO_EXIST);
        graph_defrag_ntimes(params->pop, *params->oidp,
            params->max_rounds);
        graph_dump(*params->oidp, params->dump2, HAS_TO_EXIST);
        /* defrag must not change the graph's contents */
        dump_compare(params->dump1, params->dump2);
        pgraph_delete(params->oidp);
    }

    return NULL;
}
/*
 * op_graph_create_n_defrag_mt -- multi-threaded graphs creation & defrag
 *
 * Spawns <n-threads> workers, each running <n-create-defrag-cycles> of
 * create/defrag/compare on its own slot of the root object.
 */
static int
op_graph_create_n_defrag_mt(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 8)
        UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
            " <min-root-size> <max-defrag-rounds> <n-threads>"
            "<n-create-defrag-cycles> <dump-suffix>",
            tc->name);

    /* parse arguments */
    struct create_params_t cparams;
    create_params_init(&cparams);
    parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
    parse_nonzero(&cparams.vparams.max_edges, argv[1]);
    parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
    size_t min_root_size = STRTOULL(argv[3], NULL, 10);
    unsigned max_rounds;
    parse_nonzero(&max_rounds, argv[4]);
    unsigned nthreads;
    parse_nonzero(&nthreads, argv[5]);
    unsigned ncycles;
    parse_nonzero(&ncycles, argv[6]);
    char *dump_suffix = argv[7];

    /* one graph slot per thread */
    struct root_t *root = get_root(nthreads, min_root_size);
    root->graphs_num = nthreads;
    pmemobj_persist(global.pop, root, sizeof(*root));

    /* prepare threads params */
    struct create_n_defrag_params_t *paramss =
        (struct create_n_defrag_params_t *)MALLOC(
            sizeof(*paramss) * nthreads);

    for (unsigned i = 0; i < nthreads; ++i) {
        struct create_n_defrag_params_t *params = &paramss[i];

        SNPRINTF(params->dump1, PATH_MAX, "dump_1_th%u_%s.log",
            i, dump_suffix);
        SNPRINTF(params->dump2, PATH_MAX, "dump_2_th%u_%s.log",
            i, dump_suffix);

        memcpy(&params->cparams, &cparams, sizeof(cparams));
        /* distinct seed per thread so the generated graphs differ */
        params->cparams.seed += i;
        randomize_r(&params->cparams.rng, params->cparams.seed);
        params->pop = global.pop;
        params->oidp = &root->graphs[i];
        params->max_rounds = max_rounds;
        params->ncycles = ncycles;
    }

    /* spawn threads */
    os_thread_t *threads = (os_thread_t *)MALLOC(
        sizeof(*threads) * nthreads);

    for (unsigned i = 0; i < nthreads; ++i)
        THREAD_CREATE(&threads[i], NULL, create_n_defrag_thread,
            &paramss[i]);

    /* join all threads */
    void *ret = NULL;
    for (unsigned i = 0; i < nthreads; ++i) {
        THREAD_JOIN(&threads[i], &ret);
        UT_ASSERTeq(ret, NULL);
    }

    FREE(threads);
    FREE(paramss);

    return 8;
}
/*
 * op_pool_open -- open an existing pool (used by pool-validation runs)
 */
static int
op_pool_open(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 1)
        UT_FATAL("usage: %s <path>", tc->name);

    /* parse arguments */
    const char *path = argv[0];

    /* open a pool */
    global.pop = pmemobj_open(path, GRAPH_LAYOUT);
    if (global.pop == NULL)
        /* was wrongly reported as "pmemobj_create" */
        UT_FATAL("!pmemobj_open: %s", path);

    return 1;
}
/*
 * op_graph_dump_all -- dump every graph in the pool to <prefix>_<i>.log
 */
static int
op_graph_dump_all(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 1)
        UT_FATAL("usage: %s <dump-prefix>", tc->name);

    const char *prefix = argv[0];
    struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);

    char dump[PATH_MAX];
    for (unsigned i = 0; i < root->graphs_num; ++i) {
        SNPRINTF(dump, PATH_MAX, "%s_%u.log", prefix, i);
        graph_dump(root->graphs[i], dump, HAS_TO_EXIST);
    }

    return 1;
}
/*
 * ops -- available ops, dispatched by TEST_CASE_PROCESS() in main()
 */
static struct test_case ops[] = {
    TEST_CASE(op_pool_create),
    TEST_CASE(op_pool_close),
    TEST_CASE(op_graph_create),
    TEST_CASE(op_graph_dump),
    TEST_CASE(op_graph_defrag),
    TEST_CASE(op_dump_compare),
    TEST_CASE(op_graph_create_n_defrag_mt),
    /* for pool validation only */
    TEST_CASE(op_pool_open),
    TEST_CASE(op_graph_dump_all),
};
#define NOPS ARRAY_SIZE(ops)
#define TEST_NAME "obj_defrag_advanced"
/*
 * main -- run the ops named on the command line, in order.
 */
int
main(int argc, char *argv[])
{
    START(argc, argv, TEST_NAME);
    TEST_CASE_PROCESS(argc, argv, ops, NOPS);
    DONE(NULL);
}
| 12,707 | 21.452297 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/pgraph.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pgraph.h -- persistent graph representation
*/
#ifndef OBJ_DEFRAG_ADV_PGRAPH
#define OBJ_DEFRAG_ADV_PGRAPH

#include <libpmemobj/base.h>

struct pgraph_params
{
    /* # of redundant graph copies allocated to induce fragmentation */
    unsigned graph_copies;
};

/* a persistent node: header + edge OID array + trailing byte pattern */
struct pnode_t
{
    unsigned node_id;       /* index of this node in pgraph_t.nodes */
    unsigned edges_num;     /* # of entries in edges[] */
    size_t pattern_size;    /* size of the pattern stored after edges[] */
    size_t size;            /* total allocation size of this node */
    PMEMoid edges[];        /* OIDs of the destination nodes */
};

/* the persistent graph object: an array of node OIDs */
struct pgraph_t
{
    unsigned nodes_num;
    PMEMoid nodes[];
};

void pgraph_new(PMEMobjpool *pop, PMEMoid *oidp, struct vgraph_t *vgraph,
    struct pgraph_params *params, rng_t *rngp);
void pgraph_delete(PMEMoid *oidp);
void pgraph_print(struct pgraph_t *graph, const char *dump);

#endif /* pgraph.h */
| 695 | 16.4 | 73 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
import valgrind as vg
# These tests last too long under drd
# Exceptions: test no. 2
@t.require_valgrind_disabled('drd')
class ObjDefragAdvanced(t.BaseTest):
    """Single-threaded scenario: create a graph, dump it, defragment it,
    dump it again and require both dumps to be identical."""
    test_type = t.Short

    # graph shape / pool knobs consumed by the test binary
    max_nodes = 50
    max_edges = 10
    graph_copies = 10
    pool_size = 500 * t.MiB
    max_rounds = 10
    min_root_size = 0

    def run(self, ctx):
        ctx.require_free_space(self.pool_size)
        path = ctx.create_holey_file(self.pool_size, 'testfile')
        # per-test dump names so parallel test runs don't collide
        dump1 = 'dump_1_{}.log'.format(self.testnum)
        dump2 = 'dump_2_{}.log'.format(self.testnum)

        ctx.exec('obj_defrag_advanced',
                 'op_pool_create', path,
                 'op_graph_create', str(self.max_nodes), str(self.max_edges),
                 str(self.graph_copies), str(self.min_root_size),
                 'op_graph_dump', dump1,
                 'op_graph_defrag', str(self.max_rounds),
                 'op_graph_dump', dump2,
                 'op_pool_close',
                 'op_dump_compare', dump1, dump2)
class TEST0(ObjDefragAdvanced):
    # tiny graph variant
    max_nodes = 5
    max_edges = 5
    graph_copies = 5
class TEST1(ObjDefragAdvanced):
    # many-nodes variant
    max_nodes = 2048
    max_edges = 5
    graph_copies = 5
@t.require_valgrind_disabled('helgrind')
@g.require_granularity(g.CACHELINE)
class TEST2(ObjDefragAdvanced):
    # medium-length run with a larger graph and a non-zero root padding
    test_type = t.Medium

    # XXX port this to the new framework
    # Restore defaults
    drd = vg.AUTO

    max_nodes = 512
    max_edges = 64
    graph_copies = 5
    min_root_size = 4096
@g.require_granularity(g.CACHELINE)
class ObjDefragAdvancedMt(ObjDefragAdvanced):
    """Multi-threaded variant: each thread repeatedly creates, defragments
    and verifies its own graph (op_graph_create_n_defrag_mt)."""
    test_type = t.Medium

    nthreads = 2
    ncycles = 2

    def run(self, ctx):
        ctx.require_free_space(self.pool_size)
        path = ctx.create_holey_file(self.pool_size, 'testfile')

        ctx.exec('obj_defrag_advanced',
                 'op_pool_create', path,
                 'op_graph_create_n_defrag_mt', self.max_nodes,
                 self.max_edges, self.graph_copies, self.min_root_size,
                 self.max_rounds, self.nthreads, self.ncycles, self.testnum,
                 'op_pool_close')
class TEST3(ObjDefragAdvancedMt):
    # single worker thread, many cycles
    max_nodes = 256
    max_edges = 64
    graph_copies = 10
    nthreads = 1
    ncycles = 25
class TEST4(ObjDefragAdvancedMt):
    # many worker threads, smaller graphs
    max_nodes = 128
    max_edges = 32
    graph_copies = 10
    nthreads = 10
    ncycles = 25
# This test lasts too long under helgrind/memcheck/pmemcheck
@t.require_valgrind_disabled('helgrind', 'memcheck', 'pmemcheck')
class TEST5(ObjDefragAdvancedMt):
    # many threads and larger graphs; native runs only (see decorator)
    max_nodes = 256
    max_edges = 32
    graph_copies = 5
    nthreads = 10
    ncycles = 25
# a testcase designed to verify the pool content in case of fail
# class TESTX(ObjDefragAdvanced):
# def run(self, ctx):
# path = '/custom/pool/path'
# dump_prefix = 'dump'
#
# ctx.exec('obj_defrag_advanced',
# 'op_pool_open', path,
# 'op_graph_dump_all', dump_prefix,
# 'op_pool_close')
| 3,143 | 24.152 | 77 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/pgraph.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pgraph.c -- persistent graph representation
*/
#include <inttypes.h>
#include "unittest.h"
#include "vgraph.h"
#include "pgraph.h"
#define PATTERN 'g'
/*
 * pnode_size -- return the total allocation size of a node:
 * header + edge OID array + trailing byte pattern
 */
static size_t
pnode_size(unsigned edges_num, size_t pattern_size)
{
    return sizeof(struct pnode_t)
            + sizeof(PMEMoid) * edges_num
            + pattern_size;
}
/*
 * pnode_init -- initialize the node
 *
 * pnodes[] maps volatile node ids to the already-allocated persistent
 * nodes, so edge ids can be translated into OIDs here.
 */
static void
pnode_init(PMEMobjpool *pop, PMEMoid pnode_oid, struct vnode_t *vnode,
    PMEMoid pnodes[])
{
    struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(pnode_oid);
    pnode->node_id = vnode->node_id;
    pnode->size = vnode->psize;

    /* set edges */
    pnode->edges_num = vnode->edges_num;
    for (unsigned i = 0; i < vnode->edges_num; ++i)
        pnode->edges[i] = pnodes[vnode->edges[i]];

    /* initialize pattern (placed right after the edges array) */
    pnode->pattern_size = vnode->pattern_size;
    void *pattern = (void *)&pnode->edges[pnode->edges_num];
    /* NOFLUSH is fine -- the whole node is persisted below */
    pmemobj_memset(pop, pattern, PATTERN, pnode->pattern_size,
        PMEMOBJ_F_MEM_NOFLUSH);

    /* persist the whole node state */
    pmemobj_persist(pop, (const void *)pnode, pnode->size);
}
/*
 * order_shuffle -- shuffle the array into a random permutation
 *
 * Uses the Fisher-Yates algorithm: element i is swapped with a uniformly
 * chosen element from [i, num). The previous version swapped each element
 * with an arbitrary index, which is a known source of biased (non-uniform)
 * permutations.
 */
static void
order_shuffle(unsigned *order, unsigned num, rng_t *rngp)
{
    for (unsigned i = 0; i < num; ++i) {
        unsigned j = rand_range(i, num, rngp);
        unsigned temp = order[j];
        order[j] = order[i];
        order[i] = temp;
    }
}
/*
 * order_new -- generate a random allocation order for the graph nodes
 */
static unsigned *
order_new(struct vgraph_t *vgraph, rng_t *rngp)
{
    unsigned n = vgraph->nodes_num;
    unsigned *order = (unsigned *)MALLOC(sizeof(*order) * n);

    /* start from the identity permutation, then shuffle it */
    for (unsigned i = 0; i < n; ++i)
        order[i] = i;

    order_shuffle(order, n, rngp);
    return order;
}
/*
 * pgraph_copy_new -- allocate one persistent copy of the volatile graph
 *
 * Nodes are allocated in a random order (to maximize fragmentation) and
 * their OIDs are returned in an array owned by the caller.
 */
static PMEMoid *
pgraph_copy_new(PMEMobjpool *pop, struct vgraph_t *vgraph, rng_t *rngp)
{
    PMEMoid *nodes = (PMEMoid *)MALLOC(sizeof(PMEMoid) * vgraph->nodes_num);

    /* random allocation order */
    unsigned *order = order_new(vgraph, rngp);

    for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
        const struct vnode_t *vnode = &vgraph->node[order[i]];
        int ret = pmemobj_alloc(pop, &nodes[order[i]], vnode->psize,
                0, NULL, NULL);
        UT_ASSERTeq(ret, 0);
    }

    FREE(order);
    return nodes;
}
/*
 * pgraph_copy_delete -- free the remaining nodes of one graph copy
 *
 * Entries zeroed out (picked for the final graph) are skipped.
 */
static void
pgraph_copy_delete(PMEMoid *nodes, unsigned num)
{
    for (unsigned i = 0; i < num; ++i) {
        if (!OID_IS_NULL(nodes[i]))
            pmemobj_free(&nodes[i]);
    }

    FREE(nodes);
}
/*
 * pgraph_size -- return the struct pgraph_t size: header + one OID per node
 */
static size_t
pgraph_size(unsigned nodes_num)
{
    return sizeof(struct pgraph_t) + nodes_num * sizeof(PMEMoid);
}
/*
 * pgraph_new -- allocate a new persistent graph in such a way
 * that the fragmentation is as large as possible
 *
 * Several full copies of the node set are allocated; exactly one copy of
 * each node is kept (picked at random) and the rest are freed, leaving
 * holes all over the heap.
 */
void
pgraph_new(PMEMobjpool *pop, PMEMoid *oidp, struct vgraph_t *vgraph,
    struct pgraph_params *params, rng_t *rngp)
{
    int ret = pmemobj_alloc(pop, oidp, pgraph_size(vgraph->nodes_num),
        0, NULL, NULL);
    UT_ASSERTeq(ret, 0);

    struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp);

    pgraph->nodes_num = vgraph->nodes_num;
    pmemobj_persist(pop, pgraph, sizeof(*pgraph));

    /* calculate size of pnodes */
    for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
        struct vnode_t *vnode = &vgraph->node[i];
        vnode->psize = pnode_size(vnode->edges_num,
            vnode->pattern_size);
    }

    /* prepare multiple copies of the nodes */
    unsigned copies_num = rand_range(1, params->graph_copies, rngp);
    PMEMoid **copies = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * copies_num);
    for (unsigned i = 0; i < copies_num; ++i)
        copies[i] = pgraph_copy_new(pop, vgraph, rngp);

    /* peek exactly the one copy of each node; zero the slot it came from */
    for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
        unsigned copy_id = rand_range(0, copies_num, rngp);
        pgraph->nodes[i] = copies[copy_id][i];
        copies[copy_id][i] = OID_NULL;
    }
    pmemobj_persist(pop, pgraph->nodes,
        sizeof(PMEMoid) * pgraph->nodes_num);

    /* free unused copies of the nodes */
    for (unsigned i = 0; i < copies_num; ++i)
        pgraph_copy_delete(copies[i], vgraph->nodes_num);
    FREE(copies);

    /* initialize pnodes (edges can now refer to the chosen OIDs) */
    for (unsigned i = 0; i < pgraph->nodes_num; ++i)
        pnode_init(pop, pgraph->nodes[i], &vgraph->node[i],
            pgraph->nodes);
}
/*
 * pgraph_delete -- free the persistent graph (nodes first, then the
 * graph object itself)
 */
void
pgraph_delete(PMEMoid *oidp)
{
    struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp);

    for (unsigned i = 0; i < pgraph->nodes_num; ++i)
        pmemobj_free(&pgraph->nodes[i]);

    pmemobj_free(oidp);
}
/*
 * pgraph_print -- print graph in human readable format
 *
 * Writes node/edge statistics followed by one "<id>: <edge ids>" line
 * per node; used to compare the graph before and after defrag.
 */
void
pgraph_print(struct pgraph_t *pgraph, const char *dump)
{
    UT_ASSERTne(dump, NULL);
    FILE *out = FOPEN(dump, "w");

    /* print the graph statistics */
    fprintf(out, "# of nodes: %u\n", pgraph->nodes_num);

    uint64_t total_edges_num = 0;
    for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
        PMEMoid node_oid = pgraph->nodes[i];
        struct pnode_t *pnode =
            (struct pnode_t *)pmemobj_direct(node_oid);
        total_edges_num += pnode->edges_num;
    }
    fprintf(out, "Total # of edges: %" PRIu64 "\n\n", total_edges_num);

    /* print the graph itself */
    for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
        PMEMoid node_oid = pgraph->nodes[i];
        struct pnode_t *pnode =
            (struct pnode_t *)pmemobj_direct(node_oid);

        fprintf(out, "%u:", pnode->node_id);
        for (unsigned j = 0; j < pnode->edges_num; ++j) {
            PMEMoid edge_oid = pnode->edges[j];
            struct pnode_t *edge =
                (struct pnode_t *)pmemobj_direct(edge_oid);
            /* edges must point inside the graph */
            UT_ASSERT(edge->node_id < pgraph->nodes_num);
            fprintf(out, "%u, ", edge->node_id);
        }
        fprintf(out, "\n");
    }

    FCLOSE(out);
}
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/vgraph.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.c -- volatile graph representation
*/
#include <stdlib.h>
#include <stdio.h>
#include "rand.h"
#include "unittest.h"
#include "vgraph.h"
/*
 * rand_range -- generate a pseudo-random number from the half-open
 * interval [min, max)
 *
 * (The modulo below maps onto [min, max), not [min, max].) min == max is
 * accepted and returns min; min > max aborts the test.
 */
unsigned
rand_range(unsigned min, unsigned max, rng_t *rngp)
{
    if (min == max)
        return min;

    if (min > max)
        UT_FATAL("!rand_range");

    unsigned ret;
    /* without a per-caller RNG state, fall back to the global generator */
    if (rngp)
        ret = (unsigned)rnd64_r(rngp);
    else
        ret = (unsigned)rnd64();

    return ((unsigned)ret % (max - min)) + min;
}
/*
 * vnode_new -- initialize a volatile node and allocate its edge array
 *
 * The edge count is drawn from [max_edges - range_edges, max_edges)
 * (at least 1); the pattern size from [min_pattern_size, max_pattern_size).
 */
static void
vnode_new(struct vnode_t *node, unsigned v, struct vgraph_params *params,
    rng_t *rngp)
{
    unsigned min_edges = 1;
    if (params->max_edges > params->range_edges)
        min_edges = params->max_edges - params->range_edges;
    unsigned edges_num = rand_range(min_edges, params->max_edges, rngp);

    node->node_id = v;
    node->edges_num = edges_num;
    /* sizeof(*node->edges): the array holds unsigned, not int */
    node->edges = (unsigned *)MALLOC(sizeof(*node->edges) * edges_num);
    node->pattern_size = rand_range(params->min_pattern_size,
        params->max_pattern_size, rngp);
}
/*
 * vnode_delete -- free a volatile node's owned resources (its edge array)
 */
static void
vnode_delete(struct vnode_t *node)
{
    FREE(node->edges);
}
/*
 * vgraph_get_node -- return the node with the given id
 */
static struct vnode_t *
vgraph_get_node(struct vgraph_t *graph, unsigned id_node)
{
    return &graph->node[id_node];
}
/*
 * vgraph_add_edges -- randomly assign destination nodes to the edges
 *
 * Self-loops and duplicate edges are allowed.
 */
static void
vgraph_add_edges(struct vgraph_t *graph, rng_t *rngp)
{
    for (unsigned i = 0; i < graph->nodes_num; i++) {
        struct vnode_t *node = vgraph_get_node(graph, i);

        for (unsigned e = 0; e < node->edges_num; e++)
            node->edges[e] = rand_range(0, graph->nodes_num,
                    rngp);
    }
}
/*
 * vgraph_new -- allocate a new volatile graph with a random number of
 * nodes from [max_nodes - range_nodes, max_nodes)
 */
struct vgraph_t *
vgraph_new(struct vgraph_params *params, rng_t *rngp)
{
    unsigned min_nodes = 1;
    if (params->max_nodes > params->range_nodes)
        min_nodes = params->max_nodes - params->range_nodes;

    unsigned n = rand_range(min_nodes, params->max_nodes, rngp);
    struct vgraph_t *graph = (struct vgraph_t *)MALLOC(
            sizeof(*graph) + sizeof(struct vnode_t) * n);

    graph->nodes_num = n;
    for (unsigned i = 0; i < n; i++)
        vnode_new(&graph->node[i], i, params, rngp);

    vgraph_add_edges(graph, rngp);

    return graph;
}
/*
 * vgraph_delete -- free the volatile graph
 */
void
vgraph_delete(struct vgraph_t *graph)
{
    /* each node owns its edge array -- free those first */
    for (unsigned i = 0; i < graph->nodes_num; i++)
        vnode_delete(&graph->node[i]);
    FREE(graph);
}
| 2,894 | 21.099237 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/vgraph.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.h -- volatile graph representation
*/
#ifndef OBJ_DEFRAG_ADV_VGRAPH
#define OBJ_DEFRAG_ADV_VGRAPH
#include "rand.h"
struct vgraph_params
{
unsigned max_nodes; /* max # of nodes per graph */
unsigned max_edges; /* max # of edges per node */
/* # of nodes is between [max_nodes - range_nodes, max_nodes] */
unsigned range_nodes;
/* # of edges is between [max_edges - range_edges, max_edges] */
unsigned range_edges;
unsigned min_pattern_size;
unsigned max_pattern_size;
};
struct vnode_t
{
unsigned node_id;
unsigned edges_num; /* # of edges starting from this node */
unsigned *edges; /* ids of nodes the edges are pointing to */
/* the persistent node attributes */
size_t pattern_size; /* size of the pattern allocated after the node */
size_t psize; /* the total size of the node */
};
struct vgraph_t
{
unsigned nodes_num;
struct vnode_t node[];
};
unsigned rand_range(unsigned min, unsigned max, rng_t *rngp);
struct vgraph_t *vgraph_new(struct vgraph_params *params, rng_t *rngp);
void vgraph_delete(struct vgraph_t *graph);
#endif
| 1,158 | 23.145833 | 72 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_badblock/util_badblock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_badblock.c -- unit test for the linux bad block API
*
*/
#include "unittest.h"
#include "util.h"
#include "out.h"
#include "set.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "fault_injection.h"
#include "file.h"
#define MIN_POOL ((size_t)(1024 * 1024 * 8)) /* 8 MiB */
#define MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
/*
 * do_list -- (internal) list bad blocks in the file
 *
 * Prints "<offset> <length>" per bad block: offsets in 512B sectors;
 * lengths in 512B sectors for DAX devices and in st_blksize blocks for
 * regular files.
 */
static void
do_list(const char *path)
{
    int ret;
    struct stat st;

    if (os_stat(path, &st) < 0)
        UT_FATAL("!stat %s", path);

    struct badblocks *bbs = badblocks_new();
    if (bbs == NULL)
        UT_FATAL("!badblocks_new");

    ret = badblocks_get(path, bbs);
    if (ret)
        UT_FATAL("!badblocks_get");

    if (bbs->bb_cnt == 0 || bbs->bbv == NULL) {
        UT_OUT("No bad blocks found.");
        goto exit_free;
    }

    int file_type = util_file_get_type(path);
    if (file_type < 0)
        UT_FATAL("!Cannot read type of the file");

    UT_OUT("Found %u bad block(s):", bbs->bb_cnt);

    unsigned b;
    for (b = 0; b < bbs->bb_cnt; b++) {
        UT_OUT("%zu %zu",
            /* offset is printed in 512b sectors */
            bbs->bbv[b].offset >> 9,
            /*
             * length is printed in:
             * - 512b sectors in case of DAX devices,
             * - blocks in case of regular files.
             */
            (file_type == TYPE_DEVDAX) ?
                bbs->bbv[b].length >> 9 :
                bbs->bbv[b].length / (unsigned)st.st_blksize);
    }

exit_free:
    badblocks_delete(bbs);
}
/*
 * do_clear -- (internal) clear all bad blocks in the file
 */
static void
do_clear(const char *path)
{
    int ret = badblocks_clear_all(path);
    if (ret)
        UT_FATAL("!badblocks_clear_all: %s", path);
}
/*
 * do_create -- (internal) create a pool with zeroed attributes
 */
static void
do_create(const char *path)
{
    struct pool_set *set;
    struct pool_attr attr;
    unsigned nlanes = 1;

    /* all-zero pool attributes */
    memset(&attr, 0, sizeof(attr));

    int ret = util_pool_create(&set, path, 0, MIN_POOL, MIN_PART,
            &attr, &nlanes, REPLICAS_ENABLED);
    if (ret != 0)
        UT_FATAL("!util_pool_create: %s", path);

    util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
/*
 * do_open -- (internal) open a pool
 */
static void
do_open(const char *path)
{
    struct pool_set *set;
    /*
     * NOTE(review): attr is passed to util_pool_open() without being
     * initialized -- presumably the open path ignores its contents here,
     * but this should be confirmed (and the struct zero-initialized if
     * it does not).
     */
    const struct pool_attr attr;
    unsigned nlanes = 1;

    if (util_pool_open(&set, path, MIN_PART,
            &attr, &nlanes, NULL, 0) != 0) {
        UT_FATAL("!util_pool_open: %s", path);
    }

    util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
/*
 * do_fault_injection -- (internal) verify that an allocation failure in
 * badblocks_recovery_file_alloc() is reported as ENOMEM
 *
 * No-op in builds without fault injection support.
 */
static void
do_fault_injection(const char *path)
{
    if (!core_fault_injection_enabled())
        return;

    core_inject_fault_at(PMEM_MALLOC, 1, "badblocks_recovery_file_alloc");

    char *recovery_path = badblocks_recovery_file_alloc(path, 0, 0);
    UT_ASSERTeq(recovery_path, NULL);
    UT_ASSERTeq(errno, ENOMEM);
}
/*
 * main -- run the requested bad-block ops (one letter each) on the file.
 *
 * The usage/error strings previously omitted the 'f' (fault-injection)
 * op even though the switch below accepts it.
 */
int
main(int argc, char *argv[])
{
    START(argc, argv, "util_badblock");
    util_init();
    out_init("UTIL_BADBLOCK", "UTIL_BADBLOCK", "", 1, 0);

    if (argc < 3)
        UT_FATAL("usage: %s file op:l|c|r|o|f", argv[0]);

    const char *path = argv[1];

    /* go through all arguments one by one */
    for (int arg = 2; arg < argc; arg++) {
        if (argv[arg][1] != '\0')
            UT_FATAL(
                "op must be l, c, r, o or f (l=list, c=clear, r=create, o=open, f=fault-inject)");

        switch (argv[arg][0]) {
        case 'l':
            do_list(path);
            break;
        case 'c':
            do_clear(path);
            break;
        case 'r':
            do_create(path);
            break;
        case 'o':
            do_open(path);
            break;
        case 'f':
            do_fault_injection(path);
            break;
        default:
            UT_FATAL(
                "op must be l, c, r, o or f (l=list, c=clear, r=create, o=open, f=fault-inject)");
            break;
        }
    }

    out_fini();

    DONE(NULL);
}
| 3,485 | 18.694915 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_mem/obj_mem.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_mem.c -- simple test for pmemobj_memcpy, pmemobj_memmove and
* pmemobj_memset that verifies nothing blows up on pmemobj side.
* Real consistency tests are for libpmem.
*/
#include "unittest.h"
/* every pmemobj mem-op flag combination exercised by the test */
static unsigned Flags[] = {
    0,
    PMEMOBJ_F_MEM_NODRAIN,
    PMEMOBJ_F_MEM_NONTEMPORAL,
    PMEMOBJ_F_MEM_TEMPORAL,
    PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL,
    PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_NODRAIN,
    PMEMOBJ_F_MEM_WC,
    PMEMOBJ_F_MEM_WB,
    PMEMOBJ_F_MEM_NOFLUSH,
    /* all possible flags */
    PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NOFLUSH |
        PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL |
        PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_WB,
};
/*
 * main -- run memset/memcpy/memmove on a pool root with every flag combo;
 * this only verifies nothing blows up on the pmemobj side.
 */
int
main(int argc, char *argv[])
{
    START(argc, argv, "obj_mem");

    if (argc != 2)
        UT_FATAL("usage: %s [directory]", argv[0]);

    PMEMobjpool *pop = pmemobj_create(argv[1], "obj_mem", 0,
        S_IWUSR | S_IRUSR);
    if (!pop)
        UT_FATAL("!pmemobj_create");

    struct root {
        char c[4096];
    };

    struct root *r = pmemobj_direct(pmemobj_root(pop, sizeof(struct root)));

    /* size_t index avoids the signed/unsigned mismatch with ARRAY_SIZE() */
    for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
        unsigned f = Flags[i];

        pmemobj_memset(pop, &r->c[0], 0x77, 2048, f);
        pmemobj_memset(pop, &r->c[2048], 0xff, 2048, f);
        pmemobj_memcpy(pop, &r->c[2048 + 7], &r->c[0], 100, f);
        pmemobj_memcpy(pop, &r->c[2048 + 1024], &r->c[0] + 17, 128, f);
        pmemobj_memmove(pop, &r->c[125], &r->c[150], 100, f);
        pmemobj_memmove(pop, &r->c[350], &r->c[325], 100, f);

        /* NOFLUSH ops leave the data unflushed -- persist explicitly */
        if (f & PMEMOBJ_F_MEM_NOFLUSH)
            pmemobj_persist(pop, r, sizeof(*r));
    }

    pmemobj_close(pop);

    DONE(NULL);
}
| 1,644 | 22.84058 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_valgr_simple/pmem_valgr_simple.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2016, Intel Corporation */
/*
* pmem_valgr_simple.c -- simple unit test using pmemcheck
*
* usage: pmem_valgr_simple file
*/
#include "unittest.h"
/*
 * main -- perform a fixed sequence of stores/flushes on a mapped file.
 *
 * NOTE(review): the statement order here appears to be matched against
 * expected valgrind/pmemcheck logs (see the inline comments) -- do not
 * reorder these operations.
 */
int
main(int argc, char *argv[])
{
    size_t mapped_len;
    char *dest;
    int is_pmem;

    START(argc, argv, "pmem_valgr_simple");

    if (argc != 4)
        UT_FATAL("usage: %s file offset length", argv[0]);

    /* offset/length of the final memset, taken from the command line */
    int dest_off = atoi(argv[2]);
    size_t bytes = strtoul(argv[3], NULL, 0);

    dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, &is_pmem);
    if (dest == NULL)
        UT_FATAL("!Could not mmap %s\n", argv[1]);

    /* these will not be made persistent */
    *(int *)dest = 4;

    /* this will be made persistent */
    uint64_t *tmp64dst = (void *)((uintptr_t)dest + 4096);
    *tmp64dst = 50;

    if (is_pmem) {
        pmem_persist(tmp64dst, sizeof(*tmp64dst));
    } else {
        UT_ASSERTeq(pmem_msync(tmp64dst, sizeof(*tmp64dst)), 0);
    }

    uint16_t *tmp16dst = (void *)((uintptr_t)dest + 1024);
    *tmp16dst = 21;
    /* will appear as flushed/fenced in valgrind log */
    pmem_flush(tmp16dst, sizeof(*tmp16dst));

    /* shows strange behavior of memset in some cases */
    memset(dest + dest_off, 0, bytes);

    UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);

    DONE(NULL);
}
| 1,240 | 21.160714 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_check_version/libpmempool_check_version.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* libpmempool_check_version -- a unittest for libpmempool_check_version.
*
*/
#include "unittest.h"
#include "libpmempool.h"
/*
 * main -- verify pmempool_check_version() accepts (returns NULL for)
 * only the current major with an equal-or-older minor, and rejects
 * every other combination with a non-NULL message
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_check_version");
	/* version 0.0 is never satisfiable */
	UT_ASSERTne(pmempool_check_version(0, 0), NULL);
	/* an older major is rejected */
	UT_ASSERTne(pmempool_check_version(PMEMPOOL_MAJOR_VERSION - 1,
			PMEMPOOL_MINOR_VERSION), NULL);
	if (PMEMPOOL_MINOR_VERSION > 0) {
		/* same major, older minor is accepted */
		UT_ASSERTeq(pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
				PMEMPOOL_MINOR_VERSION - 1), NULL);
	}
	/* the exact current version is accepted */
	UT_ASSERTeq(pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION), NULL);
	/* any newer major or newer minor is rejected */
	UT_ASSERTne(pmempool_check_version(PMEMPOOL_MAJOR_VERSION + 1,
			PMEMPOOL_MINOR_VERSION), NULL);
	UT_ASSERTne(pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION + 1), NULL);
	DONE(NULL);
}
| 897 | 22.631579 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_mt/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# obj_tx_mt/config.sh -- test configuration
#
# Extend the timeout for this test, as it may take a few minutes
# when run on a non-pmem file system.
# CONF_GLOBAL_TIMEOUT applies to every test case in this directory.
CONF_GLOBAL_TIMEOUT='10m'
| 274 | 18.642857 | 60 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_mt/obj_tx_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_tx_mt.c -- multi-threaded test for pmemobj_tx_*
*
* It checks that objects are removed from transactions before on abort/commit
* phase.
*/
#include "unittest.h"
#include "sys_util.h"
#define THREADS 8
#define LOOPS 8
static PMEMobjpool *pop;
static PMEMoid tab;
static os_mutex_t mtx;
/*
 * tx_alloc_free -- thread worker: repeatedly allocates and frees the
 * shared 'tab' object inside transactions serialized by 'mtx'.
 *
 * 'locked' is volatile because the TX_ON* handlers are entered via a
 * longjmp (the TX macros are setjmp-based), so a non-volatile automatic
 * variable modified inside TX_BEGIN could be indeterminate there.
 */
static void *
tx_alloc_free(void *arg)
{
	volatile int locked;
	for (int i = 0; i < LOOPS; ++i) {
		locked = 0;
		TX_BEGIN(pop) {
			util_mutex_lock(&mtx);
			locked = 1;
			tab = pmemobj_tx_zalloc(128, 1);
		} TX_ONCOMMIT {
			/* release the mutex only if this tx acquired it */
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_ONABORT {
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_END
		locked = 0;
		TX_BEGIN(pop) {
			util_mutex_lock(&mtx);
			locked = 1;
			pmemobj_tx_free(tab);
			tab = OID_NULL;
		} TX_ONCOMMIT {
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_ONABORT {
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_END
	}
	return NULL;
}
/*
 * tx_snap -- thread worker: snapshots the first 8 bytes of the shared
 * 'tab' object (when allocated) inside a transaction serialized by
 * 'mtx'.
 *
 * 'locked' is volatile for the same reason as in tx_alloc_free: the
 * TX_ON* handlers are entered via longjmp.
 */
static void *
tx_snap(void *arg)
{
	volatile int locked;
	for (int i = 0; i < LOOPS; ++i) {
		locked = 0;
		TX_BEGIN(pop) {
			util_mutex_lock(&mtx);
			locked = 1;
			if (!OID_IS_NULL(tab))
				pmemobj_tx_add_range(tab, 0, 8);
		} TX_ONCOMMIT {
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_ONABORT {
			if (locked)
				util_mutex_unlock(&mtx);
		} TX_END
		/*
		 * (a trailing 'locked = 0;' used to sit here -- it was a
		 * dead store, as the value is reset at the top of each
		 * iteration)
		 */
	}
	return NULL;
}
/*
 * main -- create a pool and run THREADS workers: half doing
 * transactional alloc/free of the shared object, half transactionally
 * snapshotting it; then join everything and tear down
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_mt");
	util_mutex_init(&mtx);
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	pop = pmemobj_create(argv[1], "mt", PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");
	os_thread_t *threads = MALLOC(THREADS * sizeof(threads[0]));
	int started = 0;
	/* start the workers in alternating alloc-free/snapshot pairs */
	for (int pair = 0; pair < THREADS / 2; ++pair) {
		THREAD_CREATE(&threads[started++], NULL, tx_alloc_free, NULL);
		THREAD_CREATE(&threads[started++], NULL, tx_snap, NULL);
	}
	while (started > 0)
		THREAD_JOIN(&threads[--started], NULL);
	pmemobj_close(pop);
	util_mutex_destroy(&mtx);
	FREE(threads);
	DONE(NULL);
}
| 2,041 | 17.396396 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_mmap_dtor/win_mmap_dtor.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_mmap_dtor.c -- unit test for windows mmap destructor
*/
#include "unittest.h"
#include "os.h"
#include "win_mmap.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
unsigned long long Mmap_align;
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_mmap_dtor");
	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	/* set pagesize for mmap */
	Mmap_align = si.dwAllocationGranularity;
	const char *path = argv[1];
	int fd = os_open(path, O_RDWR);
	UT_ASSERTne(fd, -1);
	/*
	 * Input file has size equal to 2MB, but the mapping is 3MB.
	 * In this case mmap should map whole file and reserve 1MB
	 * of virtual address space for remaining part of the mapping.
	 */
	void *addr = mmap(NULL, 3 * MEGABYTE, PROT_READ, MAP_SHARED, fd, 0);
	UT_ASSERTne(addr, MAP_FAILED);
	MEMORY_BASIC_INFORMATION basic_info;
	SIZE_T bytes_returned;
	/* first 2MB: the file-backed, committed part of the mapping */
	bytes_returned = VirtualQuery(addr, &basic_info,
		sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	UT_ASSERTeq(basic_info.RegionSize, 2 * MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	/* trailing 1MB: reserved-only address space */
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
		&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	UT_ASSERTeq(basic_info.RegionSize, MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_RESERVE);
	/* the destructor must release the reserved region */
	win_mmap_fini();
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
		&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/*
	 * region size can be bigger than 1MB because there was probably
	 * free space after this mapping
	 */
	UT_ASSERTeq(basic_info.State, MEM_FREE);
	DONE(NULL);
}
| 1,778 | 22.72 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_win/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_win/mocks_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)	/* requests above this size fail */
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Logs the call and simulates an out-of-space condition (ENOSPC) for
 * requests larger than MAX_LEN; otherwise forwards to the real
 * implementation.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Logs the call and fails with errno = ENOSPC (returning -1, matching
 * ftruncate's error convention) for lengths larger than MAX_LEN.
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_win/pmem_map_file_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pmem_map_file_win.c -- unit test for mapping persistent memory for raw access
*
* usage: pmem_map_file_win file
*/
#define _GNU_SOURCE
#include "unittest.h"
#include <stdlib.h>
#define CHECK_BYTES 4096 /* bytes to compare before/after map call */
ut_jmp_buf_t Jmp;
/*
 * signal_handler -- called on SIGSEGV
 *
 * Jumps back to the ut_sigsetjmp(Jmp) point in do_check(), turning the
 * expected post-unmap fault into a normal control-flow path.
 */
static void
signal_handler(int sig)
{
	ut_siglongjmp(Jmp);
}
#define PMEM_FILE_ALL_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
static int device_dax = 0;
/*
 * parse_flags -- translate a flag string into PMEM_FILE_* bits
 *
 * Recognized characters: '0'/'-' (no flags), 'T' (TMPFILE),
 * 'S' (SPARSE), 'C' (CREATE), 'E' (EXCL), 'X' (an intentionally
 * unsupported flag value) and 'D' (marks the target as device dax via
 * the global device_dax). Any other character is fatal.
 */
static int
parse_flags(const wchar_t *flags_str)
{
	int flags = 0;
	for (const wchar_t *p = flags_str; *p != L'\0'; p++) {
		switch (*p) {
		case L'0':
		case L'-':
			/* no flags */
			break;
		case L'T':
			flags |= PMEM_FILE_TMPFILE;
			break;
		case L'S':
			flags |= PMEM_FILE_SPARSE;
			break;
		case L'C':
			flags |= PMEM_FILE_CREATE;
			break;
		case L'E':
			flags |= PMEM_FILE_EXCL;
			break;
		case L'X':
			/* not supported flag */
			flags |= (PMEM_FILE_ALL_FLAGS + 1);
			break;
		case L'D':
			device_dax = 1;
			break;
		default:
			UT_FATAL("unknown flags: %c", *p);
		}
	}
	return flags;
}
/*
 * do_check -- check the mapping
 *
 * Verifies that data written through the file descriptor is visible in
 * the mapping, that stores through the mapping reach the file, and that
 * after pmem_unmap() the memory is no longer writable (the expected
 * SIGSEGV is caught via ut_sigsetjmp/signal_handler).
 */
static void
do_check(int fd, void *addr, size_t mlen)
{
	/* arrange to catch SEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	char pat[CHECK_BYTES];
	char buf[CHECK_BYTES];
	/* write some pattern to the file */
	memset(pat, 0x5A, CHECK_BYTES);
	WRITE(fd, pat, CHECK_BYTES);
	/* the mapping should observe what was written through the fd */
	if (memcmp(pat, addr, CHECK_BYTES))
		UT_OUT("first %d bytes do not match", CHECK_BYTES);
	/* fill up mapped region with new pattern */
	memset(pat, 0xA5, CHECK_BYTES);
	memcpy(addr, pat, CHECK_BYTES);
	UT_ASSERTeq(pmem_msync(addr, CHECK_BYTES), 0);
	UT_ASSERTeq(pmem_unmap(addr, mlen), 0);
	if (!ut_sigsetjmp(Jmp)) {
		/* same memcpy from above should now fail */
		memcpy(addr, pat, CHECK_BYTES);
	} else {
		UT_OUT("unmap successful");
	}
	/* re-read the file; it should hold the second pattern */
	LSEEK(fd, (os_off_t)0, SEEK_SET);
	if (READ(fd, buf, CHECK_BYTES) == CHECK_BYTES) {
		if (memcmp(pat, buf, CHECK_BYTES))
			UT_OUT("first %d bytes do not match", CHECK_BYTES);
	}
}
/*
 * wmain -- each group of 6 arguments describes one pmem_map_fileW()
 * test case: path, length, flag string, octal mode, and two booleans
 * selecting whether the mapped_len/is_pmem out-parameters are passed
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "pmem_map_file_win");
	int fd;
	void *addr;
	size_t mlen;
	size_t *mlenp;
	const wchar_t *path;
	unsigned long long len;
	int flags;
	int mode;
	int is_pmem;
	int *is_pmemp;
	int use_mlen;
	int use_is_pmem;
	if (argc < 7)
		UT_FATAL("usage: %s path len flags mode use_mlen "
			"use_is_pmem ...", ut_toUTF8(argv[0]));
	for (int i = 1; i + 5 < argc; i += 6) {
		path = argv[i];
		len = wcstoull(argv[i + 1], NULL, 0);
		flags = parse_flags(argv[i + 2]);
		mode = wcstol(argv[i + 3], NULL, 8);	/* octal file mode */
		use_mlen = _wtoi(argv[i + 4]);
		use_is_pmem = _wtoi(argv[i + 5]);
		mlen = SIZE_MAX;	/* sentinel: must be overwritten */
		if (use_mlen)
			mlenp = &mlen;
		else
			mlenp = NULL;
		if (use_is_pmem)
			is_pmemp = &is_pmem;
		else
			is_pmemp = NULL;
		char *upath = ut_toUTF8(path);
		char *uflags = ut_toUTF8(argv[i + 2]);
		UT_OUT("%s %lld %s %o %d %d",
			upath, len, uflags, mode, use_mlen, use_is_pmem);
		free(uflags);
		free(upath);
		addr = pmem_map_fileW(path, len, flags, mode, mlenp, is_pmemp);
		if (addr == NULL) {
			UT_OUT("!pmem_map_file");
			continue;
		}
		if (use_mlen) {
			/* the call must have filled in the length */
			UT_ASSERTne(mlen, SIZE_MAX);
			UT_OUT("mapped_len %zu", mlen);
		} else {
			mlen = len;
		}
		if (addr) {
			/* temp files and device dax cannot be reopened */
			if ((flags & PMEM_FILE_TMPFILE) == 0 && !device_dax) {
				fd = WOPEN(argv[i], O_RDWR);
				if (!use_mlen) {
					os_stat_t stbuf;
					FSTAT(fd, &stbuf);
					mlen = stbuf.st_size;
				}
				if (fd != -1) {
					do_check(fd, addr, mlen);
					(void) CLOSE(fd);
				} else {
					UT_OUT("!cannot open file: %s",
						argv[i]);
				}
			} else {
				UT_ASSERTeq(pmem_unmap(addr, mlen), 0);
			}
		}
	}
	DONEW(NULL);
}
/*
* Since libpmem is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmem_init)
MSVC_DESTR(libpmem_fini)
| 4,071 | 18.483254 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_locks/obj_tx_locks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_locks.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "direct"
#define NUM_LOCKS 2
#define NUM_THREADS 10
#define TEST_VALUE_A 5
#define TEST_VALUE_B 10
#define TEST_VALUE_C 15
#define BEGIN_TX(pop, mutexes, rwlocks)\
TX_BEGIN_PARAM((pop), TX_PARAM_MUTEX,\
&(mutexes)[0], TX_PARAM_MUTEX, &(mutexes)[1], TX_PARAM_RWLOCK,\
&(rwlocks)[0], TX_PARAM_RWLOCK, &(rwlocks)[1], TX_PARAM_NONE)
#define BEGIN_TX_OLD(pop, mutexes, rwlocks)\
TX_BEGIN_LOCK((pop), TX_LOCK_MUTEX,\
&(mutexes)[0], TX_LOCK_MUTEX, &(mutexes)[1], TX_LOCK_RWLOCK,\
&(rwlocks)[0], TX_LOCK_RWLOCK, &(rwlocks)[1], TX_LOCK_NONE)
struct transaction_data {
PMEMmutex mutexes[NUM_LOCKS];
PMEMrwlock rwlocks[NUM_LOCKS];
int a;
int b;
int c;
};
static PMEMobjpool *Pop;
/*
 * do_tx -- (internal) thread-friendly transaction
 *
 * Commits a transaction holding all mutexes/rwlocks; verifies that
 * TX_ONCOMMIT sees the value set in the tx body, TX_FINALLY sees the
 * value set on commit, and TX_ONABORT is never entered.
 */
static void *
do_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_tx_old -- (internal) thread-friendly transaction, tests deprecated macros
 *
 * Same scenario as do_tx(), but started through the deprecated
 * TX_BEGIN_LOCK/TX_LOCK_* spelling (via BEGIN_TX_OLD).
 */
static void *
do_tx_old(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX_OLD(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_tx -- (internal) thread-friendly aborted transaction
 *
 * Aborts with pmemobj_tx_abort(EINVAL) mid-body; the statement after
 * the abort must not run, TX_ONCOMMIT must be skipped, and
 * TX_ONABORT/TX_FINALLY must observe the pre-abort values.
 */
static void *
do_aborted_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
		pmemobj_tx_abort(EINVAL);
		data->a = TEST_VALUE_B;
	} TX_ONCOMMIT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_nested_tx-- (internal) thread-friendly nested transaction
 *
 * Runs a committing transaction nested inside another; both levels
 * take the same set of locks and both commit callbacks must fire.
 */
static void *
do_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
		} TX_ONCOMMIT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_END
	} TX_ONCOMMIT {
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_nested_tx -- (internal) thread-friendly aborted nested transaction
 *
 * Aborts the inner transaction; the abort must propagate to the outer
 * one (its remaining body and TX_ONCOMMIT are skipped), and each
 * level's TX_ONABORT/TX_FINALLY must see the values written by the
 * stages that did run.
 */
static void *
do_aborted_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_C;
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
			pmemobj_tx_abort(EINVAL);
			data->a = TEST_VALUE_B;
		} TX_ONCOMMIT { /* not called */
			data->a = TEST_VALUE_C;
		} TX_ONABORT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_FINALLY {
			UT_ASSERT(data->b == TEST_VALUE_B);
			data->c = TEST_VALUE_C;
		} TX_END
		data->a = TEST_VALUE_B;	/* not reached -- tx is aborting */
	} TX_ONCOMMIT { /* not called */
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->c = TEST_VALUE_C;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		UT_ASSERT(data->b == TEST_VALUE_B);
		UT_ASSERT(data->c == TEST_VALUE_C);
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->a == TEST_VALUE_B);
		data->b = TEST_VALUE_A;
	} TX_END
	return NULL;
}
/*
 * run_mt_test -- spawn NUM_THREADS copies of 'worker' with the same
 * argument and wait for all of them to finish
 */
static void
run_mt_test(void *(*worker)(void *), void *arg)
{
	os_thread_t workers[NUM_THREADS];
	for (int t = 0; t < NUM_THREADS; ++t)
		THREAD_CREATE(&workers[t], NULL, worker, arg);
	for (int t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&workers[t], NULL);
}
/*
 * main -- runs each transaction scenario either single-threaded (twice)
 * or with NUM_THREADS concurrent workers (argument 'm'), checking the
 * final field values after every scenario; also verifies the deprecated
 * TX_LOCK_* macros stay value-compatible with TX_PARAM_*
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_locks");
	if (argc > 3)
		UT_FATAL("usage: %s <file> [m]", argv[0]);
	if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	int multithread = 0;
	if (argc == 3) {
		multithread = (argv[2][0] == 'm');
		if (!multithread)
			/*
			 * BUGFIX: report the offending argument (argv[2]);
			 * the original printed argv[1][0] -- the first
			 * character of the pool path.
			 */
			UT_FATAL("wrong test type supplied %c", argv[2][0]);
	}
	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *test_obj =
		(struct transaction_data *)pmemobj_direct(root);
	/* committing transaction */
	if (multithread) {
		run_mt_test(do_tx, test_obj);
	} else {
		do_tx(test_obj);
		do_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	/* aborted transaction */
	if (multithread) {
		run_mt_test(do_aborted_tx, test_obj);
	} else {
		do_aborted_tx(test_obj);
		do_aborted_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	/* nested, committing transaction */
	if (multithread) {
		run_mt_test(do_nested_tx, test_obj);
	} else {
		do_nested_tx(test_obj);
		do_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	/* nested transaction with an aborting inner tx */
	if (multithread) {
		run_mt_test(do_aborted_nested_tx, test_obj);
	} else {
		do_aborted_nested_tx(test_obj);
		do_aborted_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_B);
	UT_ASSERT(test_obj->b == TEST_VALUE_A);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	/* test that deprecated macros still work */
	UT_COMPILE_ERROR_ON((int)TX_LOCK_NONE != (int)TX_PARAM_NONE);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_MUTEX != (int)TX_PARAM_MUTEX);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_RWLOCK != (int)TX_PARAM_RWLOCK);
	if (multithread) {
		run_mt_test(do_tx_old, test_obj);
	} else {
		do_tx_old(test_obj);
		do_tx_old(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	pmemobj_close(Pop);
	DONE(NULL);
}
| 6,164 | 21.918216 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_recovery/blk_recovery.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_recovery.c -- unit test for pmemblk recovery
*
* usage: blk_recovery bsize file first_lba lba
*
*/
#include "unittest.h"
#include <sys/param.h>
#include "blk.h"
#include "btt_layout.h"
#include <endian.h>
static size_t Bsize;
/*
* construct -- build a buffer for writing
*/
static void
construct(unsigned char *buf)
{
static int ord = 1;
for (int i = 0; i < Bsize; i++)
buf[i] = ord;
ord++;
if (ord > 255)
ord = 1;
}
/*
 * ident -- identify what a buffer holds
 *
 * Returns a static string: "{v}" when all Bsize bytes equal the first
 * byte v, or "{v} TORN at byte i" naming the first mismatching offset.
 * Not thread-safe (static result buffer).
 */
static char *
ident(unsigned char *buf)
{
	static char descr[100];
	unsigned val = *buf;
	/* size_t index to match Bsize's type (avoids sign-compare) */
	for (size_t i = 1; i < Bsize; i++)
		if (buf[i] != val) {
			sprintf(descr, "{%u} TORN at byte %zu", val, i);
			return descr;
		}
	sprintf(descr, "{%u}", val);
	return descr;
}
/*
 * main -- with 5 args: create a pool, write one block, write-protect
 * the BTT map and provoke a crash on the next write; with 3 args:
 * run pmemblk_check() on the (crashed) pool to exercise recovery
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_recovery");
	if (argc != 5 && argc != 3)
		UT_FATAL("usage: %s bsize file [first_lba lba]", argv[0]);
	Bsize = strtoul(argv[1], NULL, 0);
	const char *path = argv[2];
	if (argc > 3) {
		PMEMblkpool *handle;
		if ((handle = pmemblk_create(path, Bsize, 0,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!%s: pmemblk_create", path);
		UT_OUT("%s block size %zu usable blocks %zu",
				argv[1], Bsize, pmemblk_nblock(handle));
		/* write the first lba */
		os_off_t lba = STRTOL(argv[3], NULL, 0);
		unsigned char *buf = MALLOC(Bsize);
		construct(buf);
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		UT_OUT("write lba %zu: %s", lba, ident(buf));
		/* reach into the layout and write-protect the map */
		struct btt_info *infop = (void *)((char *)handle +
			roundup(sizeof(struct pmemblk), BLK_FORMAT_DATA_ALIGN));
		char *mapaddr = (char *)infop + le32toh(infop->mapoff);
		char *flogaddr = (char *)infop + le32toh(infop->flogoff);
		UT_OUT("write-protecting map, length %zu",
				(size_t)(flogaddr - mapaddr));
		MPROTECT(mapaddr, (size_t)(flogaddr - mapaddr), PROT_READ);
		/* map each file argument with the given map type */
		lba = STRTOL(argv[4], NULL, 0);
		construct(buf);
		/*
		 * The map is read-only now, so this write is expected to
		 * fault; a *successful* write (the else branch) is itself
		 * a fatal test failure.
		 */
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		else
			UT_FATAL("write lba %zu: %s", lba, ident(buf));
	} else {
		/* recovery mode: check pool consistency after the crash */
		int result = pmemblk_check(path, Bsize);
		if (result < 0)
			UT_OUT("!%s: pmemblk_check", path);
		else if (result == 0)
			UT_OUT("%s: pmemblk_check: not consistent", path);
		else
			UT_OUT("%s: consistent", path);
	}
	DONE(NULL);
}
| 4,164 | 26.766667 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/out_err_mt/out_err_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* out_err_mt.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
/*
 * print_errors -- dump the last error message of each PMDK library,
 * preceded by a caller-supplied tag line
 */
static void
print_errors(const char *msg)
{
	UT_OUT("%s", msg);
	UT_OUT("PMEM: %s", pmem_errormsg());
	UT_OUT("PMEMOBJ: %s", pmemobj_errormsg());
	UT_OUT("PMEMLOG: %s", pmemlog_errormsg());
	UT_OUT("PMEMBLK: %s", pmemblk_errormsg());
	UT_OUT("PMEMPOOL: %s", pmempool_errormsg());
}
static void
check_errors(unsigned ver)
{
int ret;
int err_need;
int err_found;
ret = sscanf(pmem_errormsg(),
"libpmem major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEM_MAJOR_VERSION);
ret = sscanf(pmemobj_errormsg(),
"libpmemobj major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMOBJ_MAJOR_VERSION);
ret = sscanf(pmemlog_errormsg(),
"libpmemlog major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMLOG_MAJOR_VERSION);
ret = sscanf(pmemblk_errormsg(),
"libpmemblk major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMBLK_MAJOR_VERSION);
ret = sscanf(pmempool_errormsg(),
"libpmempool major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMPOOL_MAJOR_VERSION);
}
/*
 * do_test -- thread worker: request an impossible major version from
 * every library, then verify the resulting per-thread error messages
 */
static void *
do_test(void *arg)
{
	unsigned version = *(unsigned *)arg;
	pmem_check_version(version, 0);
	pmemobj_check_version(version, 0);
	pmemlog_check_version(version, 0);
	pmemblk_check_version(version, 0);
	pmempool_check_version(version, 0);
	check_errors(version);
	return NULL;
}
/*
 * run_mt_test -- start NUM_THREADS workers, each with its own distinct
 * version number (10000 + index), and wait for all of them
 */
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t tids[NUM_THREADS];
	unsigned vers[NUM_THREADS];
	for (unsigned t = 0; t < NUM_THREADS; ++t) {
		vers[t] = 10000 + t;
		THREAD_CREATE(&tids[t], NULL, worker, &vers[t]);
	}
	for (unsigned t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&tids[t], NULL);
}
/*
 * main -- provoke a representative error in each library (version
 * mismatch, failed msync/alloc/append/set_error, bad check-args size)
 * and print the resulting error messages, single- and multi-threaded
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "out_err_mt");
	if (argc != 6)
		UT_FATAL("usage: %s file1 file2 file3 file4 dir",
				argv[0]);
	print_errors("start");
	PMEMobjpool *pop = pmemobj_create(argv[1], "test",
		PMEMOBJ_MIN_POOL, 0666);
	PMEMlogpool *plp = pmemlog_create(argv[2],
		PMEMLOG_MIN_POOL, 0666);
	PMEMblkpool *pbp = pmemblk_create(argv[3],
		128, PMEMBLK_MIN_POOL, 0666);
	util_init();
	/* impossible majors -> every library records a mismatch error */
	pmem_check_version(10000, 0);
	pmemobj_check_version(10001, 0);
	pmemlog_check_version(10002, 0);
	pmemblk_check_version(10003, 0);
	pmempool_check_version(10006, 0);
	print_errors("version check");
	void *ptr = NULL;
	/*
	 * We are testing library error reporting and we don't want this test
	 * to fail under memcheck.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	pmem_msync(ptr, 1);
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	print_errors("pmem_msync");
	int ret;
	PMEMoid oid;
	/* zero-sized allocation must fail */
	ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
	UT_ASSERTeq(ret, -1);
	print_errors("pmemobj_alloc");
	/* append larger than the pool must fail */
	pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
	print_errors("pmemlog_append");
	/* out-of-range block number must fail */
	size_t nblock = pmemblk_nblock(pbp);
	pmemblk_set_error(pbp, (long long)nblock + 1);
	print_errors("pmemblk_set_error");
	run_mt_test(do_test);
	pmemobj_close(pop);
	pmemlog_close(plp);
	pmemblk_close(pbp);
	/* undersized args struct must be rejected by check_init */
	PMEMpoolcheck *ppc;
	struct pmempool_check_args args = {NULL, };
	ppc = pmempool_check_init(&args, sizeof(args) / 2);
	UT_ASSERTeq(ppc, NULL);
	print_errors("pmempool_check_init");
	DONE(NULL);
}
| 3,840 | 22.278788 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_api/libpmempool_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* libpmempool_test -- test of libpmempool.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "unittest.h"
/*
* Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
* test libpmempool against various pmempool_check_args structure versions.
*/
struct pmempool_check_args_1_0 {
const char *path;
const char *backup_path;
enum pmempool_pool_type pool_type;
int flags;
};
/*
 * check_pool -- check given pool
 *
 * Drives a full pmempool_check() session: every status returned by the
 * library is printed, questions are always answered "yes", and the
 * final result is printed as a human-readable string. Exits the
 * process on an unknown status type.
 */
static void
check_pool(struct pmempool_check_args *args, size_t args_size)
{
	/* maps enum pmempool_check_result values to display strings */
	const char *status2str[] = {
		[PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent",
		[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent",
		[PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired",
		[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair",
		[PMEMPOOL_CHECK_RESULT_ERROR] = "fatal",
	};
	PMEMpoolcheck *ppc = pmempool_check_init(args, args_size);
	if (!ppc) {
		char buff[UT_MAX_ERR_MSG];
		ut_strerror(errno, buff, UT_MAX_ERR_MSG);
		UT_OUT("Error: %s", buff);
		return;
	}
	/* pump the check state machine until it reports completion */
	struct pmempool_check_status *status = NULL;
	while ((status = pmempool_check(ppc)) != NULL) {
		switch (status->type) {
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			UT_OUT("%s", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
			UT_OUT("%s", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			/* auto-confirm every repair question */
			UT_OUT("%s", status->str.msg);
			status->str.answer = "yes";
			break;
		default:
			pmempool_check_end(ppc);
			exit(EXIT_FAILURE);
		}
	}
	enum pmempool_check_result ret = pmempool_check_end(ppc);
	UT_OUT("status = %s", status2str[ret]);
}
/*
 * print_usage -- print usage of program
 */
static void
print_usage(char *name)
{
	UT_OUT("Usage: %s [-t <pool_type>] [-r <repair>] [-d <dry_run>] "
		"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
		"[-b <backup_path>] <pool_path>", name);
}
/*
 * set_flag -- parse the value and set the flag according to a obtained value
 *
 * A positive numeric string sets 'flag' in *flags; anything else
 * (zero, negative, non-numeric) clears it.
 */
static void
set_flag(const char *value, int *flags, int flag)
{
	int enable = atoi(value) > 0;
	if (enable)
		*flags |= flag;
	else
		*flags &= ~flag;
}
/*
 * main -- parse command-line options into a (possibly truncated,
 * via -s) pmempool_check_args structure and run check_pool() on the
 * given pool path
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_test");
	int opt;
	struct pmempool_check_args_1_0 args = {
		.path = NULL,
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_LOG,
		.flags = PMEMPOOL_CHECK_FORMAT_STR |
			PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
	};
	/* default: pass the historical (1.0) structure size */
	size_t args_size = sizeof(struct pmempool_check_args_1_0);
	while ((opt = getopt(argc, argv, "t:r:d:a:y:s:b:")) != -1) {
		switch (opt) {
		case 't':
			/* pool type by name, or a raw numeric value */
			if (strcmp(optarg, "blk") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
			} else if (strcmp(optarg, "log") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
			} else if (strcmp(optarg, "obj") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
			} else if (strcmp(optarg, "btt") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
			} else {
				args.pool_type =
					(uint32_t)strtoul(optarg, NULL, 0);
			}
			break;
		case 'r':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
			break;
		case 'd':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
			break;
		case 'a':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
			break;
		case 'y':
			set_flag(optarg, &args.flags,
				PMEMPOOL_CHECK_ALWAYS_YES);
			break;
		case 's':
			/* override the args size passed to check_init */
			args_size = strtoul(optarg, NULL, 0);
			break;
		case 'b':
			args.backup_path = optarg;
			break;
		default:
			print_usage(argv[0]);
			UT_FATAL("unknown option: %c", opt);
		}
	}
	if (optind < argc) {
		args.path = argv[optind];
	}
	check_pool((struct pmempool_check_args *)&args, args_size);
	DONE(NULL);
}
| 3,753 | 22.31677 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_nblock/blk_nblock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* blk_nblock.c -- unit test for pmemblk_nblock()
*
* usage: blk_nblock bsize:file...
*
*/
#include "unittest.h"
/*
 * main -- for every "bsize:file" argument: create a pool, report its
 * usable block count, then verify pmemblk_check() accepts only the
 * matching (or zero/auto) block size and that pmemblk_open(path, 0)
 * recovers the stored one
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_nblock");
	if (argc < 2)
		UT_FATAL("usage: %s bsize:file...", argv[0]);
	/* each argument has the form "bsize:file" */
	for (int i = 1; i < argc; i++) {
		char *path;
		size_t bsize = strtoul(argv[i], &path, 0);
		if (*path != ':')
			UT_FATAL("usage: %s bsize:file...", argv[0]);
		path++;
		PMEMblkpool *handle = pmemblk_create(path, bsize, 0,
				S_IWUSR | S_IRUSR);
		if (handle == NULL) {
			UT_OUT("!%s: pmemblk_create", path);
			continue;
		}
		UT_OUT("%s: block size %zu usable blocks: %zu",
				path, bsize, pmemblk_nblock(handle));
		UT_ASSERTeq(pmemblk_bsize(handle), bsize);
		pmemblk_close(handle);
		int result = pmemblk_check(path, bsize);
		if (result < 0) {
			UT_OUT("!%s: pmemblk_check", path);
		} else if (result == 0) {
			UT_OUT("%s: pmemblk_check: not consistent",
					path);
		} else {
			/* consistent: wrong bsize rejected, 0 = auto */
			UT_ASSERTeq(pmemblk_check(path, bsize + 1), -1);
			UT_ASSERTeq(pmemblk_check(path, 0), 1);
			handle = pmemblk_open(path, 0);
			UT_ASSERTeq(pmemblk_bsize(handle), bsize);
			pmemblk_close(handle);
		}
	}
	DONE(NULL);
}
| 1,358 | 22.431034 | 62 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_non_zero/blk_non_zero.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* blk_non_zero.c -- unit test for pmemblk_read/write/set_zero/
* set_error/inject_fault
*
* usage: blk_non_zero bsize file func operation:lba...
*
* func is 'c' or 'o' (create or open)
* operations are 'r' or 'w' or 'z' or 'e' or 't' or 'm'
*
*/
#define _GNU_SOURCE
#include <sys/param.h>
#include "unittest.h"
#include "blk.h"
static size_t Bsize;
/*
* construct -- build a buffer for writing
*/
static void
construct(unsigned char *buf)
{
static int ord = 1;
for (int i = 0; i < Bsize; i++)
buf[i] = ord;
ord++;
if (ord > 255)
ord = 1;
}
/*
 * ident -- identify what a buffer holds
 *
 * Returns a static string: "{v}" when all Bsize bytes equal the first
 * byte v, or "{v} TORN at byte i" naming the first mismatching offset.
 * Not thread-safe (static result buffer).
 */
static char *
ident(unsigned char *buf)
{
	static char descr[100];
	unsigned val = *buf;
	/* size_t index to match Bsize's type (avoids sign-compare) */
	for (size_t i = 1; i < Bsize; i++)
		if (buf[i] != val) {
			sprintf(descr, "{%u} TORN at byte %zu", val, i);
			return descr;
		}
	sprintf(descr, "{%u}", val);
	return descr;
}
/*
 * is_zeroed -- read is_zeroed flag from header
 *
 * Maps the pool file and reads the is_zeroed field directly from the
 * pmemblk header (struct pmemblk, blk.h), then unmaps and closes the
 * file again.
 */
static int
is_zeroed(const char *path)
{
	int fd = OPEN(path, O_RDWR);
	os_stat_t stbuf;
	FSTAT(fd, &stbuf);
	void *addr = MMAP(NULL, (size_t)stbuf.st_size, PROT_READ|PROT_WRITE,
		MAP_SHARED, fd, 0);
	struct pmemblk *header = addr;
	int ret = header->is_zeroed;
	MUNMAP(addr, (size_t)stbuf.st_size);
	CLOSE(fd);
	return ret;
}
/*
 * main -- usage: blk_non_zero bsize file func [file_size] op:lba...
 *
 * Creates ('c', with file_size) or opens ('o') a pmemblk pool, then
 * executes each op:lba argument in order:
 *   r - read, w - write, z - set_zero, e - set_error,
 *   t/m - set_error with fault injection at two malloc sites.
 * Finally the pool is closed and checked for consistency.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_non_zero");

	if (argc < 5)
		UT_FATAL("usage: %s bsize file func [file_size] op:lba...",
			argv[0]);

	int read_arg = 1;
	Bsize = strtoul(argv[read_arg++], NULL, 0);
	const char *path = argv[read_arg++];

	PMEMblkpool *handle = NULL;
	switch (*argv[read_arg++]) {
	case 'c': {
		/* 'c' consumes one extra argument: the pool file size */
		size_t fsize = strtoul(argv[read_arg++], NULL, 0);
		handle = pmemblk_create(path, Bsize, fsize,
			S_IRUSR | S_IWUSR);
		if (handle == NULL)
			UT_FATAL("!%s: pmemblk_create", path);
		break;
	}
	case 'o':
		handle = pmemblk_open(path, Bsize);
		if (handle == NULL)
			UT_FATAL("!%s: pmemblk_open", path);
		break;
	default:
		UT_FATAL("unrecognized command %s", argv[read_arg - 1]);
	}

	UT_OUT("%s block size %zu usable blocks %zu",
		argv[1], Bsize, pmemblk_nblock(handle));
	UT_OUT("is zeroed:\t%d", is_zeroed(path));

	unsigned char *buf = MALLOC(Bsize);
	if (buf == NULL)
		UT_FATAL("cannot allocate buf");

	/* map each file argument with the given map type */
	for (; read_arg < argc; read_arg++) {
		if (strchr("rwzetm", argv[read_arg][0]) == NULL ||
				argv[read_arg][1] != ':')
			UT_FATAL("op must be r: or w: or z: or e: or t: or m:");

		/*
		 * NOTE(review): lba is os_off_t (signed) but is printed with
		 * %zu below -- works for the small non-negative LBAs the test
		 * uses, but the format does not match the type
		 */
		os_off_t lba = STRTOL(&argv[read_arg][2], NULL, 0);

		switch (argv[read_arg][0]) {
		case 'r':
			if (pmemblk_read(handle, buf, lba) < 0)
				UT_OUT("!read lba %zu", lba);
			else
				UT_OUT("read lba %zu: %s", lba,
					ident(buf));
			break;
		case 'w':
			construct(buf);
			if (pmemblk_write(handle, buf, lba) < 0)
				UT_OUT("!write lba %zu", lba);
			else
				UT_OUT("write lba %zu: %s", lba,
					ident(buf));
			break;
		case 'z':
			if (pmemblk_set_zero(handle, lba) < 0)
				UT_OUT("!set_zero lba %zu", lba);
			else
				UT_OUT("set_zero lba %zu", lba);
			break;
		case 'e':
			if (pmemblk_set_error(handle, lba) < 0)
				UT_OUT("!set_error lba %zu", lba);
			else
				UT_OUT("set_error lba %zu", lba);
			break;
		case 't':
			/* fault injection: fail the 1st malloc in build_rtt */
			if (!pmemblk_fault_injection_enabled())
				break;
			pmemblk_inject_fault_at(PMEM_MALLOC, 1, "build_rtt");
			int ret = pmemblk_set_error(handle, lba);
			UT_ASSERTne(ret, 0);
			UT_ASSERTeq(errno, ENOMEM);
			break;
		case 'm':
			/* fault injection: fail malloc in build_map_locks */
			if (!pmemblk_fault_injection_enabled())
				break;
			pmemblk_inject_fault_at(PMEM_MALLOC, 1,
				"build_map_locks");
			ret = pmemblk_set_error(handle, lba);
			UT_ASSERTne(ret, 0);
			UT_ASSERTeq(errno, ENOMEM);
			break;
		}
	}

	FREE(buf);
	pmemblk_close(handle);

	/* verify the pool survived the operations above */
	int result = pmemblk_check(path, Bsize);
	if (result < 0)
		UT_OUT("!%s: pmemblk_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemblk_check: not consistent", path);

	DONE(NULL);
}
| 4,038 | 19.296482 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_invalid/obj_tx_invalid.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_tx_invalid.c -- tests which transactional functions are available in
* which transaction stages
*/
#include <stddef.h>
#include "file.h"
#include "unittest.h"
/*
 * Layout definition -- a root object plus a single user type, which is
 * all this negative test needs to have an object to operate on
 */
POBJ_LAYOUT_BEGIN(tx_invalid);
POBJ_LAYOUT_ROOT(tx_invalid, struct dummy_root);
POBJ_LAYOUT_TOID(tx_invalid, struct dummy_node);
POBJ_LAYOUT_END(tx_invalid);

/* minimal allocatable object */
struct dummy_node {
	int value;	/* payload is never inspected by the test */
};

/* root object anchoring a single node */
struct dummy_root {
	TOID(struct dummy_node) node;
};
/*
 * main -- drive one negative test case
 *
 * argv[1] is the pool file (created on first run, reopened after),
 * argv[2] selects which transactional function is called in which
 * transaction stage, e.g. "alloc-in-abort" calls pmemobj_tx_alloc()
 * inside TX_ONABORT.  The bare name (e.g. "alloc") calls the function
 * outside any transaction; "-after-tx" calls it right after TX_END.
 */
int
main(int argc, char *argv[])
{
	if (argc != 3)
		UT_FATAL("usage: %s file-name op", argv[0]);

	START(argc, argv, "obj_tx_invalid %s", argv[2]);

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(tx_invalid) != 1);

	PMEMobjpool *pop;
	const char *path = argv[1];

	int exists = util_file_exists(path);
	if (exists < 0)
		UT_FATAL("!util_file_exists");

	/* create the pool on first use, reopen it on subsequent runs */
	if (!exists) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(tx_invalid),
				PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) {
			UT_FATAL("!pmemobj_create %s", path);
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(tx_invalid)))
				== NULL) {
			UT_FATAL("!pmemobj_open %s", path);
		}
	}

	/* make sure there is one object to operate on */
	PMEMoid oid = pmemobj_first(pop);

	if (OID_IS_NULL(oid)) {
		if (pmemobj_alloc(pop, &oid, 10, 1, NULL, NULL))
			UT_FATAL("!pmemobj_alloc");
	} else {
		UT_ASSERTeq(pmemobj_type_num(oid), 1);
	}

	/* pmemobj_tx_alloc in every stage */
	if (strcmp(argv[2], "alloc") == 0)
		pmemobj_tx_alloc(10, 1);
	else if (strcmp(argv[2], "alloc-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_alloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "alloc-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_alloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "alloc-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_alloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "alloc-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_alloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "alloc-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_alloc(10, 1);
	}
	/* pmemobj_tx_zalloc in every stage */
	else if (strcmp(argv[2], "zalloc") == 0)
		pmemobj_tx_zalloc(10, 1);
	else if (strcmp(argv[2], "zalloc-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_zalloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zalloc-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_zalloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zalloc-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_zalloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zalloc-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_zalloc(10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zalloc-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_zalloc(10, 1);
	}
	/* pmemobj_tx_strdup in every stage */
	else if (strcmp(argv[2], "strdup") == 0)
		pmemobj_tx_strdup("aaa", 1);
	else if (strcmp(argv[2], "strdup-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_strdup("aaa", 1);
		} TX_END
	} else if (strcmp(argv[2], "strdup-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_strdup("aaa", 1);
		} TX_END
	} else if (strcmp(argv[2], "strdup-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_strdup("aaa", 1);
		} TX_END
	} else if (strcmp(argv[2], "strdup-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_strdup("aaa", 1);
		} TX_END
	} else if (strcmp(argv[2], "strdup-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_strdup("aaa", 1);
	}
	/* pmemobj_tx_realloc in every stage */
	else if (strcmp(argv[2], "realloc") == 0)
		pmemobj_tx_realloc(oid, 10, 1);
	else if (strcmp(argv[2], "realloc-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_realloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "realloc-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_realloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "realloc-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_realloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "realloc-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_realloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "realloc-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_realloc(oid, 10, 1);
	}
	/* pmemobj_tx_zrealloc in every stage */
	else if (strcmp(argv[2], "zrealloc") == 0)
		pmemobj_tx_zrealloc(oid, 10, 1);
	else if (strcmp(argv[2], "zrealloc-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_zrealloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zrealloc-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_zrealloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zrealloc-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_zrealloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zrealloc-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_zrealloc(oid, 10, 1);
		} TX_END
	} else if (strcmp(argv[2], "zrealloc-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_zrealloc(oid, 10, 1);
	}
	/* pmemobj_tx_free in every stage */
	else if (strcmp(argv[2], "free") == 0)
		pmemobj_tx_free(oid);
	else if (strcmp(argv[2], "free-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_free(oid);
		} TX_END
	} else if (strcmp(argv[2], "free-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_free(oid);
		} TX_END
	} else if (strcmp(argv[2], "free-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_free(oid);
		} TX_END
	} else if (strcmp(argv[2], "free-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_free(oid);
		} TX_END
	} else if (strcmp(argv[2], "free-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_free(oid);
	}
	/* pmemobj_tx_add_range in every stage */
	else if (strcmp(argv[2], "add_range") == 0)
		pmemobj_tx_add_range(oid, 0, 10);
	else if (strcmp(argv[2], "add_range-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_add_range(oid, 0, 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_add_range(oid, 0, 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_add_range(oid, 0, 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_add_range(oid, 0, 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_add_range(oid, 0, 10);
	}
	/* pmemobj_tx_add_range_direct in every stage */
	else if (strcmp(argv[2], "add_range_direct") == 0)
		pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
	else if (strcmp(argv[2], "add_range_direct-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range_direct-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range_direct-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range_direct-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
		} TX_END
	} else if (strcmp(argv[2], "add_range_direct-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
	}
	/* pmemobj_tx_abort in every stage */
	else if (strcmp(argv[2], "abort") == 0)
		pmemobj_tx_abort(ENOMEM);
	else if (strcmp(argv[2], "abort-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_END
	} else if (strcmp(argv[2], "abort-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_abort(ENOMEM);
		} TX_END
	} else if (strcmp(argv[2], "abort-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_abort(ENOMEM);
		} TX_END
	} else if (strcmp(argv[2], "abort-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_abort(ENOMEM);
		} TX_END
	} else if (strcmp(argv[2], "abort-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_abort(ENOMEM);
	}
	/* pmemobj_tx_commit in every stage */
	else if (strcmp(argv[2], "commit") == 0)
		pmemobj_tx_commit();
	else if (strcmp(argv[2], "commit-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_commit();
		} TX_END
	} else if (strcmp(argv[2], "commit-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_commit();
		} TX_END
	} else if (strcmp(argv[2], "commit-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_commit();
		} TX_END
	} else if (strcmp(argv[2], "commit-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_commit();
		} TX_END
	} else if (strcmp(argv[2], "commit-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_commit();
	}
	/*
	 * pmemobj_tx_end in every stage; where the manual end is valid the
	 * test closes the pool and exits to skip the TX_END expansion,
	 * which would otherwise end the transaction a second time
	 */
	else if (strcmp(argv[2], "end") == 0)
		pmemobj_tx_end();
	else if (strcmp(argv[2], "end-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_end();
		} TX_END
	} else if (strcmp(argv[2], "end-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_end();
	}
	/* pmemobj_tx_process in every stage */
	else if (strcmp(argv[2], "process") == 0)
		pmemobj_tx_process();
	else if (strcmp(argv[2], "process-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_process();
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "process-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_process();
	}
	/* TX_BEGIN (nested transaction) in every stage */
	else if (strcmp(argv[2], "begin") == 0) {
		TX_BEGIN(pop) {
		} TX_END
	} else if (strcmp(argv[2], "begin-in-work") == 0) {
		TX_BEGIN(pop) {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END

		TX_BEGIN(pop) {
		} TX_END
	}

	pmemobj_close(pop);

	DONE(NULL);
}
| 11,213 | 23.809735 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_rpmem_heap_state/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2017, Intel Corporation
#
#
# obj_rpmem_heap_state/config.sh -- test configuration
#
# run only on a pmem-aware file system
CONF_GLOBAL_FS_TYPE=pmem
# exercise both debug and nondebug builds
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
# test every available rpmem provider and persistency method
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 290 | 19.785714 | 54 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_pool_lock/blk_pool_lock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* blk_pool_lock.c -- unit test which checks whether it's possible to
* simultaneously open the same blk pool
*/
#include "unittest.h"
/*
 * test_reopen -- create a pool, verify a second open of the same pool
 * fails with EWOULDBLOCK while the first handle is still open, and that
 * it succeeds once the pool has been closed
 */
static void
test_reopen(const char *path)
{
	PMEMblkpool *blk1 = pmemblk_create(path, 4096, PMEMBLK_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (!blk1)
		UT_FATAL("!create");

	/* blk1 holds the pool lock, so a second open must fail */
	PMEMblkpool *blk2 = pmemblk_open(path, 4096);
	if (blk2)
		UT_FATAL("pmemblk_open should not succeed");

	if (errno != EWOULDBLOCK)
		UT_FATAL("!pmemblk_open failed but for unexpected reason");

	pmemblk_close(blk1);

	blk2 = pmemblk_open(path, 4096);
	if (!blk2)
		/* fixed message: this is pmemblk, not pmemobj */
		UT_FATAL("pmemblk_open should succeed after close");

	pmemblk_close(blk2);

	UNLINK(path);
}
#ifndef _WIN32
/*
 * test_open_in_different_process -- fork a child that waits for the pool
 * file to appear and then verifies that opening it fails with EWOULDBLOCK
 * while the parent (the creator) still holds the pool open
 *
 * 'sleep' is an extra child start-up delay in microseconds, used to vary
 * the timing between pool creation and the child's open attempt.
 */
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
	pid_t pid = fork();
	PMEMblkpool *blk;
	char *path = argv[1];

	if (pid < 0)
		UT_FATAL("fork failed");

	if (pid == 0) {
		/* child */
		if (sleep)
			usleep(sleep);
		/* poll until the parent has created the pool file */
		while (os_access(path, R_OK))
			usleep(100 * 1000);

		blk = pmemblk_open(path, 4096);
		if (blk)
			UT_FATAL("pmemblk_open after fork should not succeed");

		if (errno != EWOULDBLOCK)
			UT_FATAL("!pmemblk_open after fork failed but for "
				"unexpected reason");

		exit(0);
	}

	/* parent: create the pool and keep it open until the child exits */
	blk = pmemblk_create(path, 4096, PMEMBLK_MIN_POOL,
		S_IWUSR | S_IRUSR);
	if (!blk)
		UT_FATAL("!create");

	int status;
	if (waitpid(pid, &status, 0) < 0)
		UT_FATAL("!waitpid failed");

	if (!WIFEXITED(status))
		UT_FATAL("child process failed");

	pmemblk_close(blk);

	UNLINK(path);
}
#else
/*
 * test_open_in_different_process -- Windows variant: spawn a second copy
 * of this binary (with an extra "X" argument) while the pool is held open
 * here; the spawned process does the EWOULDBLOCK check in main()
 *
 * Only the sleep == 0 case is exercised on Windows.
 */
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
	PMEMblkpool *blk;

	if (sleep > 0)
		return;

	char *path = argv[1];

	/* before starting the 2nd process, create a pool */
	blk = pmemblk_create(path, 4096, PMEMBLK_MIN_POOL,
		S_IWUSR | S_IRUSR);
	if (!blk)
		UT_FATAL("!create");

	/*
	 * "X" is pass as an additional param to the new process
	 * created by ut_spawnv to distinguish second process on Windows
	 */
	uintptr_t result = ut_spawnv(argc, argv, "X", NULL);

	if (result != 0)
		UT_FATAL("Create new process failed error: %d", GetLastError());

	pmemblk_close(blk);
}
#endif
/*
 * main -- usage: blk_pool_lock path
 *
 * With one argument: run the in-process reopen test, then the
 * two-process test with a range of child start-up delays.
 * With two arguments (the extra "X" added by ut_spawnv on Windows):
 * act as the second process and verify the open fails with EWOULDBLOCK.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_pool_lock");

	if (argc < 2)
		UT_FATAL("usage: %s path", argv[0]);

	if (argc == 2) {
		test_reopen(argv[1]);

		test_open_in_different_process(argc, argv, 0);
		/* exponentially growing delays, in microseconds */
		for (unsigned i = 1; i < 100000; i *= 2)
			test_open_in_different_process(argc, argv, i);
	} else if (argc == 3) {
		PMEMblkpool *blk;
		/* 2nd arg used by windows for 2 process test */
		blk = pmemblk_open(argv[1], 4096);
		if (blk)
			UT_FATAL("pmemblk_open after create process should "
				"not succeed");

		if (errno != EWOULDBLOCK)
			UT_FATAL("!pmemblk_open after create process failed "
				"but for unexpected reason");
	}

	DONE(NULL);
}
| 2,923 | 19.737589 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_addr_ext/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# rpmem_addr_ext/config.sh -- test configuration file
#
# any file system type is acceptable for this test
CONF_GLOBAL_FS_TYPE=any
# exercise both debug and nondebug builds
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
# restrict to the sockets provider with the GPSPM persistency method
CONF_GLOBAL_RPMEM_PROVIDER=sockets
CONF_GLOBAL_RPMEM_PMETHOD=GPSPM
| 289 | 19.714286 | 53 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_addr_ext/rpmem_addr_ext.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* rpmem_addr_ext.c -- advanced unittest for invalid target formats
*/
#include "unittest.h"
#include "librpmem.h"
#include "pool_hdr.h"
#include "set.h"
#include "util.h"
#include "out.h"
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#define POOL_SIZE (8 * 1024 * 1024) /* 8 MiB */
#define NLANES 32
#define MAX_TARGET_LENGTH 256
/*
 * test_prepare -- reset per-iteration state before a test case
 *
 * errno must start out clean: until the fix merged into libfabric via
 * pull request https://github.com/ofiwg/libfabric/pull/2551, a stale
 * errno value was misused and could lead to SIGSEGV.
 */
static void
test_prepare()
{
	errno = 0;
}
/*
 * test_create -- verify that rpmem_create() rejects an invalid target
 *
 * Every target read from the input file is malformed, so creation is
 * expected to fail and return NULL.
 */
static int
test_create(const char *target, void *pool)
{
	unsigned nlanes = NLANES;
	const char *pool_set = "invalid.poolset";
	struct rpmem_pool_attr pool_attr;

	memset(&pool_attr, 0, sizeof(pool_attr));

	RPMEMpool *rpp = rpmem_create(target, pool_set, pool, POOL_SIZE,
			&nlanes, &pool_attr);

	UT_ASSERTeq(rpp, NULL);

	return 0;
}
/*
 * test_open -- verify that rpmem_open() rejects an invalid target
 *
 * Mirrors test_create(): the open attempt must fail and return NULL.
 */
static int
test_open(const char *target, void *pool)
{
	unsigned nlanes = NLANES;
	const char *pool_set = "invalid.poolset";
	struct rpmem_pool_attr pool_attr;

	memset(&pool_attr, 0, sizeof(pool_attr));

	RPMEMpool *rpp = rpmem_open(target, pool_set, pool, POOL_SIZE, &nlanes,
			&pool_attr);

	UT_ASSERTeq(rpp, NULL);

	return 0;
}
/*
 * main -- usage: rpmem_addr_ext <targets>
 *
 * Reads target addresses, one per line, from the given file and checks
 * that both rpmem_create() and rpmem_open() reject each of them.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_addr_ext");

	if (argc < 2)
		UT_FATAL("usage: rpmem_addr_ext <targets>");

	const char *targets_file_name = argv[1];
	char target[MAX_TARGET_LENGTH];
	void *pool = PAGEALIGNMALLOC(POOL_SIZE);

	FILE *targets_file = FOPEN(targets_file_name, "r");

	while (fgets(target, sizeof(target), targets_file)) {
		/*
		 * Strip the trailing newline only if it is present: the
		 * last line of the file may lack one, and indexing with
		 * strlen() - 1 would be out of bounds for an empty read.
		 */
		size_t len = strlen(target);
		if (len > 0 && target[len - 1] == '\n')
			target[len - 1] = '\0';

		test_prepare();
		test_create(target, pool);
		test_prepare();
		test_open(target, pool);
	}

	FCLOSE(targets_file);
	FREE(pool);

	DONE(NULL);
}
| 2,089 | 18.351852 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush/mocks_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used in pmem_has_auto_flush.c
*/
#include <fts.h>
#include "fs.h"
#include "unittest.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
/*
 * open -- open mock
 *
 * Redirects any path under /sys/bus/nd/devices into the test-controlled
 * directory named by the BUS_DEVICE_PATH environment variable; all other
 * paths go to the real open().
 */
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	va_list ap;
	va_start(ap, flags);
	/* mode is only meaningful with O_CREAT; reading it is harmless */
	int mode = va_arg(ap, int);
	va_end(ap);
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(open)(path, flags, mode);
	/*
	 * NOTE(review): assumes BUS_DEVICE_PATH is set and the combined
	 * path fits in PATH_MAX -- acceptable in this controlled test
	 * environment, but os_getenv() returning NULL would crash strcat()
	 */
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	char path2[PATH_MAX] = { 0 };
	strcat(path2, prefix);
	strcat(path2, path + strlen(BUS_DEVICE_PATH));
	return _FUNC_REAL(open)(path2, flags, mode);
}
FUNC_MOCK_END
/*
 * local definition of the fs traversal handle -- presumably matching the
 * layout used by the real fs implementation (TODO confirm against fs.c)
 */
struct fs {
	FTS *ft;
	struct fs_entry entry;
};

/*
 * fs_new -- creates fs traversal instance
 *
 * Same path-rewriting trick as the open() mock: traversals under
 * /sys/bus/nd/devices are redirected into the directory named by the
 * BUS_DEVICE_PATH environment variable.
 */
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(fs_new)(path);
	/* NOTE(review): assumes BUS_DEVICE_PATH is set; see open() mock */
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	char path2[PATH_MAX] = { 0 };
	strcat(path2, prefix);
	strcat(path2, path + strlen(BUS_DEVICE_PATH));
	return _FUNC_REAL(fs_new)(path2);
}
FUNC_MOCK_END
/*
 * os_stat -- os_stat mock to handle sysfs path
 *
 * Applies the same /sys/bus/nd/devices -> $BUS_DEVICE_PATH redirection
 * as the open() and fs_new() mocks.
 */
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(os_stat)(path, buf);
	/* NOTE(review): assumes BUS_DEVICE_PATH is set; see open() mock */
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	char path2[PATH_MAX] = { 0 };
	strcat(path2, prefix);
	strcat(path2, path + strlen(BUS_DEVICE_PATH));
	return _FUNC_REAL(os_stat)(path2, buf);
}
FUNC_MOCK_END
| 1,627 | 22.257143 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush/pmem_has_auto_flush.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmem_has_auto_flush.c -- unit test for pmem_has_auto_flush() function
*
* this test checks if function pmem_has_auto_flush handle sysfs path
* and persistence_domain file in proper way
*/
#include <string.h>
#include "unittest.h"
/*
 * main -- run pmem_has_auto_flush() and print its result
 *
 * The test takes no command-line arguments; the check below rejects any.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_has_auto_flush");

	/*
	 * fixed usage message: the old text ("usage: %s path") contradicted
	 * the argc != 1 check -- no path argument is expected or used
	 */
	if (argc != 1)
		UT_FATAL("usage: %s", argv[0]);

	int ret = pmem_has_auto_flush();
	UT_OUT("pmem_has_auto_flush %d", ret);

	DONE(NULL);
}
| 550 | 18.678571 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/mocks_ndctl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_ndctl.c -- mocked ndctl functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <sys/stat.h>
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "pmem2_badblock_mocks.h"
/* fixed base address reported by all *_get_resource mocks below */
#define RESOURCE_ADDRESS 0x1000 /* any non-zero value */
/* extract the encoded test descriptor from a fake handle pointer */
#define UINT(ptr) (unsigned)((uintptr_t)ptr)

/* index of bad blocks */
static unsigned i_bb;

/*
 * ndctl_namespace_get_mode - mock ndctl_namespace_get_mode
 *
 * The fake handle encodes the desired ndctl mode in its bits
 * (see pmem2_badblock_mocks.h): report FSDAX for namespace mode,
 * RAW otherwise.
 */
FUNC_MOCK(ndctl_namespace_get_mode, enum ndctl_namespace_mode,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode */
		return NDCTL_NS_MODE_FSDAX;
	/* raw mode */
	return NDCTL_NS_MODE_RAW;
}
FUNC_MOCK_END

/*
 * ndctl_namespace_get_pfn - mock ndctl_namespace_get_pfn
 *
 * Returns a non-NULL pfn handle (the namespace handle itself) only in
 * namespace mode.
 */
FUNC_MOCK(ndctl_namespace_get_pfn, struct ndctl_pfn *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode */
		return (struct ndctl_pfn *)ndns;
	return NULL;
}
FUNC_MOCK_END

/*
 * ndctl_namespace_get_dax - mock ndctl_namespace_get_dax
 *
 * Returns a non-NULL dax handle (the namespace handle itself) only in
 * region mode.
 */
FUNC_MOCK(ndctl_namespace_get_dax, struct ndctl_dax *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_REGION((uintptr_t)ndns))
		/* region mode */
		return (struct ndctl_dax *)ndns;
	return NULL;
}
FUNC_MOCK_END
/*
 * The following mocks report the same fixed values regardless of the
 * handle: every *_get_resource returns RESOURCE_ADDRESS and every
 * *_get_size returns 1 GiB -- the tests only need consistent, non-zero
 * values to do their address arithmetic.
 */

/*
 * ndctl_pfn_get_resource - mock ndctl_pfn_get_resource
 */
FUNC_MOCK(ndctl_pfn_get_resource, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END

/*
 * ndctl_pfn_get_size - mock ndctl_pfn_get_size
 */
FUNC_MOCK(ndctl_pfn_get_size, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END

/*
 * ndctl_dax_get_resource - mock ndctl_dax_get_resource
 */
FUNC_MOCK(ndctl_dax_get_resource, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END

/*
 * ndctl_dax_get_size - mock ndctl_dax_get_size
 */
FUNC_MOCK(ndctl_dax_get_size, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END

/*
 * ndctl_namespace_get_resource - mock ndctl_namespace_get_resource
 */
FUNC_MOCK(ndctl_namespace_get_resource, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END

/*
 * ndctl_namespace_get_size - mock ndctl_namespace_get_size
 */
FUNC_MOCK(ndctl_namespace_get_size, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END

/*
 * ndctl_region_get_resource - mock ndctl_region_get_resource
 */
FUNC_MOCK(ndctl_region_get_resource, unsigned long long,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END

/*
 * ndctl_region_get_bus - mock ndctl_region_get_bus
 *
 * The region handle doubles as the bus handle.
 */
FUNC_MOCK(ndctl_region_get_bus, struct ndctl_bus *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return (struct ndctl_bus *)region;
}
FUNC_MOCK_END
/*
 * Bad-block iteration mocks: "first" resets the shared index i_bb and
 * "next" advances it; both serve entries from the test's HW bad-block
 * tables via get_nth_hw_badblock().
 */

/*
 * ndctl_namespace_get_first_badblock - mock ndctl_namespace_get_first_badblock
 */
FUNC_MOCK(ndctl_namespace_get_first_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END

/*
 * ndctl_namespace_get_next_badblock - mock ndctl_namespace_get_next_badblock
 */
FUNC_MOCK(ndctl_namespace_get_next_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END

/*
 * ndctl_region_get_first_badblock - mock ndctl_region_get_first_badblock
 */
FUNC_MOCK(ndctl_region_get_first_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END

/*
 * ndctl_region_get_next_badblock - mock ndctl_region_get_next_badblock
 */
FUNC_MOCK(ndctl_region_get_next_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END
/*
 * shared state for the command mocks: ndctl_bus_cmd_new_ars_cap()
 * records the request here and hands back a pointer to it as the
 * "command" object, so the later mocks can read it back
 */
static struct ndctl_data {
	uintptr_t bus;
	unsigned long long address;
	unsigned long long length;
} data;

/*
 * ndctl_bus_cmd_new_ars_cap - mock ndctl_bus_cmd_new_ars_cap
 */
FUNC_MOCK(ndctl_bus_cmd_new_ars_cap, struct ndctl_cmd *,
	struct ndctl_bus *bus, unsigned long long address,
	unsigned long long len)
FUNC_MOCK_RUN_DEFAULT {
	data.bus = (uintptr_t)bus;
	data.address = address;
	data.length = len;
	return (struct ndctl_cmd *)&data;
}
FUNC_MOCK_END

/*
 * ndctl_cmd_submit - mock ndctl_cmd_submit -- always succeeds
 */
FUNC_MOCK(ndctl_cmd_submit, int, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END

/*
 * ndctl_cmd_ars_cap_get_range - mock ndctl_cmd_ars_cap_get_range
 *
 * Reports success without filling in the range.
 */
FUNC_MOCK(ndctl_cmd_ars_cap_get_range, int,
	struct ndctl_cmd *ars_cap, struct ndctl_range *range)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END

/*
 * ndctl_bus_cmd_new_clear_error - mock ndctl_bus_cmd_new_clear_error
 *
 * Reuses the ars_cap command object (which points at 'data' above).
 */
FUNC_MOCK(ndctl_bus_cmd_new_clear_error, struct ndctl_cmd *,
	unsigned long long address,
	unsigned long long len,
	struct ndctl_cmd *ars_cap)
FUNC_MOCK_RUN_DEFAULT {
	return ars_cap;
}
FUNC_MOCK_END

/*
 * ndctl_cmd_clear_error_get_cleared - mock ndctl_cmd_clear_error_get_cleared
 *
 * Logs the recorded clear request (so the test's output can be matched)
 * and reports that the whole requested length was cleared.
 */
FUNC_MOCK(ndctl_cmd_clear_error_get_cleared, unsigned long long,
	struct ndctl_cmd *clear_err)
FUNC_MOCK_RUN_DEFAULT {
	struct ndctl_data *pdata = (struct ndctl_data *)clear_err;
	UT_OUT("ndctl_clear_error(%lu, %llu, %llu)",
		pdata->bus, pdata->address, pdata->length);
	return pdata->length;
}
FUNC_MOCK_END

/*
 * ndctl_cmd_unref - mock ndctl_cmd_unref -- nothing to release
 */
FUNC_MOCK(ndctl_cmd_unref, void, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
}
FUNC_MOCK_END
| 5,900 | 22.050781 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.h -- definitions for pmem2_badblock_mocks test
*/
/* added include guard -- the header previously had none */
#ifndef PMEM2_BADBLOCK_MOCKS_H
#define PMEM2_BADBLOCK_MOCKS_H

#include "extent.h"

/*
 * The mocked "file descriptor" is not a real descriptor -- it is a bit
 * field describing the simulated device:
 *   bits 6-8: type of device
 *   bits 4-5: ndctl mode
 *   bits 0-3: number of the test (selects a bad-block set)
 */

/* fd bits 6-8: type of device */
#define FD_REG_FILE (1 << 6)	/* regular file */
#define FD_CHR_DEV (2 << 6)	/* character device */
#define FD_DIRECTORY (3 << 6)	/* directory */
#define FD_BLK_DEV (4 << 6)	/* block device */

/* fd bits 4-5: ndctl mode */
#define MODE_NO_DEVICE (1 << 4)	/* did not found any matching device */
#define MODE_NAMESPACE (2 << 4)	/* namespace mode */
#define MODE_REGION (3 << 4)	/* region mode */

/* fd bits 0-3: number of test */

/* masks; NOTE: 0b literals are a GCC/Clang extension, not standard C */
#define MASK_DEVICE 0b0111000000	/* bits 6-8: device mask */
#define MASK_MODE 0b0000110000	/* bits 4-5: mode mask */
#define MASK_TEST 0b0000001111	/* bits 0-3: test mask */

/* checks */
#define IS_MODE_NO_DEVICE(x) ((x & MASK_MODE) == MODE_NO_DEVICE)
#define IS_MODE_NAMESPACE(x) ((x & MASK_MODE) == MODE_NAMESPACE)
#define IS_MODE_REGION(x) ((x & MASK_MODE) == MODE_REGION)

/* default block size: 1kB */
#define BLK_SIZE_1KB 1024

/* default size of device: 1 GiB */
#define DEV_SIZE_1GB (1024 * 1024 * 1024)

struct badblock *get_nth_hw_badblock(unsigned test, unsigned *i_bb);
int get_extents(int fd, struct extents **exts);

#endif /* PMEM2_BADBLOCK_MOCKS_H */
| 1,290 | 31.275 | 71 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/mocks_pmem2.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_pmem2.c -- mocked pmem2 functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "extent.h"
#include "source.h"
#include "pmem2_utils.h"
#include "pmem2_badblock_mocks.h"
/*
 * pmem2_region_namespace - mock pmem2_region_namespace
 *
 * Decodes the fake st_rdev value (the bit-field descriptor from
 * pmem2_badblock_mocks.h) into region/namespace handles; in
 * MODE_NO_DEVICE the namespace is reported as not found (NULL).
 */
FUNC_MOCK(pmem2_region_namespace, int,
	struct ndctl_ctx *ctx,
	const struct pmem2_source *src,
	struct ndctl_region **pregion,
	struct ndctl_namespace **pndns)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTne(pregion, NULL);
	dev_t st_rdev = src->value.st_rdev;
	/* the descriptor itself doubles as the region handle */
	*pregion = (void *)st_rdev;
	if (pndns == NULL)
		return 0;
	UT_ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
		src->value.ftype == PMEM2_FTYPE_DEVDAX);
	if (IS_MODE_NO_DEVICE(st_rdev)) {
		/* did not found any matching device */
		*pndns = NULL;
		return 0;
	}
	*pndns = (void *)st_rdev;
	return 0;
}
FUNC_MOCK_END
/*
 * pmem2_extents_create_get -- allocate extents structure and get extents
 * of the given file
 *
 * Delegates to the test's get_extents(), which serves the canned extent
 * tables from pmem2_badblock_mocks.c.
 */
FUNC_MOCK(pmem2_extents_create_get, int,
	int fd, struct extents **exts)
FUNC_MOCK_RUN_DEFAULT {
	return get_extents(fd, exts);
}
FUNC_MOCK_END
| 1,279 | 20.694915 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.c -- unit test for pmem2_badblock_*()
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "source.h"
#include "badblocks.h"
#include "pmem2_badblock_mocks.h"
/* capacity of each bad-block table (including the {0, 0} terminator) */
#define BAD_BLOCKS_NUMBER 10
/* capacity of each extents table (including the terminator) */
#define EXTENTS_NUMBER 8
#define MAX_BB_SET_STR "4"
#define MAX_BB_SET 4
#define DEFAULT_BB_SET 1
#define USAGE_MSG \
"Usage: pmem2_badblock_mocks <test_case> <file_type> <mode> [bad_blocks_set]\n"\
"Possible values of arguments:\n"\
" test_case : test_basic, test_read_clear_bb \n"\
" file_type : reg_file, chr_dev\n"\
" mode : no_device, namespace, region\n"\
" bad_blocks_set : 1-"MAX_BB_SET_STR"\n\n"

/* indexes of arguments */
enum args_t {
	ARG_TEST_CASE = 1,
	ARG_FILE_TYPE,
	ARG_MODE,
	ARG_BB_SET,
	/* it always has to be the last one */
	ARG_NUMBER, /* number of arguments */
};

/* signature of a test-case function; presumably used further down in
 * this file (the remainder is outside this chunk -- TODO confirm) */
typedef int test_fn(struct pmem2_source *src);
/* one fixed-size table of bad blocks, terminated by a {0, 0} entry */
typedef struct badblock bad_blocks_array[BAD_BLOCKS_NUMBER];
/*
 * Canned data sets, one per test number (index = test & MASK_TEST).
 * All offsets/lengths are expressed in 512-byte sectors and every
 * table is terminated by an all-zero entry.
 */

/* HW bad blocks expressed in 512b sectors */
static bad_blocks_array hw_bad_blocks[] =
{
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 HW bad block */
	{ {1, 1}, {0, 0} },
	/* test #3 - 6 HW bad blocks */
	{ {4, 10}, {16, 10}, {28, 2}, {32, 4}, {40, 4}, {50, 2}, {0, 0} },
	/* test #4 - 7 HW bad blocks */
	{ {2, 4}, {8, 2}, {12, 6}, {20, 2}, {24, 10}, {38, 4}, {46, 2}, \
	{0, 0} },
};

/* file's bad blocks expressed in 512b sectors */
static bad_blocks_array file_bad_blocks[] =
{
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 file bad block */
	{ {0, 2}, {0, 0} },
	/* test #3 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
	{32, 2}, {40, 2}, {0, 0} },
	/* test #4 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
	{32, 2}, {40, 2}, {0, 0} },
};

/* file's extents expressed in 512b sectors */
static struct extent files_extents[][EXTENTS_NUMBER] =
{
	/* test #1 - no extents */
	{ {0, 0, 0} },
	/* test #2 - 1 extent */
	{ {0, 0, 2}, {0, 0, 0} },
	/* test #3 - 7 extents */
	{ {2, 2, 4}, {8, 8, 2}, {12, 12, 6}, {20, 20, 2}, {24, 24, 10}, \
	{38, 38, 4}, {46, 46, 2}, {0, 0, 0} },
	/* test #4 - 6 extents */
	{ {4, 4, 10}, {16, 16, 10}, {28, 28, 2}, {32, 32, 4}, {40, 40, 4}, \
	{50, 50, 2}, {0, 0, 0} },
};
/*
 * map_test_to_set -- extract the bad-block-set index (the low test bits)
 * from a test number
 */
static inline unsigned
map_test_to_set(unsigned test)
{
	unsigned set = test & MASK_TEST;
	return set;
}
/*
 * get_nth_typed_badblock -- return the (*i_bb)-th entry of the bad-block
 * set selected by the test number, advancing the index; returns NULL at
 * the {0, 0} terminator
 */
static struct badblock *
get_nth_typed_badblock(unsigned test, unsigned *i_bb,
	bad_blocks_array bad_blocks[])
{
	unsigned set = map_test_to_set(test);
	struct badblock *entry = &bad_blocks[set][*i_bb];

	if (entry->offset == 0 && entry->len == 0)
		return NULL;	/* {0, 0} marks the end of the table */

	(*i_bb)++;
	return entry;
}
/*
 * get_nth_hw_badblock -- get next HW badblock
 *
 * Thin wrapper over get_nth_typed_badblock() using the HW table.
 * Non-static - presumably also referenced by the mocked ndctl layer;
 * TODO confirm against the rest of the test.
 */
struct badblock *
get_nth_hw_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, hw_bad_blocks);
}
/*
 * get_nth_file_badblock -- get next file's badblock
 *
 * Thin wrapper over get_nth_typed_badblock() using the file table.
 */
static struct badblock *
get_nth_file_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, file_bad_blocks);
}
/*
 * get_nth_badblock -- get next badblock
 *
 * Dispatches to the file or HW bad-block table depending on the device
 * type encoded in 'fd'; returns NULL when there are no (more) bad blocks.
 */
static struct badblock *
get_nth_badblock(int fd, unsigned *i_bb)
{
	UT_ASSERT(fd >= 0);
	if ((fd & MASK_MODE) == MODE_NO_DEVICE)
		return NULL; /* no matching device found */
	int device = fd & MASK_DEVICE;
	if (device == FD_REG_FILE)
		/* regular file - use the file's bad-block table */
		return get_nth_file_badblock((unsigned)fd, i_bb);
	if (device == FD_CHR_DEV)
		/* character device - use the HW bad-block table */
		return get_nth_hw_badblock((unsigned)fd, i_bb);
	/* directories and block devices - no bad blocks found */
	return NULL;
}
/*
 * get_extents -- get file's extents
 *
 * Fills *exts with the mocked extent table of the set selected by 'fd'.
 * For non-regular files only the block size is set (extents_count == 0).
 * Sector values from the table are converted to bytes with SEC2B().
 */
int
get_extents(int fd, struct extents **exts)
{
	unsigned set = map_test_to_set((unsigned)fd);
	*exts = ZALLOC(sizeof(struct extents));
	struct extents *pexts = *exts;
	/* set block size */
	pexts->blksize = BLK_SIZE_1KB;
	if ((fd & MASK_DEVICE) != FD_REG_FILE) {
		/* not a regular file */
		return 0;
	}
	/* count extents (length > 0) */
	while (files_extents[set][pexts->extents_count].length)
		pexts->extents_count++;
	/*
	 * It will be freed internally by libpmem2
	 * (pmem2_badblock_context_delete)
	 */
	pexts->extents = MALLOC(pexts->extents_count * sizeof(struct extent));
	for (int i = 0; i < pexts->extents_count; i++) {
		struct extent ext = files_extents[set][i];
		uint64_t off_phy = ext.offset_physical;
		uint64_t off_log = ext.offset_logical;
		uint64_t len = ext.length;
		/* check alignment - all values must be block-size multiples */
		UT_ASSERTeq(SEC2B(off_phy) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(off_log) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(len) % pexts->blksize, 0);
		pexts->extents[i].offset_physical = SEC2B(off_phy);
		pexts->extents[i].offset_logical = SEC2B(off_log);
		pexts->extents[i].length = SEC2B(len);
	}
	return 0;
}
/*
 * test_basic -- basic test
 *
 * Creates a bad-block context for the source, fetches one bad block and
 * returns the status of pmem2_badblock_next().
 */
static int
test_basic(struct pmem2_source *src)
{
	UT_OUT("TEST: test_basic: 0x%x", src->value.fd);
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	int ret;
	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;
	ret = pmem2_badblock_next(bbctx, &bb);
	pmem2_badblock_context_delete(&bbctx);
	return ret;
}
/*
 * test_read_clear_bb -- test reading and clearing bad blocks
 *
 * Iterates over all bad blocks reported by libpmem2, verifies each one
 * against the mocked tables (converted from sectors to bytes), clears it,
 * and finally checks the mocked table is exhausted too.
 */
static int
test_read_clear_bb(struct pmem2_source *src)
{
	UT_OUT("TEST: test_read_clear_bb: 0x%x", src->value.fd);
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	struct badblock *bb2;
	unsigned i_bb;
	int ret;
	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;
	i_bb = 0;
	while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
		/* every reported bad block must match the mocked table */
		bb2 = get_nth_badblock(src->value.fd, &i_bb);
		UT_ASSERTne(bb2, NULL);
		UT_ASSERTeq(bb.offset, SEC2B(bb2->offset));
		UT_ASSERTeq(bb.length, SEC2B(bb2->len));
		ret = pmem2_badblock_clear(bbctx, &bb);
		if (ret)
			goto exit_free;
	}
	/* the mocked table must be fully consumed as well */
	bb2 = get_nth_badblock(src->value.fd, &i_bb);
	UT_ASSERTeq(bb2, NULL);
exit_free:
	pmem2_badblock_context_delete(&bbctx);
	return ret;
}
/*
 * parse_arguments -- parse command-line arguments
 *
 * Encodes the test descriptor in *test (device type, mode and bad-blocks
 * set index), sets *ftype and selects the test function. Any invalid
 * argument prints USAGE_MSG and aborts via UT_FATAL.
 */
static void
parse_arguments(int argc, char *argv[], int *test, enum pmem2_file_type *ftype,
		test_fn **test_func)
{
	/* bad_blocks_set is optional, all other arguments are mandatory */
	if (argc < (ARG_NUMBER - 1) || argc > ARG_NUMBER) {
		UT_OUT(USAGE_MSG);
		if (argc > ARG_NUMBER)
			UT_FATAL("too many arguments");
		else
			UT_FATAL("missing required argument(s)");
	}
	char *test_case = argv[ARG_TEST_CASE];
	char *file_type = argv[ARG_FILE_TYPE];
	char *mode = argv[ARG_MODE];
	*test = 0;
	*test_func = NULL;
	/* select the test function */
	if (strcmp(test_case, "test_basic") == 0) {
		*test_func = test_basic;
	} else if (strcmp(test_case, "test_read_clear_bb") == 0) {
		*test_func = test_read_clear_bb;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong test case: %s", test_case);
	}
	/* encode the device type in the test descriptor */
	if (strcmp(file_type, "reg_file") == 0) {
		*test |= FD_REG_FILE;
		*ftype = PMEM2_FTYPE_REG;
	} else if (strcmp(file_type, "chr_dev") == 0) {
		*test |= FD_CHR_DEV;
		*ftype = PMEM2_FTYPE_DEVDAX;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong file type: %s", file_type);
	}
	/* encode the mode in the test descriptor */
	if (strcmp(mode, "no_device") == 0) {
		*test |= MODE_NO_DEVICE;
	} else if (strcmp(mode, "namespace") == 0) {
		*test |= MODE_NAMESPACE;
	} else if (strcmp(mode, "region") == 0) {
		*test |= MODE_REGION;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong mode: %s", mode);
	}
	/*
	 * Use ARG_NUMBER instead of the former magic constant '5' so this
	 * check cannot silently go stale if the argument list changes.
	 */
	int bad_blocks_set =
		(argc == ARG_NUMBER) ? atoi(argv[ARG_BB_SET]) : DEFAULT_BB_SET;
	if (bad_blocks_set >= 1 && bad_blocks_set <= MAX_BB_SET) {
		*test |= (bad_blocks_set - 1);
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong bad_blocks_set: %i", bad_blocks_set);
	}
}
/* entry point: parse args, run the selected test case, expect "no bad block" */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_badblock_mocks");
	/* sanity check of defines */
	UT_ASSERTeq(atoi(MAX_BB_SET_STR), MAX_BB_SET);
	struct pmem2_source src;
	test_fn *test_func;
	src.type = PMEM2_SOURCE_FD;
	parse_arguments(argc, argv, &src.value.fd, &src.value.ftype,
			&test_func);
	/* fd doubles as the mocked device id */
	src.value.st_rdev = (dev_t)src.value.fd;
	int result = test_func(&src);
	/* each test must end up with all bad blocks consumed/cleared */
	UT_ASSERTeq(result, PMEM2_E_NO_BAD_BLOCK_FOUND);
	DONE(NULL);
}
| 8,239 | 22.815029 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
@t.linux_only
@t.require_ndctl
class BB_MOCKS_BASIC(t.Test):
    """PART #1 - basic tests"""
    # runs the 'test_basic' case of pmem2_badblock_mocks with the
    # file_type/mode parameters supplied by @t.add_params on subclasses
    def run(self, ctx):
        test = 'test_basic'
        ctx.exec('pmem2_badblock_mocks', test, ctx.file_type(), ctx.mode())
@t.add_params('file_type', ['reg_file', 'chr_dev'])
@t.add_params('mode', ['no_device'])
class TEST0(BB_MOCKS_BASIC):
    """did not found any matching device"""
    # NOTE(review): the second bare string below is a no-op statement,
    # not part of the docstring
    """regular file / character device"""
    pass
@t.add_params('file_type', ['reg_file'])
@t.add_params('mode', ['namespace', 'region'])
class TEST1(BB_MOCKS_BASIC):
    """regular file, namespace mode / region mode"""
    pass
@t.add_params('file_type', ['chr_dev'])
@t.add_params('mode', ['region'])
class TEST2(BB_MOCKS_BASIC):
    """character device, region mode"""
    pass
@t.linux_only
@t.require_ndctl
class BB_MOCKS_READ_CLEAR(t.Test):
    """PART #2 - test reading and clearing bad blocks"""
    # runs 'test_read_clear_bb' with an additional bad-blocks-set index
    # (ctx.bb()) supplied by @t.add_params on subclasses
    def run(self, ctx):
        test = 'test_read_clear_bb'
        ctx.exec('pmem2_badblock_mocks',
                 test, ctx.file_type(), ctx.mode(), ctx.bb())
@t.add_params('file_type', ['reg_file'])
@t.add_params('mode', ['namespace', 'region'])
@t.add_params('bb', [1, 2, 3, 4])
class TEST3(BB_MOCKS_READ_CLEAR):
    """regular file, namespace mode / region mode"""
    pass
@t.add_params('file_type', ['chr_dev'])
@t.add_params('mode', ['region'])
@t.add_params('bb', [1, 2, 3, 4])
class TEST4(BB_MOCKS_READ_CLEAR):
    """character device, region mode"""
    pass
| 1,591 | 23.875 | 75 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/mocks_other.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_other.c -- mocked various functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <sys/stat.h>
#include "unittest.h"
#include "out.h"
#include "pmem2_badblock_mocks.h"
/*
 * fallocate -- mock fallocate
 *
 * Logs the call and always reports success.
 */
FUNC_MOCK(fallocate, int,
	int fd, int mode, __off_t offset, __off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("fallocate(%i, %i, %lu, %lu)", fd, mode, offset, len);
	return 0;
}
FUNC_MOCK_END
/*
 * fcntl -- mock fcntl
 *
 * Only F_GETFL is expected (asserted); always reports O_RDWR.
 */
FUNC_MOCK(fcntl, int,
	int fildes, int cmd)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(cmd, F_GETFL);
	return O_RDWR;
}
FUNC_MOCK_END
| 681 | 17.944444 | 62 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_api/pmem2_api.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_api.c -- PMEM2_API_[START|END] unittests
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"
/*
 * map_valid -- return valid mapped pmem2_map and validate mapped memory length
 *
 * PMEM2_MAP asserts on failure, so a non-NULL map is always returned.
 */
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
	struct pmem2_map *map = NULL;
	PMEM2_MAP(cfg, src, &map);
	UT_ASSERTeq(pmem2_map_get_size(map), size);
	return map;
}
/*
 * test_pmem2_api_logs -- map O_RDWR file and do pmem2_[cpy|set|move]_fns
 *
 * Exercises two memmove calls and one memset call so that the harness can
 * count the pmem2_memmove/pmem2_memset BEGIN/END markers in the pmemcheck
 * stores log.
 */
static int
test_pmem2_api_logs(const struct test_case *tc, int argc,
		char *argv[])
{
	if (argc < 1)
		UT_FATAL(
			/* fixed: usage named a different (copy-pasted) test */
			"usage: test_pmem2_api_logs <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	/* all three words have equal length so each overwrites the previous */
	const char *word1 = "Persistent memory...";
	const char *word2 = "Nonpersistent memory";
	const char *word3 = "XXXXXXXXXXXXXXXXXXXX";
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t size = 0;
	PMEM2_SOURCE_SIZE(src, &size);
	struct pmem2_map *map = map_valid(cfg, src, size);
	char *addr = pmem2_map_get_address(map);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	memcpy_fn(addr, word1, strlen(word1), 0);
	UT_ASSERTeq(strcmp(addr, word1), 0);
	memmove_fn(addr, word2, strlen(word2), 0);
	UT_ASSERTeq(strcmp(addr, word2), 0);
	memset_fn(addr, 'X', strlen(word3), 0);
	UT_ASSERTeq(strcmp(addr, word3), 0);
	/* cleanup after the test */
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_pmem2_api_logs),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/* entry point: dispatch the test case named on the command line */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_api");
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
| 2,130 | 22.94382 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_api/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import os
import testframework as t
import futils
@t.require_valgrind_enabled('pmemcheck')
class Pmem2ApiLogs(t.Test):
    """Run a pmem2_api test case under pmemcheck and verify how many
    pmem2_memmove/pmem2_memset BEGIN/END markers were emitted to the
    pmemcheck stores log (counts set by subclasses)."""
    test_type = t.Medium
    def run(self, ctx):
        filepath = ctx.create_holey_file(16 * t.MiB, 'testfile')
        # controls whether libpmem2 emits function-name markers to the log
        ctx.env['PMREORDER_EMIT_LOG'] = self.PMREORDER_EMIT_LOG
        ctx.valgrind.add_opt('--log-stores=yes')
        ctx.exec('pmem2_api', self.test_case, filepath)
        log_name = 'pmemcheck{}.log'.format(self.testnum)
        pmemecheck_log = os.path.join(ctx.cwd, log_name)
        # count each marker kind separately
        memmove_fn_begin_nums = futils.count(
            pmemecheck_log, 'pmem2_memmove.BEGIN')
        memmove_fn_end_nums = futils.count(
            pmemecheck_log, 'pmem2_memmove.END')
        memset_fn_begin_nums = futils.count(
            pmemecheck_log, 'pmem2_memset.BEGIN')
        memset_fn_end_nums = futils.count(
            pmemecheck_log, 'pmem2_memset.END')
        if (memmove_fn_begin_nums != self.expected_memmove_fn_nums or
                memmove_fn_end_nums != self.expected_memmove_fn_nums or
                memset_fn_begin_nums != self.expected_memset_fn_nums or
                memset_fn_end_nums != self.expected_memset_fn_nums):
            raise futils.Fail(
                'Pattern: pmem2_memmove.BEGIN occurrs {} times. Expected {}.\n'
                'Pattern: pmem2_memmove.END occurrs {} times. Expected {}.\n'
                'Pattern: pmem2_memset.BEGIN occurrs {} times. Expected {}.\n'
                'Pattern: pmem2_memset.END occurrs {} times. Expected {}.'
                .format(memmove_fn_begin_nums, self.expected_memmove_fn_nums,
                        memmove_fn_end_nums, self.expected_memmove_fn_nums,
                        memset_fn_begin_nums, self.expected_memset_fn_nums,
                        memset_fn_end_nums, self.expected_memset_fn_nums)
            )
class TEST0(Pmem2ApiLogs):
    """
    test the emission of library and function names to pmemcheck stores log
    """
    test_case = "test_pmem2_api_logs"
    # the C test performs 2 memmove-family calls and 1 memset call
    expected_memmove_fn_nums = 2
    expected_memset_fn_nums = 1
    PMREORDER_EMIT_LOG = '1'
class TEST1(Pmem2ApiLogs):
    """
    test the emission of library and function names to pmemcheck stores log
    with PMREORDER_EMIT_LOG set to '0'. Any names shouldn't be emitted.
    """
    test_case = "test_pmem2_api_logs"
    expected_memmove_fn_nums = 0
    expected_memset_fn_nums = 0
    PMREORDER_EMIT_LOG = '0'
| 2,513 | 35.434783 | 79 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_misc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd_obc_test_misc.c -- miscellaneous test cases for rpmemd_obc module
*/
#include "rpmemd_obc_test_common.h"
/*
 * client_send_disconnect -- connect, send specified number of bytes and
 * disconnect
 *
 * A size of 0 means "connect and close without sending anything".
 */
static void
client_send_disconnect(char *target, void *msg, size_t size)
{
	struct rpmem_ssh *conn = clnt_connect(target);
	if (size > 0)
		clnt_send(conn, msg, size);
	clnt_close(conn);
}
/*
 * client_econnreset -- test case for closing connection when operation on
 * server is in progress - client side
 *
 * Sends progressively larger prefixes of a valid create message and drops
 * the connection each time; the server must survive every truncation point.
 * Returns the number of consumed CLI arguments (test-framework convention).
 */
int
client_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_create(msg);
	set_rpmem_cmd("server_econnreset");
	{
		/*
		 * Connect and disconnect immediately.
		 */
		client_send_disconnect(target, msg, 0);
	}
	{
		/*
		 * Connect, send half of a message header and close the
		 * connection.
		 */
		client_send_disconnect(target, msg,
				sizeof(struct rpmem_msg_hdr) / 2);
	}
	{
		/*
		 * Connect, send only a message header and close the
		 * connection.
		 */
		client_send_disconnect(target, msg,
				sizeof(struct rpmem_msg_hdr));
	}
	{
		/*
		 * Connect, send half of a message and close the connection.
		 */
		client_send_disconnect(target, msg, msg_size / 2);
	}
	FREE(msg);
	return 1;
}
/*
 * server_econnreset -- test case for closing connection when operation on
 * server is in progress - server side
 *
 * Expects rpmemd_obc_process() to fail (non-zero) because the client
 * disconnects mid-message.
 */
int
server_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	struct rpmemd_obc *rpdc;
	int ret;
	rpdc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	UT_ASSERTne(rpdc, NULL);
	ret = rpmemd_obc_status(rpdc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmemd_obc_process(rpdc, &REQ_CB, NULL);
	UT_ASSERTne(ret, 0);
	rpmemd_obc_fini(rpdc);
	return 0;
}
| 2,128 | 18.897196 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_open.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_open.c -- test cases for open request message
*/
#include "rpmemd_obc_test_common.h"
/*
 * Number of cases for checking open request message. Must be kept in sync
 * with client_bad_msg_open function.
 */
#define BAD_MSG_OPEN_COUNT 11
/*
 * client_bad_msg_open -- check if server detects invalid open request
 * messages
 *
 * Each iteration corrupts exactly one field of an otherwise-valid open
 * message and expects the server to drop the connection.
 */
static void
client_bad_msg_open(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	for (int i = 0; i < BAD_MSG_OPEN_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);
		*msg = OPEN_MSG;
		msg->hdr.size = msg_size;
		memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
		switch (i) {
		case 0:
			/* provider out of range (too low) */
			msg->c.provider = 0;
			break;
		case 1:
			/* provider out of range (too high) */
			msg->c.provider = MAX_RPMEM_PROV;
			break;
		case 2:
			/* pool descriptor size inconsistent with hdr.size */
			msg->pool_desc.size -= 1;
			break;
		case 3:
			msg->pool_desc.size += 1;
			break;
		case 4:
			/* empty pool descriptor */
			msg->pool_desc.size = 0;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 5:
			msg->pool_desc.size = 1;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 6:
			/* descriptor terminated too early / in the middle */
			msg->pool_desc.desc[0] = '\0';
			break;
		case 7:
			msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
			break;
		case 8:
			/* descriptor not NUL-terminated */
			msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
			break;
		case 9:
			/* unsupported protocol version */
			msg->c.major = RPMEM_PROTO_MAJOR + 1;
			break;
		case 10:
			msg->c.minor = RPMEM_PROTO_MINOR + 1;
			break;
		default:
			UT_ASSERT(0);
		}
		rpmem_hton_msg_open(msg);
		clnt_send(ssh, msg, msg_size);
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_open_noresp -- send open request message and don't expect a
 * response
 *
 * The server side is expected to close the connection without replying.
 */
static void
client_msg_open_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = OPEN_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_open(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_open_resp -- send open request message and expect a response
 * with specified status. If status is 0, validate open request response
 * message
 */
static void
client_msg_open_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	struct rpmem_msg_open_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = OPEN_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_open(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_open_resp(&resp);
	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		/* on success the whole response must match the mocked values */
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_OPEN_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_open_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_open -- test case for open request message - client side
 *
 * Drives the server through: invalid messages, no-response, success
 * response and error response scenarios.
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_open(target);
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_OPEN);
	client_msg_open_noresp(target);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 0);
	client_msg_open_resp(target, 0);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 1);
	client_msg_open_resp(target, 1);
	return 1;
}
| 4,105 | 21.56044 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmemd_obc_test.c -- unit test for rpmemd_obc module
*/
#include "rpmemd_obc_test_common.h"
#include "out.h"
#include "os.h"
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(server_bad_msg),
	TEST_CASE(server_msg_noresp),
	TEST_CASE(server_msg_resp),
	TEST_CASE(client_bad_msg_hdr),
	TEST_CASE(server_econnreset),
	TEST_CASE(client_econnreset),
	TEST_CASE(client_create),
	TEST_CASE(client_open),
	TEST_CASE(client_close),
	TEST_CASE(client_set_attr),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/* entry point: set up logging, run the selected case(s), tear down */
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmemd_obc");
	out_init("rpmemd_obc",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
	rpmemd_log_level = rpmemd_log_level_from_str(
			os_getenv("RPMEMD_LOG_LEVEL"));
	rpmem_util_cmds_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	rpmem_util_cmds_fini();
	rpmemd_log_close();
	out_fini();
	DONE(NULL);
}
| 1,104 | 19.090909 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_msg_hdr.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd_obc_test_msg_hdr.c -- test cases for message header
*/
#include "rpmemd_obc_test_common.h"
/*
 * Number of cases for checking message header. Must be kept in sync with
 * client_bad_msg_hdr function.
 */
#define BAD_MSG_HDR_COUNT 6
/*
 * client_bad_msg_hdr -- test case for checking message header
 *
 * Each iteration corrupts one header field (size or type) and expects the
 * server to drop the connection.
 */
int
client_bad_msg_hdr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_bad_msg");
	for (int i = 0; i < BAD_MSG_HDR_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);
		struct rpmem_msg_hdr msg = MSG_HDR;
		switch (i) {
		case 0:
			/* size smaller than the header itself */
			msg.size -= 1;
			break;
		case 1:
			msg.size = 0;
			break;
		case 2:
			/* type out of range */
			msg.type = MAX_RPMEM_MSG_TYPE;
			break;
		case 3:
			/* response types are not valid requests */
			msg.type = RPMEM_MSG_TYPE_OPEN_RESP;
			break;
		case 4:
			msg.type = RPMEM_MSG_TYPE_CREATE_RESP;
			break;
		case 5:
			msg.type = RPMEM_MSG_TYPE_CLOSE_RESP;
			break;
		default:
			UT_ASSERT(0);
		}
		rpmem_hton_msg_hdr(&msg);
		clnt_send(ssh, &msg, sizeof(msg));
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}
	return 1;
}
| 1,232 | 17.681818 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_create.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_create.c -- test cases for create request message
*/
#include "rpmemd_obc_test_common.h"
/*
* Number of cases for checking create request message. Must be kept in sync
* with client_bad_msg_create function.
*/
#define BAD_MSG_CREATE_COUNT 11
/*
* client_bad_msg_create -- check if server detects invalid create request
* messages
*/
static void
client_bad_msg_create(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_create *msg = MALLOC(msg_size);
for (int i = 0; i < BAD_MSG_CREATE_COUNT; i++) {
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = CREATE_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
switch (i) {
case 0:
msg->c.provider = 0;
break;
case 1:
msg->c.provider = MAX_RPMEM_PROV;
break;
case 2:
msg->pool_desc.size -= 1;
break;
case 3:
msg->pool_desc.size += 1;
break;
case 4:
msg->pool_desc.size = 0;
msg->hdr.size = sizeof(CREATE_MSG) +
msg->pool_desc.size;
break;
case 5:
msg->pool_desc.size = 1;
msg->hdr.size = sizeof(CREATE_MSG) +
msg->pool_desc.size;
break;
case 6:
msg->pool_desc.desc[0] = '\0';
break;
case 7:
msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
break;
case 8:
msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
break;
case 9:
msg->c.major = RPMEM_PROTO_MAJOR + 1;
break;
case 10:
msg->c.minor = RPMEM_PROTO_MINOR + 1;
break;
default:
UT_ASSERT(0);
}
rpmem_hton_msg_create(msg);
clnt_send(ssh, msg, msg_size);
clnt_wait_disconnect(ssh);
clnt_close(ssh);
}
FREE(msg);
FREE(target);
}
/*
 * client_msg_create_noresp -- send create request message and don't expect
 * a response
 *
 * NOTE(review): unlike client_msg_open_noresp this does not call
 * clnt_wait_disconnect() before closing - presumably intentional, but
 * worth confirming against the server-side scenario.
 */
static void
client_msg_create_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_create(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_create_resp -- send create request message and expect a response
 * with specified status. If status is 0, validate create request response
 * message
 */
static void
client_msg_create_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_msg_create_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_create(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_create_resp(&resp);
	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		/* on success the whole response must match the mocked values */
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_CREATE_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_create_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_create -- test case for create request message - client side
 *
 * Drives the server through: invalid messages, no-response, success
 * response and error response scenarios.
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_create(target);
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CREATE);
	client_msg_create_noresp(target);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 0);
	client_msg_create_resp(target, 0);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 1);
	client_msg_create_resp(target, 1);
	return 1;
}
| 4,165 | 22.016575 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_set_attr.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* rpmemd_obc_test_set_attr.c -- test cases for set attributes request message
*/
#include "rpmemd_obc_test_common.h"
/*
 * client_msg_set_attr_noresp -- send set attributes request message and don't
 * expect a response
 */
static void
client_msg_set_attr_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = SET_ATTR_MSG;
	rpmem_hton_msg_set_attr(msg);
	clnt_send(ssh, msg, msg_size);
	/* the server is expected to close the connection without replying */
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_set_attr_resp -- send set attributes request message and expect
 * a response with specified status. If status is 0, validate set attributes
 * request response message
 */
static void
client_msg_set_attr_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
	struct rpmem_msg_set_attr_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = SET_ATTR_MSG;
	rpmem_hton_msg_set_attr(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_set_attr_resp(&resp);
	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		/* on success validate the full response header */
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_SET_ATTR_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_set_attr_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	}
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_set_attr -- test case for set attributes request message - client
 * side
 *
 * Drives the server through no-response, success and error scenarios.
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_SET_ATTR);
	client_msg_set_attr_noresp(target);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 0);
	client_msg_set_attr_resp(target, 0);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 1);
	client_msg_set_attr_resp(target, 1);
	return 1;
}
| 2,255 | 22.5 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_common.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_obc_test_common.c -- common definitions for rpmemd_obc tests
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include "os.h"
#include "rpmemd_obc_test_common.h"
#define CMD_BUFF_SIZE 4096
/* original value of $RPMEM_CMD; strdup'd once, intentionally never freed */
static const char *rpmem_cmd;
/*
 * set_rpmem_cmd -- set RPMEM_CMD variable
 *
 * Builds "<original RPMEM_CMD> <formatted args>" into a static buffer and
 * re-exports it so the server side runs the requested test-case command.
 */
void
set_rpmem_cmd(const char *fmt, ...)
{
	static char cmd_buff[CMD_BUFF_SIZE];
	if (!rpmem_cmd) {
		char *cmd = os_getenv(RPMEM_CMD_ENV);
		UT_ASSERTne(cmd, NULL);
		rpmem_cmd = STRDUP(cmd);
	}
	ssize_t ret;
	size_t cnt = 0;
	va_list ap;
	va_start(ap, fmt);
	ret = SNPRINTF(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
			"%s ", rpmem_cmd);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
	UT_ASSERT(ret > 0);
	cnt += (size_t)ret;
	va_end(ap);
	ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
	UT_ASSERTeq(ret, 0);
	/*
	 * Rpmem has internal RPMEM_CMD variable copy and it is assumed
	 * RPMEMD_CMD will not change its value during execution. To refresh the
	 * internal copy it must be destroyed and a instance must be initialized
	 * manually.
	 */
	rpmem_util_cmds_fini();
	rpmem_util_cmds_init();
}
/*
 * req_cb_check_req -- validate request attributes
 *
 * Request fields must match the constants the client side sent.
 */
static void
req_cb_check_req(const struct rpmem_req_attr *req)
{
	UT_ASSERTeq(req->nlanes, NLANES);
	UT_ASSERTeq(req->pool_size, POOL_SIZE);
	UT_ASSERTeq(req->provider, PROVIDER);
	UT_ASSERTeq(strcmp(req->pool_desc, POOL_DESC), 0);
}
/*
 * req_cb_check_pool_attr -- validate pool attributes
 *
 * The received attributes must be byte-identical to POOL_ATTR_INIT.
 */
static void
req_cb_check_pool_attr(const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_pool_attr attr = POOL_ATTR_INIT;
	UT_ASSERTeq(memcmp(&attr, pool_attr, sizeof(attr)), 0);
}
/*
 * req_cb_create -- callback for create request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg: records the message type in args->types, optionally
 * sends a mocked response, and returns args->ret (forced or as fallback).
 */
static int
req_cb_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	UT_ASSERTne(arg, NULL);
	UT_ASSERTne(req, NULL);
	UT_ASSERTne(pool_attr, NULL);
	req_cb_check_req(req);
	req_cb_check_pool_attr(pool_attr);
	struct req_cb_arg *args = arg;
	/* record that a CREATE request was seen */
	args->types |= (1 << RPMEM_MSG_TYPE_CREATE);
	int ret = args->ret;
	if (args->resp) {
		struct rpmem_resp_attr resp = {
			.port = PORT,
			.rkey = RKEY,
			.raddr = RADDR,
			.persist_method = PERSIST_METHOD,
			.nlanes = NLANES_RESP,
		};
		ret = rpmemd_obc_create_resp(obc,
				args->status, &resp);
	}
	if (args->force_ret)
		ret = args->ret;
	return ret;
}
/*
 * req_cb_open -- callback for open request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg; see req_cb_create for the pattern.
 */
static int
req_cb_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	UT_ASSERTne(arg, NULL);
	UT_ASSERTne(req, NULL);
	req_cb_check_req(req);
	struct req_cb_arg *args = arg;
	/* record that an OPEN request was seen */
	args->types |= (1 << RPMEM_MSG_TYPE_OPEN);
	int ret = args->ret;
	if (args->resp) {
		struct rpmem_resp_attr resp = {
			.port = PORT,
			.rkey = RKEY,
			.raddr = RADDR,
			.persist_method = PERSIST_METHOD,
			.nlanes = NLANES_RESP,
		};
		struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
		ret = rpmemd_obc_open_resp(obc, args->status,
				&resp, &pool_attr);
	}
	if (args->force_ret)
		ret = args->ret;
	return ret;
}
/*
 * req_cb_close -- callback for close request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg; see req_cb_create for the pattern.
 */
static int
req_cb_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	UT_ASSERTne(arg, NULL);
	struct req_cb_arg *args = arg;
	/* record that a CLOSE request was seen */
	args->types |= (1 << RPMEM_MSG_TYPE_CLOSE);
	int ret = args->ret;
	if (args->resp)
		ret = rpmemd_obc_close_resp(obc, args->status);
	if (args->force_ret)
		ret = args->ret;
	return ret;
}
/*
 * req_cb_set_attr -- callback for set attributes request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg; see req_cb_create for the pattern.
 */
static int
req_cb_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	UT_ASSERTne(arg, NULL);
	struct req_cb_arg *args = arg;
	/* record that a SET_ATTR request was seen */
	args->types |= (1 << RPMEM_MSG_TYPE_SET_ATTR);
	int ret = args->ret;
	if (args->resp)
		ret = rpmemd_obc_set_attr_resp(obc, args->status);
	if (args->force_ret)
		ret = args->ret;
	return ret;
}
/*
 * REQ_CB -- request callbacks
 *
 * Shared callback table passed to rpmemd_obc_process() by all server-side
 * test cases.
 */
struct rpmemd_obc_requests REQ_CB = {
	.create = req_cb_create,
	.open = req_cb_open,
	.close = req_cb_close,
	.set_attr = req_cb_set_attr,
};
/*
 * clnt_wait_disconnect -- wait for disconnection
 *
 * rpmem_ssh_monitor(ssh, 0) blocks; any return value but 1 means the
 * connection went down.
 */
void
clnt_wait_disconnect(struct rpmem_ssh *ssh)
{
	int ret;
	ret = rpmem_ssh_monitor(ssh, 0);
	UT_ASSERTne(ret, 1);
}
/*
 * clnt_connect -- create a ssh connection with specified target
 */
struct rpmem_ssh *
clnt_connect(char *target)
{
	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	struct rpmem_ssh *ssh = rpmem_ssh_open(info);
	UT_ASSERTne(ssh, NULL);
	rpmem_target_free(info);
	return ssh;
}
/*
* clnt_close -- close client
*/
void
clnt_close(struct rpmem_ssh *ssh)
{
rpmem_ssh_close(ssh);
}
/*
 * clnt_send -- send data
 *
 * Asserts that the whole buffer was sent successfully.
 */
void
clnt_send(struct rpmem_ssh *session, const void *data, size_t size)
{
	int rv = rpmem_ssh_send(session, data, size);
	UT_ASSERTeq(rv, 0);
}
/*
 * clnt_recv -- receive data
 *
 * Asserts that the whole buffer was received successfully.
 */
void
clnt_recv(struct rpmem_ssh *session, void *data, size_t size)
{
	int rv = rpmem_ssh_recv(session, data, size);
	UT_ASSERTeq(rv, 0);
}
/*
 * server_msg_args -- process a message according to specified arguments
 *
 * Runs rpmemd_obc_process() once and verifies both its return value and
 * the bitmask of message types recorded by the callbacks.  For
 * CONN_WAIT_CLOSE a second process call must return 1 (closed connection).
 */
static void
server_msg_args(struct rpmemd_obc *rpdc, enum conn_wait_close conn,
	struct req_cb_arg *args)
{
	int ret;
	/* remember expected message types, reset the callback recorder */
	unsigned long long types = args->types;
	args->types = 0;
	ret = rpmemd_obc_process(rpdc, &REQ_CB, args);
	UT_ASSERTeq(ret, args->ret);
	UT_ASSERTeq(args->types, types);
	if (conn == CONN_WAIT_CLOSE) {
		/* the next process call must report the closed connection */
		ret = rpmemd_obc_process(rpdc, &REQ_CB, args);
		UT_ASSERTeq(ret, 1);
	}
	rpmemd_obc_fini(rpdc);
}
/*
 * server_msg_resp -- process a message of specified type, response to client
 * with specific status value and return status of sending response function
 *
 * argv[0] -- message type (index into the types bitmask)
 * argv[1] -- status value sent in the response
 */
int
server_msg_resp(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: %s msg_type status", tc->name);
	unsigned type = ATOU(argv[0]);
	int status = atoi(argv[1]);
	int ret;
	struct rpmemd_obc *rpdc;
	rpdc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	UT_ASSERTne(rpdc, NULL);
	ret = rpmemd_obc_status(rpdc, 0);
	UT_ASSERTeq(ret, 0);
	/* respond (resp = 1) with 'status'; callback itself returns 0 */
	struct req_cb_arg args = {
		.ret = 0,
		.force_ret = 0,
		.resp = 1,
		.types = (1U << type),
		.status = status,
	};
	server_msg_args(rpdc, CONN_WAIT_CLOSE, &args);
	/* two command-line arguments consumed */
	return 2;
}
/*
 * server_msg_noresp -- process a message of specified type, do not response to
 * client and return specific value from process callback
 *
 * argv[0] -- message type (index into the types bitmask)
 */
int
server_msg_noresp(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s msg_type", tc->name);
	/*
	 * Parse the type as unsigned (ATOU), consistently with
	 * server_msg_resp -- atoi could yield a negative value and make
	 * the (1U << type) shift below undefined behavior.
	 */
	unsigned type = ATOU(argv[0]);
	int ret;
	struct rpmemd_obc *rpdc;
	rpdc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	UT_ASSERTne(rpdc, NULL);
	ret = rpmemd_obc_status(rpdc, 0);
	UT_ASSERTeq(ret, 0);
	/* no response (resp = 0); force -1 from the process callback */
	struct req_cb_arg args = {
		.ret = -1,
		.force_ret = 1,
		.resp = 0,
		.types = (1U << type),
		.status = 0,
	};
	server_msg_args(rpdc, CONN_CLOSE, &args);
	/* one command-line argument consumed */
	return 1;
}
/*
 * server_bad_msg -- process a message and expect
 * error returned from rpmemd_obc_process function
 */
int
server_bad_msg(const struct test_case *tc, int argc, char *argv[])
{
	struct rpmemd_obc *obc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	UT_ASSERTne(obc, NULL);

	int rv = rpmemd_obc_status(obc, 0);
	UT_ASSERTeq(rv, 0);

	/* a malformed message must make processing fail */
	rv = rpmemd_obc_process(obc, &REQ_CB, NULL);
	UT_ASSERTne(rv, 0);

	rpmemd_obc_fini(obc);

	/* no command-line arguments consumed */
	return 0;
}
| 7,924 | 18.329268 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/setup.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2019, Intel Corporation
#
# src/test/rpmemd_obc/setup.sh -- common setup for rpmemd_obc tests
#
require_nodes 2
require_node_log_files 1 $RPMEM_LOG_FILE
RPMEM_CMD="\"cd ${NODE_TEST_DIR[0]} && UNITTEST_FORCE_QUIET=1"
RPMEM_CMD="$RPMEM_CMD RPMEMD_LOG_FILE=$RPMEMD_LOG_FILE"
RPMEM_CMD="$RPMEM_CMD RPMEMD_LOG_LEVEL=$RPMEMD_LOG_LEVEL"
RPMEM_CMD="$RPMEM_CMD LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[0]}:$REMOTE_LD_LIBRARY_PATH"
RPMEM_CMD="$RPMEM_CMD ./rpmemd_obc$EXESUFFIX\""
export_vars_node 1 RPMEM_CMD
| 578 | 29.473684 | 89 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_close.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd_obc_test_close.c -- test cases for close request message
*/
#include "rpmemd_obc_test_common.h"
/*
* client_msg_close_noresp -- send close request message and don't expect a
* response
*/
static void
client_msg_close_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
struct rpmem_msg_close msg = CLOSE_MSG;
rpmem_hton_msg_close(&msg);
struct rpmem_ssh *ssh = clnt_connect(target);
clnt_send(ssh, &msg, sizeof(msg));
clnt_wait_disconnect(ssh);
clnt_close(ssh);
FREE(target);
}
/*
 * client_msg_close_resp -- send close request message and expect a response.
 * For non-zero 'status', verify that the response carries exactly that
 * status.
 *
 * NOTE(review): for status == 0 only the receipt of the response is
 * checked; its contents are not validated here, contrary to what the
 * original comment claimed.
 */
static void
client_msg_close_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	struct rpmem_msg_close msg = CLOSE_MSG;
	rpmem_hton_msg_close(&msg);
	struct rpmem_msg_close_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	clnt_send(ssh, &msg, sizeof(msg));
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_close_resp(&resp);
	if (status)
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	clnt_close(ssh);
	FREE(target);
}
/*
 * client_close -- test case for close request message - client side
 *
 * Exercises three server scenarios configured via set_rpmem_cmd():
 * no response, response with status 0, and response with status 1.
 */
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	/* server processes the CLOSE message but sends no response */
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CLOSE);
	client_msg_close_noresp(target);
	/* server responds with success status */
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 0);
	client_msg_close_resp(target, 0);
	/* server responds with error status */
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 1);
	client_msg_close_resp(target, 1);
	/* one command-line argument (the target) consumed */
	return 1;
}
| 1,791 | 21.683544 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_common.h -- common declarations for rpmemd_obc test
*/
#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#include "rpmemd_log.h"
#include "rpmemd_obc.h"
#define PORT 1234
#define RKEY 0x0123456789abcdef
#define RADDR 0xfedcba9876543210
#define PERSIST_METHOD RPMEM_PM_APM
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
#define POOL_SIZE 0x0001234567abcdef
#define NLANES 0x123
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool.set"
#define BUFF_SIZE 8192
static const char pool_desc[] = POOL_DESC;
#define POOL_DESC_SIZE (sizeof(pool_desc) / sizeof(pool_desc[0]))
struct rpmem_ssh *clnt_connect(char *target);
void clnt_wait_disconnect(struct rpmem_ssh *ssh);
void clnt_send(struct rpmem_ssh *ssh, const void *buff, size_t len);
void clnt_recv(struct rpmem_ssh *ssh, void *buff, size_t len);
void clnt_close(struct rpmem_ssh *ssh);
enum conn_wait_close {
CONN_CLOSE,
CONN_WAIT_CLOSE,
};
void set_rpmem_cmd(const char *fmt, ...);
extern struct rpmemd_obc_requests REQ_CB;
/* arguments controlling the behavior of the request callbacks (REQ_CB) */
struct req_cb_arg {
	int resp;	/* if set, the callback sends a response */
	unsigned long long types;	/* bitmask of processed message types */
	int force_ret;	/* if set, override the callback's return with 'ret' */
	int ret;	/* value returned from the callback */
	int status;	/* status value sent in the response */
};
static const struct rpmem_msg_hdr MSG_HDR = {
.type = RPMEM_MSG_TYPE_CLOSE,
.size = sizeof(struct rpmem_msg_hdr),
};
static const struct rpmem_msg_create CREATE_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_CREATE,
.size = sizeof(struct rpmem_msg_create),
},
.c = {
.major = RPMEM_PROTO_MAJOR,
.minor = RPMEM_PROTO_MINOR,
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.buff_size = BUFF_SIZE,
},
.pool_attr = POOL_ATTR_INIT,
.pool_desc = {
.size = POOL_DESC_SIZE,
},
};
static const struct rpmem_msg_open OPEN_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_OPEN,
.size = sizeof(struct rpmem_msg_open),
},
.c = {
.major = RPMEM_PROTO_MAJOR,
.minor = RPMEM_PROTO_MINOR,
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.buff_size = BUFF_SIZE,
},
.pool_desc = {
.size = POOL_DESC_SIZE,
},
};
static const struct rpmem_msg_close CLOSE_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE,
.size = sizeof(struct rpmem_msg_close),
},
};
static const struct rpmem_msg_set_attr SET_ATTR_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_SET_ATTR,
.size = sizeof(struct rpmem_msg_set_attr),
},
.pool_attr = POOL_ATTR_ALT,
};
TEST_CASE_DECLARE(server_accept_sim);
TEST_CASE_DECLARE(server_accept_sim_fork);
TEST_CASE_DECLARE(client_accept_sim);
TEST_CASE_DECLARE(server_accept_seq);
TEST_CASE_DECLARE(server_accept_seq_fork);
TEST_CASE_DECLARE(client_accept_seq);
TEST_CASE_DECLARE(client_bad_msg_hdr);
TEST_CASE_DECLARE(server_bad_msg);
TEST_CASE_DECLARE(server_msg_noresp);
TEST_CASE_DECLARE(server_msg_resp);
TEST_CASE_DECLARE(client_econnreset);
TEST_CASE_DECLARE(server_econnreset);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
| 3,791 | 23 | 70 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ctl_prefault/ctl_prefault.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* ctl_prefault.c -- tests for the ctl entry points: prefault
*/
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "unittest.h"
#define OBJ_STR "obj"
#define BLK_STR "blk"
#define LOG_STR "log"
#define BSIZE 20
#define LAYOUT "obj_ctl_prefault"
#ifdef __FreeBSD__
typedef char vec_t;
#else
typedef unsigned char vec_t;
#endif
typedef int (*fun)(void *, const char *, void *);
/*
 * check_prefault_toggle -- verify the default, set and read back a single
 * prefault ctl query (factored out of prefault_fun to remove duplication)
 */
static void
check_prefault_toggle(fun get_func, fun set_func, const char *query)
{
	int ret;
	int arg;
	int arg_read;

	/* default value must be 0 (prefaulting disabled) */
	arg_read = -1;
	ret = get_func(NULL, query, &arg_read);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg_read, 0);

	/* enable prefaulting ... */
	arg = 1;
	ret = set_func(NULL, query, &arg);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg, 1);

	/* ... and read the new value back */
	arg_read = -1;
	ret = get_func(NULL, query, &arg_read);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg_read, 1);
}

/*
 * prefault_fun -- function ctl_get/set testing
 *
 * prefault == 1 exercises "prefault.at_open",
 * prefault == 2 exercises "prefault.at_create";
 * any other value is a no-op (as in the original implementation).
 */
static void
prefault_fun(int prefault, fun get_func, fun set_func)
{
	if (prefault == 1) /* prefault at open */
		check_prefault_toggle(get_func, set_func,
				"prefault.at_open");
	else if (prefault == 2) /* prefault at create */
		check_prefault_toggle(get_func, set_func,
				"prefault.at_create");
}
/*
 * count_resident_pages -- count how many pages of the mapping are resident
 * in physical memory, as reported by mincore(2)
 */
static size_t
count_resident_pages(void *pool, size_t length)
{
	size_t npages = (length + Ut_pagesize - 1) / Ut_pagesize;
	vec_t *residency = MALLOC(sizeof(*residency) * npages);

	int rv = mincore(pool, length, residency);
	UT_ASSERTeq(rv, 0);

	/* only the least significant bit of each entry is meaningful */
	size_t count = 0;
	size_t idx = npages;
	while (idx-- > 0)
		count += residency[idx] & 0x1;

	FREE(residency);
	return count;
}
/*
 * test_obj -- open/create PMEMobjpool and report its resident page count
 */
static void
test_obj(const char *path, int open)
{
	PMEMobjpool *pop;
	if (open) {
		if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
			UT_FATAL("!pmemobj_open: %s", path);
	} else {
		if ((pop = pmemobj_create(path, LAYOUT,
				PMEMOBJ_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemobj_create: %s", path);
	}
	size_t resident_pages = count_resident_pages(pop, PMEMOBJ_MIN_POOL);
	pmemobj_close(pop);
	/* "%zu" is the correct conversion for size_t ("%ld" was UB) */
	UT_OUT("%zu", resident_pages);
}
/*
 * test_blk -- open/create PMEMblkpool and report its resident page count
 */
static void
test_blk(const char *path, int open)
{
	PMEMblkpool *pbp;
	if (open) {
		if ((pbp = pmemblk_open(path, BSIZE)) == NULL)
			UT_FATAL("!pmemblk_open: %s", path);
	} else {
		if ((pbp = pmemblk_create(path, BSIZE, PMEMBLK_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemblk_create: %s", path);
	}
	size_t resident_pages = count_resident_pages(pbp, PMEMBLK_MIN_POOL);
	pmemblk_close(pbp);
	/* "%zu" is the correct conversion for size_t ("%ld" was UB) */
	UT_OUT("%zu", resident_pages);
}
/*
 * test_log -- open/create PMEMlogpool and report its resident page count
 */
static void
test_log(const char *path, int open)
{
	PMEMlogpool *plp;
	/*
	 * To test prefaulting, pool must have size at least equal to 2 pages.
	 * If 2MB huge pages are used this is at least 4MB.
	 */
	size_t pool_size = 2 * PMEMLOG_MIN_POOL;
	if (open) {
		if ((plp = pmemlog_open(path)) == NULL)
			UT_FATAL("!pmemlog_open: %s", path);
	} else {
		if ((plp = pmemlog_create(path, pool_size,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemlog_create: %s", path);
	}
	size_t resident_pages = count_resident_pages(plp, pool_size);
	pmemlog_close(plp);
	/* "%zu" is the correct conversion for size_t ("%ld" was UB) */
	UT_OUT("%zu", resident_pages);
}
#define USAGE() do {\
UT_FATAL("usage: %s file-name type(obj/blk/log) prefault(0/1/2) "\
"open(0/1)", argv[0]);\
} while (0)
int
main(int argc, char *argv[])
{
START(argc, argv, "ctl_prefault");
if (argc != 5)
USAGE();
char *type = argv[1];
const char *path = argv[2];
int prefault = atoi(argv[3]);
int open = atoi(argv[4]);
if (strcmp(type, OBJ_STR) == 0) {
prefault_fun(prefault, (fun)pmemobj_ctl_get,
(fun)pmemobj_ctl_set);
test_obj(path, open);
} else if (strcmp(type, BLK_STR) == 0) {
prefault_fun(prefault, (fun)pmemblk_ctl_get,
(fun)pmemblk_ctl_set);
test_blk(path, open);
} else if (strcmp(type, LOG_STR) == 0) {
prefault_fun(prefault, (fun)pmemlog_ctl_get,
(fun)pmemlog_ctl_set);
test_log(path, open);
} else
USAGE();
DONE(NULL);
}
| 4,326 | 20.527363 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/memcpy_common.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memcpy_common.c -- common part for tests doing a persistent memcpy
*/
#include "unittest.h"
#include "memcpy_common.h"
#include "valgrind_internal.h"
/*
* do_memcpy: Worker function for memcpy
*
* Always work within the boundary of bytes. Fill in 1/2 of the src
* memory with the pattern we want to write. This allows us to check
* that we did not overwrite anything we were not supposed to in the
* dest. Use the non pmem version of the memset/memcpy commands
* so as not to introduce any possible side affects.
*/
void
do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
unsigned flags, persist_fn persist)
{
void *ret;
char *buf = MALLOC(bytes);
memset(buf, 0, bytes);
memset(dest, 0, bytes);
persist(dest, bytes);
memset(src, 0, bytes);
persist(src, bytes);
memset(src, 0x5A, bytes / 4);
persist(src, bytes / 4);
memset(src + bytes / 4, 0x46, bytes / 4);
persist(src + bytes / 4, bytes / 4);
/* dest == src */
ret = fn(dest + dest_off, dest + dest_off, bytes / 2, flags);
UT_ASSERTeq(ret, dest + dest_off);
UT_ASSERTeq(*(char *)(dest + dest_off), 0);
/* len == 0 */
ret = fn(dest + dest_off, src, 0, flags);
UT_ASSERTeq(ret, dest + dest_off);
UT_ASSERTeq(*(char *)(dest + dest_off), 0);
ret = fn(dest + dest_off, src + src_off, bytes / 2, flags);
if (flags & PMEM2_F_MEM_NOFLUSH)
VALGRIND_DO_PERSIST((dest + dest_off), bytes / 2);
UT_ASSERTeq(ret, dest + dest_off);
/* memcmp will validate that what I expect in memory. */
if (memcmp(src + src_off, dest + dest_off, bytes / 2))
UT_FATAL("%s: first %zu bytes do not match",
file_name, bytes / 2);
/* Now validate the contents of the file */
LSEEK(fd, (os_off_t)(dest_off + (int)(mapped_len / 2)), SEEK_SET);
if (READ(fd, buf, bytes / 2) == bytes / 2) {
if (memcmp(src + src_off, buf, bytes / 2))
UT_FATAL("%s: first %zu bytes do not match",
file_name, bytes / 2);
}
FREE(buf);
}
/*
 * Flags -- all flag combinations exercised by the memcpy tests.
 * NOTE: the element count must stay in sync with the extern declaration
 * 'unsigned Flags[10]' in memcpy_common.h.
 */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 2,491 | 27.643678 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
from collections import namedtuple
import testframework as t
TC = namedtuple('TC', ['dest', 'src', 'length'])
class Pmem2Memcpy(t.Test):
test_type = t.Short
filesize = 4 * t.MiB
envs0 = ()
envs1 = ()
test_cases = (
# aligned everything
TC(dest=0, src=0, length=4096),
# unaligned dest
TC(dest=7, src=0, length=4096),
# unaligned dest, unaligned src
TC(dest=7, src=9, length=4096),
# aligned dest, unaligned src
TC(dest=0, src=9, length=4096)
)
def run(self, ctx):
for env in self.envs0:
ctx.env[env] = '0'
for env in self.envs1:
ctx.env[env] = '1'
if ctx.wc_workaround() == 'on':
ctx.env['PMEM_WC_WORKAROUND'] = '1'
elif ctx.wc_workaround() == 'off':
ctx.env['PMEM_WC_WORKAROUND'] = '0'
for tc in self.test_cases:
filepath = ctx.create_holey_file(self.filesize, 'testfile',)
ctx.exec('pmem2_memcpy', filepath,
tc.dest, tc.src, tc.length)
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST0(Pmem2Memcpy):
pass
@t.require_architectures('x86_64')
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST1(Pmem2Memcpy):
envs0 = ("PMEM_AVX512F",)
@t.require_architectures('x86_64')
@t.add_params('wc_workaround', ['on', 'off', 'default'])
class TEST2(Pmem2Memcpy):
envs0 = ("PMEM_AVX512F", "PMEM_AVX",)
@t.add_params('wc_workaround', ['default'])
class TEST3(Pmem2Memcpy):
envs1 = ("PMEM_NO_MOVNT",)
@t.add_params('wc_workaround', ['default'])
class TEST4(Pmem2Memcpy):
envs1 = ("PMEM_NO_MOVNT", "PMEM_NO_GENERIC_MEMCPY")
| 1,802 | 23.04 | 72 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/memcpy_common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memcpy_common.h -- header file for common memcpy utilities
*/
#ifndef MEMCPY_COMMON_H
#define MEMCPY_COMMON_H 1
#include "unittest.h"
#include "file.h"
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void (*persist_fn)(const void *ptr, size_t len);
extern unsigned Flags[10];
void do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
unsigned flags, persist_fn p);
#endif
| 611 | 23.48 | 73 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/pmem2_memcpy.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_memcpy.c -- test for doing a memcpy from libpmem2
*
* usage: pmem2_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "memcpy_common.h"
/*
 * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
 * of memcpy functions
 */
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
	size_t bytes, size_t mapped_len, const char *file_name,
	persist_fn p, memcpy_fn fn)
{
	size_t nflags = ARRAY_SIZE(Flags);

	/* run the same scenario once per flag combination */
	for (size_t idx = 0; idx < nflags; ++idx)
		do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
				file_name, fn, Flags[idx], p);
}
int
main(int argc, char *argv[])
{
int fd;
char *dest;
char *src;
char *src_orig;
size_t mapped_len;
struct pmem2_config *cfg;
struct pmem2_source *psrc;
struct pmem2_map *map;
if (argc != 5)
UT_FATAL("usage: %s file destoff srcoff length", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_memcpy %s %s %s %s %savx %savx512f",
argv[2], argv[3], argv[4], thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
util_init();
fd = OPEN(argv[1], O_RDWR);
UT_ASSERT(fd != -1);
int dest_off = atoi(argv[2]);
int src_off = atoi(argv[3]);
size_t bytes = strtoul(argv[4], NULL, 0);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&psrc, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, psrc, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
/* src > dst */
mapped_len = pmem2_map_get_size(map);
dest = pmem2_map_get_address(map);
if (dest == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
src_orig = src = dest + mapped_len / 2;
UT_ASSERT(src > dest);
pmem2_persist_fn persist = pmem2_get_persist_fn(map);
memset(dest, 0, (2 * bytes));
persist(dest, 2 * bytes);
memset(src, 0, (2 * bytes));
persist(src, 2 * bytes);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
0, argv[1], persist, memcpy_fn);
src = dest;
dest = src_orig;
if (dest <= src)
UT_FATAL("cannot map files in memory order");
do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, mapped_len,
argv[1], persist, memcpy_fn);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
DONE(NULL);
}
| 2,527 | 22.849057 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_check_remote/obj_check_remote.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_check_remote.c -- unit tests for pmemobj_check_remote
*/
#include <stddef.h>
#include "unittest.h"
#include "libpmemobj.h"
struct vector {
int x;
int y;
int z;
};
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_check_remote");
if (argc < 3)
UT_FATAL("insufficient number of arguments");
const char *path = argv[1];
const char *action = argv[2];
const char *layout = NULL;
PMEMobjpool *pop = NULL;
if (strcmp(action, "abort") == 0) {
pop = pmemobj_open(path, layout);
if (pop == NULL)
UT_FATAL("usage: %s filename abort|check", argv[0]);
PMEMoid root = pmemobj_root(pop, sizeof(struct vector));
struct vector *vectorp = pmemobj_direct(root);
TX_BEGIN(pop) {
pmemobj_tx_add_range(root, 0, sizeof(struct vector));
vectorp->x = 5;
vectorp->y = 10;
vectorp->z = 15;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
int *to_modify = &vectorp->x;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(to_modify, sizeof(int));
*to_modify = 30;
pmemobj_persist(pop, to_modify, sizeof(*to_modify));
abort();
} TX_END
} else if (strcmp(action, "check") == 0) {
int ret = pmemobj_check(path, layout);
if (ret == 1)
return 0;
else
return ret;
} else {
UT_FATAL("%s is not a valid action", action);
}
return 0;
}
| 1,372 | 19.191176 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_check_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
#
# obj_check_remote/config.sh -- test configuration
#
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=medium
CONF_GLOBAL_RPMEM_PROVIDER=sockets
CONF_GLOBAL_RPMEM_PMETHOD=all
| 313 | 19.933333 | 50 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_is_zeroed/util_is_zeroed.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* util_is_zeroed.c -- unit test for util_is_zeroed
*/
#include "unittest.h"
#include "util.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "util_is_zeroed");
util_init();
char bigbuf[3000];
memset(bigbuf + 0, 0x11, 1000);
memset(bigbuf + 1000, 0x0, 1000);
memset(bigbuf + 2000, 0xff, 1000);
UT_ASSERTeq(util_is_zeroed(bigbuf, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1000), 1);
UT_ASSERTeq(util_is_zeroed(bigbuf + 2000, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf, 0), 1);
UT_ASSERTeq(util_is_zeroed(bigbuf + 999, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1001), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1001, 1000), 0);
char *buf = bigbuf + 1000;
buf[0] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[1] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[239] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[999] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[1000] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 1);
DONE(NULL);
}
| 1,196 | 20.763636 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_rm_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# pmempool_rm_remote/config.sh -- test configuration
#
set -e
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=sockets
CONF_GLOBAL_RPMEM_PMETHOD=GPSPM
| 295 | 18.733333 | 52 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_rpmem_basic_integration/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2017, Intel Corporation
#
#
# obj_rpmem_basic_integration/config.sh -- test configuration
#
CONF_GLOBAL_FS_TYPE=pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=medium
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
CONF_RPMEM_PROVIDER[9]=verbs
CONF_RPMEM_PROVIDER[10]=verbs
CONF_RPMEM_PROVIDER[11]=verbs
CONF_RPMEM_PROVIDER[13]=verbs
CONF_RPMEM_PROVIDER[14]=verbs
CONF_RPMEM_PROVIDER[15]=verbs
CONF_RPMEM_PROVIDER[16]=verbs
CONF_RPMEM_PROVIDER[17]=verbs
CONF_RPMEM_PROVIDER[18]=verbs
CONF_RPMEM_VALGRIND[9]=y
CONF_RPMEM_VALGRIND[10]=y
CONF_RPMEM_VALGRIND[11]=y
CONF_RPMEM_VALGRIND[13]=y
CONF_RPMEM_VALGRIND[14]=y
CONF_RPMEM_VALGRIND[15]=y
CONF_RPMEM_VALGRIND[16]=y
CONF_RPMEM_VALGRIND[17]=y
CONF_RPMEM_VALGRIND[18]=y
| 830 | 22.742857 | 61 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/mocks_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Fails with ENOSPC for requests larger than MAX_LEN to simulate an
 * out-of-space condition; otherwise forwards to the real function.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Fails with ENOSPC for requests larger than MAX_LEN to simulate an
 * out-of-space condition; otherwise forwards to the real function.
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/mocks_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used in pmem_map_file.c (Posix-specific)
*/
#define _GNU_SOURCE
#include "unittest.h"
#include <dlfcn.h>
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Fails with ENOSPC for requests larger than MAX_LEN to simulate an
 * out-of-space condition; otherwise forwards to the real libc symbol
 * resolved lazily via dlsym(RTLD_NEXT, ...).
 */
int
posix_fallocate(int fd, os_off_t offset, off_t len)
{
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	static int (*posix_fallocate_ptr)(int fd, os_off_t offset, off_t len);
	if (posix_fallocate_ptr == NULL)
		posix_fallocate_ptr = dlsym(RTLD_NEXT, "posix_fallocate");
	if (len > MAX_LEN)
		return ENOSPC;
	return (*posix_fallocate_ptr)(fd, offset, len);
}
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Fails with ENOSPC for requests larger than MAX_LEN to simulate an
 * out-of-space condition; otherwise forwards to the real libc symbol
 * resolved lazily via dlsym(RTLD_NEXT, ...).
 */
int
ftruncate(int fd, os_off_t len)
{
	UT_OUT("ftruncate: len %ju", len);
	static int (*ftruncate_ptr)(int fd, os_off_t len);
	if (ftruncate_ptr == NULL)
		ftruncate_ptr = dlsym(RTLD_NEXT, "ftruncate");
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return (*ftruncate_ptr)(fd, len);
}
| 1,064 | 19.09434 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/pmem_map_file.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pmem_map_file.c -- unit test for mapping persistent memory for raw access
*
* usage: pmem_map_file file
*/
#define _GNU_SOURCE
#include "unittest.h"
#include <stdlib.h>
#define CHECK_BYTES 4096 /* bytes to compare before/after map call */
static ut_jmp_buf_t Jmp;
/*
 * signal_handler -- called on SIGSEGV; jumps back to the context saved in
 * Jmp so the test can verify that a store to unmapped memory faulted
 */
static void
signal_handler(int sig)
{
	ut_siglongjmp(Jmp);
}
#define PMEM_FILE_ALL_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
static int is_dev_dax = 0;
/*
 * parse_err_code -- parse 'err_code' string
 *
 * Maps the symbolic errno name used on the command line to its numeric
 * value; any unrecognized string is a fatal test error.
 */
static int
parse_err_code(const char *err_str)
{
	static const struct {
		const char *name;
		int code;
	} codes[] = {
		{ "ENOENT", ENOENT },
		{ "EEXIST", EEXIST },
		{ "0", 0 },
	};

	for (size_t i = 0; i < sizeof(codes) / sizeof(codes[0]); ++i) {
		if (strcmp(err_str, codes[i].name) == 0)
			return codes[i].code;
	}

	UT_FATAL("unknown err_code: %c", *err_str);
	return 0;
}
/*
 * parse_flags -- parse 'flags' string
 *
 * Translates each character into the corresponding PMEM_FILE_* flag;
 * 'D' is not a flag -- it marks the path as Device DAX via the
 * is_dev_dax global.
 */
static int
parse_flags(const char *flags_str)
{
	int flags = 0;

	for (const char *c = flags_str; *c != '\0'; ++c) {
		switch (*c) {
		case '0':
		case '-':
			/* no flags */
			break;
		case 'T':
			flags |= PMEM_FILE_TMPFILE;
			break;
		case 'S':
			flags |= PMEM_FILE_SPARSE;
			break;
		case 'C':
			flags |= PMEM_FILE_CREATE;
			break;
		case 'E':
			flags |= PMEM_FILE_EXCL;
			break;
		case 'X':
			/* not supported flag */
			flags |= (PMEM_FILE_ALL_FLAGS + 1);
			break;
		case 'D':
			is_dev_dax = 1;
			break;
		default:
			UT_FATAL("unknown flags: %c", *c);
		}
	}

	return flags;
}
/*
* do_check -- check the mapping
*/
static void
do_check(int fd, void *addr, size_t mlen)
{
/* arrange to catch SEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
char pat[CHECK_BYTES];
char buf[CHECK_BYTES];
/* write some pattern to the file */
memset(pat, 0x5A, CHECK_BYTES);
WRITE(fd, pat, CHECK_BYTES);
if (memcmp(pat, addr, CHECK_BYTES))
UT_OUT("first %d bytes do not match", CHECK_BYTES);
/* fill up mapped region with new pattern */
memset(pat, 0xA5, CHECK_BYTES);
memcpy(addr, pat, CHECK_BYTES);
UT_ASSERTeq(pmem_msync(addr, CHECK_BYTES), 0);
UT_ASSERTeq(pmem_unmap(addr, mlen), 0);
if (!ut_sigsetjmp(Jmp)) {
/* same memcpy from above should now fail */
memcpy(addr, pat, CHECK_BYTES);
} else {
UT_OUT("unmap successful");
}
LSEEK(fd, (os_off_t)0, SEEK_SET);
if (READ(fd, buf, CHECK_BYTES) == CHECK_BYTES) {
if (memcmp(pat, buf, CHECK_BYTES))
UT_OUT("first %d bytes do not match", CHECK_BYTES);
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem_map_file");
int fd;
void *addr;
size_t mlen;
size_t *mlenp;
const char *path;
unsigned long long len;
int flags;
unsigned mode;
int is_pmem;
int *is_pmemp;
int use_mlen;
int use_is_pmem;
int err_code;
if (argc < 8)
UT_FATAL("usage: %s path len flags mode use_mlen "
"use_is_pmem err_code...", argv[0]);
for (int i = 1; i + 6 < argc; i += 7) {
path = argv[i];
len = strtoull(argv[i + 1], NULL, 0);
flags = parse_flags(argv[i + 2]);
mode = STRTOU(argv[i + 3], NULL, 8);
use_mlen = atoi(argv[i + 4]);
use_is_pmem = atoi(argv[i + 5]);
err_code = parse_err_code(argv[i + 6]);
mlen = SIZE_MAX;
if (use_mlen)
mlenp = &mlen;
else
mlenp = NULL;
if (use_is_pmem)
is_pmemp = &is_pmem;
else
is_pmemp = NULL;
UT_OUT("%s %lld %s %o %d %d %d",
path, len, argv[i + 2], mode, use_mlen,
use_is_pmem, err_code);
addr = pmem_map_file(path, len, flags, mode, mlenp, is_pmemp);
if (err_code != 0) {
UT_ASSERTeq(errno, err_code);
}
if (addr == NULL) {
UT_OUT("!pmem_map_file");
continue;
}
if (use_mlen) {
UT_ASSERTne(mlen, SIZE_MAX);
UT_OUT("mapped_len %zu", mlen);
} else {
mlen = len;
}
if (addr) {
/* is_pmem must be true for device DAX */
int is_pmem_check = pmem_is_pmem(addr, mlen);
UT_ASSERT(!is_dev_dax || is_pmem_check);
/* check is_pmem returned from pmem_map_file */
if (use_is_pmem)
UT_ASSERTeq(is_pmem, is_pmem_check);
if ((flags & PMEM_FILE_TMPFILE) == 0 && !is_dev_dax) {
fd = OPEN(argv[i], O_RDWR);
if (!use_mlen) {
os_stat_t stbuf;
FSTAT(fd, &stbuf);
mlen = (size_t)stbuf.st_size;
}
if (fd != -1) {
do_check(fd, addr, mlen);
(void) CLOSE(fd);
} else {
UT_OUT("!cannot open file: %s",
argv[i]);
}
} else {
UT_ASSERTeq(pmem_unmap(addr, mlen), 0);
}
}
}
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmem is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmem_init)
MSVC_DESTR(libpmem_fini)
#endif
| 4,720 | 18.427984 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_callbacks/obj_tx_callbacks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* obj_tx_callbacks.c -- unit test for transaction stage callbacks
*/
#include "unittest.h"
#define LAYOUT_NAME "tx_callback"
POBJ_LAYOUT_BEGIN(tx_callback);
POBJ_LAYOUT_ROOT(tx_callback, struct pmem_root);
POBJ_LAYOUT_TOID(tx_callback, struct pmem_obj);
POBJ_LAYOUT_END(tx_callback);
/* volatile (malloc'd) state attached to a persistent object */
struct runtime_info {
	int something;
};

/* persistent object holding a pointer to its volatile counterpart */
struct pmem_obj {
	struct runtime_info *rt;
	int pmem_info;
};

/* pool root: a single link to the test object */
struct pmem_root {
	TOID(struct pmem_obj) obj;
};

/* passed to the stage callbacks; tells them what to free() */
struct free_info {
	void *to_free;
};

/* counts free() calls made by the callbacks -- checked after each tx */
static int freed = 0;

/* human-readable names for enum pobj_tx_stage values */
static const char *desc[] = {
	"TX_STAGE_NONE",
	"TX_STAGE_WORK",
	"TX_STAGE_ONCOMMIT",
	"TX_STAGE_ONABORT",
	"TX_STAGE_FINALLY",
	"WTF?"
};
/*
 * free_onabort -- tx stage callback; logs every stage transition and,
 * on TX_STAGE_ONABORT only, releases the volatile memory recorded
 * in arg (a struct free_info) and bumps the 'freed' counter.
 */
static void
free_onabort(PMEMobjpool *pop, enum pobj_tx_stage stage, void *arg)
{
	UT_OUT("cb stage: %s", desc[stage]);

	if (stage != TX_STAGE_ONABORT)
		return;

	struct free_info *info = (struct free_info *)arg;
	UT_OUT("rt_onabort: free");
	free(info->to_free);
	freed++;
}
/*
 * allocate_pmem -- (internal) transactionally allocates a pmem_obj,
 * attaches a malloc'd runtime_info to it and links it under the root.
 * The volatile pointer is recorded in f so a stage callback can free
 * it if the transaction aborts.  Must run inside an active tx.
 */
static void
allocate_pmem(struct free_info *f, TOID(struct pmem_root) root, int val)
{
	TOID(struct pmem_obj) obj = TX_NEW(struct pmem_obj);
	D_RW(obj)->pmem_info = val;
	D_RW(obj)->rt =
		(struct runtime_info *)malloc(sizeof(struct runtime_info));
	f->to_free = D_RW(obj)->rt; /* remembered for the abort callback */
	D_RW(obj)->rt->something = val;

	TX_ADD_FIELD(root, obj); /* snapshot root->obj before writing it */
	D_RW(root)->obj = obj;
}
/*
 * do_something_fishy -- (internal) requests an absurdly large (1GB)
 * allocation from a minimally-sized pool; this is expected to fail and
 * abort the enclosing transaction, driving the ONABORT paths.
 */
static void
do_something_fishy(TOID(struct pmem_root) root)
{
	TX_ADD_FIELD(root, obj);
	D_RW(root)->obj = TX_ALLOC(struct pmem_obj, 1 << 30);
}
/*
 * free_oncommit -- tx stage callback; logs every stage transition and,
 * once the transaction reaches TX_STAGE_ONCOMMIT, releases the
 * volatile memory recorded in arg and bumps the 'freed' counter.
 */
static void
free_oncommit(PMEMobjpool *pop, enum pobj_tx_stage stage, void *arg)
{
	UT_OUT("cb stage: %s", desc[stage]);

	if (stage != TX_STAGE_ONCOMMIT)
		return;

	struct free_info *info = (struct free_info *)arg;
	UT_OUT("rt_oncommit: free");
	free(info->to_free);
	freed++;
}
/*
 * free_pmem -- (internal) transactionally frees the object hanging off
 * the root; the associated runtime_info pointer is stashed in f so the
 * commit callback can free() it only once the tx is durable.
 */
static void
free_pmem(struct free_info *f, TOID(struct pmem_root) root)
{
	TOID(struct pmem_obj) obj = D_RW(root)->obj;
	f->to_free = D_RW(obj)->rt;
	TX_FREE(obj);
	TX_SET(root, obj, TOID_NULL(struct pmem_obj));
}
/*
 * log_stages -- (internal) tx stage callback that only prints the name
 * of each stage it is invoked for
 */
static void
log_stages(PMEMobjpool *pop, enum pobj_tx_stage stage, void *arg)
{
	UT_OUT("cb stage: %s", desc[stage]);
}
/*
 * test -- (internal) runs seven transaction scenarios exercising
 * per-transaction stage callbacks (TX_BEGIN_CB and TX_PARAM_CB)
 */
static void
test(PMEMobjpool *pop, TOID(struct pmem_root) root)
{
	struct free_info *volatile f = (struct free_info *)ZALLOC(sizeof(*f));

	/*
	 * tx 1: allocation followed by a deliberately failing (too big)
	 * allocation -- the tx aborts and the ONABORT callback must free
	 * the volatile data exactly once.
	 */
	TX_BEGIN_CB(pop, free_onabort, f) {
		allocate_pmem(f, root, 7);
		do_something_fishy(root);
		UT_ASSERT(0); /* unreachable: the fishy alloc aborts the tx */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_OUT("on abort 1");
	} TX_FINALLY {
		UT_OUT("finally 1");
	} TX_END

	UT_OUT("end of tx 1\n");
	memset(f, 0, sizeof(*f));
	UT_ASSERTeq(freed, 1);
	freed = 0;

	/* tx 2: successful allocation -- the ONABORT callback must not free */
	TX_BEGIN_CB(pop, free_onabort, f) {
		allocate_pmem(f, root, 7);
	} TX_ONCOMMIT {
		UT_OUT("on commit 2");
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_FINALLY {
		UT_OUT("finally 2");
	} TX_END

	UT_OUT("end of tx 2\n");
	memset(f, 0, sizeof(*f));
	UT_ASSERTeq(freed, 0);

	/* tx 3: aborted free -- the ONCOMMIT callback must not free */
	TX_BEGIN_CB(pop, free_oncommit, f) {
		free_pmem(f, root);
		do_something_fishy(root);
		UT_ASSERT(0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_OUT("on abort 3");
	} TX_FINALLY {
		UT_OUT("finally 3");
	} TX_END

	UT_OUT("end of tx 3\n");
	memset(f, 0, sizeof(*f));
	UT_ASSERTeq(freed, 0);

	/* tx 4: committed free -- volatile data freed exactly once */
	TX_BEGIN_CB(pop, free_oncommit, f) {
		free_pmem(f, root);
	} TX_ONCOMMIT {
		UT_OUT("on commit 4");
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_FINALLY {
		UT_OUT("finally 4");
	} TX_END

	UT_OUT("end of tx 4\n");
	memset(f, 0, sizeof(*f));
	UT_ASSERTeq(freed, 1);
	freed = 0;

	/* tx 5: callback registered on the outer tx, plain tx nested inside */
	TX_BEGIN_CB(pop, log_stages, NULL) {
		TX_BEGIN(pop) {
			UT_OUT("inner tx work 5");
		} TX_ONCOMMIT {
			UT_OUT("inner tx on commit 5");
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_FINALLY {
			UT_OUT("inner tx finally 5");
		} TX_END
	} TX_ONCOMMIT {
		UT_OUT("on commit 5");
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_FINALLY {
		UT_OUT("finally 5");
	} TX_END

	UT_OUT("end of tx 5\n");

	/* tx 6: callback registered by the inner (nested) tx */
	TX_BEGIN(pop) {
		TX_BEGIN_CB(pop, log_stages, NULL) {
			UT_OUT("inner tx work 6");
		} TX_ONCOMMIT {
			UT_OUT("inner tx on commit 6");
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_FINALLY {
			UT_OUT("inner tx finally 6");
		} TX_END
	} TX_ONCOMMIT {
		UT_OUT("on commit 6");
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_FINALLY {
		UT_OUT("finally 6");
	} TX_END

	UT_OUT("end of tx 6\n");

	/* tx 7: same callback mechanism via the raw pmemobj_tx_* API */
	UT_OUT("start of tx 7");
	if (pmemobj_tx_begin(pop, NULL, TX_PARAM_CB, log_stages, NULL,
			TX_PARAM_NONE))
		UT_FATAL("!pmemobj_tx_begin");
	UT_OUT("work");
	pmemobj_tx_commit();
	UT_OUT("on commit");
	if (pmemobj_tx_end())
		UT_FATAL("!pmemobj_tx_end");
	UT_OUT("end of tx 7\n");

	FREE(f);
}
/*
 * main -- creates a fresh pool, runs all callback scenarios, closes it
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_callbacks");

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME,
		PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (!pop)
		UT_FATAL("!pmemobj_create");

	TOID(struct pmem_root) root = POBJ_ROOT(pop, struct pmem_root);

	test(pop, root);

	pmemobj_close(pop);

	DONE(NULL);
}
| 4,900 | 19.168724 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_cpuid/util_cpuid.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* util_cpuid.c -- unit test for CPU features detection
*/
#define _GNU_SOURCE
#include <emmintrin.h>
#include "unittest.h"
#include "cpu.h"
#ifndef _MSC_VER
/*
 * The x86 memory instructions are new enough that the compiler
 * intrinsic functions are not always available.  The intrinsic
 * functions are defined here in terms of asm statements for now.
 *
 * NOTE(review): the second macro emits a 66-prefixed xsaveopt byte
 * sequence -- presumably the intended CLWB encoding; confirm against
 * the Intel SDM instruction tables.
 */
#define _mm_clflushopt(addr)\
	asm volatile(".byte 0x66; clflush %0" :\
	"+m" (*(volatile char *)(addr)));
#define _mm_clwb(addr)\
	asm volatile(".byte 0x66; xsaveopt %0" :\
	"+m" (*(volatile char *)(addr)));
#endif

/* scratch cache line the flush instructions operate on */
static char Buf[32];
/*
 * check_cpu_features -- report which cache-flush instructions the CPU
 * advertises, and execute each supported one once on a local buffer
 */
static void
check_cpu_features(void)
{
	if (!is_cpu_clflush_present()) {
		UT_OUT("CLFLUSH not supported");
	} else {
		UT_OUT("CLFLUSH supported");
		_mm_clflush(Buf);
	}

	if (!is_cpu_clflushopt_present()) {
		UT_OUT("CLFLUSHOPT not supported");
	} else {
		UT_OUT("CLFLUSHOPT supported");
		_mm_clflushopt(Buf);
	}

	if (!is_cpu_clwb_present()) {
		UT_OUT("CLWB not supported");
	} else {
		UT_OUT("CLWB supported");
		_mm_clwb(Buf);
	}
}
/*
 * main -- runs the CPU feature detection checks
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_cpuid");

	check_cpu_features();

	DONE(NULL);
}
| 1,306 | 18.507463 | 65 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_heap/obj_heap.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_heap.c -- unit test for heap
*
* operations are: 't', 'b', 'r', 'c', 'h', 'a', 'n', 's'
* t: do test_heap, test_recycler
* b: do fault_injection in function container_new_ravl
* r: do fault_injection in function recycler_new
* c: do fault_injection in function container_new_seglists
* h: do fault_injection in function heap_boot
* a: do fault_injection in function alloc_class_new
* n: do fault_injection in function alloc_class_collection_new
* s: do fault_injection in function stats_new
*/
#include "libpmemobj.h"
#include "palloc.h"
#include "heap.h"
#include "recycler.h"
#include "obj.h"
#include "unittest.h"
#include "util.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "container.h"
#include "alloc_class.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE PMEMOBJ_MIN_POOL
#define MAX_BLOCKS 3

/* minimal in-memory pool image: real pop header followed by heap space */
struct mock_pop {
	PMEMobjpool p;
	void *heap;
};
/*
 * obj_heap_persist -- pmem_ops persist hook for the mock pool,
 * implemented via msync
 */
static int
obj_heap_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	UT_ASSERTeq(pmem_msync(ptr, sz), 0);

	return 0;
}
/*
 * obj_heap_flush -- pmem_ops flush hook for the mock pool,
 * implemented via msync
 */
static int
obj_heap_flush(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	UT_ASSERTeq(pmem_msync(ptr, sz), 0);

	return 0;
}
/*
 * obj_heap_drain -- pmem_ops drain hook; a no-op here since the flush
 * hook already syncs to the backing mapping
 */
static void
obj_heap_drain(void *ctx)
{
}
/*
 * obj_heap_memset -- pmem_ops memset hook; plain memset followed by
 * msync of the affected range
 */
static void *
obj_heap_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	memset(ptr, c, sz);
	UT_ASSERTeq(pmem_msync(ptr, sz), 0);
	return ptr;
}
/*
 * init_run_with_score -- (internal) formats the given chunk as a run of
 * 1024-byte blocks with all blocks marked as used (set bits), then
 * clears the first (score / 64 + 1) bitmap words -- so a higher score
 * leaves more free (zero-bit) blocks in the run.  score must be a
 * multiple of 64.
 */
static void
init_run_with_score(struct heap_layout *l, uint32_t chunk_id, int score)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;

	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));

	run->hdr.alignment = 0;
	run->hdr.block_size = 1024;
	/* all bits set: every block in the run is allocated */
	memset(run->content, 0xFF, RUN_DEFAULT_BITMAP_SIZE);

	UT_ASSERTeq(score % 64, 0);
	score /= 64;

	uint64_t *bitmap = (uint64_t *)run->content;
	/* clear whole 64-block words from index score down to 0 */
	for (; score >= 0; --score) {
		bitmap[score] = 0;
	}
}
/*
 * init_run_with_max_block -- (internal) formats the chunk as a run in
 * which the longest stretch of free (zero) bits is 10, so the largest
 * block that can be carved out of it is 10 units
 */
static void
init_run_with_max_block(struct heap_layout *l, uint32_t chunk_id)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;

	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));

	uint64_t *bitmap = (uint64_t *)run->content;
	run->hdr.block_size = 1024;
	run->hdr.alignment = 0;
	memset(bitmap, 0xFF, RUN_DEFAULT_BITMAP_SIZE);

	/* the biggest block is 10 bits */
	bitmap[3] =
	0b1000001110111000111111110000111111000000000011111111110000000011;
}
/*
 * test_container -- (internal) exercises a block container
 * implementation: insert, best-fit removal, rm_all and is_empty.
 * Takes ownership of bc and destroys it at the end.
 */
static void
test_container(struct block_container *bc, struct palloc_heap *heap)
{
	UT_ASSERTne(bc, NULL);

	/* blocks with size_idx 1, 2, 3, 5 at distinct offsets in chunk 1 */
	struct memory_block a = {1, 0, 1, 4};
	struct memory_block b = {1, 0, 2, 8};
	struct memory_block c = {1, 0, 3, 16};
	struct memory_block d = {1, 0, 5, 32};

	init_run_with_score(heap->layout, 1, 128);
	memblock_rebuild_state(heap, &a);
	memblock_rebuild_state(heap, &b);
	memblock_rebuild_state(heap, &c);
	memblock_rebuild_state(heap, &d);

	int ret;
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);

	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);

	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);

	ret = bc->c_ops->insert(bc, &d);
	UT_ASSERTeq(ret, 0);

	/* a request larger than any stored block must fail */
	struct memory_block invalid_ret = {0, 0, 6, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &invalid_ret);
	UT_ASSERTeq(ret, ENOMEM);

	/* exact-size matches remove the corresponding blocks */
	struct memory_block b_ret = {0, 0, 2, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &b_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(b_ret.chunk_id, b.chunk_id);

	struct memory_block a_ret = {0, 0, 1, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &a_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(a_ret.chunk_id, a.chunk_id);

	struct memory_block c_ret = {0, 0, 3, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(c_ret.chunk_id, c.chunk_id);

	struct memory_block d_ret = {0, 0, 4, 0}; /* less one than target */
	ret = bc->c_ops->get_rm_bestfit(bc, &d_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(d_ret.chunk_id, d.chunk_id);

	/* the container is now empty */
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);

	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);

	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);

	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);

	/* rm_all must leave the container empty */
	bc->c_ops->rm_all(bc);
	ret = bc->c_ops->is_empty(bc);
	UT_ASSERTeq(ret, 1);

	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);

	bc->c_ops->destroy(bc);
}
/*
 * do_fault_injection_new_ravl -- injects an allocation failure into
 * container_new_ravl() and expects NULL with errno == ENOMEM;
 * a no-op unless the library was built with fault injection
 */
static void
do_fault_injection_new_ravl()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_ravl");

	struct block_container *bc = container_new_ravl(NULL);
	UT_ASSERTeq(bc, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_new_seglists -- injects an allocation failure into
 * container_new_seglists() and expects NULL with errno == ENOMEM
 */
static void
do_fault_injection_new_seglists()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_seglists");

	struct block_container *bc = container_new_seglists(NULL);
	UT_ASSERTeq(bc, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_heap_boot -- injects an allocation failure into
 * heap_boot() and expects a non-zero result with errno == ENOMEM
 */
static void
do_fault_injection_heap_boot()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	pop->p_ops.persist = obj_heap_persist;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct pmem_ops *p_ops = &pop->p_ops;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "heap_boot");
	int r = heap_boot(NULL, NULL, heap_size, &pop->heap_size, NULL, p_ops,
		NULL, NULL);
	UT_ASSERTne(r, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_recycler -- injects an allocation failure into
 * recycler_new() and expects NULL with errno == ENOMEM
 */
static void
do_fault_injection_recycler()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "recycler_new");

	size_t active_arenas = 1;
	struct recycler *r = recycler_new(NULL, 0, &active_arenas);
	UT_ASSERTeq(r, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_new -- makes the i-th allocation inside
 * alloc_class_new() fail; the whole collection construction must then
 * fail with errno == ENOMEM
 */
static void
do_fault_injection_class_new(int i)
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, i, "alloc_class_new");

	struct alloc_class_collection *c = alloc_class_collection_new();
	UT_ASSERTeq(c, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_collection_new -- injects an allocation
 * failure into alloc_class_collection_new() itself and expects NULL
 * with errno == ENOMEM
 */
static void
do_fault_injection_class_collection_new()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "alloc_class_collection_new");

	struct alloc_class_collection *c = alloc_class_collection_new();
	UT_ASSERTeq(c, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_stats -- injects an allocation failure into
 * stats_new() and expects NULL with errno == ENOMEM
 */
static void
do_fault_injection_stats()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "stats_new");
	struct stats *s = stats_new(NULL);
	UT_ASSERTeq(s, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_heap -- (internal) boots a heap inside an anonymous mapping,
 * exercises both container implementations, allocation classes and
 * run exhaustion, then verifies the heap is still consistent
 */
static void
test_heap(void)
{
	/* build a mock pool: real pop header + raw heap space */
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	/* uninitialized memory must not pass the heap check */
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	test_container((struct block_container *)container_new_ravl(heap),
		heap);
	test_container((struct block_container *)container_new_seglists(heap),
		heap);

	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);

	UT_ASSERT(c_small->unit_size < c_big->unit_size);

	/* new small buckets should be empty */
	UT_ASSERT(c_big->type == CLASS_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);

	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run->id,
		HEAP_ARENA_PER_THREAD);

	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);

	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
	} while (old_run.block_off != new_run.block_off);
	heap_bucket_release(heap, b_run);

	stats_delete(pop, s);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * test_heap_with_size -- tests scenarios with not-nicely aligned sizes;
 * drains the heap completely and verifies the very last chunk is still
 * fully inside the mapping and writable
 */
static void
test_heap_with_size()
{
	/*
	 * To trigger bug with incorrect metadata alignment we need to
	 * use a size that uses exactly the size used in bugged zone size
	 * calculations.
	 */
	size_t size = PMEMOBJ_MIN_POOL + sizeof(struct zone_header) +
		sizeof(struct chunk_header) * MAX_CHUNK +
		sizeof(PMEMobjpool);

	struct mock_pop *mpop = MMAP_ANON_ALIGNED(size,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, size);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = size - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, NULL, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);

	/* allocate until the heap is fully exhausted */
	struct memory_block mb;
	mb.size_idx = 1;
	while (heap_get_bestfit_block(heap, b_def, &mb) == 0)
		;

	/* mb should now be the last chunk in the heap */
	char *ptr = mb.m_ops->get_real_data(&mb);
	size_t s = mb.m_ops->get_real_size(&mb);

	/* last chunk should be within the heap and accessible */
	UT_ASSERT((size_t)ptr + s <= (size_t)mpop + size);

	VALGRIND_DO_MAKE_MEM_DEFINED(ptr, s);
	memset(ptr, 0xc, s);

	heap_bucket_release(heap, b_def);

	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, size);
}
/*
 * test_recycler -- (internal) exercises the run recycler: runs with
 * various fill "scores" are put in and retrieved, and a run whose
 * largest free extent is 10 units must reject an 11-unit request
 */
static void
test_recycler(void)
{
	/* build a mock pool, same setup as test_heap() */
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID,
		HEAP_ARENA_PER_THREAD);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);

	int ret;

	size_t active_arenas = 1;
	struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */,
		&active_arenas);
	UT_ASSERTne(r, NULL);

	/* two runs with different amounts of free blocks */
	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);

	init_run_with_score(pop->heap.layout, 15, 0);

	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};

	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);

	ret = recycler_put(r, &mrun,
		recycler_element_new(&pop->heap, &mrun));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_element_new(&pop->heap, &mrun2));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;

	/* both stored runs must come back out */
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);

	/* four runs with increasing free-block counts */
	init_run_with_score(pop->heap.layout, 7, 64);
	init_run_with_score(pop->heap.layout, 2, 128);
	init_run_with_score(pop->heap.layout, 5, 192);
	init_run_with_score(pop->heap.layout, 10, 256);

	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);

	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;

	ret = recycler_put(r, &mrun,
		recycler_element_new(&pop->heap, &mrun));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_element_new(&pop->heap, &mrun2));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_element_new(&pop->heap, &mrun3));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_element_new(&pop->heap, &mrun4));
	UT_ASSERTeq(ret, 0);

	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);

	/* this run's biggest free extent is 10 units */
	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);

	ret = recycler_put(r, &mrun5,
		recycler_element_new(&pop->heap, &mrun5));
	UT_ASSERTeq(ret, 0);

	/* asking for 11 units must fail... */
	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);

	/* ...but 10 units must succeed */
	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);

	recycler_delete(r);

	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * main -- dispatches on the operation letter (documented at the top of
 * this file): 't' runs the functional tests, all other letters run a
 * specific fault-injection scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_heap");

	if (argc < 2)
		UT_FATAL("usage: %s path <t|b|r|c|h|a|n|s>", argv[0]);

	switch (argv[1][0]) {
	case 't':
		test_heap();
		test_heap_with_size();
		test_recycler();
		break;
	case 'b':
		do_fault_injection_new_ravl();
		break;
	case 'r':
		do_fault_injection_recycler();
		break;
	case 'c':
		do_fault_injection_new_seglists();
		break;
	case 'h':
		do_fault_injection_heap_boot();
		break;
	case 'a':
		/* first call alloc_class_new */
		do_fault_injection_class_new(1);
		/* second call alloc_class_new */
		do_fault_injection_class_new(2);
		break;
	case 'n':
		do_fault_injection_class_collection_new();
		break;
	case 's':
		do_fault_injection_stats();
		break;
	default:
		UT_FATAL("unknown operation");
	}

	DONE(NULL);
}
| 16,917 | 25.027692 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmreorder_stack/pmreorder_stack.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* pmreorder_stack.c -- unit test for engines pmreorder stack
*
* usage: pmreorder_stack w|c file
* w - write data in a possibly inconsistent manner
* c - check data consistency
*
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
/*
 * Consistent only if field 'e' is set and field 'f' is not.
 *
 * The 12 ints are written in three groups of four by write_fields();
 * 'e' and 'f' belong to the same group, so any reordering that
 * persists 'e' without 'f' is an inconsistency pmreorder must catch.
 */
struct fields {
	int a;
	int b;
	int c;
	int d;

	int e;
	int f;
	int g;
	int h;

	int i;
	int j;
	int k;
	int l;
};
/*
 * write_fields -- (internal) write data in a consistent manner.
 *
 * The VALGRIND_EMIT_LOG markers delimit two nested sections ("packs")
 * of the store log that pmreorder later replays; each group of four
 * ints is made durable with a single pmem_persist().
 */
static void
write_fields(struct fields *fieldsp)
{
	VALGRIND_EMIT_LOG("FIELDS_PACK_TWO.BEGIN");

	VALGRIND_EMIT_LOG("FIELDS_PACK_ONE.BEGIN");
	fieldsp->a = 1;
	fieldsp->b = 1;
	fieldsp->c = 1;
	fieldsp->d = 1;
	pmem_persist(&fieldsp->a, sizeof(int) * 4);
	VALGRIND_EMIT_LOG("FIELDS_PACK_ONE.END");

	fieldsp->e = 1;
	fieldsp->f = 1;
	fieldsp->g = 1;
	fieldsp->h = 1;
	pmem_persist(&fieldsp->e, sizeof(int) * 4);
	VALGRIND_EMIT_LOG("FIELDS_PACK_TWO.END");

	fieldsp->i = 1;
	fieldsp->j = 1;
	fieldsp->k = 1;
	fieldsp->l = 1;
	pmem_persist(&fieldsp->i, sizeof(int) * 4);
}
/*
 * check_consistency -- (internal) check struct fields consistency.
 *
 * Returns 0 (consistent, per the file-top comment) exactly when 'e'
 * is set and 'f' is not; returns 1 for every other state.  The value
 * is used directly as the checker's exit status.
 */
static int
check_consistency(struct fields *fieldsp)
{
	return (fieldsp->e == 1 && fieldsp->f == 0) ? 0 : 1;
}
/*
 * main -- 'w' writes the fields (emitting pmreorder markers),
 * 'c' checks consistency and returns the result as the exit status
 * (consumed by pmreorder; note the 'c' path returns before DONE)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmreorder_stack");

	util_init();

	if ((argc != 3) || (strchr("wc", argv[1][0]) == NULL) ||
			argv[1][1] != '\0')
		UT_FATAL("usage: %s w|c file", argv[0]);

	int fd = OPEN(argv[2], O_RDWR);
	size_t size;

	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);
	UT_ASSERT(size >= sizeof(struct fields));

	struct fields *fieldsp = map;

	char opt = argv[1][0];

	/* clear the struct to get a consistent start state for writing */
	if (strchr("w", opt))
		pmem_memset_persist(fieldsp, 0, sizeof(*fieldsp));

	switch (opt) {
	case 'w':
		write_fields(fieldsp);
		break;
	case 'c':
		return check_consistency(fieldsp);
	default:
		UT_FATAL("Unrecognized option %c", opt);
	}

	CLOSE(fd);

	DONE(NULL);
}
| 2,263 | 17.258065 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/movnt_align_common.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* movnt_align_common.c -- common part for tests doing a persistent movnt align
*/
#include "unittest.h"
#include "movnt_align_common.h"
/* shared test buffers; allocated/mapped by each test's main() */
char *Src;
char *Dst;
char *Scratch;
/*
 * check_memmove -- fill dst/src at the given offsets, run the supplied
 * memmove-like function and verify dst now matches src
 */
void
check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
		unsigned flags)
{
	memset(Dst + doff, 1, len);
	memset(Src + soff, 0, len);

	fn(Dst + doff, Src + soff, len, flags);

	if (memcmp(Dst + doff, Src + soff, len))
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memcpy -- as check_memmove, but additionally verifies -- via
 * the Scratch shadow buffer -- that no bytes outside the copied range
 * were modified
 */
void
check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
		unsigned flags)
{
	/* distinct fill patterns so stray writes are detectable */
	memset(Dst, 2, N_BYTES);
	memset(Src, 3, N_BYTES);
	memset(Scratch, 2, N_BYTES);

	memset(Dst + doff, 1, len);
	memset(Src + soff, 0, len);
	/* Scratch models what Dst must look like after the copy */
	memcpy(Scratch + doff, Src + soff, len);

	fn(Dst + doff, Src + soff, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES))
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memset -- run the supplied memset-like function on a sub-range
 * of Dst and verify, against the Scratch shadow buffer, that exactly
 * that range was set and nothing else changed
 */
void
check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags)
{
	memset(Scratch, 2, N_BYTES);
	memset(Scratch + off, 1, len);

	memset(Dst, 2, N_BYTES);
	fn(Dst + off, 1, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES))
		UT_FATAL("memset failed");
}
/* every PMEM_F_MEM_* flag combination the alignment tests iterate over */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 1,830 | 21.060241 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
class MovntAlignCommon(t.Test):
    test_type = t.Short
    filesize = 4 * t.MiB

    def run_cases(self, ctx):
        """Run the binary once per operation case: C, F, B, S."""
        for case in 'CFBS':
            ctx.exec('pmem2_movnt_align', self.filepath, case)

    def run(self, ctx):
        """Create the sparse test file, then run all cases against it."""
        self.filepath = ctx.create_holey_file(self.filesize, 'testfile',)
        self.run_cases(ctx)
class Pmem2MovntAlign(MovntAlignCommon):
    # unused here; kept for subclass configuration
    threshold = None
    threshold_values = ['0', '99999']
    # names of environment variables forced to '0' before the first pass
    envs0 = ()

    def run(self, ctx):
        # first pass with the selected env vars set to '0'
        for env in self.envs0:
            ctx.env[env] = '0'
        super().run(ctx)
        # extra passes with explicit PMEM_MOVNT_THRESHOLD values
        for tv in self.threshold_values:
            ctx.env['PMEM_MOVNT_THRESHOLD'] = tv
            self.run_cases(ctx)
@t.require_valgrind_enabled('pmemcheck')
class MovntAlignCommonValgrind(Pmem2MovntAlign):
    # pmemcheck runs are slower, so allow a longer expected duration
    test_type = t.Medium

    def run(self, ctx):
        # extra pmemcheck option required by these tests
        ctx.env['VALGRIND_OPTS'] = "--mult-stores=yes"
        super().run(ctx)
class TEST0(Pmem2MovntAlign):
    # baseline: no environment overrides
    pass
@t.require_architectures('x86_64')
class TEST1(Pmem2MovntAlign):
    # first pass runs with PMEM_AVX512F=0
    envs0 = ("PMEM_AVX512F",)
@t.require_architectures('x86_64')
class TEST2(Pmem2MovntAlign):
    # first pass runs with both PMEM_AVX512F=0 and PMEM_AVX=0
    envs0 = ("PMEM_AVX512F", "PMEM_AVX",)
class TEST3(MovntAlignCommon):
    # run with non-temporal stores disabled
    def run(self, ctx):
        ctx.env['PMEM_NO_MOVNT'] = '1'
        super().run(ctx)
class TEST4(MovntAlignCommon):
    # run with non-temporal stores and the generic memcpy path disabled
    def run(self, ctx):
        ctx.env['PMEM_NO_MOVNT'] = '1'
        ctx.env['PMEM_NO_GENERIC_MEMCPY'] = '1'
        super().run(ctx)
class TEST5(MovntAlignCommonValgrind):
    # TEST0 scenario under pmemcheck
    pass
@t.require_architectures('x86_64')
class TEST6(MovntAlignCommonValgrind):
    # TEST1 scenario (PMEM_AVX512F=0) under pmemcheck
    envs0 = ("PMEM_AVX512F",)
@t.require_architectures('x86_64')
class TEST7(MovntAlignCommonValgrind):
    # TEST2 scenario (AVX512F and AVX disabled) under pmemcheck
    envs0 = ("PMEM_AVX512F", "PMEM_AVX",)
class TEST8(MovntAlignCommonValgrind):
    # TEST3 scenario (no movnt) under pmemcheck
    def run(self, ctx):
        ctx.env['PMEM_NO_MOVNT'] = '1'
        super().run(ctx)
class TEST9(MovntAlignCommonValgrind):
    # TEST4 scenario (no movnt, no generic memcpy) under pmemcheck
    def run(self, ctx):
        ctx.env['PMEM_NO_MOVNT'] = '1'
        ctx.env['PMEM_NO_GENERIC_MEMCPY'] = '1'
        super().run(ctx)
| 2,281 | 21.82 | 73 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/pmem2_movnt_align.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt_align.c -- test for functions with non-temporal stores
*
* usage: pmem2_movnt_align file [C|F|B|S]
*
* C - pmem2_memcpy()
* B - pmem2_memmove() in backward direction
* F - pmem2_memmove() in forward direction
* S - pmem2_memset()
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem2.h"
#include "unittest.h"
#include "movnt_align_common.h"
#include "ut_pmem2.h"
/* mem* implementations resolved from the pmem2 mapping in main() */
static pmem2_memset_fn memset_fn;
static pmem2_memcpy_fn memcpy_fn;
static pmem2_memmove_fn memmove_fn;
/*
 * check_memmove_variants -- (internal) runs check_memmove() once for
 * every flag combination in the Flags table
 */
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memmove(doff, soff, len, memmove_fn, Flags[i]);
}
/*
 * check_memcpy_variants -- (internal) runs check_memcpy() once for
 * every flag combination in the Flags table
 */
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memcpy(doff, soff, len, memcpy_fn, Flags[i]);
}
/*
 * check_memset_variants -- (internal) runs check_memset() once for
 * every flag combination in the Flags table
 */
static void
check_memset_variants(size_t off, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memset(off, len, memset_fn, Flags[i]);
}
/*
 * main -- resolve the pmem2 mem* functions for the test file, then exercise
 * the operation selected by argv[2] (C/B/F/S) with every sub-cacheline
 * misalignment of begin/end offsets and length.
 *
 * Fixes vs. the previous version: the mmap result is NULL-checked before
 * any derived pointer is computed from it. In the 'F' case the check
 * previously tested Src, which had already been derived from Dst (the
 * actual mmap result), so a failed mapping was never detected; in the
 * 'B' case pointer arithmetic was performed on a possibly-NULL Src.
 */
int
main(int argc, char *argv[])
{
	if (argc != 3)
		UT_FATAL("usage: %s file type", argv[0]);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;
	int fd;
	char type = argv[2][0];

	/* report the env knobs that influence the mem* implementation choice */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_movnt_align %c %s %savx %savx512f", type,
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	/*
	 * Map the test file only long enough to obtain the mem* functions
	 * appropriate for its granularity; the checks below run on
	 * anonymous mappings.
	 */
	fd = OPEN(argv[1], O_RDWR);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);

	memset_fn = pmem2_get_memset_fn(map);
	memcpy_fn = pmem2_get_memcpy_fn(map);
	memmove_fn = pmem2_get_memmove_fn(map);

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	size_t page_size = Ut_pagesize;
	size_t s;

	switch (type) {
	case 'C': /* memcpy */
		/* mmap with guard pages */
		Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Src == NULL || Dst == NULL)
			UT_FATAL("!mmap");

		Scratch = MALLOC(N_BYTES);

		/* check memcpy with 0 size */
		check_memcpy_variants(0, 0, 0);

		/* check memcpy with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(0, 0, N_BYTES - s);

		/* check memcpy with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, 0, N_BYTES - s);

		/* check memcpy with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Src, N_BYTES);
		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);

		break;
	case 'B': /* memmove backward */
		/* mmap with guard pages; Src and Dst overlap */
		Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		if (Src == NULL)
			UT_FATAL("!mmap");
		/* derive Dst only after the mapping is known to be valid */
		Dst = Src + N_BYTES - page_size;

		/* check memmove in backward direction with 0 size */
		check_memmove_variants(0, 0, 0);

		/* check memmove in backward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);

		/* check memmove in backward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);

		/*
		 * check memmove in backward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
		break;
	case 'F': /* memmove forward */
		/* mmap with guard pages; Src and Dst overlap */
		Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		if (Dst == NULL) /* was checking Src, which hid mmap failures */
			UT_FATAL("!mmap");
		/* derive Src only after the mapping is known to be valid */
		Src = Dst + N_BYTES - page_size;

		/* check memmove in forward direction with 0 size */
		check_memmove_variants(0, 0, 0);

		/* check memmove in forward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);

		/* check memmove in forward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);

		/*
		 * check memmove in forward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
		break;
	case 'S': /* memset */
		/* mmap with guard pages */
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Dst == NULL)
			UT_FATAL("!mmap");

		Scratch = MALLOC(N_BYTES);

		/* check memset with 0 size */
		check_memset_variants(0, 0);

		/* check memset with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(0, N_BYTES - s);

		/* check memset with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - s);

		/* check memset with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);
		break;
	default:
		UT_FATAL("!wrong type of test");
		break;
	}

	DONE(NULL);
}
| 5,283 | 24.042654 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/movnt_align_common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* movnt_align_common.h -- header file for common movnt_align test utilities
*/
#ifndef MOVNT_ALIGN_COMMON_H
#define MOVNT_ALIGN_COMMON_H 1
#include "unittest.h"
#include "file.h"
/* size of each test buffer: two pages */
#define N_BYTES (Ut_pagesize * 2)
/* source/destination/scratch buffers, defined by the test program */
extern char *Src;
extern char *Dst;
extern char *Scratch;
/* flag combinations passed to the mem* functions under test */
extern unsigned Flags[10];
/* plain memcpy/memmove-shaped function pointer (dest, src, len) */
typedef void *(*mem_fn)(void *, const void *, size_t);
/*
 * Function types for the flag-taking mem* operations under test;
 * presumably signature-compatible with the libpmem2
 * pmem2_mem{cpy,move,set}_fn types — TODO confirm.
 */
typedef void *pmem_memcpy_fn(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
/* checkers: perform the operation at the given offsets and verify the result */
void check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
		unsigned flags);
void check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
		unsigned flags);
void check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags);
#endif
| 989 | 26.5 | 80 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.