Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memmove/pmem_memmove.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memmove.c -- unit test for doing a memmove
*
* usage:
* pmem_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memmove_common.h"
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
/*
 * pmem_memmove_persist_wrapper -- adapt pmem_memmove_persist() to the
 * flag-taking pmem_memmove_fn signature; the flags argument is ignored.
 */
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;	/* pmem_memmove_persist() takes no flags */

	void *ret = pmem_memmove_persist(pmemdest, src, len);
	return ret;
}
/*
 * pmem_memmove_nodrain_wrapper -- adapt pmem_memmove_nodrain() to the
 * flag-taking pmem_memmove_fn signature; the flags argument is ignored.
 */
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;	/* pmem_memmove_nodrain() takes no flags */

	void *ret = pmem_memmove_nodrain(pmemdest, src, len);
	return ret;
}
/*
 * do_persist_ddax -- persist the given range via util_persist_auto()
 * with the first argument set to 1 (the device-dax path)
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
	util_persist_auto(1, ptr, size);
}
/*
 * do_persist -- persist the given range via util_persist_auto()
 * with the first argument set to 0 (the non-device-dax path)
 */
static void
do_persist(const void *ptr, size_t size)
{
	util_persist_auto(0, ptr, size);
}
/*
 * swap_mappings - given two mmapped regions, swap them.
 *
 * Try swapping src and dest by unmapping src, mapping a new dest with
 * the original src address as a hint. If successful, unmap original dest.
 * Map a new src with the original dest as a hint.
 * In the event of an error caller must unmap all passed in mappings.
 */
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
	char *d = *dest;
	char *s = *src;
	char *ts;
	char *td;

	MUNMAP(*src, size);

	/* mmap destination using src addr as hint */
	td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	MUNMAP(*dest, size);
	*dest = td;

	/* mmap src using original destination addr as a hint */
	ts = MMAP(d, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
			-1, 0);
	*src = ts;
}
/*
 * do_memmove_variants -- run do_memmove() for every memmove variant:
 * the persist and nodrain wrappers, then pmem_memmove() once per entry
 * of the Flags table.
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes, persist_fn p)
{
	do_memmove(dst, src, file_name, dest_off, src_off,
			bytes, pmem_memmove_persist_wrapper, 0, p);
	do_memmove(dst, src, file_name, dest_off, src_off,
			bytes, pmem_memmove_nodrain_wrapper, 0, p);

	/* size_t index avoids a signed/unsigned compare with ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memmove(dst, src, file_name, dest_off, src_off,
				bytes, pmem_memmove, Flags[i], p);
	}
}
/*
 * main -- drive the memmove test: parse b:/d:/s:/o: options, map the
 * test file (plus an anonymous companion mapping ordered around it),
 * and run all memmove variants for the requested offsets/length.
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;	/* 0: distinct src/dst buffers, 1: same buffer */
	size_t mapped_len;

	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	/*
	 * Validate the argument count before touching argv[1]; the
	 * original opened argv[1] first, dereferencing NULL when the
	 * file argument was missing.
	 */
	if (argc < 3)
		USAGE();

	fd = OPEN(argv[1], O_RDWR);

	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file %s", argv[1]);

	/* device-dax needs the ddax flavor of the persist helper */
	persist_fn p;
	p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;

	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
		    argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");

		size_t val = STRTOUL(&argv[arg][2], NULL, 0);

		switch (argv[arg][0]) {
		case 'd':
			/* val is unsigned, so only zero is invalid */
			if (val == 0)
				UT_FATAL("bad offset (%zu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':
			if (val == 0)
				UT_FATAL("bad offset (%zu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':
			if (val == 0)
				UT_FATAL("bad length (%zu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%zu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}

	if (who == 0) {
		/* src > dest */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!could not mmap dest file %s", argv[1]);

		src = MMAP(dst + mapped_len, mapped_len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
			-1, 0);
		/*
		 * It's very unlikely that src would not be > dest.
		 * pmem_map_file chooses the first unused address >= 1TB,
		 * large enough to hold the given range, and 1GB aligned.
		 * Log the error if the mapped addresses cannot be swapped
		 * but allow the test to continue.
		 */
		if (src <= dst) {
			swap_mappings(&dst, &src, mapped_len, fd);
			if (src <= dst)
				UT_FATAL("cannot map files in memory order");
		}

		do_memmove_variants(dst, src, argv[1],
				dst_off, src_off, bytes, p);

		/* dest > src */
		swap_mappings(&dst, &src, mapped_len, fd);

		if (dst <= src)
			UT_FATAL("cannot map files in memory order");

		do_memmove_variants(dst, src, argv[1],
				dst_off, src_off, bytes, p);

		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);

		MUNMAP(src, mapped_len);
	} else {
		/* use the same buffer for source and destination */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!Could not mmap %s: \n", argv[1]);

		memset(dst, 0, bytes);
		p(dst, bytes);

		do_memmove_variants(dst, dst, argv[1],
				dst_off, src_off, bytes, p);

		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);
	}

	CLOSE(fd);

	DONE(NULL);
}
| 5,226 | 22.334821 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_heap_size/obj_ctl_heap_size.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_heap_size.c -- tests for the ctl entry points: heap.size.*
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_heap_size"
#define CUSTOM_GRANULARITY ((1 << 20) * 10)
#define OBJ_SIZE 1024
/*
 * main -- exercise the heap.size.* ctl entry points: disable automatic
 * heap growth, allocate until OOM, then either extend manually ('x')
 * or re-enable growth with a custom granularity ('w') and verify that
 * a subsequent allocation succeeds.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_heap_size");

	if (argc != 3)
		UT_FATAL("usage: %s poolset [w|x]", argv[0]);

	const char *path = argv[1];
	char t = argv[2][0];

	PMEMobjpool *pop;

	if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	int ret = 0;

	/*
	 * The granularity ctl operates on ssize_t -- use that type
	 * consistently (the original declared this one as size_t).
	 * Setting it to 0 disables automatic heap extension.
	 */
	ssize_t disable_granularity = 0;
	ret = pmemobj_ctl_set(pop, "heap.size.granularity",
		&disable_granularity);
	UT_ASSERTeq(ret, 0);

	/* allocate until OOM */
	while (pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL) == 0)
		;

	if (t == 'x') {
		/* extend the heap by CUSTOM_GRANULARITY bytes, once */
		ssize_t extend_size = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_exec(pop, "heap.size.extend", &extend_size);
		UT_ASSERTeq(ret, 0);
	} else if (t == 'w') {
		/* re-enable automatic growth with a custom granularity */
		ssize_t new_granularity = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_set(pop, "heap.size.granularity",
			&new_granularity);
		UT_ASSERTeq(ret, 0);

		/* read it back and make sure the value stuck */
		ssize_t curr_granularity;
		ret = pmemobj_ctl_get(pop, "heap.size.granularity",
			&curr_granularity);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(new_granularity, curr_granularity);
	} else {
		UT_ASSERT(0);	/* unknown test mode */
	}

	/* should succeed */
	ret = pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,500 | 21.402985 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_basic_integration/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# obj_basic_integration/config.sh -- test configuration
#
# Extend timeout for this test, as it may take a few minutes
# when run on a non-pmem file system.
CONF_GLOBAL_TIMEOUT='10m'
| 286 | 19.5 | 60 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_basic_integration/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
import testframework as t
class BASIC(t.Test):
    """Common scenario: run obj_basic_integration against a 16 MiB holey file."""
    test_type = t.Medium

    def run(self, ctx):
        filepath = ctx.create_holey_file(16 * t.MiB, 'testfile1')
        ctx.exec('obj_basic_integration', filepath)


@t.require_valgrind_disabled('memcheck')
class TEST0(BASIC):
    """Plain run with valgrind's memcheck explicitly disabled."""
    pass


@t.require_valgrind_enabled('pmemcheck')
class TEST1(BASIC):
    """Same scenario run under valgrind's pmemcheck tool."""
    pass
| 451 | 17.08 | 65 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_basic_integration/obj_basic_integration.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_basic_integration.c -- Basic integration tests
*
*/
#include <stddef.h>
#include "unittest.h"
#include "obj.h"
#define TEST_STR "abcdefgh"
#define TEST_STR_LEN 8
#define TEST_VALUE 5
/*
 * Layout definition -- root object plus two node types
 */
POBJ_LAYOUT_BEGIN(basic);
POBJ_LAYOUT_ROOT(basic, struct dummy_root);
POBJ_LAYOUT_TOID(basic, struct dummy_node);
POBJ_LAYOUT_TOID(basic, struct dummy_node_c);
POBJ_LAYOUT_END(basic);

/* plain test node; can live on two lists via the plist/plist_m entries */
struct dummy_node {
	int value;
	char teststr[TEST_STR_LEN];
	POBJ_LIST_ENTRY(struct dummy_node) plist;
	POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};

/* constructor-initialized variant, same shape as dummy_node */
struct dummy_node_c {
	int value;
	char teststr[TEST_STR_LEN];
	/*
	 * NOTE(review): both list entries name struct dummy_node rather
	 * than dummy_node_c -- presumably fine since the layouts are
	 * identical, but worth confirming it is intentional.
	 */
	POBJ_LIST_ENTRY(struct dummy_node) plist;
	POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};

/* pool root: a value, a pmem-aware mutex, one node and two list heads */
struct dummy_root {
	int value;
	PMEMmutex lock;
	TOID(struct dummy_node) node;
	POBJ_LIST_HEAD(dummy_list, struct dummy_node) dummies;
	POBJ_LIST_HEAD(moved_list, struct dummy_node) moved;
};
/*
 * dummy_node_constructor -- object constructor: copy the int pointed to
 * by arg into the node's value field and persist that field.
 */
static int
dummy_node_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct dummy_node *node = (struct dummy_node *)ptr;
	int *value_ptr = (int *)arg;

	node->value = *value_ptr;
	pmemobj_persist(pop, &node->value, sizeof(node->value));

	return 0;
}
/*
 * test_alloc_api -- exercise the non-transactional allocation API:
 * POBJ_ZNEW/NEW/ALLOC/REALLOC/ZREALLOC/ZALLOC/FREE, the typed and
 * untyped pool iterators, and the ENOMEM error paths of
 * pmemobj_alloc()/pmemobj_zalloc() for oversized requests.
 */
static void
test_alloc_api(PMEMobjpool *pop)
{
	TOID(struct dummy_node) node_zeroed;
	TOID(struct dummy_node_c) node_constructed;

	/* zeroed allocation must carry the declared type number */
	POBJ_ZNEW(pop, &node_zeroed, struct dummy_node);
	UT_ASSERT_rt(OID_INSTANCEOF(node_zeroed.oid, struct dummy_node));

	int *test_val = (int *)MALLOC(sizeof(*test_val));
	*test_val = TEST_VALUE;
	POBJ_NEW(pop, &node_constructed, struct dummy_node_c,
			dummy_node_constructor, test_val);

	FREE(test_val);

	/* zeroed nodes read back as 0 ... */
	TOID(struct dummy_node) iter;
	POBJ_FOREACH_TYPE(pop, iter) {
		UT_ASSERTeq(D_RO(iter)->value, 0);
	}

	/* ... constructed nodes as TEST_VALUE */
	TOID(struct dummy_node_c) iter_c;
	POBJ_FOREACH_TYPE(pop, iter_c) {
		UT_ASSERTeq(D_RO(iter_c)->value, TEST_VALUE);
	}

	/* the pool-wide iterator must see the objects just allocated */
	PMEMoid oid_iter;
	int nodes_count = 0;
	POBJ_FOREACH(pop, oid_iter) {
		nodes_count++;
	}
	UT_ASSERTne(nodes_count, 0);

	POBJ_FREE(&node_zeroed);
	POBJ_FREE(&node_constructed);

	/* after freeing both, the pool must be empty again */
	nodes_count = 0;
	POBJ_FOREACH(pop, oid_iter) {
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 0);

	int val = 10;
	POBJ_ALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c),
			dummy_node_constructor, &val);

	/* realloc variants must preserve the object's type number */
	POBJ_REALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c) + 1000);
	UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
			TOID_TYPE_NUM(struct dummy_node_c));

	POBJ_ZREALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c) + 2000);
	UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
			TOID_TYPE_NUM(struct dummy_node_c));

	POBJ_FREE(&node_constructed);

	POBJ_ZALLOC(pop, &node_zeroed, struct dummy_node,
			sizeof(struct dummy_node));

	POBJ_FREE(&node_zeroed);

	/* freeing a NULL oid must be a no-op */
	PMEMoid oid = OID_NULL;
	POBJ_FREE(&oid);

	/* oversized requests must fail with -1/ENOMEM */
	int err = 0;

	err = pmemobj_alloc(pop, NULL, SIZE_MAX, 0, NULL, NULL);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);

	err = pmemobj_zalloc(pop, NULL, SIZE_MAX, 0);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);

	err = pmemobj_alloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0, NULL,
			NULL);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);

	err = pmemobj_zalloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_realloc_api -- walk pmemobj_realloc() through its whole contract:
 * grow, shrink, free via size 0, alloc via realloc of a NULL oid, no-op
 * reallocs, and ENOMEM failures for oversized requests.
 */
static void
test_realloc_api(PMEMobjpool *pop)
{
	PMEMoid oid = OID_NULL;
	int ret;

	ret = pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("alloc: %u, size: %zu", 128,
			pmemobj_alloc_usable_size(oid));

	/* grow */
	ret = pmemobj_realloc(pop, &oid, 655360, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 128, 655360,
			pmemobj_alloc_usable_size(oid));

	/* shrink */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 655360, 1,
			pmemobj_alloc_usable_size(oid));

	/* free -- realloc to size 0 releases the object and nulls the oid */
	ret = pmemobj_realloc(pop, &oid, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(OID_IS_NULL(oid));
	UT_OUT("free");

	/* alloc -- realloc of a NULL oid allocates */
	ret = pmemobj_realloc(pop, &oid, 777, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 0, 777,
			pmemobj_alloc_usable_size(oid));

	/* shrink */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 777, 1,
			pmemobj_alloc_usable_size(oid));

	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	/* a freed oid reports zero usable size */
	UT_ASSERTeq(pmemobj_alloc_usable_size(oid), 0);
	UT_OUT("free");

	/* alloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 0, 1,
			pmemobj_alloc_usable_size(oid));

	/* do nothing -- same size realloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 1, 1,
			pmemobj_alloc_usable_size(oid));

	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	UT_OUT("free");

	/* do nothing -- realloc of NULL oid to size 0 */
	ret = pmemobj_realloc(pop, &oid, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(OID_IS_NULL(oid));

	/* alloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));

	/* grow beyond reasonable size -- must fail with ENOMEM */
	ret = pmemobj_realloc(pop, &oid, SIZE_MAX, 0);
	UT_ASSERTeq(ret, -1);
	UT_ASSERTeq(errno, ENOMEM);

	ret = pmemobj_realloc(pop, &oid, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	UT_ASSERTeq(ret, -1);
	UT_ASSERTeq(errno, ENOMEM);

	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
}
/*
 * test_list_api -- exercise the POBJ_LIST_* macros: insert (head/tail/
 * before/after, with and without constructor), iterate (FOREACH,
 * FOREACH_REVERSE, and manual NEXT/PREV walks), move elements between
 * the root's two lists, and remove/remove-free, checking the element
 * count after each stage.
 */
static void
test_list_api(PMEMobjpool *pop)
{
	TOID(struct dummy_root) root;
	root = POBJ_ROOT(pop, struct dummy_root);
	int nodes_count = 0;

	UT_ASSERTeq(pmemobj_type_num(root.oid), POBJ_ROOT_TYPE_NUM);
	UT_COMPILE_ERROR_ON(TOID_TYPE_NUM_OF(root) != POBJ_ROOT_TYPE_NUM);

	TOID(struct dummy_node) first;
	TOID(struct dummy_node) iter;

	/* list starts out empty */
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
				D_RO(iter)->value);
		nodes_count++;
	}

	UT_ASSERTeq(nodes_count, 0);

	int test_val = TEST_VALUE;
	PMEMoid ret;

	/* should fail -- oversized element */
	ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			SIZE_MAX, dummy_node_constructor,
			&test_val);
	UT_ASSERTeq(errno, ENOMEM);
	UT_ASSERT(OID_IS_NULL(ret));

	errno = 0;
	ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			PMEMOBJ_MAX_ALLOC_SIZE + 1, dummy_node_constructor,
			&test_val);
	UT_ASSERTeq(errno, ENOMEM);
	UT_ASSERT(OID_IS_NULL(ret));

	/* two constructor-initialized inserts plus one zeroed node */
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);
	test_val++;
	POBJ_LIST_INSERT_NEW_TAIL(pop, &D_RW(root)->dummies, plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);

	TOID(struct dummy_node) inserted =
			POBJ_LIST_FIRST(&D_RW(root)->dummies);
	UT_ASSERTeq(pmemobj_type_num(inserted.oid),
			TOID_TYPE_NUM(struct dummy_node));

	TOID(struct dummy_node) node;
	POBJ_ZNEW(pop, &node, struct dummy_node);

	POBJ_LIST_INSERT_HEAD(pop, &D_RW(root)->dummies, node, plist);

	nodes_count = 0;
	POBJ_LIST_FOREACH(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 3);

	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_NEXT: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_NEXT(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 3);

	/* bounce "node" between the dummies and moved lists, both ways */
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->dummies,
			&D_RW(root)->moved, node, plist, plist_m);
	UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->moved,
			&D_RW(root)->dummies, node, plist_m, plist);
	POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->dummies,
			&D_RW(root)->moved, node, plist, plist_m);
	UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
	POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->moved,
			&D_RW(root)->dummies, node, plist_m, plist);

	/* remove, re-insert, then remove-and-free (back to 2 nodes) */
	POBJ_LIST_REMOVE(pop, &D_RW(root)->dummies, node, plist);
	POBJ_LIST_INSERT_TAIL(pop, &D_RW(root)->dummies, node, plist);
	POBJ_LIST_REMOVE_FREE(pop, &D_RW(root)->dummies, node, plist);

	nodes_count = 0;
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
				D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 2);

	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_PREV(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 2);

	/* positional inserts: after the first and before the last node */
	test_val++;
	POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(root)->dummies,
			POBJ_LIST_FIRST(&D_RO(root)->dummies), plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);

	test_val++;
	POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(root)->dummies,
			POBJ_LIST_LAST(&D_RO(root)->dummies, plist), plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);

	nodes_count = 0;
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
				D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 4);

	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_LAST(&D_RO(root)->dummies, plist);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_PREV(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 4);
}
/*
 * test_tx_api -- exercise the transaction API: commit/finally/onabort
 * flows, TX_ALLOC/ZALLOC/XALLOC/REALLOC failure paths (both SIZE_MAX
 * and just-over-the-max sizes must abort with ENOMEM), TX_MEMSET/
 * TX_MEMCPY/TX_SET on a fresh node, TX_FREE, and invalid nested
 * transactions (NULL pool and a bogus pool pointer).
 */
static void
test_tx_api(PMEMobjpool *pop)
{
	TOID(struct dummy_root) root;
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct dummy_root)));

	int *vstate = NULL; /* volatile state */

	/* committed tx; TX_FINALLY must run and release the volatile state */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		vstate = (int *)MALLOC(sizeof(*vstate));
		*vstate = TEST_VALUE;
		TX_ADD(root);
		D_RW(root)->value = *vstate;
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_FINALLY {
		FREE(vstate);
		vstate = NULL;
	} TX_END

	UT_ASSERTeq(vstate, NULL);
	UT_ASSERTeq(D_RW(root)->value, TEST_VALUE);

	/* oversized TX_ALLOC must abort the tx with ENOMEM */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* same for TX_ZALLOC ... */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* ... and TX_XALLOC */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_XALLOC(struct dummy_node, SIZE_MAX,
				POBJ_XALLOC_ZERO);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* sizes just over PMEMOBJ_MAX_ALLOC_SIZE must fail the same way */
	errno = 0;
	TX_BEGIN_LOCK(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ALLOC(struct dummy_node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* oversized TX_REALLOC aborts and rolls back the TX_ZNEW node */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));

	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));

	/* successful tx: memset/memcpy/set on a fresh node, then verify */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		TX_MEMSET(D_RW(D_RW(root)->node)->teststr, 'a', TEST_STR_LEN);
		TX_MEMCPY(D_RW(D_RW(root)->node)->teststr, TEST_STR,
				TEST_STR_LEN);
		TX_SET(D_RW(root)->node, value, TEST_VALUE);
	} TX_END
	UT_ASSERTeq(D_RW(D_RW(root)->node)->value, TEST_VALUE);
	UT_ASSERT(strncmp(D_RW(D_RW(root)->node)->teststr, TEST_STR,
			TEST_STR_LEN) == 0);

	/* free the node inside a tx and clear the root's reference */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		UT_ASSERT(!TOID_IS_NULL(D_RW(root)->node));
		TX_FREE(D_RW(root)->node);
		D_RW(root)->node = TOID_NULL(struct dummy_node);
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_END

	/* nested tx with a NULL pool must fail with EFAULT */
	errno = 0;
	TX_BEGIN(pop) {
		TX_BEGIN(NULL) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EFAULT);
	} TX_END

	/* nested tx with a different (bogus) pool must fail with EINVAL */
	errno = 0;
	TX_BEGIN(pop) {
		TX_BEGIN((PMEMobjpool *)(uintptr_t)7) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EINVAL);
	} TX_END

	UT_OUT("%s", pmemobj_errormsg());

	/* explicit abort must surface through pmemobj_errormsg() */
	TX_BEGIN(pop) {
		pmemobj_tx_abort(ECANCELED);
	} TX_END
	UT_OUT("%s", pmemobj_errormsg());
}
/*
 * test_action_api -- exercise the action (reserve/publish/cancel) API:
 * publish a reservation plus a deferred value-set, publish inside a
 * transaction, cancel a pair of actions, and publish typed
 * POBJ_RESERVE_NEW/POBJ_RESERVE_ALLOC reservations.
 */
static void
test_action_api(PMEMobjpool *pop)
{
	struct pobj_action act[2];

	/* publish a reservation together with a deferred set-value */
	uint64_t dest_value = 0;
	PMEMoid oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	pmemobj_publish(pop, act, 2);
	UT_ASSERTeq(dest_value, 1);

	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));

	/* a reservation may also be published from inside a transaction */
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	TX_BEGIN(pop) {
		pmemobj_tx_publish(act, 1);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));

	/* cancelled actions must leave the target value untouched */
	dest_value = 0;
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	pmemobj_cancel(pop, act, 2);

	UT_ASSERTeq(dest_value, 0);

	/* typed reservations */
	TOID(struct dummy_node) n =
		POBJ_RESERVE_NEW(pop, struct dummy_node, &act[0]);
	TOID(struct dummy_node_c) c =
		POBJ_RESERVE_ALLOC(pop, struct dummy_node_c,
			sizeof(struct dummy_node_c), &act[1]);

	pmemobj_publish(pop, act, 2);

	/* valgrind would warn in case they were not allocated */
	D_RW(n)->value = 1;
	D_RW(c)->value = 1;
	pmemobj_persist(pop, D_RW(n), sizeof(struct dummy_node));
	pmemobj_persist(pop, D_RW(c), sizeof(struct dummy_node_c));
}
/*
 * test_offsetof -- compile-time check that TOID_OFFSETOF() agrees with
 * the standard offsetof() for every field of the root and node structs
 */
static void
test_offsetof(void)
{
	TOID(struct dummy_root) r;
	TOID(struct dummy_node) n;

	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, value) !=
				offsetof(struct dummy_root, value));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, lock) !=
				offsetof(struct dummy_root, lock));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, node) !=
				offsetof(struct dummy_root, node));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, dummies) !=
				offsetof(struct dummy_root, dummies));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, moved) !=
				offsetof(struct dummy_root, moved));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, value) !=
				offsetof(struct dummy_node, value));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, teststr) !=
				offsetof(struct dummy_node, teststr));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist) !=
				offsetof(struct dummy_node, plist));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist_m) !=
				offsetof(struct dummy_node, plist_m));
}
/*
 * test_layout -- an empty layout must report zero declared types
 */
static void
test_layout(void)
{
	/* get number of declared types when there are no types declared */
	POBJ_LAYOUT_BEGIN(mylayout);
	POBJ_LAYOUT_END(mylayout);

	size_t number_of_declared_types = POBJ_LAYOUT_TYPES_NUM(mylayout);
	UT_ASSERTeq(number_of_declared_types, 0);
}
/*
 * test_root_size -- the root size must be 0 before the root object is
 * first requested and exactly the requested size afterwards
 */
static void
test_root_size(PMEMobjpool *pop)
{
	UT_ASSERTeq(pmemobj_root_size(pop), 0);

	size_t alloc_size = sizeof(struct dummy_root);
	pmemobj_root(pop, alloc_size);

	UT_ASSERTeq(pmemobj_root_size(pop), sizeof(struct dummy_root));
}
/*
 * main -- create a pool, run all sub-tests, then re-open it (optionally
 * after a fault injection), verify file locking rejects a second open,
 * and finally run pmemobj_check() on the pool.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_basic_integration");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(basic) != 2);

	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file-name [inject_fault]", argv[0]);

	/* argv[argc] is NULL, so opt is NULL when argc == 2 */
	const char *path = argv[1];
	const char *opt = argv[2];

	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* root_size must run first -- it asserts the root doesn't exist yet */
	test_root_size(pop);
	test_alloc_api(pop);
	test_realloc_api(pop);
	test_list_api(pop);
	test_tx_api(pop);
	test_action_api(pop);
	test_offsetof();
	test_layout();

	pmemobj_close(pop);

	/* fault injection: re-open must fail with ENOMEM when enabled */
	if (argc == 3 && strcmp(opt, "inject_fault") == 0) {
		if (pmemobj_fault_injection_enabled()) {
			pmemobj_inject_fault_at(PMEM_MALLOC, 1,
					"heap_check_remote");
			pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic));
			UT_ASSERTeq(pop, NULL);
			UT_ASSERTeq(errno, ENOMEM);
		}
	}

	if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* second open should fail, checks file locking */
	if ((pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) != NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	pmemobj_close(pop);

	int result = pmemobj_check(path, POBJ_LAYOUT_NAME(basic));

	if (result < 0)
		UT_OUT("!%s: pmemobj_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemobj_check: not consistent", path);

	DONE(NULL);
}
| 17,784 | 25.154412 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_feature/common.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018-2020, Intel Corporation
#
# src/test/pmempool_feature/common.sh -- common part of pmempool_feature tests
#
# for feature values please see: pmempool feature help
PART_SIZE=$(convert_to_bytes 10M)
# define files and directories
POOLSET=$DIR/testset
TEST_SET_LOCAL=testset_local
TEST_SET_REMOTE=testset_remote
LOG=grep${UNITTEST_NUM}.log
pmempool_exe=$PMEMPOOL$EXESUFFIX
exit_func=expect_normal_exit
sds_enabled=$(is_ndctl_enabled $pmempool_exe; echo $?)
# pmempool_feature_query -- query feature
#
# usage: pmempool_feature_query <feature> [<query-exit-type>]
#
# The optional second argument selects the expected exit type
# ("normal" by default); the query result is appended to $LOG.
function pmempool_feature_query() {
	query_exit_type=${2-normal}
	query_exit_func=expect_${query_exit_type}_exit
	val=$($query_exit_func $pmempool_exe feature -q $1 $POOLSET 2>> $LOG)
	# log the value only when the query was expected to succeed
	if [ "$query_exit_type" == "normal" ]; then
		echo "query $1 result is $val" &>> $LOG
	fi
}
# pmempool_feature_enable -- enable feature
#
# usage: pmempool_feature_enable <feature> [no-query]
#
# Runs "pmempool feature -e" via $exit_func and, unless "no-query" is
# passed, verifies the resulting state with pmempool_feature_query.
function pmempool_feature_enable() {
	$exit_func $pmempool_exe feature -e $1 $POOLSET &>> $LOG
	if [ "x$2" != "xno-query" ]; then
		pmempool_feature_query $1
	fi
}
# pmempool_feature_disable -- disable feature
#
# usage: pmempool_feature_disable <feature> [no-query]
#
# Runs "pmempool feature -d" via $exit_func and, unless "no-query" is
# passed, verifies the resulting state with pmempool_feature_query.
function pmempool_feature_disable() {
	# "&>>" must be unquoted so it acts as a redirection; the original
	# quoted it ('&>>'), which passed it to pmempool as a literal
	# command-line argument and left the output unlogged
	$exit_func $pmempool_exe feature -d $1 $POOLSET &>> $LOG
	if [ "x$2" != "xno-query" ]; then
		pmempool_feature_query $1
	fi
}
# pmempool_feature_create_poolset -- create poolset
#
# usage: pmempool_feature_create_poolset <poolset-type>
#
# Builds a poolset description of the requested type (regular files,
# a device-dax device, or a local/remote replica pair), removes any
# previous pool, and creates a fresh obj pool on it.
function pmempool_feature_create_poolset() {
	POOLSET_TYPE=$1
	case "$1" in
	"no_dax_device")
		create_poolset $POOLSET \
			$PART_SIZE:$DIR/testfile11:x \
			$PART_SIZE:$DIR/testfile12:x \
			r \
			$PART_SIZE:$DIR/testfile21:x \
			$PART_SIZE:$DIR/testfile22:x \
			r \
			$PART_SIZE:$DIR/testfile31:x
		;;
	"dax_device")
		create_poolset $POOLSET \
			AUTO:$DEVICE_DAX_PATH
		;;
	"remote")
		# local set replicated to a remote node; the poolset files
		# are distributed to both nodes and stale test files removed
		create_poolset $DIR/$TEST_SET_LOCAL \
			$PART_SIZE:${NODE_DIR[1]}/testfile_local11:x \
			$PART_SIZE:${NODE_DIR[1]}/testfile_local12:x \
			m ${NODE_ADDR[0]}:$TEST_SET_REMOTE
		create_poolset $DIR/$TEST_SET_REMOTE \
			$PART_SIZE:${NODE_DIR[0]}/testfile_remote21:x \
			$PART_SIZE:${NODE_DIR[0]}/testfile_remote22:x

		copy_files_to_node 0 ${NODE_DIR[0]} $DIR/$TEST_SET_REMOTE
		copy_files_to_node 1 ${NODE_DIR[1]} $DIR/$TEST_SET_LOCAL

		rm_files_from_node 1 \
			${NODE_DIR[1]}testfile_local11 ${NODE_DIR[1]}testfile_local12
		rm_files_from_node 0 \
			${NODE_DIR[0]}testfile_remote21 ${NODE_DIR[0]}testfile_remote22

		POOLSET="${NODE_DIR[1]}/$TEST_SET_LOCAL"
		;;
	esac

	expect_normal_exit $pmempool_exe rm -f $POOLSET

	# create pool
	# pmempool create under valgrind pmemcheck takes too long
	# it is not part of the test so it is run here without valgrind
	VALGRIND_DISABLED=y expect_normal_exit $pmempool_exe create obj $POOLSET
}
# pmempool_feature_test_SINGLEHDR -- test SINGLEHDR
#
# Toggling SINGLEHDR is unsupported, so both enable and disable are
# expected to exit abnormally; only the query must succeed.
function pmempool_feature_test_SINGLEHDR() {
	exit_func=expect_abnormal_exit
	pmempool_feature_enable "SINGLEHDR" "no-query" # UNSUPPORTED
	pmempool_feature_disable "SINGLEHDR" "no-query" # UNSUPPORTED
	exit_func=expect_normal_exit
	pmempool_feature_query "SINGLEHDR"
}
# pmempool_feature_test_CKSUM_2K -- test CKSUM_2K
#
# CKSUM_2K cannot be disabled while SHUTDOWN_STATE is on; verify that
# dependency, then disable and re-enable CKSUM_2K.
function pmempool_feature_test_CKSUM_2K() {
	# PMEMPOOL_FEAT_CHCKSUM_2K is enabled by default
	pmempool_feature_query "CKSUM_2K"

	# SHUTDOWN_STATE is disabled on Linux if PMDK is compiled with old ndctl
	# enable it to interfere toggling CKSUM_2K
	if [ $sds_enabled -eq 1 ]; then
		pmempool_feature_enable SHUTDOWN_STATE "no-query"
	fi

	# disable PMEMPOOL_FEAT_SHUTDOWN_STATE prior to success
	exit_func=expect_abnormal_exit
	pmempool_feature_disable "CKSUM_2K" # should fail
	exit_func=expect_normal_exit
	pmempool_feature_disable "SHUTDOWN_STATE"
	pmempool_feature_disable "CKSUM_2K" # should succeed

	pmempool_feature_enable "CKSUM_2K"
}
# pmempool_feature_test_SHUTDOWN_STATE -- test SHUTDOWN_STATE
#
# SHUTDOWN_STATE requires CKSUM_2K: enabling it with CKSUM_2K off must
# fail; with CKSUM_2K back on it must succeed.
function pmempool_feature_test_SHUTDOWN_STATE() {
	pmempool_feature_query "SHUTDOWN_STATE"

	if [ $sds_enabled -eq 0 ]; then
		pmempool_feature_disable SHUTDOWN_STATE
	fi

	# PMEMPOOL_FEAT_SHUTDOWN_STATE requires PMEMPOOL_FEAT_CHCKSUM_2K
	pmempool_feature_disable "CKSUM_2K"
	exit_func=expect_abnormal_exit
	pmempool_feature_enable "SHUTDOWN_STATE" # should fail
	exit_func=expect_normal_exit
	pmempool_feature_enable "CKSUM_2K"
	pmempool_feature_enable "SHUTDOWN_STATE" # should succeed
}
# pmempool_feature_test_CHECK_BAD_BLOCKS -- test CHECK_BAD_BLOCKS
# (the original header comment wrongly said SHUTDOWN_STATE)
function pmempool_feature_test_CHECK_BAD_BLOCKS() {
	# PMEMPOOL_FEAT_CHECK_BAD_BLOCKS is disabled by default
	pmempool_feature_query "CHECK_BAD_BLOCKS"

	pmempool_feature_enable "CHECK_BAD_BLOCKS"
	pmempool_feature_disable "CHECK_BAD_BLOCKS"
}
# pmempool_feature_remote_init -- initialize remote replicas
#
# Requires two nodes with libfabric support, sets up rpmem between
# them, and redirects $pmempool_exe to run on node 1.
function pmempool_feature_remote_init() {
	require_nodes 2

	require_node_libfabric 0 $RPMEM_PROVIDER
	require_node_libfabric 1 $RPMEM_PROVIDER

	init_rpmem_on_node 1 0

	pmempool_exe="run_on_node 1 ../pmempool"
}
# pmempool_feature_test_remote -- run remote tests
#
# usage: pmempool_feature_test_remote <feature>
#
# Poolsets with remote replicas do not support the feature command, so
# enable/disable/query are all expected to exit abnormally.
function pmempool_feature_test_remote() {
	# create pool
	expect_normal_exit $pmempool_exe rm -f $POOLSET
	expect_normal_exit $pmempool_exe create obj $POOLSET

	# poolset with remote replicas are not supported
	exit_func=expect_abnormal_exit
	pmempool_feature_enable $1 no-query
	pmempool_feature_disable $1 no-query
	pmempool_feature_query $1 abnormal
}
| 5,473 | 28.430108 | 78 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/arch_flags/arch_flags.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* arch_flags.c -- unit test for architecture flags
*/
#include <inttypes.h>
#include <string.h>
#include "unittest.h"
#include "pool_hdr.h"
#include "pmemcommon.h"
#define FATAL_USAGE()\
UT_FATAL(\
"usage: arch_flags <machine>:<machine_class>:<data>:<alignment_desc>:<reserved>")
#define ARCH_FLAGS_LOG_PREFIX "arch_flags"
#define ARCH_FLAGS_LOG_LEVEL_VAR "ARCH_FLAGS_LOG_LEVEL"
#define ARCH_FLAGS_LOG_FILE_VAR "ARCH_FLAGS_LOG_FILE"
#define ARCH_FLAGS_LOG_MAJOR 0
#define ARCH_FLAGS_LOG_MINOR 0
/*
 * read_arch_flags -- parse arch flags from the option string
 *
 * Expected format: <machine>:<machine_class>:<data>:0x<align>:0x<reserved>.
 * Starts from the platform's own flags (util_get_arch_flags) and
 * overrides only the fields given as non-zero values.
 * Returns 0 on success, -1 when the string does not match the format.
 */
static int
read_arch_flags(char *opts, struct arch_flags *arch_flags)
{
	uint64_t alignment_desc;
	uint64_t reserved;
	uint16_t machine;
	uint8_t machine_class;
	uint8_t data;

	if (sscanf(opts, "%" SCNu16 ":%" SCNu8 ":%" SCNu8
			":0x%" SCNx64 ":0x%" SCNx64,
			&machine, &machine_class, &data,
			&alignment_desc, &reserved) != 5)
		return -1;

	util_get_arch_flags(arch_flags);

	/* zero means "keep the platform default" for each field */
	if (machine)
		arch_flags->machine = machine;

	if (machine_class)
		arch_flags->machine_class = machine_class;

	if (data)
		arch_flags->data = data;

	if (alignment_desc)
		arch_flags->alignment_desc = alignment_desc;

	if (reserved)
		memcpy(arch_flags->reserved,
				&reserved, sizeof(arch_flags->reserved));

	return 0;
}
/*
 * main -- for each argument, parse an arch-flags description and run
 * util_check_arch_flags() on it, printing the result
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "arch_flags");

	common_init(ARCH_FLAGS_LOG_PREFIX,
		ARCH_FLAGS_LOG_LEVEL_VAR,
		ARCH_FLAGS_LOG_FILE_VAR,
		ARCH_FLAGS_LOG_MAJOR,
		ARCH_FLAGS_LOG_MINOR);

	if (argc < 2)
		FATAL_USAGE();

	int i;
	for (i = 1; i < argc; i++) {
		int ret;
		struct arch_flags arch_flags;

		if ((ret = read_arch_flags(argv[i], &arch_flags)) < 0)
			FATAL_USAGE();
		else if (ret == 0) {
			ret = util_check_arch_flags(&arch_flags);
			UT_OUT("check: %d", ret);
		}
	}

	common_fini();

	DONE(NULL);
}
| 1,895 | 19.387097 | 82 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_transform/libpmempool_transform.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool_transform -- a unittest for libpmempool transform.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
/*
 * main -- run pmempool_transform() on the given input/output poolsets and
 * print the result (plus errno on failure) for the test framework to match.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_transform");

	if (argc != 4)
		UT_FATAL("usage: %s poolset_in poolset_out flags", argv[0]);

	/* flags are parsed as an unsigned integer, base auto-detected */
	int ret = pmempool_transform(argv[1], argv[2],
			(unsigned)strtoul(argv[3], NULL, 0));
	if (ret)
		/* on failure also report errno set by the library */
		UT_OUT("result: %d, errno: %d", ret, errno);
	else
		UT_OUT("result: %d", ret);

	DONE(NULL);
}
| 623 | 19.129032 | 65 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmemcheck/obj_pmemcheck.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
struct foo {
PMEMmutex bar;
};
/*
 * test_mutex_pmem_mapping_register -- allocate an object holding a
 * PMEMmutex, lock/unlock it and free it, dumping pmemcheck's pmem mappings
 * after each step to show how locking (un)registers the mutex region.
 */
static void
test_mutex_pmem_mapping_register(PMEMobjpool *pop)
{
	PMEMoid foo;
	int ret = pmemobj_alloc(pop, &foo, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(foo));
	struct foo *foop = pmemobj_direct(foo);
	ret = pmemobj_mutex_lock(pop, &foop->bar);
	/* foo->bar has been removed from pmem mappings collection */
	VALGRIND_PRINT_PMEM_MAPPINGS;
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_mutex_unlock(pop, &foop->bar);
	UT_ASSERTeq(ret, 0);
	pmemobj_free(&foo);
	/* the entire foo object has been re-registered as pmem mapping */
	VALGRIND_PRINT_PMEM_MAPPINGS;
}
/*
 * main -- create a pool on the given file and run the pmemcheck mutex
 * mapping-registration scenario against it.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmemcheck");

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop;

	if ((pop = pmemobj_create(argv[1], "pmemcheck", PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	test_mutex_pmem_mapping_register(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,127 | 21.56 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/log_include/log_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* log_include.c -- include test for libpmemlog
*
* this is only a compilation test - do not run this program
*/
#include <libpmemlog.h>
int
main(int argc, char *argv[])
{
return 0;
}
| 275 | 15.235294 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_info_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# pmempool_info_remote/config.sh -- test configuration
#
set -e
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=sockets
CONF_GLOBAL_RPMEM_PMETHOD=GPSPM
| 297 | 18.866667 | 54 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmreorder_simple/pmreorder_simple.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmreorder_simple.c -- a simple unit test for store reordering
*
* usage: pmreorder_simple g|b|c|m file
* g - write data in a consistent manner
* b - write data in a possibly inconsistent manner
* c - check data consistency
* m - write data to the pool in a consistent way,
* but at the beginning logs some inconsistent values
*
* See README file for more details.
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
/*
* The struct three_field is inconsistent if flag is set and the fields have
* different values.
*/
struct three_field {
int first_field;
int second_field;
int third_field;
int flag;
};
/*
* write_consistent -- (internal) write data in a consistent manner
*/
/*
 * write_consistent -- (internal) write data in a consistent manner:
 * all three fields are persisted BEFORE the flag is set and persisted,
 * so the flag can never guard unwritten fields regardless of store order.
 */
static void
write_consistent(struct three_field *structp)
{
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	/* persist the three consecutive int fields in one call */
	pmem_persist(&structp->first_field, sizeof(int) * 3);
	structp->flag = 1;
	pmem_persist(&structp->flag, sizeof(structp->flag));
}
/*
* write_inconsistent -- (internal) write data in an inconsistent manner.
*/
/*
 * write_inconsistent -- (internal) write data in an inconsistent manner:
 * the flag is stored before the fields and everything is persisted with a
 * single call, so a reordered/partial flush may expose flag==1 with stale
 * field values (the state pmreorder is expected to catch).
 */
static void
write_inconsistent(struct three_field *structp)
{
	structp->flag = 1;
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	pmem_persist(structp, sizeof(*structp));
}
/*
* check_consistency -- (internal) check struct three_field consistency
*/
/*
 * check_consistency -- (internal) report whether the struct is in an
 * inconsistent state: flag raised while the three fields disagree.
 * Returns nonzero when inconsistent, 0 when consistent.
 */
static int
check_consistency(struct three_field *structp)
{
	/* with the flag cleared any field contents are consistent */
	if (!structp->flag)
		return 0;

	int first = structp->first_field;
	return (first != structp->second_field) ||
		(first != structp->third_field);
}
/*
 * main -- dispatch on the single-letter mode:
 *   g/m - write consistently, b - write inconsistently,
 *   c - return the consistency-check result as the exit status.
 * Mode 'm' additionally logs an inconsistent start state first.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmreorder_simple");

	util_init();

	if ((argc != 3) || (strchr("gbcm", argv[1][0]) == NULL) ||
			argv[1][1] != '\0')
		UT_FATAL("usage: %s g|b|c|m file", argv[0]);

	int fd = OPEN(argv[2], O_RDWR);
	size_t size;

	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);

	struct three_field *structp = map;

	char opt = argv[1][0];

	/* clear the struct to get a consistent start state for writing */
	if (strchr("gb", opt))
		pmem_memset_persist(structp, 0, sizeof(*structp));
	else if (strchr("m", opt)) {
		/* set test values to log an inconsistent start state */
		pmem_memset_persist(&structp->flag, 1, sizeof(int));
		pmem_memset_persist(&structp->first_field, 0, sizeof(int) * 2);
		pmem_memset_persist(&structp->third_field, 1, sizeof(int));
		/* clear the struct to get back a consistent start state */
		pmem_memset_persist(structp, 0, sizeof(*structp));
	}

	/* verify that DEFAULT_REORDER restores default engine */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.BEGIN");
	switch (opt) {
		case 'g':
			write_consistent(structp);
			break;
		case 'b':
			write_inconsistent(structp);
			break;
		case 'm':
			write_consistent(structp);
			break;
		case 'c':
			/* exit status is the inconsistency verdict */
			return check_consistency(structp);
		default:
			UT_FATAL("Unrecognized option %c", opt);
	}
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.END");

	/* check if undefined marker will not cause an issue */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.BEGIN");
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.END");

	CLOSE(fd);

	DONE(NULL);
}
| 3,335 | 24.082707 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_vec/util_vec.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* util_vec.c -- unit test for vec implementation
*/
#include "unittest.h"
#include "vec.h"
struct test {
int foo;
int bar;
};
/*
 * vec_test -- exercise the VEC_* macro API: push, indexed access, iteration
 * (forward), pop, clear and delete, asserting size/contents at each step.
 */
static void
vec_test()
{
	VEC(testvec, struct test) v = VEC_INITIALIZER;

	UT_ASSERTeq(VEC_SIZE(&v), 0);

	struct test t = {1, 2};
	struct test t2 = {3, 4};

	/* push two elements and verify both access paths (ARR and GET) */
	VEC_PUSH_BACK(&v, t);
	VEC_PUSH_BACK(&v, t2);

	UT_ASSERTeq(VEC_ARR(&v)[0].foo, 1);
	UT_ASSERTeq(VEC_GET(&v, 1)->foo, 3);

	UT_ASSERTeq(VEC_SIZE(&v), 2);

	/* iterate and check that elements come back in insertion order */
	int n = 0;
	VEC_FOREACH(t, &v) {
		switch (n) {
		case 0:
			UT_ASSERTeq(t.foo, 1);
			UT_ASSERTeq(t.bar, 2);
			break;
		case 1:
			UT_ASSERTeq(t.foo, 3);
			UT_ASSERTeq(t.bar, 4);
			break;
		}
		n++;
	}
	UT_ASSERTeq(n, 2);
	UT_ASSERTeq(VEC_SIZE(&v), n);

	/* popping the last element leaves only the first */
	VEC_POP_BACK(&v);

	n = 0;
	VEC_FOREACH(t, &v) {
		UT_ASSERTeq(t.foo, 1);
		UT_ASSERTeq(t.bar, 2);
		n++;
	}
	UT_ASSERTeq(n, 1);
	UT_ASSERTeq(VEC_SIZE(&v), n);

	/* clear keeps the vector usable but empty */
	VEC_CLEAR(&v);
	UT_ASSERTeq(VEC_SIZE(&v), 0);

	VEC_DELETE(&v);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_vec");
vec_test();
DONE(NULL);
}
| 1,123 | 13.597403 | 49 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_libpmem2/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import futils
import testframework as t
@t.require_build(['debug', 'release'])
class EX_LIBPMEM2(t.Test):
test_type = t.Medium
file_size = 1 * t.MiB
offset = str(97 * t.KiB)
length = str(65 * t.KiB)
class TEST0(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'basic')
file_path = ctx.create_non_zero_file(self.file_size, 'testfile0')
ctx.exec(example_path, file_path)
class TEST1(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'advanced')
file_path = ctx.create_non_zero_file(self.file_size, 'testfile0')
ctx.exec(example_path, file_path, self.offset, self.length)
class TEST2(EX_LIBPMEM2):
file_size = 16 * t.MiB
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'log')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
args = ['appendv', '4', 'PMDK ', 'is ', 'the best ', 'open source ',
'append', 'project in the world.', 'dump', 'rewind', 'dump',
'appendv', '2', 'End of ', 'file.', 'dump']
ctx.exec(example_path, file_path, *args, stdout_file='out2.log')
class TEST3(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'redo')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
for x in range(1, 100):
ctx.exec(example_path, "add", file_path, x, x)
ctx.exec(example_path, "check", file_path)
ctx.exec(example_path, "print", file_path, stdout_file='out3.log')
| 1,740 | 28.508475 | 76 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/scope/TESTS.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
#
# src/test/scope/TESTS.py -- scope tests to check libraries symbols
#
import os
import sys
import subprocess as sp
import futils as ft
import testframework as t
from testframework import granularity as g
def parse_lib(ctx, lib, static=False):
    """Dispatch to the platform-specific symbol dump of 'lib'.

    Returns the sorted symbol listing as a string, or None on
    platforms other than Linux and Windows.
    """
    platform = sys.platform
    if platform.startswith('linux'):
        return parse_lib_linux(ctx, lib, static)
    if platform == 'win32':
        return parse_lib_win(ctx, lib, static)
def parse_lib_linux(ctx, lib, static):
    """Dump the public symbols of 'lib' using nm.

    Uses 'nm -g' for static archives and 'nm -D' for shared objects.
    Keeps only defined functions ('T') and global variables ('B'),
    returning their names sorted, one per line.
    """
    flag = '-g' if static else '-D'
    cmd = ['nm', flag, lib]
    proc = sp.run(cmd,
                  universal_newlines=True, stdout=sp.PIPE, stderr=sp.STDOUT)
    if proc.returncode != 0:
        raise ft.Fail('command "{}" failed: {}'
                      .format(' '.join(cmd), proc.stdout))

    symbols = []
    for line in proc.stdout.splitlines():
        # penultimate column of 'nm' output must be either
        # 'T' (defined function) or 'B' (global variable).
        # Example output lines:
        #   000000000003fde4 T pmemobj_create
        #   0000000000000018 B _pobj_cached_pool
        #                    U read
        fields = line.split()
        if len(fields) >= 2 and fields[-2] in 'TB':
            symbols.append(fields[-1] + os.linesep)
    symbols.sort()
    return ''.join(symbols)
def parse_lib_win(ctx, lib, static):
    """Dump the public symbols of a DLL using the dllview helper tool.

    Returns the tool's output lines sorted and newline-joined, with a
    trailing newline. The 'static' flag is unused on Windows.
    """
    dllview = ft.get_test_tool_path(ctx.build, 'dllview') + '.exe'
    cmd = [dllview, lib]
    proc = sp.run(cmd, universal_newlines=True,
                  stdout=sp.PIPE, stderr=sp.STDOUT)
    if proc.returncode != 0:
        raise ft.Fail('command "{}" failed: {}'
                      .format(' '.join(cmd), proc.stdout))
    lines = proc.stdout.splitlines()
    lines.sort()
    return '\n'.join(lines) + '\n'
@t.require_valgrind_disabled('drd', 'helgrind', 'memcheck', 'pmemcheck')
@g.no_testdir()
class Common(t.Test):
    """Base test: dump the public symbols of 'checked_lib' to out<N>.log.

    Subclasses only set 'checked_lib'; the framework then compares the
    produced log against the expected symbol list.
    """
    test_type = t.Medium
    # name of the library whose scope (exported symbols) is checked
    checked_lib = ''

    def run(self, ctx):
        static = False
        if sys.platform == 'win32':
            lib = '{}.dll'.format(self.checked_lib)
        elif str(self.ctx.build) in ['debug', 'release']:
            lib = '{}.so.1'.format(self.checked_lib)
        else:
            # static builds: inspect the archive instead of a shared object
            static = True
            lib = '{}.a'.format(self.checked_lib)

        libpath = os.path.join(ft.get_lib_dir(ctx), lib)
        log = 'out{}.log'.format(self.testnum)

        out = parse_lib(ctx, libpath, static)
        with open(os.path.join(self.cwd, log), 'w') as f:
            f.write(out)
@t.windows_exclude
class TEST2(Common):
"""Check scope of libpmem library (*nix)"""
checked_lib = 'libpmem'
@t.windows_exclude
class TEST3(Common):
"""Check scope of libpmemlog library (*nix)"""
checked_lib = 'libpmemlog'
@t.windows_exclude
class TEST4(Common):
"""Check scope of libpmemblk library (*nix)"""
checked_lib = 'libpmemblk'
@t.windows_exclude
class TEST5(Common):
"""Check scope of libpmemobj library (*nix)"""
checked_lib = 'libpmemobj'
@t.windows_exclude
class TEST6(Common):
"""Check scope of libpmempool library (*nix)"""
checked_lib = 'libpmempool'
@t.windows_only
class TEST8(Common):
"""Check scope of libpmem library (windows)"""
checked_lib = 'libpmem'
@t.windows_only
class TEST9(Common):
"""Check scope of libpmemlog library (windows)"""
checked_lib = 'libpmemlog'
@t.windows_only
class TEST10(Common):
"""Check scope of libpmemblk library (windows)"""
checked_lib = 'libpmemblk'
@t.windows_only
class TEST11(Common):
"""Check scope of libpmemobj library (windows)"""
checked_lib = 'libpmemobj'
@t.windows_only
class TEST12(Common):
"""Check scope of libpmempool library (windows)"""
checked_lib = 'libpmempool'
@t.windows_exclude
class TEST13(Common):
"""Check scope of libpmem2 library (*nix)"""
checked_lib = 'libpmem2'
@t.windows_only
class TEST14(Common):
"""Check scope of libpmem2 library (windows)"""
checked_lib = 'libpmem2'
| 4,130 | 24.189024 | 76 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_addr/rpmem_addr.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_addr.c -- unit test for parsing target address
*/
#include "unittest.h"
#include "rpmem_common.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_addr");
struct rpmem_target_info *info;
for (int i = 1; i < argc; i++) {
info = rpmem_target_parse(argv[i]);
if (info) {
UT_OUT("'%s': '%s' '%s' '%s'", argv[i],
info->flags & RPMEM_HAS_USER ?
info->user : "(null)",
*info->node ? info->node : "(null)",
info->flags & RPMEM_HAS_SERVICE ?
info->service : "(null)");
free(info);
} else {
UT_OUT("!%s", argv[i]);
}
}
DONE(NULL);
}
| 683 | 18.542857 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/log_pool_lock/log_pool_lock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* log_pool_lock.c -- unit test which checks whether it's possible to
* simultaneously open the same log pool
*/
#include "unittest.h"
/*
 * test_reopen -- verify that a log pool cannot be opened twice from the
 * same process: the second open must fail with EWOULDBLOCK while the pool
 * is held, and succeed after it is closed.
 */
static void
test_reopen(const char *path)
{
	PMEMlogpool *log1 = pmemlog_create(path, PMEMLOG_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (!log1)
		UT_FATAL("!create");

	/* second open while log1 is held must fail with EWOULDBLOCK */
	PMEMlogpool *log2 = pmemlog_open(path);
	if (log2)
		UT_FATAL("pmemlog_open should not succeed");

	if (errno != EWOULDBLOCK)
		UT_FATAL("!pmemlog_open failed but for unexpected reason");

	pmemlog_close(log1);

	/* once the first handle is closed, open must succeed */
	log2 = pmemlog_open(path);
	if (!log2)
		UT_FATAL("pmemlog_open should succeed after close");

	pmemlog_close(log2);

	UNLINK(path);
}
#ifndef _WIN32
/*
 * test_open_in_different_process -- (POSIX variant) fork a child that
 * tries to open the pool while the parent holds it; the child's open must
 * fail with EWOULDBLOCK. 'sleep' optionally delays the child (usecs) to
 * vary the race timing.
 */
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
	pid_t pid = fork();
	PMEMlogpool *log;
	char *path = argv[1];

	if (pid < 0)
		UT_FATAL("fork failed");

	if (pid == 0) {
		/* child */
		if (sleep)
			usleep(sleep);
		/* wait until the parent has created the pool file */
		while (os_access(path, R_OK))
			usleep(100 * 1000);

		log = pmemlog_open(path);
		if (log)
			UT_FATAL("pmemlog_open after fork should not succeed");
		if (errno != EWOULDBLOCK)
			UT_FATAL("!pmemlog_open after fork failed but for "
				"unexpected reason");

		exit(0);
	}

	log = pmemlog_create(path, PMEMLOG_MIN_POOL, S_IWUSR | S_IRUSR);
	if (!log)
		UT_FATAL("!create");

	int status;

	if (waitpid(pid, &status, 0) < 0)
		UT_FATAL("!waitpid failed");

	if (!WIFEXITED(status))
		UT_FATAL("child process failed");

	pmemlog_close(log);

	UNLINK(path);
}
#else
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
PMEMlogpool *log;
if (sleep > 0)
return;
char *path = argv[1];
/* before starting the 2nd process, create a pool */
log = pmemlog_create(path, PMEMLOG_MIN_POOL, S_IWUSR | S_IRUSR);
if (!log)
UT_FATAL("!create");
/*
* "X" is pass as an additional param to the new process
* created by ut_spawnv to distinguish second process on Windows
*/
uintptr_t result = ut_spawnv(argc, argv, "X", NULL);
if (result == -1)
UT_FATAL("Create new process failed error: %d", GetLastError());
pmemlog_close(log);
}
#endif
int
main(int argc, char *argv[])
{
START(argc, argv, "log_pool_lock");
if (argc < 2)
UT_FATAL("usage: %s path", argv[0]);
if (argc == 2) {
test_reopen(argv[1]);
test_open_in_different_process(argc, argv, 0);
for (unsigned i = 1; i < 100000; i *= 2)
test_open_in_different_process(argc, argv, i);
} else if (argc == 3) {
PMEMlogpool *log;
/* 2nd arg used by windows for 2 process test */
log = pmemlog_open(argv[1]);
if (log)
UT_FATAL("pmemlog_open after create process should "
"not succeed");
if (errno != EWOULDBLOCK)
UT_FATAL("!pmemlog_open after create process failed "
"but for unexpected reason");
}
DONE(NULL);
}
| 2,877 | 19.705036 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/remote_obj_basic/remote_obj_basic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* remote_obj_basic.c -- unit test for remote tests support
*
* usage: remote_obj_basic <create|open> <poolset-file>
*/
#include "unittest.h"
#define LAYOUT_NAME "remote_obj_basic"
/*
 * main -- create or open an obj pool on the given (possibly remote)
 * poolset file, depending on the mode argument.
 */
int
main(int argc, char *argv[])
{
	PMEMobjpool *pop;

	START(argc, argv, "remote_obj_basic");

	if (argc != 3)
		UT_FATAL("usage: %s <create|open> <poolset-file>", argv[0]);

	const char *mode = argv[1];
	const char *file = argv[2];

	if (strcmp(mode, "create") == 0) {
		/* size 0: sizes are taken from the poolset file */
		if ((pop = pmemobj_create(file, LAYOUT_NAME, 0,
						S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemobj_create: %s", file);
		else
			UT_OUT("The pool set %s has been created", file);

	} else if (strcmp(mode, "open") == 0) {
		if ((pop = pmemobj_open(file, LAYOUT_NAME)) == NULL)
			UT_FATAL("!pmemobj_open: %s", file);
		else
			UT_OUT("The pool set %s has been opened", file);

	} else {
		UT_FATAL("wrong mode: %s\n", argv[1]);
	}

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,019 | 20.25 | 62 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_debug/obj_ctl_debug.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_ctl_debug.c -- tests for the ctl debug namesapce entry points
*/
#include "unittest.h"
#include "../../libpmemobj/obj.h"
#define LAYOUT "obj_ctl_debug"
#define BUFFER_SIZE 128
#define ALLOC_PATTERN 0xAC
/*
 * test_alloc_pattern -- verify the debug.heap.alloc_pattern ctl entry:
 * default is "no pattern", setting it makes freshly allocated memory be
 * filled with the given byte.
 */
static void
test_alloc_pattern(PMEMobjpool *pop)
{
	int ret;
	int pattern;
	PMEMoid oid;

	/* check default pattern */
	ret = pmemobj_ctl_get(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pattern, PALLOC_CTL_DEBUG_NO_PATTERN);

	/* check set pattern */
	pattern = ALLOC_PATTERN;
	ret = pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pop->heap.alloc_pattern, pattern);

	/* check alloc with pattern */
	ret = pmemobj_alloc(pop, &oid, BUFFER_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	char *buff = pmemobj_direct(oid);
	int i;
	for (i = 0; i < BUFFER_SIZE; i++)
		/* should trigger memcheck error: read uninitialized values */
		UT_ASSERTeq(*(buff + i), (char)pattern);

	pmemobj_free(&oid);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_debug");
if (argc < 2)
UT_FATAL("usage: %s filename", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
test_alloc_pattern(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 1,452 | 20.367647 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list_macro/obj_list_macro.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_macro.c -- unit tests for list module
*/
#include <stddef.h>
#include "libpmemobj.h"
#include "unittest.h"
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
struct item {
int id;
POBJ_LIST_ENTRY(struct item) next;
};
struct list {
POBJ_LIST_HEAD(listhead, struct item) head;
};
/* global lists */
static TOID(struct list) List;
static TOID(struct list) List_sec;
#define LAYOUT_NAME "list_macros"
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list_macro <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list_macro <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list_macro <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list_macro <file> i:<where>:<num>[:<id>]")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list_macro <file> n:<where>:<num>[:<id>]")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list_macro <file> f:<list>:<num>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list_macro <file> r:<list>:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list_macro <file> m:<num>:<where>:<num>")
/*
* get_item_list -- get nth item from list
*/
/*
 * get_item_list -- return the n-th item of the list; a non-negative n
 * counts from the head (0 is the first element), a negative n counts from
 * the tail (-1 is the last element).
 * Returns TOID_NULL when the index is out of range.
 */
static TOID(struct item)
get_item_list(TOID(struct list) list, int n)
{
	TOID(struct item) it;

	if (n < 0) {
		/* walk backwards; -1 selects the last element */
		int left = n;
		POBJ_LIST_FOREACH_REVERSE(it, &D_RO(list)->head, next) {
			if (++left == 0)
				return it;
		}
	} else {
		int left = n;
		POBJ_LIST_FOREACH(it, &D_RO(list)->head, next) {
			if (left-- == 0)
				return it;
		}
	}

	return TOID_NULL(struct item);
}
/*
* do_print -- print list elements in normal order
*/
static void
do_print(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "P:%d", &L) != 1)
FATAL_USAGE_PRINT();
TOID(struct item) item;
if (L == 1) {
UT_OUT("list:");
POBJ_LIST_FOREACH(item, &D_RW(List)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 2) {
UT_OUT("list sec:");
POBJ_LIST_FOREACH(item, &D_RW(List_sec)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT();
}
}
/*
* do_print_reverse -- print list elements in reverse order
*/
static void
do_print_reverse(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "R:%d", &L) != 1)
FATAL_USAGE_PRINT_REVERSE();
TOID(struct item) item;
if (L == 1) {
UT_OUT("list reverse:");
POBJ_LIST_FOREACH_REVERSE(item, &D_RW(List)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 2) {
UT_OUT("list sec reverse:");
POBJ_LIST_FOREACH_REVERSE(item, &D_RW(List_sec)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT_REVERSE();
}
}
/*
* item_constructor -- constructor which sets the item's id to
* new value
*/
static int
item_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
int id = *(int *)arg;
struct item *item = (struct item *)ptr;
item->id = id;
UT_OUT("constructor(id = %d)", id);
return 0;
}
/*
* do_insert_new -- insert new element to list
*/
static void
do_insert_new(PMEMobjpool *pop, const char *arg)
{
int n; /* which element on List */
int before;
int id;
int ret = sscanf(arg, "n:%d:%d:%d", &before, &n, &id);
if (ret != 3 && ret != 2)
FATAL_USAGE_INSERT_NEW();
int ptr = (ret == 3) ? id : 0;
TOID(struct item) item;
if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(List)->head, next,
sizeof(struct item), item_constructor, &ptr);
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
UT_FATAL("POBJ_LIST_INSERT_NEW_HEAD");
} else {
item = get_item_list(List, n);
UT_ASSERT(!TOID_IS_NULL(item));
if (!before) {
POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(List)->head,
item, next, sizeof(struct item),
item_constructor, &ptr);
if (TOID_IS_NULL(POBJ_LIST_NEXT(item, next)))
UT_FATAL("POBJ_LIST_INSERT_NEW_AFTER");
} else {
POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(List)->head,
item, next, sizeof(struct item),
item_constructor, &ptr);
if (TOID_IS_NULL(POBJ_LIST_PREV(item, next)))
UT_FATAL("POBJ_LIST_INSERT_NEW_BEFORE");
}
}
}
/*
* do_insert -- insert element to list
*/
static void
do_insert(PMEMobjpool *pop, const char *arg)
{
int n; /* which element on List */
int before;
int id;
int ret = sscanf(arg, "i:%d:%d:%d", &before, &n, &id);
if (ret != 3 && ret != 2)
FATAL_USAGE_INSERT();
int ptr = (ret == 3) ? id : 0;
TOID(struct item) item;
POBJ_NEW(pop, &item, struct item, item_constructor, &ptr);
UT_ASSERT(!TOID_IS_NULL(item));
errno = 0;
if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
ret = POBJ_LIST_INSERT_HEAD(pop, &D_RW(List)->head,
item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_HEAD");
}
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
UT_FATAL("POBJ_LIST_INSERT_HEAD");
} else {
TOID(struct item) elm = get_item_list(List, n);
UT_ASSERT(!TOID_IS_NULL(elm));
if (!before) {
ret = POBJ_LIST_INSERT_AFTER(pop, &D_RW(List)->head,
elm, item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_AFTER");
}
if (!TOID_EQUALS(item, POBJ_LIST_NEXT(elm, next)))
UT_FATAL("POBJ_LIST_INSERT_AFTER");
} else {
ret = POBJ_LIST_INSERT_BEFORE(pop, &D_RW(List)->head,
elm, item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_BEFORE");
}
if (!TOID_EQUALS(item, POBJ_LIST_PREV(elm, next)))
UT_FATAL("POBJ_LIST_INSERT_BEFORE");
}
}
}
/*
* do_remove_free -- remove and free element from list
*/
static void
do_remove_free(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
int n; /* which element */
if (sscanf(arg, "f:%d:%d", &L, &n) != 2)
FATAL_USAGE_REMOVE_FREE();
TOID(struct item) item;
TOID(struct list) tmp_list;
if (L == 1)
tmp_list = List;
else if (L == 2)
tmp_list = List_sec;
else
FATAL_USAGE_REMOVE_FREE();
if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
return;
item = get_item_list(tmp_list, n);
UT_ASSERT(!TOID_IS_NULL(item));
errno = 0;
int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(tmp_list)->head,
item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_REMOVE_FREE");
}
}
/*
* do_remove -- remove element from list
*/
/*
 * do_remove -- remove an element from the selected list with
 * POBJ_LIST_REMOVE, then free it separately with POBJ_FREE.
 * arg format: "r:<list>:<num>", list 1 - List, list 2 - List_sec,
 * num may be negative to count from the tail.
 *
 * Fix: an invalid list index previously printed the remove-free ("f:")
 * usage via FATAL_USAGE_REMOVE_FREE(); it now prints the correct
 * remove ("r:") usage.
 */
static void
do_remove(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	int n;	/* which element */
	if (sscanf(arg, "r:%d:%d", &L, &n) != 2)
		FATAL_USAGE_REMOVE();
	TOID(struct item) item;
	TOID(struct list) tmp_list;
	if (L == 1)
		tmp_list = List;
	else if (L == 2)
		tmp_list = List_sec;
	else
		FATAL_USAGE_REMOVE();	/* was FATAL_USAGE_REMOVE_FREE() */

	/* removing from an empty list is a silent no-op */
	if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
		return;
	item = get_item_list(tmp_list, n);
	UT_ASSERT(!TOID_IS_NULL(item));
	errno = 0;
	int ret = POBJ_LIST_REMOVE(pop, &D_RW(tmp_list)->head, item, next);
	if (ret) {
		UT_ASSERTeq(ret, -1);
		UT_ASSERTne(errno, 0);
		UT_FATAL("POBJ_LIST_REMOVE");
	}
	POBJ_FREE(&item);
}
/*
* do_move -- move element from one list to another
*/
static void
do_move(PMEMobjpool *pop, const char *arg)
{
int n;
int d;
int before;
if (sscanf(arg, "m:%d:%d:%d", &n, &before, &d) != 3)
FATAL_USAGE_MOVE();
int ret;
errno = 0;
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
return;
if (POBJ_LIST_EMPTY(&D_RW(List_sec)->head)) {
ret = POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_HEAD");
}
} else {
if (before) {
ret = POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,
&D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List_sec, d),
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_BEFORE");
}
} else {
ret = POBJ_LIST_MOVE_ELEMENT_AFTER(pop,
&D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List_sec, d),
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_AFTER");
}
}
}
}
/*
* do_cleanup -- de-initialization function
*/
static void
do_cleanup(PMEMobjpool *pop, TOID(struct list) list)
{
int ret;
errno = 0;
while (!POBJ_LIST_EMPTY(&D_RW(list)->head)) {
TOID(struct item) tmp = POBJ_LIST_FIRST(&D_RW(list)->head);
ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(list)->head, tmp, next);
UT_ASSERTeq(errno, 0);
UT_ASSERTeq(ret, 0);
}
POBJ_FREE(&list);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_list_macro");
if (argc < 2)
FATAL_USAGE();
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
POBJ_ZNEW(pop, &List, struct list);
POBJ_ZNEW(pop, &List_sec, struct list);
int i;
for (i = 2; i < argc; i++) {
switch (argv[i][0]) {
case 'P':
do_print(pop, argv[i]);
break;
case 'R':
do_print_reverse(pop, argv[i]);
break;
case 'n':
do_insert_new(pop, argv[i]);
break;
case 'i':
do_insert(pop, argv[i]);
break;
case 'f':
do_remove_free(pop, argv[i]);
break;
case 'r':
do_remove(pop, argv[i]);
break;
case 'm':
do_move(pop, argv[i]);
break;
default:
FATAL_USAGE();
}
}
do_cleanup(pop, List);
do_cleanup(pop, List_sec);
pmemobj_close(pop);
DONE(NULL);
}
| 9,625 | 21.756501 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_librpmem_basic/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
#
# ex_librpmem_basic/config.sh -- test configuration
#
# Filesystem-DAX cannot be used for RDMA
# since it is missing support in Linux kernel
CONF_GLOBAL_FS_TYPE=non-pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=short
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 402 | 21.388889 | 51 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_transform_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# pmempool_transform_remote/config.sh -- configuration of unit tests
#
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 298 | 20.357143 | 68 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_transform_remote/common.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2018, Intel Corporation
#
#
# pmempool_transform_remote/common.sh -- commons for pmempool transform tests
# with remote replication
#
set -e
require_nodes 2
require_node_libfabric 0 $RPMEM_PROVIDER
require_node_libfabric 1 $RPMEM_PROVIDER
setup
init_rpmem_on_node 1 0
require_node_log_files 1 pmemobj$UNITTEST_NUM.log
require_node_log_files 1 pmempool$UNITTEST_NUM.log
LOG=out${UNITTEST_NUM}.log
LOG_TEMP=out${UNITTEST_NUM}_part.log
rm -f $LOG && touch $LOG
rm -f $LOG_TEMP && touch $LOG_TEMP
rm_files_from_node 0 ${NODE_TEST_DIR[0]}/$LOG
rm_files_from_node 1 ${NODE_TEST_DIR[1]}/$LOG
LAYOUT=OBJ_LAYOUT
POOLSET_LOCAL_IN=poolset.in
POOLSET_LOCAL_OUT=poolset.out
POOLSET_REMOTE=poolset.remote
POOLSET_REMOTE1=poolset.remote1
POOLSET_REMOTE2=poolset.remote2
# CLI scripts for writing and reading some data hitting all the parts
WRITE_SCRIPT="pmemobjcli.write.script"
READ_SCRIPT="pmemobjcli.read.script"
copy_files_to_node 1 ${NODE_DIR[1]} $WRITE_SCRIPT $READ_SCRIPT
DUMP_INFO_LOG="../pmempool info"
DUMP_INFO_LOG_REMOTE="$DUMP_INFO_LOG -f obj"
DUMP_INFO_SED="sed -e '/^Checksum/d' -e '/^Creation/d' -e '/^Previous replica UUID/d' -e '/^Next replica UUID/d'"
DUMP_INFO_SED_REMOTE="$DUMP_INFO_SED -e '/^Previous part UUID/d' -e '/^Next part UUID/d'"
# dump_info_log -- run 'pmempool info' on a poolset on the given node and
# append its output, filtered through $DUMP_INFO_SED (volatile fields such
# as checksums and UUIDs stripped), to a file on that node.
# usage: dump_info_log <node> <poolset> <out-file> [extra-ignore-prefix]
function dump_info_log() {
	local node=$1
	local poolset=$2
	local name=$3
	local ignore=$4

	local sed_cmd="$DUMP_INFO_SED"

	# optionally strip one more line prefix from the dump
	if [ -n "$ignore" ]; then
		sed_cmd="$sed_cmd -e '/^$ignore/d'"
	fi

	expect_normal_exit run_on_node $node "\"$DUMP_INFO_LOG $poolset | $sed_cmd >> $name\""
}
function dump_info_log_remote() {
local node=$1
local poolset=$2
local name=$3
local ignore=$4
local sed_cmd="$DUMP_INFO_SED_REMOTE"
if [ -n "$ignore" ]; then
sed_cmd="$sed_cmd -e '/^$ignore/d'"
fi
expect_normal_exit run_on_node $node "\"$DUMP_INFO_LOG_REMOTE $poolset | $sed_cmd >> $name\""
}
function diff_log() {
local node=$1
local f1=$2
local f2=$3
expect_normal_exit run_on_node $node "\"[ -s $f1 ] && [ -s $f2 ] && diff $f1 $f2\""
}
exec_pmemobjcli_script() {
local node=$1
local script=$2
local poolset=$3
local out=$4
expect_normal_exit run_on_node $node "\"../pmemobjcli -s $script $poolset > $out \""
}
| 2,298 | 23.98913 | 113 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_critnib_mt/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
@g.no_testdir()
class ObjCritnibMt(t.Test):
def run(self, ctx):
ctx.exec('obj_critnib_mt')
class TEST0(ObjCritnibMt):
test_type = t.Medium
@t.require_valgrind_enabled('helgrind')
class TEST1(ObjCritnibMt):
test_type = t.Long
@t.require_valgrind_enabled('drd')
class TEST2(ObjCritnibMt):
test_type = t.Long
| 498 | 16.821429 | 42 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_critnib_mt/obj_critnib_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_critnib_mt.c -- multithreaded unit test for critnib
*/
#include <errno.h>
#include "critnib.h"
#include "rand.h"
#include "os_thread.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define NITER_FAST 200000000
#define NITER_MID 20000000
#define NITER_SLOW 2000000
#define MAXTHREADS 4096
static int nthreads; /* number of threads */
static int nrthreads; /* in mixed tests, read threads */
static int nwthreads; /* ... and write threads */
/*
 * rnd_thid_r64 -- produce a 64-bit pseudo-random value with bits 16..31
 * replaced by the caller's thread index, so no two worker threads can
 * ever generate the same value.
 */
static uint64_t
rnd_thid_r64(rng_t *seedp, uint16_t thid)
{
	/*
	 * Stick arg (thread index) onto bits 16..31, to make it impossible for
	 * two worker threads to write the same value, while keeping both ends
	 * pseudo-random.
	 */
	uint64_t r = rnd64_r(seedp);
	r &= ~0xffff0000ULL;	/* clear bits 16..31 */
	r |= ((uint64_t)thid) << 16;	/* ...and store the thread id there */
	return r;
}
/*
 * helgrind_count -- scale a total op count to a per-thread iteration count;
 * additionally shrinks it 64x per level of Valgrind nesting (On_valgrind
 * presumably comes from valgrind_internal.h -- TODO confirm).
 */
static uint64_t
helgrind_count(uint64_t x)
{
	/* Convert total number of ops to per-thread. */
	x /= (unsigned)nthreads;
	/*
	 * Reduce iteration count when running on foogrind, by a factor of 64.
	 * Multiple instances of foogrind cause exponential slowdown, so handle
	 * that as well (not that it's very useful for us...).
	 */
	return x >> (6 * On_valgrind);
}
/* 1024 random numbers, shared between threads. */
static uint64_t the1024[1024];
static struct critnib *c;
#define K 0xdeadbeefcafebabe
static void *
thread_read1(void *arg)
{
uint64_t niter = helgrind_count(NITER_FAST);
for (uint64_t count = 0; count < niter; count++)
UT_ASSERTeq(critnib_get(c, K), (void *)K);
return NULL;
}
static void *
thread_read1024(void *arg)
{
uint64_t niter = helgrind_count(NITER_FAST);
for (uint64_t count = 0; count < niter; count++) {
uint64_t v = the1024[count % ARRAY_SIZE(the1024)];
UT_ASSERTeq(critnib_get(c, v), (void *)v);
}
return NULL;
}
static void *
thread_write1024(void *arg)
{
rng_t rng;
randomize_r(&rng, (uintptr_t)arg);
uint64_t w1024[1024];
for (int i = 0; i < ARRAY_SIZE(w1024); i++)
w1024[i] = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);
uint64_t niter = helgrind_count(NITER_SLOW);
for (uint64_t count = 0; count < niter; count++) {
uint64_t v = w1024[count % ARRAY_SIZE(w1024)];
critnib_insert(c, v, (void *)v);
uint64_t r = (uint64_t)critnib_remove(c, v);
UT_ASSERTeq(v, r);
}
return NULL;
}
static void *
thread_read_write_remove(void *arg)
{
rng_t rng;
randomize_r(&rng, (uintptr_t)arg);
uint64_t niter = helgrind_count(NITER_SLOW);
for (uint64_t count = 0; count < niter; count++) {
uint64_t r, v = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);
critnib_insert(c, v, (void *)v);
r = (uint64_t)critnib_get(c, v);
UT_ASSERTeq(r, v);
r = (uint64_t)critnib_remove(c, v);
UT_ASSERTeq(r, v);
}
return NULL;
}
/*
* Reverse bits in a number: 1234 -> 4321 (swap _bit_ endianness).
*
* Doing this on successive numbers produces a van der Corput sequence,
* which covers the space nicely (relevant for <= tests).
*/
/*
 * revbits -- reverse the bit order of a 64-bit value (bit 0 <-> bit 63).
 *
 * Applied to successive integers this yields a van der Corput sequence,
 * which spreads values evenly across the key space (useful for the
 * find_le tests above).
 */
static uint64_t
revbits(uint64_t x)
{
	uint64_t out = 0;

	for (unsigned i = 0; i < 64; i++) {
		out <<= 1;		/* make room for the next bit */
		out |= x & 1;		/* append the current lowest bit */
		x >>= 1;
	}

	return out;
}
static void *
thread_le1(void *arg)
{
uint64_t niter = helgrind_count(NITER_MID);
for (uint64_t count = 0; count < niter; count++) {
uint64_t y = revbits(count);
if (y < K)
UT_ASSERTeq(critnib_find_le(c, y), NULL);
else
UT_ASSERTeq(critnib_find_le(c, y), (void *)K);
}
return NULL;
}
static void *
thread_le1024(void *arg)
{
uint64_t niter = helgrind_count(NITER_MID);
for (uint64_t count = 0; count < niter; count++) {
uint64_t y = revbits(count);
critnib_find_le(c, y);
}
return NULL;
}
typedef void *(*thread_func_t)(void *);
/*
* Before starting the threads, we add "fixed_preload" of static values
* (K and 1), or "random_preload" of random numbers. Can't have both.
*/
static void
test(int fixed_preload, int random_preload, thread_func_t rthread,
thread_func_t wthread)
{
c = critnib_new();
if (fixed_preload >= 1)
critnib_insert(c, K, (void *)K);
if (fixed_preload >= 2)
critnib_insert(c, 1, (void *)1);
for (int i = 0; i < random_preload; i++)
critnib_insert(c, the1024[i], (void *)the1024[i]);
os_thread_t th[MAXTHREADS], wr[MAXTHREADS];
int ntr = wthread ? nrthreads : nthreads;
int ntw = wthread ? nwthreads : 0;
for (int i = 0; i < ntr; i++)
THREAD_CREATE(&th[i], 0, rthread, (void *)(uint64_t)i);
for (int i = 0; i < ntw; i++)
THREAD_CREATE(&wr[i], 0, wthread, (void *)(uint64_t)i);
/* The threads work here... */
for (int i = 0; i < ntr; i++) {
void *retval;
THREAD_JOIN(&th[i], &retval);
}
for (int i = 0; i < ntw; i++) {
void *retval;
THREAD_JOIN(&wr[i], &retval);
}
critnib_delete(c);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_critnib_mt");
util_init();
randomize(1); /* use a fixed reproducible seed */
for (int i = 0; i < ARRAY_SIZE(the1024); i++)
the1024[i] = rnd64();
nthreads = sysconf(_SC_NPROCESSORS_ONLN);
if (nthreads > MAXTHREADS)
nthreads = MAXTHREADS;
if (!nthreads)
nthreads = 8;
nwthreads = nthreads / 2;
if (!nwthreads)
nwthreads = 1;
nrthreads = nthreads - nwthreads;
if (!nrthreads)
nrthreads = 1;
test(1, 0, thread_read1, thread_write1024);
test(0, 1024, thread_read1024, thread_write1024);
test(0, 0, thread_read_write_remove, NULL);
test(1, 0, thread_le1, NULL);
test(0, 1024, thread_le1024, NULL);
DONE(NULL);
}
| 5,467 | 20.527559 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_signal/win_signal.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* win_signal.c -- test signal related routines
*/
#include "unittest.h"
extern int sys_siglist_size;
int
main(int argc, char *argv[])
{
	int sig;
	START(argc, argv, "win_signal");
	/* print the description of every signal the os_strsignal table has */
	for (sig = 0; sig < sys_siglist_size; sig++) {
		UT_OUT("%d; %s", sig, os_strsignal(sig));
	}
	/*
	 * Also probe numbers past the table (33..65) to exercise the
	 * out-of-range path of os_strsignal -- presumably the real-time
	 * signal / "unknown signal" fallback (TODO confirm).
	 */
	for (sig = 33; sig < 66; sig++) {
		UT_OUT("%d; %s", sig, os_strsignal(sig));
	}
	DONE(NULL);
}
| 468 | 17.038462 | 47 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_arenas/obj_ctl_arenas.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_ctl_arenas.c -- tests for the ctl entry points
* usage:
* obj_ctl_arenas <file> n - test for heap.narenas.total
*
* obj_ctl_arenas <file> s - test for heap.arena.[idx].size
* and heap.thread.arena_id (RW)
*
* obj_ctl_arenas <file> c - test for heap.arena.create,
* heap.arena.[idx].automatic and heap.narenas.automatic
* obj_ctl_arenas <file> a - mt test for heap.arena.create
* and heap.thread.arena_id
*
* obj_ctl_arenas <file> f - test for POBJ_ARENA_ID flag,
*
* obj_ctl_arenas <file> q - test for POBJ_ARENA_ID with
* non-exists arena id
*
* obj_ctl_arenas <file> m - test for heap.narenas.max (RW)
*/
#include <sched.h>
#include "sys_util.h"
#include "unittest.h"
#include "util.h"
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define LAYOUT "obj_ctl_arenas"
#define CTL_QUERY_LEN 256
#define NTHREAD 2
#define NTHREAD_ARENA 32
#define NOBJECT_THREAD 64
#define ALLOC_CLASS_ARENA 2
#define NTHREADX 16
#define NARENAS 16
#define DEFAULT_ARENAS_MAX (1 << 10)
static os_mutex_t lock;
static os_cond_t cond;
static PMEMobjpool *pop;
static int nth;
static struct pobj_alloc_class_desc alloc_class[] = {
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 128,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 1024,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 111,
.units_per_block = CHUNKSIZE / 111,
.alignment = 0
},
};
struct arena_alloc {
unsigned arena;
PMEMoid oid;
};
static struct arena_alloc ref;
/*
 * check_arena_size -- verify that the size reported for arena `arena_id`
 * equals the chunk-aligned size of one block of allocation class `class_id`.
 */
static void
check_arena_size(unsigned arena_id, unsigned class_id)
{
	int ret;
	size_t arena_size;
	char arena_idx_size[CTL_QUERY_LEN];
	/* build the per-arena ctl query, e.g. "heap.arena.3.size" */
	SNPRINTF(arena_idx_size, CTL_QUERY_LEN,
		"heap.arena.%u.size", arena_id);
	ret = pmemobj_ctl_get(pop, arena_idx_size, &arena_size);
	UT_ASSERTeq(ret, 0);
	/* expected: one full block of the class, rounded up to CHUNKSIZE */
	size_t test = ALIGN_UP(alloc_class[class_id].unit_size *
		alloc_class[class_id].units_per_block, CHUNKSIZE);
	UT_ASSERTeq(test, arena_size);
}
static void
create_alloc_class(void)
{
int ret;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
&alloc_class[0]);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
&alloc_class[1]);
UT_ASSERTeq(ret, 0);
}
static void *
worker_arenas_size(void *arg)
{
int ret = -1;
int idx = (int)(intptr_t)arg;
int off_idx = idx + 128;
unsigned arena_id;
unsigned arena_id_new;
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id_new);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id_new >= 1);
ret = pmemobj_ctl_set(pop, "heap.thread.arena_id",
&arena_id_new);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, NULL, alloc_class[idx].unit_size, 0,
POBJ_CLASS_ID(off_idx), NULL, NULL);
UT_ASSERTeq(ret, 0);
/* we need to test 2 arenas so 2 threads are needed here */
util_mutex_lock(&lock);
nth++;
if (nth == NTHREAD)
os_cond_broadcast(&cond);
else
while (nth < NTHREAD)
os_cond_wait(&cond, &lock);
util_mutex_unlock(&lock);
ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arena_id_new, arena_id);
check_arena_size(arena_id, (unsigned)idx);
return NULL;
}
static void *
worker_arenas_flag(void *arg)
{
int ret;
unsigned arenas[NARENAS];
for (unsigned i = 0; i < NARENAS; ++i) {
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arenas[i]);
UT_ASSERTeq(ret, 0);
}
/*
* Tests POBJ_ARENA_ID with pmemobj_xalloc.
* All object are frees after pthread join.
*/
for (unsigned i = 0; i < 2; i++) {
ret = pmemobj_xalloc(pop,
NULL, alloc_class[i].unit_size, 0,
POBJ_CLASS_ID(i + 128) | \
POBJ_ARENA_ID(arenas[i]),
NULL, NULL);
UT_ASSERTeq(ret, 0);
check_arena_size(arenas[i], i);
}
/* test POBJ_ARENA_ID with pmemobj_xreserve */
struct pobj_action act;
PMEMoid oid = pmemobj_xreserve(pop, &act,
alloc_class[0].unit_size, 1,
POBJ_CLASS_ID(128) |
POBJ_ARENA_ID(arenas[2]));
pmemobj_publish(pop, &act, 1);
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
/* test POBJ_ARENA_ID with pmemobj_tx_xalloc */
TX_BEGIN(pop) {
pmemobj_tx_xalloc(alloc_class[1].unit_size, 0,
POBJ_CLASS_ID(129) | POBJ_ARENA_ID(arenas[3]));
} TX_END
check_arena_size(arenas[3], 1);
return NULL;
}
/*
 * worker_arena_threads -- worker thread: bind itself to the arena chosen by
 * the main thread (ref->arena), allocate NOBJECT_THREAD objects from the
 * dedicated allocation class, verify each lands in the same block as the
 * reference object, then free everything.
 */
static void *
worker_arena_threads(void *arg)
{
	int ret = -1;
	struct arena_alloc *ref = (struct arena_alloc *)arg;
	unsigned arena_id;

	ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(arena_id != 0);

	/* redirect all allocations made by this thread to the ref arena */
	ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
	UT_ASSERTeq(ret, 0);

	PMEMoid oid[NOBJECT_THREAD];
	for (int i = 0; i < NOBJECT_THREAD; i++) {
		ret = pmemobj_xalloc(pop, &oid[i],
			alloc_class[ALLOC_CLASS_ARENA].unit_size,
			0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128),
			NULL, NULL);
		UT_ASSERTeq(ret, 0);

		/*
		 * Offset distance from the reference object; labs() never
		 * returns a negative value, so the cast to size_t is safe
		 * (fixes the original's implicit long->unsigned truncation).
		 */
		size_t d = (size_t)labs((long)ref->oid.off -
			(long)oid[i].off);

		/* objects are in the same block as the first one */
		UT_ASSERT(d <= alloc_class[ALLOC_CLASS_ARENA].unit_size *
			(alloc_class[ALLOC_CLASS_ARENA].units_per_block - 1));
	}

	for (int i = 0; i < NOBJECT_THREAD; i++)
		pmemobj_free(&oid[i]);

	return NULL;
}
static void
worker_arena_ref_obj(struct arena_alloc *ref)
{
int ret = -1;
ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &ref->oid,
alloc_class[ALLOC_CLASS_ARENA].unit_size,
0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128), NULL, NULL);
UT_ASSERTeq(ret, 0);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_arenas");
if (argc != 3)
UT_FATAL("usage: %s poolset [n|s|c|f|q|m|a]", argv[0]);
const char *path = argv[1];
char t = argv[2][0];
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
int ret = 0;
if (t == 'n') {
unsigned narenas = 0;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(narenas, 0);
} else if (t == 's') {
os_thread_t threads[NTHREAD];
util_mutex_init(&lock);
util_cond_init(&cond);
create_alloc_class();
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker_arenas_size,
(void *)(intptr_t)i);
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
} else if (t == 'c') {
char arena_idx_auto[CTL_QUERY_LEN];
unsigned narenas_b = 0;
unsigned narenas_a = 0;
unsigned narenas_n = 4;
unsigned arena_id;
unsigned all_auto;
int automatic;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_b);
UT_ASSERTeq(ret, 0);
/* all arenas created at the start should be set to auto */
for (unsigned i = 1; i <= narenas_b; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic", &all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b, all_auto);
/* all arenas created by user should not be auto */
for (unsigned i = 1; i <= narenas_n; i++) {
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arena_id, narenas_b + i);
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", arena_id);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(automatic, 0);
/*
* after creation, number of auto
* arenas should be the same
*/
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i - 1, all_auto);
/* change the state of created arena to auto */
int activate = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&activate);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
/* number of auto arenas should increase */
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i, all_auto);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_a);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + narenas_n, narenas_a);
/* at least one automatic arena must exist */
for (unsigned i = 1; i <= narenas_a; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
automatic = 0;
if (i < narenas_a) {
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
} else {
/*
* last auto arena -
* cannot change the state to 0...
*/
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, -1);
/* ...but can change (overwrite) to 1 */
automatic = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
}
}
} else if (t == 'a') {
int ret;
unsigned arena_id_new;
char alloc_class_idx_desc[CTL_QUERY_LEN];
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id_new);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id_new >= 1);
SNPRINTF(alloc_class_idx_desc, CTL_QUERY_LEN,
"heap.alloc_class.%d.desc",
ALLOC_CLASS_ARENA + 128);
ret = pmemobj_ctl_set(pop, alloc_class_idx_desc,
&alloc_class[ALLOC_CLASS_ARENA]);
UT_ASSERTeq(ret, 0);
ref.arena = arena_id_new;
worker_arena_ref_obj(&ref);
os_thread_t threads[NTHREAD_ARENA];
for (int i = 0; i < NTHREAD_ARENA; i++) {
THREAD_CREATE(&threads[i], NULL, worker_arena_threads,
&ref);
}
for (int i = 0; i < NTHREAD_ARENA; i++)
THREAD_JOIN(&threads[i], NULL);
} else if (t == 'f') {
os_thread_t threads[NTHREADX];
create_alloc_class();
for (int i = 0; i < NTHREADX; i++)
THREAD_CREATE(&threads[i], NULL,
worker_arenas_flag, NULL);
for (int i = 0; i < NTHREADX; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
} else if (t == 'q') {
unsigned total;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &total);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, NULL, alloc_class[0].unit_size, 0,
POBJ_ARENA_ID(total), NULL, NULL);
UT_ASSERTne(ret, 0);
} else if (t == 'm') {
unsigned max;
unsigned new_max;
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should not decrease */
new_max = DEFAULT_ARENAS_MAX - 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTne(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should increase */
new_max = DEFAULT_ARENAS_MAX + 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX + 1, max);
} else {
UT_ASSERT(0);
}
pmemobj_close(pop);
DONE(NULL);
}
| 11,314 | 23.651416 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_extent/util_extent.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_extent.c -- unit test for the linux fs extent query API
*
*/
#include "unittest.h"
#include "extent.h"
#include "libpmem2.h"
/*
* test_size -- test if sum of all file's extents sums up to the file's size
*/
static void
test_size(int fd, size_t size)
{
	size_t total_length = 0;
	struct extents *exts = NULL;
	/* ask the filesystem for the file's extent list */
	UT_ASSERTeq(pmem2_extents_create_get(fd, &exts), 0);
	UT_ASSERT(exts->extents_count > 0);
	UT_OUT("exts->extents_count: %u", exts->extents_count);
	unsigned e;
	/* sum the length of every extent */
	for (e = 0; e < exts->extents_count; e++)
		total_length += exts->extents[e].length;
	pmem2_extents_destroy(&exts);
	/* the extents must cover the file exactly */
	UT_ASSERTeq(total_length, size);
}
/*
 * main -- parse "<file> <file-size>" and verify that the file's extents
 * sum up exactly to the given size.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_extent");
	if (argc != 3)
		UT_FATAL("usage: %s file file-size", argv[0]);
	const char *file = argv[1];
	/*
	 * strtoll (unlike the original atoi) lets us reject malformed,
	 * trailing-garbage and non-positive size arguments explicitly.
	 */
	char *end;
	long long isize = strtoll(argv[2], &end, 10);
	if (end == argv[2] || *end != '\0' || isize <= 0)
		UT_FATAL("invalid file-size: %s", argv[2]);
	size_t size = (size_t)isize;
	int fd = OPEN(file, O_RDONLY);
	test_size(fd, size);
	/* use the framework's checked wrapper, consistent with OPEN above */
	CLOSE(fd);
	DONE(NULL);
}
| 1,070 | 17.789474 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_poolset_unmap/win_poolset_unmap.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_poolset_unmap.c -- test for windows mmap destructor.
*
* It checks whether all mappings are properly unmpapped and memory is properly
* unreserved when auto growing pool is used.
*/
#include "unittest.h"
#include "os.h"
#include "libpmemobj.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
#define LAYOUT_NAME "poolset_unmap"
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_poolset_unmap");
	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	MEMORY_BASIC_INFORMATION basic_info;
	SIZE_T bytes_returned;
	SIZE_T offset = 0;
	/* query the VM region that starts at the pool's base address */
	bytes_returned = VirtualQuery(pop, &basic_info,
		sizeof(basic_info));
	/*
	 * When opening pool, we try to remove all permissions on header.
	 * If this action fails VirtualQuery will return one region with
	 * size 8MB. If it succeeds, RegionSize will be equal to 4KB due
	 * to different header and rest of the mapping permissions.
	 */
	if (basic_info.RegionSize == 4 * KILOBYTE) {
		/* header */
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
		offset += basic_info.RegionSize;
		/* first part */
		bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
			sizeof(basic_info));
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE - 4 * KILOBYTE);
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	} else {
		/* first part with header */
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE);
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	}
	offset += basic_info.RegionSize;
	/* reservation after first part */
	bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
		sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/*
	 * The remainder of the pool address space must still be reserved,
	 * not committed -- the 50MB / 8MB figures presumably match the
	 * auto-growing poolset configuration used by the test script
	 * (TODO confirm).
	 */
	UT_ASSERTeq(basic_info.RegionSize, (50 - 8) * MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_RESERVE);
	DONE(NULL);
}
| 2,117 | 25.810127 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/remote_basic/remote_basic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* remote_basic.c -- unit test for remote tests support
*
* usage: remote_basic <file-to-be-checked>
*/
#include "file.h"
#include "unittest.h"
/*
 * main -- check that the given file exists on the (remote) node and
 * exercise both output channels of the test framework.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "remote_basic");
	if (argc != 2)
		UT_FATAL("usage: %s <file-to-be-checked>", argv[0]);
	const char *file = argv[1];
	/* negative return indicates an error, 0/non-zero absent/present */
	int exists = util_file_exists(file);
	if (exists < 0)
		UT_FATAL("!util_file_exists");
	if (!exists)
		UT_FATAL("File '%s' does not exist", file);
	else
		UT_OUT("File '%s' exists", file);
	UT_OUT("An example of OUT message");
	UT_ERR("An example of ERR message");
	DONE(NULL);
}
| 698 | 17.394737 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_compat/pmem2_compat.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem2_compat.c -- compatibility test for libpmem vs libpmem2
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
	/*
	 * Purely compile-time test: UT_COMPILE_ERROR_ON breaks the build if
	 * any libpmem PMEM_F_MEM_* flag value diverges from its libpmem2
	 * PMEM2_F_MEM_* counterpart; there is nothing to do at run time.
	 */
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NODRAIN != PMEM2_F_MEM_NODRAIN);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NONTEMPORAL != PMEM2_F_MEM_NONTEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_TEMPORAL != PMEM2_F_MEM_TEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WC != PMEM2_F_MEM_WC);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WB != PMEM2_F_MEM_WB);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NOFLUSH != PMEM2_F_MEM_NOFLUSH);
	return 0;
}
| 606 | 26.590909 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_badblock/obj_badblock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_badblock.c -- Badblock tests on obj pool
*
*/
#include <stddef.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "obj_badblock"
#define TEST_EXTEND_COUNT 32
#define EXTEND_SIZE (1024 * 1024 * 10)
/*
 * do_create_and_extend -- create the obj pool at `path`, grow its heap
 * TEST_EXTEND_COUNT times by EXTEND_SIZE via the heap.size.extend ctl,
 * then close it and verify consistency with pmemobj_check.
 */
static void
do_create_and_extend(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
		0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	ssize_t extend_size = EXTEND_SIZE;
	for (int i = 0; i < TEST_EXTEND_COUNT; ++i) {
		int ret = pmemobj_ctl_exec(pop, "heap.size.extend",
			&extend_size);
		UT_ASSERTeq(ret, 0);
	}
	pmemobj_close(pop);
	/* pmemobj_check returns 1 for a consistent pool */
	UT_ASSERT(pmemobj_check(path, LAYOUT_NAME) == 1);
}
/*
 * do_open -- open the pool at `path` to confirm it is usable, then close it
 */
static void
do_open(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, LAYOUT_NAME);
	UT_ASSERT(pop != NULL);
	pmemobj_close(pop);
}
/*
 * main -- parse "<file> op..." and run the requested operations in order:
 * 'c' creates the pool and extends it, 'o' opens an existing pool.
 */
int main(int argc, char **argv) {
	START(argc, argv, "obj_badblock");
	if (argc < 3)
		UT_FATAL("usage: %s file-name, o|c", argv[0]);
	const char *path = argv[1];
	for (int arg = 2; arg < argc; arg++) {
		/* each op argument must be exactly one character */
		if (argv[arg][1] != '\0')
			UT_FATAL(
				"op must be c or o (c=create, o=open)");
		switch (argv[arg][0]) {
		case 'c':
			do_create_and_extend(path);
			break;
		case 'o':
			do_open(path);
			/*
			 * BUG FIX: the original was missing this break, so
			 * 'o' fell through to default and always aborted.
			 */
			break;
		default:
			UT_FATAL(
				"op must be c or o (c=create, o=open)");
			break;
		}
	}
	DONE(NULL);
}
| 1,416 | 18.680556 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_strdup/obj_tx_strdup.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_strdup.c -- unit test for pmemobj_tx_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#define LAYOUT_NAME "tx_strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
enum type_number {
TYPE_NO_TX,
TYPE_WCS_NO_TX,
TYPE_COMMIT,
TYPE_WCS_COMMIT,
TYPE_ABORT,
TYPE_WCS_ABORT,
TYPE_FREE_COMMIT,
TYPE_WCS_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_WCS_FREE_ABORT,
TYPE_COMMIT_NESTED1,
TYPE_WCS_COMMIT_NESTED1,
TYPE_COMMIT_NESTED2,
TYPE_WCS_COMMIT_NESTED2,
TYPE_ABORT_NESTED1,
TYPE_WCS_ABORT_NESTED1,
TYPE_ABORT_NESTED2,
TYPE_WCS_ABORT_NESTED2,
TYPE_ABORT_AFTER_NESTED1,
TYPE_WCS_ABORT_AFTER_NESTED1,
TYPE_ABORT_AFTER_NESTED2,
TYPE_WCS_ABORT_AFTER_NESTED2,
TYPE_NOFLUSH,
TYPE_WCS_NOFLUSH,
};
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define MAX_FUNC 2
typedef void (*fn_tx_strdup)(TOID(char) *str, const char *s,
unsigned type_num);
typedef void (*fn_tx_wcsdup)(TOID(wchar_t) *wcs, const wchar_t *s,
unsigned type_num);
static unsigned counter;
/*
* tx_strdup -- duplicate a string using pmemobj_tx_strdup
*/
static void
tx_strdup(TOID(char) *str, const char *s, unsigned type_num)
{
TOID_ASSIGN(*str, pmemobj_tx_strdup(s, type_num));
}
/*
* tx_wcsdup -- duplicate a string using pmemobj_tx_wcsdup
*/
static void
tx_wcsdup(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
TOID_ASSIGN(*wcs, pmemobj_tx_wcsdup(s, type_num));
}
/*
* tx_strdup_macro -- duplicate a string using macro
*/
static void
tx_strdup_macro(TOID(char) *str, const char *s, unsigned type_num)
{
TOID_ASSIGN(*str, TX_STRDUP(s, type_num));
}
/*
* tx_wcsdup_macro -- duplicate a wide character string using macro
*/
static void
tx_wcsdup_macro(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
TOID_ASSIGN(*wcs, TX_WCSDUP(s, type_num));
}
static fn_tx_strdup do_tx_strdup[MAX_FUNC] = {tx_strdup, tx_strdup_macro};
static fn_tx_wcsdup do_tx_wcsdup[MAX_FUNC] = {tx_wcsdup, tx_wcsdup_macro};
/*
* do_tx_strdup_commit -- duplicate a string and commit the transaction
*/
static void
do_tx_strdup_commit(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_COMMIT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_COMMIT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT));
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs)), 0);
}
/*
* do_tx_strdup_abort -- duplicate a string and abort the transaction
*/
static void
do_tx_strdup_abort(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_ABORT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_ABORT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_tx_strdup_null -- duplicate a NULL string to trigger tx abort
*/
static void
do_tx_strdup_null(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, NULL, TYPE_ABORT);
do_tx_wcsdup[counter](&wcs, NULL, TYPE_WCS_ABORT);
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
TX_BEGIN(pop) {
pmemobj_tx_xstrdup(NULL, TYPE_ABORT, POBJ_XALLOC_NO_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_strdup(NULL, TYPE_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_xstrdup(NULL, TYPE_ABORT, 0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_strdup_free_commit -- duplicate a string, free and commit the
* transaction
*/
static void
do_tx_strdup_free_commit(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_COMMIT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_COMMIT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
int ret = pmemobj_tx_free(str.oid);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_free(wcs.oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_COMMIT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_tx_strdup_free_abort -- duplicate a string, free and abort the
* transaction
*/
static void
do_tx_strdup_free_abort(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_ABORT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_ABORT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
int ret = pmemobj_tx_free(str.oid);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_free(wcs.oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_tx_strdup_commit_nested -- duplicate two string suing nested
* transaction and commit the transaction
*/
static void
do_tx_strdup_commit_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_COMMIT_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_COMMIT_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_COMMIT_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_COMMIT_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str1)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs1)), 0);
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
UT_ASSERTeq(strcmp(TEST_STR_2, D_RO(str2)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_2, D_RO(wcs2)), 0);
}
/*
* do_tx_strdup_commit_abort -- duplicate two string suing nested
* transaction and abort the transaction
*/
static void
do_tx_strdup_abort_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_ABORT_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_ABORT_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_ABORT_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_ABORT_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED1));
UT_ASSERT(TOID_IS_NULL(str1));
UT_ASSERT(TOID_IS_NULL(wcs1));
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED2));
UT_ASSERT(TOID_IS_NULL(str2));
UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
* do_tx_strdup_commit_abort -- duplicate two string suing nested
* transaction and abort after the nested transaction
*/
static void
do_tx_strdup_abort_after_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1,
TYPE_ABORT_AFTER_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_ABORT_AFTER_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_ABORT_AFTER_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_ABORT_AFTER_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop,
TYPE_WCS_ABORT_AFTER_NESTED1));
UT_ASSERT(TOID_IS_NULL(str1));
UT_ASSERT(TOID_IS_NULL(wcs1));
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop,
TYPE_WCS_ABORT_AFTER_NESTED2));
UT_ASSERT(TOID_IS_NULL(str2));
UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_noflush -- duplicate a string and a wide string with the
 * POBJ_XALLOC_NO_FLUSH flag; the commit must succeed with errno untouched
 */
static void
do_tx_strdup_noflush(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		errno = 0;
		pmemobj_tx_xstrdup(TEST_STR_1, TYPE_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
		pmemobj_tx_xwcsdup(TEST_WCS_1, TYPE_WCS_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
	} TX_ONCOMMIT {
		/* neither duplication may have set errno */
		UT_ASSERTeq(errno, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * main -- create the test pool and run every strdup/wcsdup scenario once
 * per duplication variant (counter indexes the do_tx_strdup/do_tx_wcsdup
 * function arrays)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_strdup");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	/* repeat the whole suite for each duplication function variant */
	for (counter = 0; counter < MAX_FUNC; counter++) {
		do_tx_strdup_commit(pop);
		do_tx_strdup_abort(pop);
		do_tx_strdup_null(pop);
		do_tx_strdup_free_commit(pop);
		do_tx_strdup_free_abort(pop);
		do_tx_strdup_commit_nested(pop);
		do_tx_strdup_abort_nested(pop);
		do_tx_strdup_abort_after_nested(pop);
	}
	do_tx_strdup_noflush(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 11,087 | 24.315068 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_realloc/obj_tx_realloc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_tx_realloc.c -- unit test for pmemobj_tx_realloc and pmemobj_tx_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#define LAYOUT_NAME "tx_realloc"
#define TEST_VALUE_1 1
#define OBJ_SIZE 1024
/* type numbers tagging the allocations made by the individual tests */
enum type_number {
	TYPE_NO_TX,
	TYPE_COMMIT,
	TYPE_ABORT,
	TYPE_TYPE,
	TYPE_COMMIT_ZERO,
	TYPE_COMMIT_ZERO_MACRO,
	TYPE_ABORT_ZERO,
	TYPE_ABORT_ZERO_MACRO,
	TYPE_COMMIT_ALLOC,
	TYPE_ABORT_ALLOC,
	TYPE_ABORT_HUGE,
	TYPE_ABORT_ZERO_HUGE,
	TYPE_ABORT_ZERO_HUGE_MACRO,
	TYPE_FREE,
};
/* test object padded so the whole struct is exactly OBJ_SIZE bytes */
struct object {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object, 0);
/* identical layout, declared under the type number the macro tests use */
struct object_macro {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object_macro, TYPE_COMMIT_ZERO_MACRO);
/*
 * do_tx_alloc -- do tx allocation with specified type number, storing
 * "value" in the new object; returns the object's OID (OID_NULL on
 * allocation failure)
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num, size_t value)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, OID_NULL);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_alloc(
				sizeof(struct object), type_num));
		if (!TOID_IS_NULL(obj)) {
			D_RW(obj)->value = value;
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	return obj.oid;
}
/*
 * do_tx_realloc_commit -- reallocate an object to twice its usable size
 * and commit the transaction; the grown object must persist
 */
static void
do_tx_realloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* the single surviving object keeps its value and the new size */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_abort -- reallocate an object and ABORT the transaction;
 * the object must keep its original (smaller) size and value
 */
static void
do_tx_realloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* after the abort the object is back at its pre-realloc size */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_huge -- reallocate an object to a huge size to trigger tx
 * abort; the original object must remain untouched
 */
static void
do_tx_realloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_HUGE, TEST_VALUE_1));
	size_t new_size = PMEMOBJ_MAX_ALLOC_SIZE + 1;
	TX_BEGIN(pop) {
		/* oversized realloc aborts the transaction internally */
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit_macro -- reallocate an object via the TX_ZREALLOC
 * macro, check the grown tail is zeroed, and commit the transaction
 */
static void
do_tx_zrealloc_commit_macro(PMEMobjpool *pop)
{
	TOID(struct object_macro) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* bytes beyond the old size must read as zero */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit -- reallocate an object with pmemobj_tx_zrealloc,
 * check the grown tail is zeroed, and commit the transaction
 */
static void
do_tx_zrealloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_COMMIT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* bytes beyond the old size must read as zero */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort_macro -- reallocate an object with the TX_ZREALLOC
 * macro, then ABORT the transaction; the object must revert to its
 * original size
 */
static void
do_tx_zrealloc_abort_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_MACRO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* abort rolled back the realloc -- size is smaller than new_size */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort -- reallocate an object with pmemobj_tx_zrealloc
 * and ABORT the transaction; the object must revert to its original size
 */
static void
do_tx_zrealloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_ABORT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* abort rolled back the realloc -- size is smaller than new_size */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge_macro -- zero-reallocate an object to a huge size
 * via TX_ZREALLOC to trigger tx abort; the original object must survive
 */
static void
do_tx_zrealloc_huge_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		/* oversized zrealloc aborts the transaction internally */
		obj = TX_ZREALLOC(obj, PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge -- zero-reallocate an object to a huge size to
 * trigger tx abort; the original object must survive
 */
static void
do_tx_zrealloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		/* oversized zrealloc aborts the transaction internally */
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT_ZERO_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_commit -- allocate and reallocate an object within
 * the SAME transaction, then commit; the grown object must persist
 */
static void
do_tx_realloc_alloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ALLOC));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_abort -- allocate and reallocate an object within
 * the SAME transaction, then abort; nothing of this type may survive
 */
static void
do_tx_realloc_alloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* both the alloc and the realloc were rolled back */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ALLOC));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_root_realloc -- create and then grow the pool's root object inside
 * a transaction; each resize must return a zeroed object of the new size
 */
static void
do_tx_root_realloc(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		PMEMoid root = pmemobj_root(pop, sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				sizeof(struct object)));
		UT_ASSERTeq(sizeof(struct object), pmemobj_root_size(pop));
		/* growing the root must also produce zeroed memory */
		root = pmemobj_root(pop, 2 * sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				2 * sizeof(struct object)));
		UT_ASSERTeq(2 * sizeof(struct object), pmemobj_root_size(pop));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_realloc_free -- reallocate an object to size 0 (which frees it)
 * and commit the transaction; the object must be gone afterwards
 */
static void
do_tx_realloc_free(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_FREE, TEST_VALUE_1));
	TX_BEGIN(pop) {
		/* realloc to size 0 acts as a free */
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				0, TYPE_COMMIT));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * main -- create the test pool (size 0 means "use the file size") and run
 * every realloc/zrealloc scenario once
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_realloc");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_root_realloc(pop);
	do_tx_realloc_commit(pop);
	do_tx_realloc_abort(pop);
	do_tx_realloc_huge(pop);
	do_tx_zrealloc_commit(pop);
	do_tx_zrealloc_commit_macro(pop);
	do_tx_zrealloc_abort(pop);
	do_tx_zrealloc_abort_macro(pop);
	do_tx_zrealloc_huge(pop);
	do_tx_zrealloc_huge_macro(pop);
	do_tx_realloc_alloc_commit(pop);
	do_tx_realloc_alloc_abort(pop);
	do_tx_realloc_free(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 12,874 | 25.767152 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/log_pool/log_pool.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* log_pool.c -- unit test for pmemlog_create() and pmemlog_open()
*
* usage: log_pool op path [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
/*
 * pool_create -- create a log pool, report its geometry, then close it
 * and run pmemlog_check() to verify consistency
 */
static void
pool_create(const char *path, size_t poolsize, unsigned mode)
{
	PMEMlogpool *plp = pmemlog_create(path, poolsize, mode);
	if (plp == NULL) {
		UT_OUT("!%s: pmemlog_create", path);
		return;
	}

	os_stat_t stbuf;
	STAT(path, &stbuf);
	UT_OUT("%s: file size %zu usable space %zu mode 0%o",
			path, stbuf.st_size,
			pmemlog_nbyte(plp),
			stbuf.st_mode & 0777);
	pmemlog_close(plp);

	int rc = pmemlog_check(path);
	if (rc < 0)
		UT_OUT("!%s: pmemlog_check", path);
	else if (rc == 0)
		UT_OUT("%s: pmemlog_check: not consistent", path);
}
/*
 * pool_open -- open an existing log pool and report success or failure
 */
static void
pool_open(const char *path)
{
	PMEMlogpool *plp = pmemlog_open(path);
	if (plp == NULL) {
		UT_OUT("!%s: pmemlog_open", path);
		return;
	}
	UT_OUT("%s: pmemlog_open: Success", path);
	pmemlog_close(plp);
}
/*
 * main -- dispatch on op: 'c' creates a pool (needs poolsize and mode),
 * 'o' opens an existing pool
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "log_pool");
	if (argc < 3)
		UT_FATAL("usage: %s op path [poolsize mode]", argv[0]);
	size_t poolsize;
	unsigned mode;
	switch (argv[1][0]) {
	case 'c':
		/*
		 * create reads two extra arguments -- validate argc first
		 * (previously argv[3]/argv[4] were read unconditionally,
		 * an out-of-bounds access when they were not supplied)
		 */
		if (argc < 5)
			UT_FATAL("usage: %s op path [poolsize mode]", argv[0]);
		poolsize = strtoul(argv[3], NULL, 0) * MB; /* in megabytes */
		mode = strtoul(argv[4], NULL, 8);
		pool_create(argv[2], poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2]);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONE(NULL);
}
| 1,629 | 17.735632 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_lists_atomic_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_lists_atomic_include.c -- include test for libpmemobj
*/
#include <libpmemobj/lists_atomic.h>
| 188 | 20 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_lists_atomic_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_lists_atomic_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/lists_atomic_base.h>
| 198 | 21.111111 | 65 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_tx_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_tx_include.c -- include test for libpmemobj
*/
#include <libpmemobj/tx.h>
| 168 | 17.777778 | 50 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/base.h>
/*
 * main -- intentionally empty; this test only verifies that including
 * the header compiles and links
 */
int
main(int argc, char *argv[])
{
	(void) argc;
	(void) argv;
	return 0;
}
| 221 | 13.8 | 52 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_atomic_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_atomic_include.c -- include test for libpmemobj
*/
#include <libpmemobj/atomic.h>
| 176 | 18.666667 | 54 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_iterator_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_iterator_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/iterator_base.h>
| 190 | 20.222222 | 61 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_thread_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_thread_include.c -- include test for libpmemobj
*/
#include <libpmemobj/thread.h>
| 176 | 18.666667 | 54 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_iterator_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_iterator_include.c -- include test for libpmemobj
*/
#include <libpmemobj/iterator.h>
| 180 | 19.111111 | 56 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_types_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_types_include.c -- include test for libpmemobj
*/
#include <libpmemobj/types.h>
| 174 | 18.444444 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_tx_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_tx_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/tx_base.h>
| 178 | 18.888889 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_pool_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_pool_include.c -- include test for libpmemobj
*/
#include <libpmemobj/pool.h>
| 172 | 18.222222 | 52 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_pool_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_pool_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/pool_base.h>
| 182 | 19.333333 | 57 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_include/obj_atomic_base_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* obj_atomic_base_include.c -- include test for libpmemobj
*/
#include <libpmemobj/atomic_base.h>
| 186 | 19.777778 | 59 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_rm_win/libpmempool_rm_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* libpmempool_rm_win -- a unittest for pmempool_rm.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "unittest.h"
#define FATAL_USAGE(n) UT_FATAL("usage: %s [-f -l -r] path..", (n))
static PMEMobjpool *Pop;
/*
 * wmain -- parse leading option flags (-f force, -r remote, -l local,
 * -o open/close around removal), then remove each pool argument with
 * pmempool_rmW()
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "libpmempool_rm_win");
	if (argc < 2)
		FATAL_USAGE(ut_toUTF8(argv[0]));
	unsigned flags = 0;
	int do_open = 0;
	int i = 1;
	for (; i < argc - 1; i++) {
		if (wcscmp(L"-f", argv[i]) == 0)
			flags |= PMEMPOOL_RM_FORCE;
		else if (wcscmp(L"-r", argv[i]) == 0)
			flags |= PMEMPOOL_RM_POOLSET_REMOTE;
		else if (wcscmp(L"-l", argv[i]) == 0)
			flags |= PMEMPOOL_RM_POOLSET_LOCAL;
		else if (wcscmp(L"-o", argv[i]) == 0)
			do_open = 1;
		else if (wcschr(argv[i], L'-') == argv[i])
			/* convert to UTF-8 -- the %s format expects char * */
			FATAL_USAGE(ut_toUTF8(argv[0]));
		else
			break;
	}
	for (; i < argc; i++) {
		const wchar_t *path = argv[i];
		if (do_open) {
			Pop = pmemobj_openW(path, NULL);
			UT_ASSERTne(Pop, NULL);
		}
		int ret = pmempool_rmW(path, flags);
		if (ret) {
			UT_OUT("!%s: %s", ut_toUTF8(path),
					pmempool_errormsgU());
		}
		if (do_open) {
			UT_ASSERTne(Pop, NULL);
			pmemobj_close(Pop);
		}
	}
	DONEW(NULL);
}
| 1,341 | 19.333333 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_rw/blk_rw.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* blk_rw.c -- unit test for pmemblk_read/write/set_zero/set_error
*
* usage: blk_rw bsize file func operation:lba...
*
* func is 'c' or 'o' (create or open)
* operations are 'r' or 'w' or 'z' or 'e'
*
*/
#include "unittest.h"
/* block size for the whole run, parsed from argv[1] in main() */
static size_t Bsize;

/*
 * construct -- fill the buffer with the next ordinal value (1..255,
 * wrapping), so ident() can later detect torn blocks
 */
static void
construct(unsigned char *buf)
{
	static int ord = 1;

	/* size_t index avoids the signed/unsigned comparison with Bsize */
	for (size_t i = 0; i < Bsize; i++)
		buf[i] = (unsigned char)ord;

	ord++;

	if (ord > 255)
		ord = 1;
}
/*
 * ident -- identify what a buffer holds: "{v}" when all Bsize bytes equal
 * the first byte v, or a "TORN" diagnostic naming the first mismatch
 * (returns a pointer to a static buffer -- not reentrant)
 */
static char *
ident(unsigned char *buf)
{
	static char descr[100];
	unsigned val = *buf;

	/* size_t index matches Bsize; snprintf bounds the static buffer */
	for (size_t i = 1; i < Bsize; i++)
		if (buf[i] != val) {
			snprintf(descr, sizeof(descr),
					"{%u} TORN at byte %zu", val, i);
			return descr;
		}
	snprintf(descr, sizeof(descr), "{%u}", val);
	return descr;
}
/*
 * main -- create or open the block pool, execute the "op:lba" operations
 * (r = read, w = write, z = set_zero, e = set_error), then verify the
 * pool with pmemblk_check()
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_rw");
	if (argc < 5)
		UT_FATAL("usage: %s bsize file func op:lba...", argv[0]);
	Bsize = strtoul(argv[1], NULL, 0);
	const char *path = argv[2];
	PMEMblkpool *handle = NULL;
	switch (*argv[3]) {
	case 'c':
		handle = pmemblk_create(path, Bsize, 0,
					S_IWUSR | S_IRUSR);
		if (handle == NULL)
			UT_FATAL("!%s: pmemblk_create", path);
		break;
	case 'o':
		handle = pmemblk_open(path, Bsize);
		if (handle == NULL)
			UT_FATAL("!%s: pmemblk_open", path);
		break;
	default:
		/* previously fell through here with "handle" uninitialized */
		UT_FATAL("func must be c or o");
	}
	UT_OUT("%s block size %zu usable blocks %zu",
			argv[1], Bsize, pmemblk_nblock(handle));
	unsigned char *buf = MALLOC(Bsize);
	if (buf == NULL)
		UT_FATAL("cannot allocate buf");
	/* map each file argument with the given map type */
	for (int arg = 4; arg < argc; arg++) {
		if (strchr("rwze", argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be r: or w: or z: or e:");
		os_off_t lba = strtol(&argv[arg][2], NULL, 0);
		switch (argv[arg][0]) {
		case 'r':
			if (pmemblk_read(handle, buf, lba) < 0)
				UT_OUT("!read lba %jd", lba);
			else
				UT_OUT("read lba %jd: %s", lba,
						ident(buf));
			break;
		case 'w':
			construct(buf);
			if (pmemblk_write(handle, buf, lba) < 0)
				UT_OUT("!write lba %jd", lba);
			else
				UT_OUT("write lba %jd: %s", lba,
						ident(buf));
			break;
		case 'z':
			if (pmemblk_set_zero(handle, lba) < 0)
				UT_OUT("!set_zero lba %jd", lba);
			else
				UT_OUT("set_zero lba %jd", lba);
			break;
		case 'e':
			if (pmemblk_set_error(handle, lba) < 0)
				UT_OUT("!set_error lba %jd", lba);
			else
				UT_OUT("set_error lba %jd", lba);
			break;
		}
	}
	FREE(buf);
	pmemblk_close(handle);
	int result = pmemblk_check(path, Bsize);
	if (result < 0)
		UT_OUT("!%s: pmemblk_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemblk_check: not consistent", path);
	DONE(NULL);
}
| 2,773 | 18.956835 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_lock/obj_tx_lock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_tx_lock.c -- unit test for pmemobj_tx_lock()
*/
#include "unittest.h"
#include "libpmemobj.h"
#include "obj.h"
#define LAYOUT_NAME "obj_tx_lock"
#define NUM_LOCKS 2

/* the persistent locks operated on by every test in this file */
struct transaction_data {
	PMEMmutex mutexes[NUM_LOCKS];
	PMEMrwlock rwlocks[NUM_LOCKS];
};

static PMEMobjpool *Pop;

/* acquire all locks through the transaction (released at tx end) */
#define DO_LOCK(mtx, rwlock)\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[0]);\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[1]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[0]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[1])

/* verify all locks are free by try-acquiring and releasing each one */
#define IS_UNLOCKED(pop, mtx, rwlock)\
	ret = 0;\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERTeq(ret, 0);\
	pmemobj_mutex_unlock((pop), &(mtx)[0]);\
	pmemobj_mutex_unlock((pop), &(mtx)[1]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[0]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[1])

/* verify all locks are currently held (every trylock must fail) */
#define IS_LOCKED(pop, mtx, rwlock)\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERT(ret != 0)
/*
 * do_tx_add_locks -- (internal) transaction where locks are added after
 * transaction begins; they must be held inside and released at tx end
 */
static void *
do_tx_add_locks(struct transaction_data *data)
{
	int ret;
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT { /* not called */
		UT_ASSERT(0);
	} TX_END
	/* committing the transaction releases every acquired lock */
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested -- (internal) transaction where locks
 * are added after nested transaction begins; they stay held until the
 * OUTERMOST transaction ends
 */
static void *
do_tx_add_locks_nested(struct transaction_data *data)
{
	int ret;
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		/* still held -- nested commit does not release them */
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested_all -- (internal) transaction where all locks
 * are added in both transactions after transaction begins; re-adding an
 * already-held lock in the nested tx must be harmless
 */
static void *
do_tx_add_locks_nested_all(struct transaction_data *data)
{
	int ret;
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
			/* second acquisition of the same locks */
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_taken_lock -- (internal) verify that failed tx_lock doesn't add
 * the lock to transaction (the lock stays held by the caller after tx end)
 */
static void *
do_tx_add_taken_lock(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);

	TX_BEGIN(Pop) {
		/* already held by this thread -- tx_lock must fail */
		UT_ASSERTne(pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]),
				0);
	} TX_END

	/* the lock was not released by tx end -- we still own it */
	UT_ASSERTne(pmemobj_rwlock_trywrlock(Pop, &data->rwlocks[0]), 0);
	UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
#endif
	return NULL;
}
/*
 * do_tx_lock_fail -- call pmemobj_tx_lock with POBJ_TX_NO_ABORT flag
 * and taken lock; exercises all four error-reporting variants (abort
 * vs. error-return, via flag and via failure-behavior setting)
 */
static void *
do_tx_lock_fail(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	int ret = 0;
	/* return errno and abort transaction */
	TX_BEGIN(Pop) {
		pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONABORT {
		UT_ASSERTne(errno, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		/* NO_ABORT flag: the failure is reported, tx continues */
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0],
				POBJ_XLOCK_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		/* same, but via the tx-wide failure-behavior setting */
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
#endif
	return NULL;
}
/*
 * do_fault_injection -- inject an allocation failure inside
 * add_to_tx_and_lock and verify the transaction aborts with ENOMEM
 */
static void
do_fault_injection(struct transaction_data *data)
{
	/* skip silently when the build has no fault-injection support */
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "add_to_tx_and_lock");
	int ret;
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		int err = pmemobj_tx_lock(TX_PARAM_MUTEX, &data->mutexes[0]);
		if (err)
			pmemobj_tx_abort(err);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
}
/*
 * main -- create the pool, place the lock set in the root object, then
 * run the scenario selected by each single-letter argument:
 * l = add locks, n = nested, a = nested-all, t = taken lock,
 * f = fault injection, w = lock failure modes
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_lock");
	if (argc < 3)
		UT_FATAL("usage: %s <file> [l|n|a|t|f|w]", argv[0]);
	if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *test_obj =
			(struct transaction_data *)pmemobj_direct(root);
	/* go through all arguments one by one */
	for (int arg = 2; arg < argc; arg++) {
		/* Scan the character of each argument. */
		if (strchr("lnatfw", argv[arg][0]) == NULL ||
				argv[arg][1] != '\0')
			UT_FATAL("op must be l or n or a or t or f or w");
		switch (argv[arg][0]) {
		case 'l':
			do_tx_add_locks(test_obj);
			break;
		case 'n':
			do_tx_add_locks_nested(test_obj);
			break;
		case 'a':
			do_tx_add_locks_nested_all(test_obj);
			break;
		case 't':
			do_tx_add_taken_lock(test_obj);
			break;
		case 'f':
			do_fault_injection(test_obj);
			break;
		case 'w':
			do_tx_lock_fail(test_obj);
			break;
		}
	}
	pmemobj_close(Pop);
	DONE(NULL);
}
| 7,003 | 24.75 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memops/obj_memops.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_memops.c -- basic memory operations tests
*
*/
#include <stddef.h>
#include "obj.h"
#include "memops.h"
#include "ulog.h"
#include "unittest.h"
#define TEST_ENTRIES 256
#define TEST_VALUES TEST_ENTRIES

/* ways a redo log can be corrupted before recovery is attempted */
enum fail_types {
	FAIL_NONE,
	FAIL_CHECKSUM,
	FAIL_MODIFY_NEXT,
	FAIL_MODIFY_VALUE,
};

/* redo/undo logs plus the target slots the logged operations modify */
struct test_object {
	struct ULOG(TEST_ENTRIES) redo;
	struct ULOG(TEST_ENTRIES) undo;
	uint64_t values[TEST_VALUES];
};
/*
 * clear_test_values -- reset every target slot of the test object to zero
 */
static void
clear_test_values(struct test_object *object)
{
	for (size_t slot = 0; slot < TEST_VALUES; ++slot)
		object->values[slot] = 0;
}
/*
 * redo_log_constructor -- object constructor used when extending a redo
 * log: initializes the new allocation as a ulog of the given generation
 * number (passed through "arg"); always reports success
 */
static int
redo_log_constructor(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	const struct pmem_ops *p_ops = &pop->p_ops;
	/* usable capacity excludes the ulog header, cacheline-aligned */
	size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
			CACHELINE_SIZE);

	ulog_construct(OBJ_PTR_TO_OFF(ctx, ptr), capacity,
			*(uint64_t *)arg, 1, 0, p_ops);

	return 0;
}
/*
 * pmalloc_redo_extend -- allocates a new redo-log extension of generation
 * "gen_num" and links it at *redo; returns pmalloc_construct's status
 */
static int
pmalloc_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
	size_t s = SIZEOF_ALIGNED_ULOG(TEST_ENTRIES);

	return pmalloc_construct(base, redo, s, redo_log_constructor, &gen_num,
			0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * test_free_entry -- ulog free callback stub; both parameters are
 * intentionally unused
 */
static void
test_free_entry(void *base, uint64_t *next)
{
	/* noop for fake ulog entries */
}
/*
 * test_set_entries -- queue "nentries" SET operations (values[i] = i + 1)
 * into the log and either process them (fail == FAIL_NONE) or cancel,
 * corrupt the log in the requested way, and verify recovery discards it
 */
static void
test_set_entries(PMEMobjpool *pop,
	struct operation_context *ctx, struct test_object *object,
	size_t nentries, enum fail_types fail, enum operation_log_type type)
{
	operation_start(ctx);

	UT_ASSERT(nentries <= ARRAY_SIZE(object->values));

	for (size_t i = 0; i < nentries; ++i) {
		operation_add_typed_entry(ctx,
			&object->values[i], i + 1,
			ULOG_OPERATION_SET, type);
	}

	operation_reserve(ctx, nentries * 16);

	if (fail != FAIL_NONE) {
		operation_cancel(ctx);

		/* corrupt the persistent log before attempting recovery */
		switch (fail) {
			case FAIL_CHECKSUM:
				object->redo.checksum += 1;
			break;
			case FAIL_MODIFY_NEXT:
				pmalloc_redo_extend(pop,
					&object->redo.next, 0);
			break;
			case FAIL_MODIFY_VALUE:
				object->redo.data[16] += 8;
			break;
			default:
				UT_ASSERT(0);
		}

		ulog_recover((struct ulog *)&object->redo,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);

		/* a corrupted log must not be applied */
		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], 0);
	} else {
		operation_process(ctx);
		operation_finish(ctx, 0);

		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], i + 1);
	}
}
/*
 * test_merge_op -- verify that consecutive OR/AND operations targeting the
 * same destination are merged correctly: ((0 | 10b | 01b) & 00b) | 01b == 01b
 */
static void
test_merge_op(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b10,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b00,
		ULOG_OPERATION_AND, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_process(ctx);
	operation_finish(ctx, 0);
	UT_ASSERTeq(object->values[0], 0b01);
}
/*
 * test_same_twice -- two SET entries for the same destination; the one added
 * last must win. The operation is deliberately cancelled (not finished) so
 * that a follow-up test can exercise recovery of an unfinished log.
 */
static void
test_same_twice(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 5,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 10,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 10);
	operation_cancel(ctx);
}
/*
 * test_redo -- run all redo log test cases
 *
 * The ordering of the cases is significant -- see the inline comments.
 */
static void
test_redo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/*
	 * Keep this test first.
	 * It tests a situation where the number of objects being added
	 * is equal to the capacity of the log.
	 */
	test_set_entries(pop, ctx, object, TEST_ENTRIES - 1,
		FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_TRANSIENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_merge_op(ctx, object);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_same_twice(ctx, object);
	clear_test_values(object);
	operation_delete(ctx);
	/*
	 * Verify that rebuilding redo_next works. This requires that
	 * object->redo->next is != 0 - to achieve that, this test must
	 * be preceded by a test that fails to finish the ulog's operation.
	 */
	ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		NULL, test_free_entry, &pop->p_ops, LOG_TYPE_REDO);
	/* FAIL_NONE (was a bare 0) keeps the enum argument explicit */
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	/* FAIL_MODIFY_NEXT tests can only happen after redo_next test */
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	operation_delete(ctx);
}
/*
 * test_undo_small_single_copy -- a single BUF_CPY undo entry snapshotting
 * two values; processing the undo log must restore the snapshotted state,
 * and a subsequent run without a snapshot must leave new values intact.
 */
static void
test_undo_small_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_CPY);
	object->values[0] = 2;
	object->values[1] = 1;
	operation_process(ctx);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	operation_start(ctx);
	/* processing restored the snapshotted 1, 2 */
	UT_ASSERTeq(object->values[0], 1);
	UT_ASSERTeq(object->values[1], 2);
	object->values[0] = 2;
	object->values[1] = 1;
	operation_process(ctx);
	/* no snapshot taken this time, so the values stay as written */
	UT_ASSERTeq(object->values[0], 2);
	UT_ASSERTeq(object->values[1], 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_single_set -- a single BUF_SET undo entry covering two
 * values; processing must overwrite both with the set value (zero).
 */
static void
test_undo_small_single_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0;
	/* NOTE(review): BUF_SET presumably reads a fill value from &c -- the
	 * source is smaller than the destination range, confirm in memops */
	operation_add_buffer(ctx,
		&object->values, &c, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_multiple_set -- two separate BUF_SET entries, one per
 * value; processing must zero both values independently.
 */
static void
test_undo_small_multiple_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0;
	operation_add_buffer(ctx,
		&object->values[0], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_add_buffer(ctx,
		&object->values[1], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_single_copy -- one BUF_CPY entry snapshotting the entire
 * values array (large enough to span ulog extensions); processing must roll
 * every value back to the snapshotted i + 1 state.
 */
static void
test_undo_large_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_checksum_mismatch -- corrupt one byte of the log payload after
 * taking a snapshot; the checksum check must prevent the log from being
 * applied, leaving the new (i + 2) values in place.
 */
static void
test_undo_checksum_mismatch(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object, struct ulog *log)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 20,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 2;
	pmemobj_persist(pop, &object->values, sizeof(*object->values) * 20);
	log->data[100] += 1; /* corrupt the log somewhere */
	pmemobj_persist(pop, &log->data[100], sizeof(log->data[100]));
	operation_process(ctx);
	/* the log shouldn't get applied */
	for (uint64_t i = 0; i < 20; ++i)
		UT_ASSERTeq(object->values[i], i + 2);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_copy -- two successive undo rounds: first a full-array
 * snapshot that must be fully restored, then a partial snapshot of the
 * first 26 values; only the snapshotted prefix may be rolled back while
 * the rest keeps its newest (i + 4) values.
 */
static void
test_undo_large_copy(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 3;
	operation_start(ctx);
	/* partial snapshot: only the first 26 values are recorded */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 26,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 4;
	pmemobj_persist(pop, &object->values, sizeof(object->values));
	operation_process(ctx);
	for (uint64_t i = 0; i < 26; ++i)
		UT_ASSERTeq(object->values[i], i + 3);
	for (uint64_t i = 26; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 4);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_foreach -- ulog_foreach_entry callback; counts visited entries
 * into *arg and returns 0 to continue iteration
 */
static int
test_undo_foreach(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops)
{
	size_t *nentries = arg;
	++(*nentries);
	return 0;
}
/*
 * drain_empty -- pmem_ops drain stub; intentionally a no-op for DRAM logs
 */
static void
drain_empty(void *ctx)
{
	(void) ctx;
}
/*
 * persist_empty -- pmem_ops persist stub; always reports success
 */
static int
persist_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * flush_empty -- pmem_ops flush stub; always reports success
 */
static int
flush_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * memcpy_libc -- pmem_ops memcpy; forwards straight to libc memcpy,
 * ignoring the context and flags
 */
static void *
memcpy_libc(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
	(void) ctx;
	(void) flags;
	return memcpy(dest, src, len);
}
/*
 * memset_libc -- pmem_ops memset; forwards straight to libc memset,
 * ignoring the context and flags
 */
static void *
memset_libc(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	(void) ctx;
	(void) flags;
	return memset(ptr, c, sz);
}
/*
 * test_undo_log_reuse -- test for correct reuse of log space
 *
 * Builds a two-part DRAM-backed ulog chain, fills it with entries twice
 * (the second pass after operation_init) and checks via ulog_foreach_entry
 * that old entries are no longer visible after reinitialization.
 */
static void
test_undo_log_reuse()
{
#define ULOG_SIZE 1024
	/* DRAM-only ops: persist/flush/drain are stubs, memcpy/memset libc */
	struct pmem_ops ops = {
		.persist = persist_empty,
		.flush = flush_empty,
		.drain = drain_empty,
		.memcpy = memcpy_libc,
		.memmove = NULL,
		.memset = memset_libc,
		.base = NULL,
	};
	struct ULOG(ULOG_SIZE) *first = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	struct ULOG(ULOG_SIZE) *second = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	ulog_construct((uint64_t)(first), ULOG_SIZE, 0, 0, 0, &ops);
	ulog_construct((uint64_t)(second), ULOG_SIZE, 0, 0, 0, &ops);
	/* chain the two logs together */
	first->next = (uint64_t)(second);
	struct operation_context *ctx = operation_new(
		(struct ulog *)first, ULOG_SIZE,
		NULL, test_free_entry,
		&ops, LOG_TYPE_UNDO);
	size_t nentries = 0;
	ulog_foreach_entry((struct ulog *)first,
		test_undo_foreach, &nentries, &ops,NULL);
	UT_ASSERTeq(nentries, 0);
	/* first, let's populate the log with some valid entries */
	size_t entry_size = (ULOG_SIZE / 2) - sizeof(struct ulog_entry_buf);
	size_t total_entries = ((ULOG_SIZE * 2) / entry_size);
	char *data = MALLOC(entry_size);
	memset(data, 0xc, entry_size); /* fill it with something */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	operation_init(ctx); /* initialize a new operation */
	/* let's overwrite old entries and see if they are no longer visible */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	FREE(data);
	operation_delete(ctx);
	util_aligned_free(first);
	util_aligned_free(second);
#undef ULOG_SIZE
}
/*
 * test_redo_cleanup_same_size -- test that a redo log extension of the same
 * size as a subsequent publish does not break the log chain
 * (the original header comment was a copy-paste of test_undo_log_reuse)
 */
static void
test_redo_cleanup_same_size(PMEMobjpool *pop, struct test_object *object)
{
#define ULOG_SIZE 1024
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/* attach an extension log to the builtin one */
	int ret = pmalloc(pop, &object->redo.next, ULOG_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_start(ctx); /* initialize a new operation */
	struct pobj_action act;
	pmemobj_reserve(pop, &act, ULOG_SIZE, 0);
	palloc_publish(&pop->heap, &act, 1, ctx);
	operation_delete(ctx);
#undef ULOG_SIZE
}
/*
 * test_undo -- run all undo log test cases against a fresh undo context,
 * then verify the final log capacity after clobbering
 */
static void
test_undo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->undo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_UNDO);
	test_undo_small_single_copy(ctx, object);
	test_undo_small_single_set(ctx, object);
	test_undo_small_multiple_set(ctx, object);
	test_undo_large_single_copy(ctx, object);
	test_undo_large_copy(pop, ctx, object);
	test_undo_checksum_mismatch(pop, ctx, object,
		(struct ulog *)&object->undo);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_delete(ctx);
}
/*
 * main -- create a pool, allocate a cacheline-aligned test object and run
 * the redo/undo/log-reuse test suites against it
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memops");
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, "obj_memops",
			PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/*
	 * The ulog API requires cacheline alignment. A cacheline-aligned
	 * allocation class is created here to properly test the ulog API.
	 * An aligned object can then be allocated using pmemobj_xalloc.
	 */
	struct pobj_alloc_class_desc new_ac = {
		.unit_size = sizeof(struct test_object),
		.alignment = CACHELINE_SIZE,
		.units_per_block = 1,
		.header_type = POBJ_HEADER_NONE,
	};
	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &new_ac) == -1)
		UT_FATAL("Failed to set allocation class");
	PMEMoid pobject;
	if (pmemobj_xalloc(pop, &pobject, sizeof(struct test_object), 0,
			POBJ_CLASS_ID(new_ac.class_id), NULL, NULL) == -1)
		UT_FATAL("Failed to allocate object");
	struct test_object *object = pmemobj_direct(pobject);
	UT_ASSERTne(object, NULL);
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->undo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->redo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	test_redo(pop, object);
	test_undo(pop, object);
	test_redo_cleanup_same_size(pop, object);
	test_undo_log_reuse();
	pmemobj_close(pop);
	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 15,904 | 23.319572 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_include/libpmempool_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* libpmempool_include.c -- include test for libpmempool
*
* this is only a compilation test - do not run this program
*/
#include <libpmempool.h>
/* this program only verifies that libpmempool.h compiles; never executed */
int
main(int argc, char *argv[])
{
	return 0;
}
| 285 | 15.823529 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ctl_cow/ctl_cow.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* ctl_cow.c -- unit tests for copy on write feature which check
* if changes are reverted after pool close when copy_on_write.at_open = 1
*/
#include <stddef.h>
#include "unittest.h"
#include <string.h>
/* single-int payload used by the COW transactions below */
struct test_st {
	int x;
};
POBJ_LAYOUT_BEGIN(test_layout);
POBJ_LAYOUT_ROOT(test_layout, struct my_root);
POBJ_LAYOUT_TOID(test_layout, struct test_st);
POBJ_LAYOUT_END(test_layout);
/* root object holding three typed OIDs modified by the test */
struct my_root {
	TOID(struct test_st) x;
	TOID(struct test_st) y;
	TOID(struct test_st) z;
};
/*
 * test_obj -- mutate an obj pool in two transactions (allocate x/y/z, then
 * free x and change y); with copy_on_write.at_open = 1 these changes are
 * expected to be reverted once the pool is closed
 */
static void
test_obj(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, NULL);
	if (pop == NULL)
		UT_FATAL("!%s: pmemobj_open", path);
	TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);
	TX_BEGIN(pop) {
		TX_ADD(root);
		TOID(struct test_st) x = TX_NEW(struct test_st);
		TOID(struct test_st) y = TX_NEW(struct test_st);
		TOID(struct test_st) z = TX_NEW(struct test_st);
		D_RW(x)->x = 5;
		D_RW(y)->x = 10;
		D_RW(z)->x = 15;
		D_RW(root)->x = x;
		D_RW(root)->y = y;
		D_RW(root)->z = z;
	} TX_ONABORT {
		abort();
	} TX_END
	TX_BEGIN(pop) {
		TX_ADD(root);
		TX_FREE(D_RW(root)->x);
		D_RW(root)->x = TOID_NULL(struct test_st);
		TX_ADD(D_RW(root)->y);
		TOID(struct test_st) y = D_RO(root)->y;
		D_RW(y)->x = 100;
	} TX_ONABORT {
		abort();
	} TX_END
	pmemobj_close(pop);
}
/*
 * test_blk -- write two blocks and zero one in a blk pool; with COW enabled
 * the writes should not survive pool close
 */
static void
test_blk(const char *path)
{
	PMEMblkpool *pbp = pmemblk_open(path, 512);
	if (pbp == NULL)
		UT_FATAL("!cannot open %s", path);
	char x[512] = "Test blk x";
	char y[512] = "Test blk y";
	if (pmemblk_write(pbp, &x, 1) < 0)
		UT_FATAL("cannot write to %s", path);
	if (pmemblk_write(pbp, &y, 2) < 0)
		UT_FATAL("cannot write to %s", path);
	if (pmemblk_set_zero(pbp, 2) < 0)
		UT_FATAL("cannot write to %s", path);
	pmemblk_close(pbp);
}
/*
 * test_log -- append two records to a log pool; with COW enabled the
 * appends should not survive pool close
 */
static void
test_log(const char *path)
{
	PMEMlogpool *plp = pmemlog_open(path);
	if (plp == NULL)
		UT_FATAL("!cannot open %s", path);
	char buf[] = "pmemlog test";
	char buf_2[] = "pmemlog test 2";
	if (pmemlog_append(plp, buf, strlen(buf)) < 0)
		UT_FATAL("cannot append to %s", path);
	if (pmemlog_append(plp, buf_2, strlen(buf_2)) < 0)
		UT_FATAL("cannot append to %s", path);
	pmemlog_close(plp);
}
/*
 * test_dax -- only open and close the pool; COW is expected to be rejected
 * on Device DAX so nothing else is attempted
 */
static void
test_dax(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, NULL);
	if (pop == NULL)
		UT_FATAL("!cannot open %s", path);
	else
		pmemobj_close(pop);
}
/*
 * main -- dispatch to the obj/blk/log/dax scenario named on the command line
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "ctl_cow");
	if (argc < 3)
		UT_FATAL("usage: %s filename obj|log|blk|dax", argv[0]);
	const char *path = argv[1];
	const char *action = argv[2];
	if (strcmp(action, "obj") == 0) {
		test_obj(path);
	} else if (strcmp(action, "blk") == 0) {
		test_blk(path);
	} else if (strcmp(action, "log") == 0) {
		test_log(path);
	} else if (strcmp(action, "dax") == 0) {
		test_dax(path);
	} else {
		UT_FATAL("%s is not a valid action", action);
	}
	DONE(NULL);
}
| 2,939 | 18.6 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_strdup/obj_strdup.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_strdup.c -- unit test for pmemobj_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
/* type numbers used to tag the allocations made by each scenario */
enum type_number {
	TYPE_SIMPLE,
	TYPE_NULL,
	TYPE_SIMPLE_ALLOC,
	TYPE_SIMPLE_ALLOC_1,
	TYPE_SIMPLE_ALLOC_2,
	TYPE_NULL_ALLOC,
	TYPE_NULL_ALLOC_1,
};
/* fixed narrow- and wide-character test inputs */
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define TEST_STR_EMPTY ""
#define TEST_WCS_EMPTY L""
/*
 * do_strdup -- duplicate a string to a not-allocated toid using
 * pmemobj_strdup (and its wide-char counterpart pmemobj_wcsdup)
 */
static void
do_strdup(PMEMobjpool *pop)
{
	TOID(char) str = TOID_NULL(char);
	TOID(wchar_t) wcs = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &str.oid, TEST_STR_1, TYPE_SIMPLE);
	pmemobj_wcsdup(pop, &wcs.oid, TEST_WCS_1, TYPE_SIMPLE);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERT(!TOID_IS_NULL(wcs));
	UT_ASSERTeq(strcmp(D_RO(str), TEST_STR_1), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs), TEST_WCS_1), 0);
}
/*
 * do_strdup_null -- duplicating a NULL source string must leave the
 * destination toids NULL
 */
static void
do_strdup_null(PMEMobjpool *pop)
{
	TOID(char) str = TOID_NULL(char);
	TOID(wchar_t) wcs = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &str.oid, NULL, TYPE_NULL);
	pmemobj_wcsdup(pop, &wcs.oid, NULL, TYPE_NULL);
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_alloc -- allocate a toid, duplicate the given string into it and
 * verify the copy; returns the resulting toid
 */
static TOID(char)
do_alloc(PMEMobjpool *pop, const char *s, unsigned type_num)
{
	TOID(char) str;
	POBJ_ZNEW(pop, &str, char);
	pmemobj_strdup(pop, &str.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERTeq(strcmp(D_RO(str), s), 0);
	return str;
}
/*
 * do_wcs_alloc -- wide-character variant of do_alloc: allocate a toid,
 * duplicate a wchar_t string into it and verify the copy
 */
static TOID(wchar_t)
do_wcs_alloc(PMEMobjpool *pop, const wchar_t *s, unsigned type_num)
{
	TOID(wchar_t) str;
	POBJ_ZNEW(pop, &str, wchar_t);
	pmemobj_wcsdup(pop, &str.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERTeq(wcscmp(D_RO(str), s), 0);
	return str;
}
/*
 * do_strdup_alloc -- duplicate a string into an already-allocated toid;
 * the old allocation is expected to be replaced by the new copy
 */
static void
do_strdup_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_2);
	TOID(wchar_t) wcs2 = do_wcs_alloc(pop, TEST_WCS_2, TYPE_SIMPLE_ALLOC_2);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), D_RO(wcs2)), 0);
}
/*
 * do_strdup_null_alloc -- duplicating a NULL source into an allocated toid
 * must leave the destination untouched (still non-NULL)
 */
static void
do_strdup_null_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_NULL_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_NULL_ALLOC_1);
	TOID(char) str2 = TOID_NULL(char);
	TOID(wchar_t) wcs2 = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_NULL_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_NULL_ALLOC);
	UT_ASSERT(!TOID_IS_NULL(str1));
	UT_ASSERT(!TOID_IS_NULL(wcs1));
}
/*
 * do_strdup_uint64_range -- duplicate strings with type numbers at the
 * upper boundary of the uint64_t range (UINT64_MAX and UINT64_MAX - 1)
 */
static void
do_strdup_uint64_range(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str3;
	TOID(char) str4 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), UINT64_MAX);
	pmemobj_strdup(pop, &str3.oid, D_RO(str4), UINT64_MAX - 1);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(strcmp(D_RO(str3), D_RO(str4)), 0);
}
/*
 * do_strdup_alloc_empty_string -- duplicate an empty (narrow and wide)
 * string into already-allocated toids; the result must be a valid empty
 * string, not a NULL toid
 * (the original header comment was copied from do_strdup_uint64_range)
 */
static void
do_strdup_alloc_empty_string(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	pmemobj_strdup(pop, &str1.oid, TEST_STR_EMPTY, TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, TEST_WCS_EMPTY, TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), TEST_STR_EMPTY), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), TEST_WCS_EMPTY), 0);
}
/*
 * main -- create a pool and run all pmemobj_strdup/wcsdup scenarios
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_strdup");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_strdup(pop);
	do_strdup_null(pop);
	do_strdup_alloc(pop);
	do_strdup_null_alloc(pop);
	do_strdup_uint64_range(pop);
	do_strdup_alloc_empty_string(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 5,017 | 26.571429 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_is_pmem/pmem_is_pmem.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem.c -- unit test for pmem_is_pmem()
*
* usage: pmem_is_pmem file [env]
*/
#include "unittest.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
/*
 * worker -- the work each thread performs; stores the pmem_is_pmem result
 * for the shared mapping into the int pointed to by arg
 */
static void *
worker(void *arg)
{
	int *ret = (int *)arg;
	*ret = pmem_is_pmem(Addr, Size);
	return NULL;
}
/*
 * main -- map the file, query pmem_is_pmem from NTHREAD concurrent threads
 * (all answers must agree), then re-query without the force env variable
 * and check the zero-length corner cases
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_is_pmem");
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file [env]", argv[0]);
	if (argc == 3)
		UT_ASSERTeq(os_setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);
	Addr = pmem_map_file(argv[1], 0, 0, 0, &Size, NULL);
	UT_ASSERTne(Addr, NULL);
	os_thread_t threads[NTHREAD];
	int ret[NTHREAD];
	/* kick off NTHREAD threads */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
	/* wait for all the threads to complete */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_JOIN(&threads[i], NULL);
	/* verify that all the threads return the same value */
	for (int i = 1; i < NTHREAD; i++)
		UT_ASSERTeq(ret[0], ret[i]);
	UT_OUT("threads.is_pmem(Addr, Size): %d", ret[0]);
	UT_ASSERTeq(os_unsetenv("PMEM_IS_PMEM_FORCE"), 0);
	UT_OUT("is_pmem(Addr, Size): %d", pmem_is_pmem(Addr, Size));
	/* zero-sized region is not pmem */
	UT_OUT("is_pmem(Addr, 0): %d", pmem_is_pmem(Addr, 0));
	UT_OUT("is_pmem(Addr + Size / 2, 0): %d",
			pmem_is_pmem((char *)Addr + Size / 2, 0));
	UT_OUT("is_pmem(Addr + Size, 0): %d",
			pmem_is_pmem((char *)Addr + Size, 0));
	DONE(NULL);
}
| 3,216 | 30.23301 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc_int/rpmem_obc_int.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc_int.c -- integration test for rpmem_obc and rpmemd_obc modules
*/
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmemd_obc.h"
#include "rpmemd_log.h"
#include "os.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define PERSIST_METHOD RPMEM_PM_GPSPM
#define RESP_ATTR_INIT {\
.port = PORT,\
.rkey = RKEY,\
.raddr = RADDR,\
.persist_method = PERSIST_METHOD,\
.nlanes = NLANES_RESP,\
}
#define REQ_ATTR_INIT {\
.pool_size = POOL_SIZE,\
.nlanes = NLANES,\
.provider = PROVIDER,\
.pool_desc = POOL_DESC,\
}
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server);
/*
 * client_create -- perform create request and verify that the server's
 * response attributes match the expected RESP_ATTR_INIT values
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	/* connection must still be alive before and after the request */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * client_open -- perform open request and verify both the response
 * attributes and the pool attributes returned by the server
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	struct rpmem_pool_attr pool_attr;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	UT_ASSERTeq(memcmp(&ex_pool_attr, &pool_attr,
				sizeof(ex_pool_attr)), 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * client_set_attr -- perform set attributes request with the alternative
 * pool attributes (POOL_ATTR_ALT); the server side validates them
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	const struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_set_attr(rpc, &pool_attr);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * req_arg -- request callbacks argument
 */
struct req_arg {
	struct rpmem_resp_attr resp;	/* response sent back to the client */
	struct rpmem_pool_attr pool_attr;	/* pool attrs for open resp */
	int closing;	/* set by req_close, ends the server loop */
};
/*
 * req_create -- process create request: validate the received request and
 * pool attributes against the expected values, then send the response
 */
static int
req_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_create_resp(obc, 0, &args->resp);
}
/*
 * req_open -- process open request: validate the received request
 * attributes, then respond with the prepared response and pool attributes
 */
static int
req_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_open_resp(obc, 0,
			&args->resp, &args->pool_attr);
}
/*
 * req_set_attr -- process set attributes request: the attributes received
 * must equal POOL_ATTR_ALT sent by client_set_attr
 */
static int
req_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_ALT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	return rpmemd_obc_set_attr_resp(obc, 0);
}
/*
 * req_close -- process close request: mark the server loop for shutdown
 * and acknowledge the close
 */
static int
req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	UT_ASSERTne(arg, NULL);
	struct req_arg *args = arg;
	args->closing = 1;
	return rpmemd_obc_close_resp(obc, 0);
}
/*
 * REQ -- server request callbacks dispatched by rpmemd_obc_process
 */
static struct rpmemd_obc_requests REQ = {
	.create = req_create,
	.open = req_open,
	.close = req_close,
	.set_attr = req_set_attr,
};
/*
 * server -- run server and process clients requests until a close request
 * sets arg.closing; a final process call must then report disconnect (1)
 */
int
server(const struct test_case *tc, int argc, char *argv[])
{
	int ret;
	struct req_arg arg = {
		.resp = RESP_ATTR_INIT,
		.pool_attr = POOL_ATTR_INIT,
		.closing = 0,
	};
	struct rpmemd_obc *obc;
	/* stdin/stdout serve as the communication channel */
	obc = rpmemd_obc_init(0, 1);
	UT_ASSERTne(obc, NULL);
	ret = rpmemd_obc_status(obc, 0);
	UT_ASSERTeq(ret, 0);
	while (1) {
		ret = rpmemd_obc_process(obc, &REQ, &arg);
		if (arg.closing) {
			break;
		} else {
			UT_ASSERTeq(ret, 0);
		}
	}
	ret = rpmemd_obc_process(obc, &REQ, &arg);
	UT_ASSERTeq(ret, 1);
	rpmemd_obc_fini(obc);
	return 0;
}
/*
 * test_cases -- available test cases (selected by name on the command line)
 */
static struct test_case test_cases[] = {
	TEST_CASE(server),
	TEST_CASE(client_create),
	TEST_CASE(client_open),
	TEST_CASE(client_set_attr),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_obc");
common_init("rpmem_fip",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
rpmemd_log_level = rpmemd_log_level_from_str(
os_getenv("RPMEMD_LOG_LEVEL"));
rpmem_util_cmds_init();
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
rpmem_util_cmds_fini();
common_fini();
rpmemd_log_close();
DONE(NULL);
}
| 8,537 | 20.780612 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmempool_feature_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018, Intel Corporation
#
#
# pmempool_feature_remote/config.sh -- test configuration
#

# run on any filesystem type and on both build flavors
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"

# pmempool feature does not support poolsets with remote replicas
# unittest contains only negative scenarios so no point to loop over
# all providers and persistency methods
CONF_GLOBAL_RPMEM_PROVIDER=sockets
CONF_GLOBAL_RPMEM_PMETHOD=GPSPM
| 468 | 26.588235 | 68 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/mmap_fixed/mmap_fixed.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* mmap_fixed.c -- test memory mapping with MAP_FIXED for various lengths
*
* This test is intended to be used for testing Windows implementation
* of memory mapping routines - mmap(), munmap(), msync() and mprotect().
* Those functions should provide the same functionality as their Linux
* counterparts, at least with respect to the features that are used
* in PMDK libraries.
*
* Known issues and differences between Linux and Windows implementation
* are described in src/common/mmap_windows.c.
*/
#include "unittest.h"
#include <sys/mman.h>
/* round 'size' down to the mmap alignment (assumes power-of-two align) */
#define ALIGN(size) ((size) & ~(Ut_mmap_align - 1))

/*
 * test_mmap_fixed -- test fixed mappings
 *
 * Maps file1 over a region spanning both aligned lengths, then maps
 * file2 with MAP_FIXED right after the aligned end of the first
 * mapping and verifies the requested address was honored.
 */
static void
test_mmap_fixed(const char *name1, const char *name2, size_t len1, size_t len2)
{
	size_t len1_aligned = ALIGN(len1);
	size_t len2_aligned = ALIGN(len2);
	UT_OUT("len: %zu (%zu) + %zu (%zu) = %zu", len1, len1_aligned,
			len2, len2_aligned, len1_aligned + len2_aligned);
	int fd1 = OPEN(name1, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	int fd2 = OPEN(name2, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	POSIX_FALLOCATE(fd1, 0, (os_off_t)len1);
	POSIX_FALLOCATE(fd2, 0, (os_off_t)len2);
	char *ptr1 = mmap(NULL, len1_aligned + len2_aligned,
			PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_OUT("ptr1: %p, ptr2: %p", ptr1, ptr1 + len1_aligned);
	/* MAP_FIXED must place the second mapping exactly where asked */
	char *ptr2 = mmap(ptr1 + len1_aligned, len2_aligned,
			PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, fd2, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
	UT_ASSERTeq(ptr2, ptr1 + len1_aligned);
	UT_ASSERTne(munmap(ptr1, len1_aligned), -1);
	UT_ASSERTne(munmap(ptr2, len2_aligned), -1);
	CLOSE(fd1);
	CLOSE(fd2);
	UNLINK(name1);
	UNLINK(name2);
}
/*
 * main -- parse the length arguments and run test_mmap_fixed() for
 * every (len_i, len_j) pair
 *
 * usage: mmap_fixed dirname len1 len2 ...
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "mmap_fixed");

	if (argc < 4)
		UT_FATAL("usage: %s dirname len1 len2 ...", argv[0]);

	/*
	 * Bug fix: the original allocation size was
	 *   sizeof(size_t) * (size_t)argc - 2
	 * which subtracts 2 *bytes* from the total instead of excluding
	 * the two non-length arguments; parenthesize the element count.
	 */
	size_t *lengths = MALLOC(sizeof(size_t) * (size_t)(argc - 2));
	UT_ASSERTne(lengths, NULL);

	size_t appendix_length = 20; /* room for the "\testfileN" suffix */
	char *name1 = MALLOC(strlen(argv[1]) + appendix_length);
	char *name2 = MALLOC(strlen(argv[1]) + appendix_length);

	/* backslash separator - this test targets the Windows mmap port */
	sprintf(name1, "%s\\testfile1", argv[1]);
	sprintf(name2, "%s\\testfile2", argv[1]);

	for (int i = 0; i < argc - 2; i++)
		lengths[i] = ATOULL(argv[i + 2]);

	for (int i = 0; i < argc - 2; i++)
		for (int j = 0; j < argc - 2; j++)
			test_mmap_fixed(name1, name2, lengths[i], lengths[j]);

	FREE(name1);
	FREE(name2);
	FREE(lengths);

	DONE(NULL);
}
| 2,522 | 26.129032 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_basic/common_pm_policy.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2018, Intel Corporation
#
# src/test/rpmem_basic/common_pm_policy.sh -- common part for TEST[10-11] scripts
#
set -e
LOG=rpmemd$UNITTEST_NUM.log
OUT=out$UNITTEST_NUM.log
rm -f $OUT
# create poolset and upload
run_on_node 0 "rm -rf ${RPMEM_POOLSET_DIR[0]} && mkdir -p ${RPMEM_POOLSET_DIR[0]} && mkdir -p ${NODE_DIR[0]}$POOLS_PART"
create_poolset $DIR/pool.set 8M:$PART_DIR/pool.part0 8M:$PART_DIR/pool.part1
copy_files_to_node 0 ${RPMEM_POOLSET_DIR[0]} $DIR/pool.set
# create pool and close it - local pool from file
SIMPLE_ARGS="test_create 0 pool.set ${NODE_ADDR[0]} pool 8M none test_close 0"
# test_pm_policy <force> -- run a create/close cycle with
# PMEM_IS_PMEM_FORCE=<force> on the server and append the resulting
# persistency-policy lines from the rpmemd log to $OUT
function test_pm_policy()
{
	# initialize pmem
	PMEM_IS_PMEM_FORCE=$1
	export_vars_node 0 PMEM_IS_PMEM_FORCE
	init_rpmem_on_node 1 0
	# remove rpmemd log and pool parts
	run_on_node 0 "rm -rf $PART_DIR && mkdir -p $PART_DIR && rm -f $LOG"
	# execute, get log
	expect_normal_exit run_on_node 1 ./rpmem_basic$EXESUFFIX $SIMPLE_ARGS
	copy_files_from_node 0 . ${NODE_TEST_DIR[0]}/$LOG
	# extract persist method and flush function
	cat $LOG | $GREP -A2 "persistency policy:" >> $OUT
}
| 1,160 | 28.769231 | 120 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_basic/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2018, Intel Corporation
#
#
# rpmem_basic/config.sh -- test configuration
#
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
# per-test overrides: TEST10/TEST11 pin the persistency method
CONF_RPMEM_PMETHOD[10]=APM
CONF_RPMEM_PMETHOD[11]=GPSPM
# Sockets provider does not detect fi_cq_signal so it does not return
# from fi_cq_sread. It causes this test to hang sporadically.
# https://github.com/ofiwg/libfabric/pull/3645
CONF_RPMEM_PROVIDER[12]=verbs
| 547 | 23.909091 | 69 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_basic/setup2to1.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2017, Intel Corporation
#
# src/test/rpmem_basic/setup2to1.sh -- common part for TEST[7-] scripts
#
POOLS_DIR=pools
POOLS_PART=pool_parts
TEST_LOG_FILE=test$UNITTEST_NUM.log
TEST_LOG_LEVEL=3
#
# This unit test requires 4 nodes, but nodes #0 and #3 should be the same
# physical machine - they should have the same NODE[n] addresses but
# different NODE_ADDR[n] addresses in order to test "2-to-1" configuration.
# Node #1 is being replicated to the node #0 and node #2 is being replicated
# to the node #3.
#
require_nodes 4
REPLICA[1]=0
REPLICA[2]=3
# set up each client node (1 and 2) together with its replica target
for node in 1 2; do
	require_node_libfabric ${node} $RPMEM_PROVIDER
	require_node_libfabric ${REPLICA[${node}]} $RPMEM_PROVIDER
	export_vars_node ${node} TEST_LOG_FILE
	export_vars_node ${node} TEST_LOG_LEVEL
	require_node_log_files ${node} $PMEM_LOG_FILE
	require_node_log_files ${node} $RPMEM_LOG_FILE
	require_node_log_files ${node} $TEST_LOG_FILE
	require_node_log_files ${REPLICA[${node}]} $RPMEMD_LOG_FILE
	REP_ADDR[${node}]=${NODE_ADDR[${REPLICA[${node}]}]}
	PART_DIR[${node}]=${NODE_DIR[${REPLICA[${node}]}]}$POOLS_PART
	RPMEM_POOLSET_DIR[${REPLICA[${node}]}]=${NODE_DIR[${REPLICA[${node}]}]}$POOLS_DIR
	init_rpmem_on_node ${node} ${REPLICA[${node}]}
	PID_FILE[${node}]="pidfile${UNITTEST_NUM}-${node}.pid"
	clean_remote_node ${node} ${PID_FILE[${node}]}
done
| 1,484 | 29.306122 | 82 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_basic/rpmem_basic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_basic.c -- unit test for rpmem operations
*/
#include "unittest.h"
#include "librpmem.h"
#include "pool_hdr.h"
#include "file.h"
#include "set.h"
#include "util.h"
#include "out.h"
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
/*
 * Use default terminal command for terminating session in user flags field
 * in order to make sure this is not interpreted by terminal.
 */
#define POOL_ATTR_INIT {\
	.signature = "<RPMEM>",\
	.major = 1,\
	.compat_features = 2,\
	.incompat_features = 2,\
	.ro_compat_features = 4,\
	.poolset_uuid = "POOLSET_UUID0123",\
	.uuid = "UUID0123456789AB",\
	.next_uuid = "NEXT_UUID0123456",\
	.prev_uuid = "PREV_UUID0123456",\
	.user_flags = "USER_FLAGS\0\0\0\n~.",\
}

/* as above but with SINGLEHDR incompat feature */
#define POOL_ATTR_INIT_SINGLEHDR {\
	.signature = "<RPMEM>",\
	.major = 1,\
	.compat_features = 2,\
	.incompat_features = 3,\
	.ro_compat_features = 4,\
	.poolset_uuid = "POOLSET_UUID0123",\
	.uuid = "UUID0123456789AB",\
	.next_uuid = "NEXT_UUID0123456",\
	.prev_uuid = "PREV_UUID0123456",\
	.user_flags = "USER_FLAGS\0\0\0\n~.",\
}

/* alternative attribute set used by the set-attr test cases */
#define POOL_ATTR_ALT {\
	.signature = "<ALT>",\
	.major = 5,\
	.compat_features = 6,\
	.incompat_features = 2,\
	.ro_compat_features = 8,\
	.poolset_uuid = "UUID_POOLSET_ALT",\
	.uuid = "ALT_UUIDCDEFFEDC",\
	.next_uuid = "456UUID_NEXT_ALT",\
	.prev_uuid = "UUID012_ALT_PREV",\
	.user_flags = "\0\0\0\n~._ALT_FLAGS",\
}

/* as above but with SINGLEHDR incompat feature */
#define POOL_ATTR_ALT_SINGLEHDR {\
	.signature = "<ALT>",\
	.major = 5,\
	.compat_features = 6,\
	.incompat_features = 3,\
	.ro_compat_features = 8,\
	.poolset_uuid = "UUID_POOLSET_ALT",\
	.uuid = "ALT_UUIDCDEFFEDC",\
	.next_uuid = "456UUID_NEXT_ALT",\
	.prev_uuid = "UUID012_ALT_PREV",\
	.user_flags = "\0\0\0\n~._ALT_FLAGS",\
}

/* attribute sets selectable by name via pool_attr_names[] */
static const struct rpmem_pool_attr pool_attrs[] = {
	POOL_ATTR_INIT,
	POOL_ATTR_INIT_SINGLEHDR,
	POOL_ATTR_ALT,
	POOL_ATTR_ALT_SINGLEHDR
};

/* must stay in the same order as pool_attrs[] */
static const char *pool_attr_names[] = {
	"init",
	"init_singlehdr",
	"alt",
	"alt_singlehdr"
};

#define POOL_ATTR_INIT_INDEX 0

/* default number of lanes requested for each pool */
#define NLANES 32

/* state of one local pool used by the test cases */
struct pool_entry {
	RPMEMpool *rpp;
	const char *target;
	void *pool;
	size_t size;
	size_t buff_offset; /* working buffer offset */
	size_t buff_size; /* working buffer size */
	unsigned nlanes;
	int is_mem;		/* pool is a malloc'd region, not a file */
	int error_must_occur;
	int exp_errno;
};

#define MAX_IDS 1024
/* pool slots addressed by the <id> argument of the test cases */
static struct pool_entry pools[MAX_IDS];
/*
 * init_buff -- default working buffer parameters
 *
 * The working buffer skips the pool header and spans the rest of
 * the pool.
 */
static inline void
init_buff(struct pool_entry *pool)
{
	pool->buff_offset = POOL_HDR_SIZE;
	pool->buff_size = pool->size - POOL_HDR_SIZE;
}
/*
 * init_pool -- map local pool file or allocate memory region
 *
 * pool      - pool entry to initialize
 * target    - remote target address (stored for later use)
 * pool_path - local pool file path; a path starting with "mem"
 *             selects an anonymous DRAM region instead of a file
 * pool_size - pool size string parsed by util_parse_size()
 */
static void
init_pool(struct pool_entry *pool, const char *target, const char *pool_path,
	const char *pool_size)
{
	pool->target = target;
	pool->exp_errno = 0;
	pool->nlanes = NLANES;
	int ret = util_parse_size(pool_size, &pool->size);
	UT_ASSERTeq(ret, 0);
	int flags = PMEM_FILE_CREATE;
	if (pool->size)
		flags |= PMEM_FILE_EXCL;
	if (strncmp(pool_path, "mem", strlen("mem")) == 0) {
		pool->pool = PAGEALIGNMALLOC(pool->size);
		pool->is_mem = 1;
	} else {
		pool->pool = pmem_map_file(pool_path, pool->size,
			flags, 0666, &pool->size, NULL);
		UT_ASSERTne(pool->pool, NULL);
		/*
		 * This is a workaround for an issue with using device dax with
		 * libibverbs. The problem is that libfabric to handle fork()
		 * function calls correctly use ibv_fork_init(3) which makes
		 * all registered memory being madvised with MADV_DONTFORK flag.
		 * In libpmemobj the remote replication is performed without
		 * pool header (first 4k). In such case the address passed to
		 * madvise(2) is aligned to 4k, but device dax can require
		 * different alignment (default is 2MB). This workaround
		 * madvises the entire memory region before registering
		 * it by fi_mr_reg(3).
		 *
		 * The librpmem client requires fork() support to work
		 * correctly.
		 */
		ret = os_madvise(pool->pool, pool->size, MADV_DONTFORK);
		UT_ASSERTeq(ret, 0);
		pool->is_mem = 0;
		if (util_file_get_type(pool_path) != TYPE_DEVDAX)
			os_unlink(pool_path);
	}
	init_buff(pool);
}
/*
 * free_pool -- unmap local pool file or free memory region
 *
 * Also clears the entry's handles so the slot can be reused.
 */
static void
free_pool(struct pool_entry *pool)
{
	if (pool->is_mem)
		FREE(pool->pool);
	else
		UT_ASSERTeq(pmem_unmap(pool->pool, pool->size), 0);
	pool->pool = NULL;
	pool->rpp = NULL;
	pool->target = NULL;
}
/*
 * str_2_pool_attr_index -- convert string to the index of pool attributes
 *
 * Returns the index into pool_attrs[]/pool_attr_names[] that matches
 * 'str'; aborts the test when the name is unknown.
 */
static int
str_2_pool_attr_index(const char *str)
{
	COMPILE_ERROR_ON((sizeof(pool_attr_names) / sizeof(pool_attr_names[0]))
		!= (sizeof(pool_attrs) / sizeof(pool_attrs[0])));

	const unsigned num_of_names = sizeof(pool_attr_names) /
		sizeof(pool_attr_names[0]);

	/* fix: unsigned index avoids a signed/unsigned comparison */
	for (unsigned i = 0; i < num_of_names; ++i) {
		if (strcmp(str, pool_attr_names[i]) == 0) {
			return (int)i;
		}
	}

	UT_FATAL("unrecognized name of pool attributes set: %s", str);
}
/*
 * cmp_pool_attr -- check pool attributes
 *
 * When attr2 is NULL, attr1 is expected to be all-zero; otherwise the
 * two attribute sets must match field by field.
 */
static void
cmp_pool_attr(const struct rpmem_pool_attr *attr1,
	const struct rpmem_pool_attr *attr2)
{
	if (attr2 == NULL) {
		UT_ASSERTeq(util_is_zeroed(attr1, sizeof(*attr1)), 1);
	} else {
		UT_ASSERTeq(memcmp(attr1->signature, attr2->signature,
				sizeof(attr1->signature)), 0);
		UT_ASSERTeq(attr1->major, attr2->major);
		UT_ASSERTeq(attr1->compat_features, attr2->compat_features);
		UT_ASSERTeq(attr1->ro_compat_features,
				attr2->ro_compat_features);
		UT_ASSERTeq(attr1->incompat_features, attr2->incompat_features);
		UT_ASSERTeq(memcmp(attr1->uuid, attr2->uuid,
				sizeof(attr1->uuid)), 0);
		UT_ASSERTeq(memcmp(attr1->poolset_uuid, attr2->poolset_uuid,
				sizeof(attr1->poolset_uuid)), 0);
		UT_ASSERTeq(memcmp(attr1->prev_uuid, attr2->prev_uuid,
				sizeof(attr1->prev_uuid)), 0);
		UT_ASSERTeq(memcmp(attr1->next_uuid, attr2->next_uuid,
				sizeof(attr1->next_uuid)), 0);
	}
}
/*
 * check_return_and_errno - validate return value and errno
 *
 * With a non-zero exp_errno: when the error must occur the call has to
 * fail with that errno; otherwise errno is only checked when the call
 * did fail. With exp_errno == 0 the call must succeed.
 */
#define check_return_and_errno(ret, error_must_occur, exp_errno) \
	if ((exp_errno) != 0) { \
		if (error_must_occur) { \
			UT_ASSERTne(ret, 0); \
			UT_ASSERTeq(errno, exp_errno); \
		} else { \
			if ((ret) != 0) { \
				UT_ASSERTeq(errno, exp_errno); \
			} \
		} \
	} else { \
		UT_ASSERTeq(ret, 0); \
	}
/*
 * test_create -- test case for creating remote pool
 *
 * The <option> argument values:
 * - "singlehdr" - the incompat feature flag is set to POOL_FEAT_SINGLEHDR,
 * - "noattr" - NULL pool attributes are passed to rpmem_create(),
 * - "none" or any other string - no options.
 */
static int
test_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 6)
		UT_FATAL(
			"usage: test_create <id> <pool set> <target> <pool> <size> <option>");
	const char *id_str = argv[0];
	const char *pool_set = argv[1];
	const char *target = argv[2];
	const char *pool_path = argv[3];
	const char *size_str = argv[4];
	const char *option = argv[5];
	int id = atoi(id_str);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	UT_ASSERTeq(pool->rpp, NULL);
	struct rpmem_pool_attr *rattr = NULL;
	struct rpmem_pool_attr pool_attr = pool_attrs[POOL_ATTR_INIT_INDEX];
	if (strcmp(option, "noattr") == 0) {
		/* pass NULL pool attributes */
	} else {
		if (strcmp(option, "singlehdr") == 0)
			pool_attr.incompat_features |= POOL_FEAT_SINGLEHDR;
		rattr = &pool_attr;
	}
	init_pool(pool, target, pool_path, size_str);
	pool->rpp = rpmem_create(target, pool_set, pool->pool,
			pool->size, &pool->nlanes, rattr);
	if (pool->rpp) {
		UT_ASSERTne(pool->nlanes, 0);
		UT_OUT("%s: created", pool_set);
	} else {
		/* creation failed - log errno message, release the pool */
		UT_OUT("!%s", pool_set);
		free_pool(pool);
	}
	return 6;
}
/*
 * test_open -- test case for opening remote pool
 *
 * The <option> argument values:
 * - "singlehdr" - the incompat feature flag is set to POOL_FEAT_SINGLEHDR,
 * - "noattr" - NULL pool attributes are passed to rpmem_create(),
 * - "none" or any other string - no options.
 *
 * On success the attributes returned by rpmem_open() are compared
 * against the expected set named by <pool attr name>.
 */
static int
test_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 7)
		UT_FATAL(
			"usage: test_open <id> <pool set> <target> <pool> <size> <pool attr name> <option>");
	const char *id_str = argv[0];
	const char *pool_set = argv[1];
	const char *target = argv[2];
	const char *pool_path = argv[3];
	const char *size_str = argv[4];
	const char *pool_attr_name = argv[5];
	const char *option = argv[6];
	int id = atoi(id_str);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	UT_ASSERTeq(pool->rpp, NULL);
	const struct rpmem_pool_attr *rattr = NULL;
	const int pool_attr_id = str_2_pool_attr_index(pool_attr_name);
	struct rpmem_pool_attr pool_attr = pool_attrs[pool_attr_id];
	if (strcmp(option, "noattr") == 0) {
		/* pass NULL pool attributes */
	} else {
		/* pass non-NULL pool attributes */
		if (strcmp(option, "singlehdr") == 0)
			pool_attr.incompat_features |= POOL_FEAT_SINGLEHDR;
		rattr = &pool_attr;
	}
	init_pool(pool, target, pool_path, size_str);
	struct rpmem_pool_attr pool_attr_open;
	pool->rpp = rpmem_open(target, pool_set, pool->pool,
			pool->size, &pool->nlanes, &pool_attr_open);
	if (pool->rpp) {
		cmp_pool_attr(&pool_attr_open, rattr);
		UT_ASSERTne(pool->nlanes, 0);
		UT_OUT("%s: opened", pool_set);
	} else {
		UT_OUT("!%s", pool_set);
		free_pool(pool);
	}
	return 7;
}
/*
 * test_close -- test case for closing remote pool
 *
 * Validates the rpmem_close() result against the pool's expected
 * errno (set e.g. by rpmemd_terminate) and releases the local pool.
 */
static int
test_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_close <id>");
	const char *id_str = argv[0];
	int id = atoi(id_str);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	UT_ASSERTne(pool->rpp, NULL);
	int ret = rpmem_close(pool->rpp);
	check_return_and_errno(ret, pool->error_must_occur, pool->exp_errno);
	free_pool(pool);
	return 1;
}
/* common signature for the persist/flush variants under test */
typedef int (*flush_func)(
	RPMEMpool *rpp, size_t off, size_t size, unsigned lane);

/* rpmem_persist() with the RPMEM_PERSIST_RELAXED flag */
static int
persist_relaxed(RPMEMpool *rpp, size_t off, size_t size, unsigned lane)
{
	return rpmem_persist(rpp, off, size, lane, RPMEM_PERSIST_RELAXED);
}

/* rpmem_persist() with no flags */
static int
persist_normal(RPMEMpool *rpp, size_t off, size_t size, unsigned lane)
{
	return rpmem_persist(rpp, off, size, lane, 0);
}

/* rpmem_flush() with the RPMEM_FLUSH_RELAXED flag */
static int
flush_relaxed(RPMEMpool *rpp, size_t off, size_t size, unsigned lane)
{
	return rpmem_flush(rpp, off, size, lane, RPMEM_FLUSH_RELAXED);
}

/* rpmem_flush() with no flags */
static int
flush_normal(RPMEMpool *rpp, size_t off, size_t size, unsigned lane)
{
	return rpmem_flush(rpp, off, size, lane, 0);
}
/*
 * flush_thread_arg -- flush worker thread arguments
 */
struct flush_thread_arg {
	RPMEMpool *rpp;
	size_t off;		/* start offset of this thread's region */
	size_t size;		/* size of this thread's region */
	unsigned nops;		/* number of flush calls to split it into */
	unsigned lane;
	int error_must_occur;
	int exp_errno;
	flush_func flush;	/* operation under test */
};

/*
 * flush_thread_func -- worker thread function for flushing ops
 *
 * Splits the assigned region into nops equal chunks and issues one
 * flush call per chunk, validating each return value.
 */
static void *
flush_thread_func(void *arg)
{
	struct flush_thread_arg *args = arg;
	size_t flush_size = args->size / args->nops;
	UT_ASSERTeq(args->size % args->nops, 0);
	for (unsigned i = 0; i < args->nops; i++) {
		size_t off = args->off + i * flush_size;
		size_t left = args->size - i * flush_size;
		/* min(left, flush_size); equal given the assert above */
		size_t size = left < flush_size ?
				left : flush_size;
		int ret = args->flush(args->rpp, off, size, args->lane);
		check_return_and_errno(ret, args->error_must_occur,
				args->exp_errno);
	}
	return NULL;
}
/*
 * test_flush_imp -- common implementation for persist/flush test cases
 *
 * Optionally fills the working buffer with a seeded random pattern,
 * then spawns up to pool->nlanes worker threads, each flushing its
 * own slice of the buffer with 'func'.
 */
static void
test_flush_imp(unsigned id, unsigned seed, unsigned nthreads, unsigned nops,
	flush_func func)
{
	struct pool_entry *pool = &pools[id];
	UT_ASSERTne(pool->nlanes, 0);
	nthreads = min(nthreads, pool->nlanes);

	if (seed) {
		srand(seed);
		uint8_t *buff = (uint8_t *)((uintptr_t)pool->pool +
			pool->buff_offset);
		for (size_t i = 0; i < pool->buff_size; i++)
			buff[i] = rand();
	}

	os_thread_t *threads = MALLOC(nthreads * sizeof(*threads));
	struct flush_thread_arg *args = MALLOC(nthreads * sizeof(*args));
	size_t size_per_thread = pool->buff_size / nthreads;
	UT_ASSERTeq(pool->buff_size % nthreads, 0);

	for (unsigned i = 0; i < nthreads; i++) {
		args[i].rpp = pool->rpp;
		args[i].nops = nops;
		args[i].lane = (unsigned)i;
		args[i].off = pool->buff_offset + i * size_per_thread;
		args[i].flush = func;
		size_t size_left = pool->buff_size - size_per_thread * i;
		args[i].size = size_left < size_per_thread ?
				size_left : size_per_thread;
		args[i].exp_errno = pool->exp_errno;
		args[i].error_must_occur = pool->error_must_occur;
		THREAD_CREATE(&threads[i], NULL, flush_thread_func, &args[i]);
	}

	/* fix: unsigned index - nthreads is unsigned (was 'int i') */
	for (unsigned i = 0; i < nthreads; i++)
		THREAD_JOIN(&threads[i], NULL);

	FREE(args);
	FREE(threads);
}
/*
 * test_persist -- test case for persist operation
 *
 * Dispatches to test_flush_imp() with the relaxed or normal
 * rpmem_persist() wrapper depending on <relaxed>.
 */
static int
test_persist(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 5)
		UT_FATAL(
			"usage: test_persist <id> <seed> <nthreads> <nops> <relaxed>");
	unsigned id = ATOU(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	unsigned seed = ATOU(argv[1]);
	unsigned nthreads = ATOU(argv[2]);
	unsigned nops = ATOU(argv[3]);
	unsigned relaxed = ATOU(argv[4]);
	if (relaxed)
		test_flush_imp(id, seed, nthreads, nops, persist_relaxed);
	else
		test_flush_imp(id, seed, nthreads, nops, persist_normal);
	return 5;
}

/*
 * test_deep_persist -- test case for deep_persist operation
 */
static int
test_deep_persist(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL(
			"usage: test_deep_persist <id> <seed> <nthreads> <nops>");
	unsigned id = ATOU(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	unsigned seed = ATOU(argv[1]);
	unsigned nthreads = ATOU(argv[2]);
	unsigned nops = ATOU(argv[3]);
	test_flush_imp(id, seed, nthreads, nops, rpmem_deep_persist);
	return 4;
}
/*
 * test_flush -- test case for flush operation
 *
 * Dispatches to test_flush_imp() with the relaxed or normal
 * rpmem_flush() wrapper depending on <relaxed>.
 */
static int
test_flush(const struct test_case *tc, int argc, char *argv[])
{
	/*
	 * Bug fix: this case consumes 5 arguments (it reads argv[4] and
	 * returns 5), but the original only required argc >= 4, allowing
	 * an out-of-bounds read of argv[4].
	 */
	if (argc < 5)
		UT_FATAL("usage: test_flush <id> <seed> <nthreads> <nops> "
				"<relaxed>");
	unsigned id = ATOU(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	unsigned seed = ATOU(argv[1]);
	unsigned nthreads = ATOU(argv[2]);
	unsigned nops = ATOU(argv[3]);
	unsigned relaxed = ATOU(argv[4]);
	if (relaxed)
		test_flush_imp(id, seed, nthreads, nops, flush_relaxed);
	else
		test_flush_imp(id, seed, nthreads, nops, flush_normal);
	return 5;
}
/*
 * drain_thread_arg -- drain worker thread arguments
 */
struct drain_thread_arg {
	RPMEMpool *rpp;
	unsigned nops;		/* expected to be exactly 1 */
	unsigned lane;
	int error_must_occur;
	int exp_errno;
};

/*
 * drain_thread_func -- worker thread function for drain
 *
 * Issues a single rpmem_drain() on the assigned lane and validates
 * the return value.
 */
static void *
drain_thread_func(void *arg)
{
	struct drain_thread_arg *args = arg;
	UT_ASSERTeq(args->nops, 1);
	int ret = rpmem_drain(args->rpp, args->lane, 0 /* flags */);
	check_return_and_errno(ret, args->error_must_occur,
			args->exp_errno);
	return NULL;
}
/*
 * test_drain_imp -- common implementation of the drain test case
 *
 * Spawns up to pool->nlanes worker threads, each issuing a single
 * rpmem_drain() on its own lane.
 */
static void
test_drain_imp(unsigned id, unsigned nthreads)
{
	struct pool_entry *pool = &pools[id];
	UT_ASSERTne(pool->nlanes, 0);
	nthreads = min(nthreads, pool->nlanes);

	os_thread_t *threads = MALLOC(nthreads * sizeof(*threads));
	struct drain_thread_arg *args = MALLOC(nthreads * sizeof(*args));

	for (unsigned i = 0; i < nthreads; i++) {
		args[i].rpp = pool->rpp;
		args[i].nops = 1;
		args[i].lane = (unsigned)i;
		args[i].exp_errno = pool->exp_errno;
		args[i].error_must_occur = pool->error_must_occur;
		THREAD_CREATE(&threads[i], NULL, drain_thread_func, &args[i]);
	}

	/* fix: unsigned index - nthreads is unsigned (was 'int i') */
	for (unsigned i = 0; i < nthreads; i++)
		THREAD_JOIN(&threads[i], NULL);

	FREE(args);
	FREE(threads);
}
/*
 * test_drain -- test case for drain operation
 */
static int
test_drain(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_drain <id> <nthreads>");
	unsigned id = ATOU(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	unsigned nthreads = ATOU(argv[1]);
	test_drain_imp(id, nthreads);
	return 2;
}
/*
 * test_read -- test case for read operation
 *
 * Reads everything past the pool header from the remote pool into the
 * local mapping and, on success, compares it against the seeded random
 * pattern. NOTE(review): uses POOL_HDR_SIZE directly, not the
 * configurable working-buffer limits set by buff_limit.
 */
static int
test_read(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_read <id> <seed>");
	int id = atoi(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	srand(ATOU(argv[1]));
	int ret;
	uint8_t *buff = (uint8_t *)((uintptr_t)pool->pool + POOL_HDR_SIZE);
	size_t buff_size = pool->size - POOL_HDR_SIZE;
	ret = rpmem_read(pool->rpp, buff, POOL_HDR_SIZE, buff_size, 0);
	check_return_and_errno(ret, pool->error_must_occur, pool->exp_errno);
	if (ret == 0) {
		for (size_t i = 0; i < buff_size; i++) {
			uint8_t r = rand();
			UT_ASSERTeq(buff[i], r);
		}
	}
	return 2;
}
/*
 * test_remove -- test case for remove operation
 *
 * Translates the <force> and <rm pool set> arguments into
 * RPMEM_REMOVE_* flags and expects rpmem_remove() to succeed.
 */
static int
test_remove(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL(
			"usage: test_remove <target> <pool set> <force> <rm pool set>");
	const char *target = argv[0];
	const char *pool_set = argv[1];
	int force = atoi(argv[2]);
	int rm_pool_set = atoi(argv[3]);
	int flags = 0;
	if (force)
		flags |= RPMEM_REMOVE_FORCE;
	if (rm_pool_set)
		flags |= RPMEM_REMOVE_POOL_SET;
	int ret;
	ret = rpmem_remove(target, pool_set, flags);
	UT_ASSERTeq(ret, 0);
	return 4;
}
/*
 * test_set_attr -- test case for set attributes operation
 *
 * <option> selects "noattr" (pass NULL attributes), "singlehdr"
 * (OR in POOL_FEAT_SINGLEHDR), or anything else for no options.
 * Both success and failure are only logged, not asserted.
 */
static int
test_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: test_set_attr <id> <pool attr name> <option>");
	const char *id_str = argv[0];
	const char *pool_attr_name = argv[1];
	const char *option = argv[2];
	int id = atoi(id_str);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	UT_ASSERTne(pool->rpp, NULL);
	struct rpmem_pool_attr *rattr = NULL;
	const int pool_attr_id = str_2_pool_attr_index(pool_attr_name);
	struct rpmem_pool_attr pool_attr = pool_attrs[pool_attr_id];
	if (strcmp(option, "noattr") == 0) {
		/* pass NULL pool attributes */
	} else {
		/* pass non-NULL pool attributes */
		if (strcmp(option, "singlehdr") == 0)
			pool_attr.incompat_features |= POOL_FEAT_SINGLEHDR;
		rattr = &pool_attr;
	}
	int ret = rpmem_set_attr(pool->rpp, rattr);
	if (ret)
		UT_OUT("set attributes failed (%s)", pool_attr_name);
	else
		UT_OUT("set attributes succeeded (%s)", pool_attr_name);
	return 3;
}
/*
 * check_range_imp -- open the poolset locally and verify that
 * [offset, offset + size) matches the current rand() sequence
 */
static void
check_range_imp(char *pool_set, size_t offset, size_t size)
{
	struct pool_set *set;
	int ret;
	ret = util_poolset_create_set(&set, pool_set, 0, 0, 0);
	UT_ASSERTeq(ret, 0);
	ret = util_pool_open_nocheck(set, 0);
	UT_ASSERTeq(ret, 0);
	uint8_t *data = set->replica[0]->part[0].addr;
	for (size_t i = 0; i < size; i++) {
		uint8_t r = rand();
		UT_ASSERTeq(data[offset + i], r);
	}
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
}

/*
 * check_range -- check if remote pool range contains specified random sequence
 */
static int
check_range(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL(
			"usage: check_range <pool set> <seed> <offset> <size>");
	char *pool_set = argv[0];
	srand(ATOU(argv[1]));
	size_t offset;
	size_t size;
	int ret;
	ret = util_parse_size(argv[2], &offset);
	UT_ASSERTeq(ret, 0);
	ret = util_parse_size(argv[3], &size);
	UT_ASSERTeq(ret, 0);
	check_range_imp(pool_set, offset, size);
	return 4;
}
/*
 * check_pool -- check if remote pool contains specified random sequence
 *
 * Convenience wrapper around check_range_imp() covering everything
 * past the pool header.
 */
static int
check_pool(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: check_pool <pool set> <seed> <size>");
	char *pool_set = argv[0];
	srand(ATOU(argv[1]));
	size_t size;
	int ret;
	ret = util_parse_size(argv[2], &size);
	UT_ASSERTeq(ret, 0);
	check_range_imp(pool_set, POOL_HDR_SIZE, size - POOL_HDR_SIZE);
	return 3;
}

/*
 * fill_pool -- fill remote pool with specified random sequence
 *
 * Writes the seeded rand() sequence to everything past the pool
 * header via a local poolset mapping.
 */
static int
fill_pool(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: fill_pool <pool set> <seed>");
	char *pool_set = argv[0];
	srand(ATOU(argv[1]));
	int ret;
	struct pool_set *set;
	ret = util_poolset_create_set(&set, pool_set, 0, 0, 0);
	UT_ASSERTeq(ret, 0);
	ret = util_pool_open_nocheck(set, 0);
	UT_ASSERTeq(ret, 0);
	uint8_t *data = set->replica[0]->part[0].addr;
	for (size_t i = POOL_HDR_SIZE; i < set->poolsize; i++)
		data[i] = rand();
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
	return 2;
}
/*
 * buff_limit -- limit working buffer
 *
 * Restricts the region the persist/flush test cases operate on to
 * [offset, offset + length) within the pool.
 */
static int
buff_limit(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: buff_limit <id> <offset> <length>");
	unsigned id = ATOU(argv[0]);
	/* 'id >= 0' is trivially true for unsigned; kept for symmetry */
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	size_t offset;
	size_t size;
	int ret;
	ret = util_parse_size(argv[1], &offset);
	UT_ASSERTeq(ret, 0);
	ret = util_parse_size(argv[2], &size);
	UT_ASSERTeq(ret, 0);
	struct pool_entry *pool = &pools[id];
	UT_ASSERT(offset < pool->size);
	UT_ASSERT(offset + size <= pool->size);
	pool->buff_offset = offset;
	pool->buff_size = size;
	return 3;
}

/*
 * buff_reset -- reset working buffer to default (whole pool)
 */
static int
buff_reset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: buff_reset <id>");
	unsigned id = ATOU(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	init_buff(pool);
	return 1;
}
/* whether rpmemd_kill_wait() waits for the process to actually exit */
enum wait_type {
	WAIT,
	NOWAIT
};

/* must stay in the same order as enum wait_type */
static const char *wait_type_str[2] = {
	"wait",
	"nowait"
};

/*
 * str2wait -- convert a <wait|nowait> string into enum wait_type
 */
static enum wait_type
str2wait(const char *str)
{
	for (int i = 0; i < ARRAY_SIZE(wait_type_str); ++i) {
		if (strcmp(wait_type_str[i], str) == 0)
			return (enum wait_type)i;
	}
	UT_FATAL("'%s' does not match <wait|nowait>", str);
}
#define SSH_EXE "ssh"
/* kill -9 the given pid on the target host over ssh */
#define RPMEMD_TERMINATE_CMD SSH_EXE " -tt %s kill -9 %d"
/* read the rpmemd pid file on the target host */
#define GET_RPMEMD_PID_CMD SSH_EXE " %s cat %s"
/* count processes matching the given pid on the target host */
#define COUNT_RPMEMD_CMD SSH_EXE " %s ps -A | grep -c %d"

/*
 * rpmemd_kill -- kill target rpmemd
 */
static int
rpmemd_kill(const char *target, int pid)
{
	char cmd[100];
	SNPRINTF(cmd, sizeof(cmd), RPMEMD_TERMINATE_CMD, target, pid);
	return system(cmd);
}
/*
 * popen_readi -- run 'cmd' via popen(3) and return the first decimal
 * integer it prints
 */
static int
popen_readi(const char *cmd)
{
	int value = 0;

	FILE *pipe = popen(cmd, "r");
	UT_ASSERT(pipe != NULL);

	int nread = fscanf(pipe, "%d", &value);
	UT_ASSERT(nread == 1);

	pclose(pipe);

	return value;
}
/*
 * rpmemd_get_pid -- get target rpmemd pid
 *
 * Reads the pid file on the target over ssh.
 */
static int
rpmemd_get_pid(const char *target, const char *pid_file)
{
	char cmd[PATH_MAX];
	SNPRINTF(cmd, sizeof(cmd), GET_RPMEMD_PID_CMD, target, pid_file);
	return popen_readi(cmd);
}

/*
 * rpmemd_is_running -- tell if target rpmemd is running
 *
 * Counts matching processes on the target over ssh.
 */
static int
rpmemd_is_running(const char *target, int pid)
{
	char cmd[100];
	SNPRINTF(cmd, sizeof(cmd), COUNT_RPMEMD_CMD, target, pid);
	return popen_readi(cmd) > 0;
}
/*
 * rpmemd_kill_wait -- kill target rpmemd and wait for it to stop
 *
 * Sends the kill command at least once; with WAIT it keeps killing
 * until either the command fails or the process is no longer running.
 */
static void
rpmemd_kill_wait(const char *target, const char *pid_file, enum wait_type wait)
{
	int pid = rpmemd_get_pid(target, pid_file);

	for (;;) {
		if (rpmemd_kill(target, pid) != 0)
			break;
		if (wait == NOWAIT)
			break;
		if (!rpmemd_is_running(target, pid))
			break;
	}
}
/*
 * rpmemd_terminate -- terminate target rpmemd
 *
 * Kills the remote daemon and records ECONNRESET as the errno all
 * subsequent operations on this pool are expected to fail with.
 */
static int
rpmemd_terminate(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3) {
		UT_FATAL(
			"usage: rpmemd_terminate <id> <pid file> <wait|nowait>");
	}
	const char *id_str = argv[0];
	const char *pid_file = argv[1];
	const char *wait_str = argv[2];
	int id = atoi(id_str);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	UT_ASSERTne(pool->target, NULL);
	pool->exp_errno = ECONNRESET;
	enum wait_type wait = str2wait(wait_str);
	/*
	 * if process will wait for rpmemd to terminate it is sure error will
	 * occur
	 */
	pool->error_must_occur = wait == WAIT;
	rpmemd_kill_wait(pool->target, pid_file, wait);
	return 3;
}
/*
 * test_persist_header -- test case for persisting data with offset < 4096
 *
 * if 'hdr' argument is used, test passes if rpmem_persist fails
 * if 'nohdr' argument is used, test passes if rpmem_persist passes
 *
 * Iterates over the whole header area in 8-byte steps.
 */
static int
test_persist_header(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL(
			"usage: test_persist_header <id> <hdr|nohdr> <relaxed>");
	int id = atoi(argv[0]);
	const char *hdr_str = argv[1];
	int relaxed = atoi(argv[2]);
	unsigned flags = 0;
	if (relaxed)
		flags |= RPMEM_PERSIST_RELAXED;
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];
	int with_hdr;
	if (strcmp(hdr_str, "hdr") == 0)
		with_hdr = 1;
	else if (strcmp(hdr_str, "nohdr") == 0)
		with_hdr = 0;
	else
		UT_ASSERT(0);
	for (size_t off = 0; off < POOL_HDR_SIZE; off += 8) {
		int ret = rpmem_persist(pool->rpp, off, 8, 0, flags);
		UT_ASSERTeq(ret, with_hdr ? -1 : 0);
	}
	return 3;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_create),
	TEST_CASE(test_open),
	TEST_CASE(test_set_attr),
	TEST_CASE(test_close),
	TEST_CASE(test_persist),
	TEST_CASE(test_deep_persist),
	TEST_CASE(test_flush),
	TEST_CASE(test_drain),
	TEST_CASE(test_read),
	TEST_CASE(test_remove),
	TEST_CASE(check_pool),
	TEST_CASE(check_range),
	TEST_CASE(fill_pool),
	TEST_CASE(buff_limit),
	TEST_CASE(buff_reset),
	TEST_CASE(rpmemd_terminate),
	TEST_CASE(test_persist_header),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))

/*
 * main -- initialize util/out layers and dispatch the test cases
 * named on the command line
 */
int
main(int argc, char *argv[])
{
	util_init();
	/* probe libfabric before START - presumably warms up providers */
	rpmem_fip_probe_get("localhost", NULL);
	START(argc, argv, "rpmem_basic");
	out_init("rpmem_basic", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 25,538 | 22.154125 | 88 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_basic/setup.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2017, Intel Corporation
#
# src/test/rpmem_basic/setup.sh -- common part for TEST* scripts
#
set -e
# two nodes: node 0 runs rpmemd, node 1 runs the rpmem client
require_nodes 2
require_node_libfabric 0 $RPMEM_PROVIDER $SETUP_LIBFABRIC_VERSION
require_node_libfabric 1 $RPMEM_PROVIDER $SETUP_LIBFABRIC_VERSION
require_node_log_files 0 $RPMEMD_LOG_FILE
require_node_log_files 1 $RPMEM_LOG_FILE
require_node_log_files 1 $PMEM_LOG_FILE
POOLS_DIR=pools
POOLS_PART=pool_parts
PART_DIR=${NODE_DIR[0]}/$POOLS_PART
RPMEM_POOLSET_DIR[0]=${NODE_DIR[0]}$POOLS_DIR
# a TEST script may set SETUP_MANUAL_INIT_RPMEM to perform its own init
if [ -z "$SETUP_MANUAL_INIT_RPMEM" ]; then
	init_rpmem_on_node 1 0
fi
| 642 | 24.72 | 65 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_rm/libpmempool_rm.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* libpmempool_rm -- a unittest for pmempool_rm.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "unittest.h"
#define FATAL_USAGE(n) UT_FATAL("usage: %s [-f -l -r -o] path..", (n))
static PMEMobjpool *Pop;
/*
 * main -- parse the option flags, then remove each pool named on the
 * command line with pmempool_rm(); with -o each pool is additionally
 * opened before and closed after the removal attempt
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_rm");
	if (argc < 2)
		FATAL_USAGE(argv[0]);

	unsigned flags = 0;
	int open_pool = 0;	/* -o: open/close the pool around rm */
	int c;

	while ((c = getopt(argc, argv, "flro")) != -1) {
		switch (c) {
		case 'f':
			flags |= PMEMPOOL_RM_FORCE;
			break;
		case 'r':
			flags |= PMEMPOOL_RM_POOLSET_REMOTE;
			break;
		case 'l':
			flags |= PMEMPOOL_RM_POOLSET_LOCAL;
			break;
		case 'o':
			open_pool = 1;
			break;
		default:
			FATAL_USAGE(argv[0]);
		}
	}

	for (int i = optind; i < argc; i++) {
		const char *path = argv[i];

		if (open_pool) {
			Pop = pmemobj_open(path, NULL);
			UT_ASSERTne(Pop, NULL);
		}

		int ret = pmempool_rm(path, flags);
		if (ret)
			UT_OUT("!%s: %s", path, pmempool_errormsg());

		if (open_pool) {
			UT_ASSERTne(Pop, NULL);
			pmemobj_close(Pop);
		}
	}

	DONE(NULL);
}
| 1,228 | 16.557143 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt/pmem2_movnt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt.c -- test for MOVNT threshold
*
* usage: pmem2_movnt
*/
#include "unittest.h"
#include "ut_pmem2.h"
/*
 * main -- map a file with libpmem2, then exercise the memcpy/memmove/memset
 * entry points with PMEM2_F_MEM_NODRAIN on power-of-two sizes up to 4 KiB,
 * verifying the data was written and that nothing past 'size' was touched
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;

	if (argc != 2)
		UT_FATAL("usage: %s file", argv[0]);

	/* log the env knobs that select the memcpy implementation */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_movnt %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	fd = OPEN(argv[1], O_RDWR);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);

	/* 8 KiB cache-line-aligned buffers; at most 4 KiB is exercised */
	src = MEMALIGN(64, 8192);
	dst = MEMALIGN(64, 8192);

	memset(src, 0x88, 8192);
	memset(dst, 0, 8192);

	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);

	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memcpy_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src, dst, size), 0);
		/* the byte just past the copy must stay untouched */
		UT_ASSERTeq(dst[size], 0);
	}

	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memmove_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src, dst, size), 0);
		UT_ASSERTeq(dst[size], 0);
	}

	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memset_fn(dst, 0x77, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(dst[0], 0x77);
		UT_ASSERTeq(dst[size - 1], 0x77);
		UT_ASSERTeq(dst[size], 0);
	}

	ALIGNED_FREE(dst);
	ALIGNED_FREE(src);

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 1,945 | 21.113636 | 59 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
class Pmem2MovntCommon(t.Test):
    """Common base for pmem2_movnt tests: creates the sparse test file."""
    test_type = t.Short

    # size of the sparse file the test binary maps
    filesize = 4 * t.MiB
    filepath = None

    def create_file(self, ctx):
        """Create a holey file and remember its path for the subclasses."""
        self.filepath = ctx.create_holey_file(self.filesize, 'testfile',)
class Pmem2Movnt(Pmem2MovntCommon):
    """Run pmem2_movnt with the default threshold, then once per entry in
    threshold_values; env vars named in envs0 are forced to '0' first."""
    threshold = None

    # thresholds to exercise, including an invalid (negative) one
    threshold_values = ['1024', '5', '-15']
    envs0 = ()

    def run(self, ctx):
        super().create_file(ctx)
        for env in self.envs0:
            ctx.env[env] = '0'

        # first run uses the implementation-default threshold
        ctx.exec('pmem2_movnt', self.filepath)

        for tv in self.threshold_values:
            ctx.env['PMEM_MOVNT_THRESHOLD'] = tv
            ctx.exec('pmem2_movnt', self.filepath)
class TEST0(Pmem2Movnt):
    """Default environment."""
    pass


@t.require_architectures('x86_64')
class TEST1(Pmem2Movnt):
    """AVX512F disabled."""
    envs0 = ("PMEM_AVX512F",)


@t.require_architectures('x86_64')
class TEST2(Pmem2Movnt):
    """AVX512F and AVX disabled."""
    envs0 = ("PMEM_AVX512F", "PMEM_AVX",)


class TEST3(Pmem2MovntCommon):
    """Non-temporal stores disabled entirely."""

    def run(self, ctx):
        super().create_file(ctx)
        ctx.env['PMEM_NO_MOVNT'] = '1'
        ctx.exec('pmem2_movnt', self.filepath)


class TEST4(Pmem2MovntCommon):
    """No movnt and no generic memcpy fallback."""

    def run(self, ctx):
        super().create_file(ctx)
        ctx.env['PMEM_NO_MOVNT'] = '1'
        ctx.env['PMEM_NO_GENERIC_MEMCPY'] = '1'
        ctx.exec('pmem2_movnt', self.filepath)
| 1,387 | 21.387097 | 73 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/pmem2_memmove.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem2_memmove.c -- test for doing a memmove
*
* usage:
* pmem2_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "file.h"
#include "memmove_common.h"
/*
 * do_memmove_variants -- run do_memmove once for every supported flag
 * combination from the Flags[] table
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes, persist_fn p,
	memmove_fn fn)
{
	int variant;

	for (variant = 0; variant < ARRAY_SIZE(Flags); ++variant)
		do_memmove(dst, src, file_name, dest_off, src_off, bytes,
				fn, Flags[variant], p);
}
/*
 * main -- map the given file, parse the d:/s:/b:/o: arguments and run
 * do_memmove_variants either on two disjoint halves of the mapping
 * (both src < dst and dst < src orderings) or, with o:1, on a single
 * shared buffer (full overlap)
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	char *src_orig;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;	/* 0 - two disjoint buffers, 1 - shared buffer */
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;

	/* log the env knobs that select the memmove implementation */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	fd = OPEN(argv[1], O_RDWR);

	if (argc < 3)
		USAGE();

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);

	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	mapped_len = pmem2_map_get_size(map);
	dst = pmem2_map_get_address(map);
	if (dst == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);

	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);

	/* parse 'X:value' arguments; X selects which parameter to set */
	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
				argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");

		size_t val = STRTOUL(&argv[arg][2], NULL, 0);

		switch (argv[arg][0]) {
		case 'd':	/* destination offset */
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':	/* source offset */
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':	/* number of bytes to move */
			if (val <= 0)
				UT_FATAL("bad length (%lu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':	/* overlap mode (same buffer for src/dst) */
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%lu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}

	if (who == 0) {
		/* src in the upper half of the mapping: src > dst */
		src_orig = src = dst + mapped_len / 2;
		UT_ASSERT(src > dst);

		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);

		/* dest > src */
		src = dst;
		dst = src_orig;

		if (dst <= src)
			UT_FATAL("cannot map files in memory order");

		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	} else {
		/* use the same buffer for source and destination */
		memset(dst, 0, bytes);
		persist(dst, bytes);
		do_memmove_variants(dst, dst, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	}

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 3,184 | 20.52027 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/memmove_common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memmove_common.h -- header file for common memmove_common test utilities
*/
#ifndef MEMMOVE_COMMON_H
#define MEMMOVE_COMMON_H 1

#include "unittest.h"
#include "file.h"

/* table of flag combinations that the memmove tests iterate over */
extern unsigned Flags[10];

#define USAGE() do { UT_FATAL("usage: %s file b:length [d:{offset}] "\
	"[s:{offset}] [o:{0|1}]", argv[0]); } while (0)

/* signature of the memmove implementation under test */
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
	unsigned flags);

/* routine used to flush stores made outside of the tested call */
typedef void (*persist_fn)(const void *ptr, size_t len);

/* run one memmove scenario and verify it against the libc memmove */
void do_memmove(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes,
	memmove_fn fn, unsigned flags, persist_fn p);

/* abort the test (with a byte-by-byte dump) unless buf1 matches buf2 */
void verify_contents(const char *file_name, int test, const char *buf1,
	const char *buf2, size_t len);

#endif
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/memmove_common.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memmove_common.c -- common part for tests doing a persistent memmove
*/
#include "unittest.h"
#include "memmove_common.h"
/*
 * verify_contents -- compare two buffers; if they differ, dump both side
 * by side (mismatching bytes marked with "!!!") and abort the test
 */
void
verify_contents(const char *file_name, int test,
	const char *buf1, const char *buf2,
	size_t len)
{
	if (memcmp(buf1, buf2, len) == 0)
		return;

	size_t pos;
	for (pos = 0; pos < len; ++pos)
		UT_ERR("%04zu 0x%02x 0x%02x %s", pos, (uint8_t)buf1[pos],
			(uint8_t)buf2[pos],
			buf1[pos] != buf2[pos] ? "!!!" : "");

	UT_FATAL("%s %d: %zu bytes do not match with memcmp",
		file_name, test, len);
}
/*
 * do_memmove -- worker function for memmove tests
 *
 * Always works within the boundary of 'bytes'. The first half of src is
 * filled with known patterns so we can check that nothing outside the
 * intended range in dest was overwritten. Plain (non-pmem) memset/memcpy/
 * memmove maintain DRAM shadow copies, so as not to introduce any side
 * effects of the implementation under test.
 *
 * Three scenarios are exercised:
 *   TEST 1: dest == src (self-move),
 *   TEST 2: len == 0 (no-op),
 *   TEST 3: len == bytes / 2 (regular move),
 * and after each the pmem buffers are compared against shadow copies
 * that went through the libc memmove.
 */
void
do_memmove(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes,
	memmove_fn fn, unsigned flags, persist_fn persist)
{
	void *ret;
	/* DRAM shadows; a single shared buffer when src and dst overlap */
	char *srcshadow = MALLOC(dest_off + src_off + bytes);
	char *dstshadow = srcshadow;
	if (src != dst)
		dstshadow = MALLOC(dest_off + src_off + bytes);
	char old;

	/* distinct fill patterns for src (0x11/0x33/0x44) and dst (0x22) */
	memset(src, 0x11, bytes);
	memset(dst, 0x22, bytes);
	memset(src, 0x33, bytes / 4);
	memset(src + bytes / 4, 0x44, bytes / 4);
	persist(src, bytes);
	persist(dst, bytes);
	memcpy(srcshadow, src, bytes);
	memcpy(dstshadow, dst, bytes);

	/* TEST 1, dest == src */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, dst + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, dstshadow + dest_off, bytes / 2);
	verify_contents(file_name, 0, dstshadow, dst, bytes);
	verify_contents(file_name, 1, srcshadow, src, bytes);

	/* TEST 2, len == 0 */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, src + src_off, 0, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, 0);
	verify_contents(file_name, 2, dstshadow, dst, bytes);
	verify_contents(file_name, 3, srcshadow, src, bytes);

	/* TEST 3, len == bytes / 2 */
	ret = fn(dst + dest_off, src + src_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	if (flags & PMEM_F_MEM_NOFLUSH)
		/* for pmemcheck */
		persist(dst + dest_off, bytes / 2);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, bytes / 2);
	verify_contents(file_name, 4, dstshadow, dst, bytes);
	verify_contents(file_name, 5, srcshadow, src, bytes);

	FREE(srcshadow);
	if (dstshadow != srcshadow)
		FREE(dstshadow);
}
/*
 * Flags -- flag combinations exercised by the memmove tests; keep the
 * element count in sync with the extern declaration in memmove_common.h
 */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 3,503 | 28.694915 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
class Pmem2Memmove(t.Test):
    """Run pmem2_memmove once per entry in test_cases, each on a freshly
    created sparse file; env vars in envs0/envs1 are forced to '0'/'1'."""
    test_type = t.Short

    # size of the sparse file handed to the binary
    filesize = 4 * t.MiB
    envs0 = ()
    envs1 = ()

    # each entry is one argument list for pmem2_memmove
    # (b: length, d: dest offset, s: src offset, o:1 = overlapping buffers)
    test_cases = [
        # No offset, no overlap
        ['b:4096'],
        # aligned dest, unaligned source, no overlap
        ['s:7', 'b:4096'],
        # unaligned dest, unaligned source, no overlap
        ['d:7', 's:13', 'b:4096'],
        # all aligned, src overlaps dest
        ['b:4096', 's:23', 'o:1'],
        # unaligned destination
        ['b:4096', 'd:21'],
        # unaligned source and dest
        ['b:4096', 'd:21', 's:7'],
        # overlap of src, aligned src and dest
        ['b:4096', 'o:1', 's:20'],
        # overlap of src, aligned src, unaligned dest
        ['b:4096', 'd:13', 'o:1', 's:20'],
        # dest overlaps src, unaligned dest, aligned src
        ['b:2048', 'd:33', 'o:1'],
        # dest overlaps src, aligned dest and src
        ['b:4096', 'o:1', 'd:20'],
        # aligned dest, no overlap, small length
        ['b:8'],
        # small length, offset 1 byte from 64 byte boundary
        ['b:4', 'd:63'],
        # overlap, src < dest, small length (ensures a copy backwards,
        # with number of bytes to align < length)
        ['o:1', 'd:2', 'b:8']
    ]

    def run(self, ctx):
        for env in self.envs0:
            ctx.env[env] = '0'
        for env in self.envs1:
            ctx.env[env] = '1'

        for tc in self.test_cases:
            filepath = ctx.create_holey_file(self.filesize, 'testfile',)
            ctx.exec('pmem2_memmove', filepath, *tc)
class TEST0(Pmem2Memmove):
    """Default environment."""
    pass


@t.require_architectures('x86_64')
class TEST1(Pmem2Memmove):
    """AVX512F disabled."""
    envs0 = ("PMEM_AVX512F",)


@t.require_architectures('x86_64')
class TEST2(Pmem2Memmove):
    """AVX512F and AVX disabled."""
    envs0 = ("PMEM_AVX512F", "PMEM_AVX",)


class TEST3(Pmem2Memmove):
    """Non-temporal stores disabled."""
    envs1 = ("PMEM_NO_MOVNT",)


class TEST4(Pmem2Memmove):
    """No movnt and no generic memcpy fallback."""
    envs1 = ("PMEM_NO_MOVNT", "PMEM_NO_GENERIC_MEMCPY")
| 2,043 | 22.767442 | 72 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_zones/obj_zones.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_zones.c -- allocates from a very large pool (exceeding 1 zone)
*
*/
#include <stddef.h>
#include <page_size.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_zones"
#define ALLOC_SIZE ((8191 * (256 * 1024)) - 16) /* must evenly divide a zone */
/*
 * test_create -- allocate ALLOC_SIZE objects until the pool is full and log
 * how many fit (must exceed what a single zone could hold). The last object
 * is freed so the follow-up open test can allocate exactly one.
 */
static void
test_create(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, 0,
			S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	PMEMoid oid;
	int count = 0;
	for (;;) {
		if (pmemobj_alloc(pop, &oid, ALLOC_SIZE, 0, NULL, NULL) != 0)
			break;
		count++;
	}

	UT_OUT("allocated: %d", count);

	/* leave one free slot for test_open */
	pmemobj_free(&oid);

	pmemobj_close(pop);
}
/*
 * test_open -- after reopening the pool, exactly one ALLOC_SIZE object
 * (the one freed by test_create) must be allocatable; a second attempt
 * must fail
 */
static void
test_open(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, LAYOUT_NAME);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* the slot released by test_create is available again */
	UT_ASSERTeq(pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL), 0);

	/* the pool is full now, so the next allocation must fail */
	UT_ASSERTne(pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL), 0);

	pmemobj_close(pop);
}
/*
 * test_malloc_free -- test if alloc until OOM/free/alloc until OOM sequence
 * produces the same number of allocations for the second alloc loop, i.e.
 * that freed space is fully reusable
 */
static void
test_malloc_free(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	size_t alloc_size = PMEM_PAGESIZE * 32;
	size_t max_allocs = 1000000;	/* sanity cap on the oid array */

	PMEMoid *oid = MALLOC(sizeof(PMEMoid) * max_allocs);

	/* first pass: allocate until the pool runs out of space */
	size_t n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
		UT_ASSERTne(n, max_allocs);
	}
	size_t first_run_allocated = n;

	for (size_t i = 0; i < n; ++i) {
		pmemobj_free(&oid[i]);
	}

	/* second pass must fit exactly as many objects as the first */
	n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
	}

	UT_ASSERTeq(first_run_allocated, n);
	pmemobj_close(pop);

	FREE(oid);
}
/*
 * main -- dispatch to one of the zone tests based on the operation name
 *
 * Only the first character of the operation is significant:
 * 'c' -> test_create, 'o' -> test_open, 'f' -> test_malloc_free.
 * The usage message previously omitted the supported "free" operation.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_zones");

	if (argc != 3)
		UT_FATAL("usage: %s file-name [open|create|free]", argv[0]);

	const char *path = argv[1];
	char op = argv[2][0];

	switch (op) {
	case 'c':
		test_create(path);
		break;
	case 'o':
		test_open(path);
		break;
	case 'f':
		test_malloc_free(path);
		break;
	default:
		UT_FATAL("invalid operation");
	}

	DONE(NULL);
}
| 2,706 | 20.148438 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_rpmem_heap_interrupt/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2017, Intel Corporation
#
#
# src/test/obj_rpmem_heap_interrupt/config.sh -- test configuration
#

# run only on a pmem-capable filesystem, for both build types, and for
# every rpmem provider/persist-method combination
CONF_GLOBAL_FS_TYPE=pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 303 | 20.714286 | 67 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_is_poolset/util_is_poolset.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* util_is_poolset.c -- unit test for util_is_poolset
*
* usage: util_is_poolset file
*/
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
/*
 * main -- for every path given on the command line, report whether
 * util_is_poolset_file() considers it a poolset file
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_is_poolset");

	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
			MAJOR_VERSION, MINOR_VERSION);

	if (argc < 2)
		UT_FATAL("usage: %s file...", argv[0]);

	for (int i = 1; i < argc; i++) {
		int is_poolset = util_is_poolset_file(argv[i]);
		UT_OUT("util_is_poolset(%s): %d", argv[i], is_poolset);
	}

	common_fini();

	DONE(NULL);
}
| 834 | 18.418605 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_config/obj_ctl_config.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_config.c -- tests for ctl configuration
*/
#include "unittest.h"
#include "out.h"
#define LAYOUT "obj_ctl_config"
/*
 * main -- open the pool and dump all available ctl read entry points
 *
 * Fix: the return value of pmemobj_ctl_get() was ignored; on failure
 * 'result' would be read uninitialized (undefined behavior). Each query
 * is now asserted to succeed before its result is printed.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_config");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = pmemobj_open(path, LAYOUT);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* dump all available ctl read entry points */
	int result;
	int ret;

	ret = pmemobj_ctl_get(pop, "prefault.at_open", &result);
	UT_ASSERTeq(ret, 0);	/* 'result' is valid only on success */
	UT_OUT("%d", result);

	ret = pmemobj_ctl_get(pop, "prefault.at_create", &result);
	UT_ASSERTeq(ret, 0);
	UT_OUT("%d", result);

	pmemobj_close(pop);

	DONE(NULL);
}
| 735 | 18.891892 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_locks_abort/obj_tx_locks_abort.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_tx_locks_nested.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "locks"
TOID_DECLARE_ROOT(struct root_obj);
TOID_DECLARE(struct obj, 1);
struct root_obj {
PMEMmutex lock;
TOID(struct obj) head;
};
struct obj {
int data;
PMEMmutex lock;
TOID(struct obj) next;
};
/*
 * do_nested_tx -- (internal) nested transaction
 *
 * Recursively opens one nested transaction per list element, taking the
 * element's mutex as a transaction lock and setting its 'data' field.
 */
static void
do_nested_tx(PMEMobjpool *pop, TOID(struct obj) o, int value)
{
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
	} TX_END;
}
/*
 * do_aborted_nested_tx -- (internal) aborted nested transaction
 *
 * Like do_nested_tx, but aborts the outermost transaction after the
 * recursion; the TX_FINALLY block then walks the whole list and reports
 * (via trylock) whether each per-object mutex was released by the abort.
 */
static void
do_aborted_nested_tx(PMEMobjpool *pop, TOID(struct obj) oid, int value)
{
	TOID(struct obj) o = oid;

	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
		pmemobj_tx_abort(EINVAL);
	} TX_FINALLY {
		/* after the abort, every lock should be free again */
		o = oid;
		while (!TOID_IS_NULL(o)) {
			if (pmemobj_mutex_trylock(pop, &D_RW(o)->lock)) {
				UT_OUT("trylock failed");
			} else {
				UT_OUT("trylock succeeded");
				pmemobj_mutex_unlock(pop, &D_RW(o)->lock);
			}
			o = D_RO(o)->next;
		}
	} TX_END;
}
/*
 * do_check -- (internal) walk the list and print each object's 'data' field
 */
static void
do_check(TOID(struct obj) o)
{
	for (; !TOID_IS_NULL(o); o = D_RO(o)->next)
		UT_OUT("data = %d", D_RO(o)->data);
}
/*
 * main -- build a 4-element list of objects (each holding its own
 * PMEMmutex), then run a committed nested transaction followed by an
 * aborted one, printing the data/lock state after each
 */
int
main(int argc, char *argv[])
{
	PMEMobjpool *pop;

	START(argc, argv, "obj_tx_locks_abort");

	/* NOTE(review): allows up to 2 extra args though usage names one */
	if (argc > 3)
		UT_FATAL("usage: %s <file>", argv[0]);

	pop = pmemobj_create(argv[1], LAYOUT_NAME,
			PMEMOBJ_MIN_POOL * 4, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	TOID(struct root_obj) root = POBJ_ROOT(pop, struct root_obj);

	/*
	 * NOTE(review): no TX_PARAM_NONE terminator here, unlike
	 * do_nested_tx -- presumably the macro appends it; confirm.
	 */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->head = TX_ZNEW(struct obj);
		TOID(struct obj) o;
		o = D_RW(root)->head;
		D_RW(o)->data = 100;
		pmemobj_mutex_zero(pop, &D_RW(o)->lock);
		/* append three more objects, data 101..103 */
		for (int i = 0; i < 3; i++) {
			D_RW(o)->next = TX_ZNEW(struct obj);
			o = D_RO(o)->next;
			D_RW(o)->data = 101 + i;
			pmemobj_mutex_zero(pop, &D_RW(o)->lock);
		}
		TOID_ASSIGN(D_RW(o)->next, OID_NULL);
	} TX_END;

	UT_OUT("initial state");
	do_check(D_RO(root)->head);

	UT_OUT("nested tx");
	do_nested_tx(pop, D_RW(root)->head, 200);
	do_check(D_RO(root)->head);

	UT_OUT("aborted nested tx");
	do_aborted_nested_tx(pop, D_RW(root)->head, 300);
	do_check(D_RO(root)->head);

	pmemobj_close(pop);

	DONE(NULL);
}
| 2,994 | 20.392857 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_sync_win/libpmempool_sync_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool_sync_win -- a unittest for libpmempool sync.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
/*
 * wmain -- wide-character entry point (Windows): sync the poolset named by
 * the first argument using flags parsed from the second, then print the
 * result (with errno on failure)
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "libpmempool_sync_win");

	if (argc != 3)
		UT_FATAL("usage: %s poolset_file flags", ut_toUTF8(argv[0]));

	int ret = pmempool_syncW(argv[1], (unsigned)wcstoul(argv[2], NULL, 0));

	if (ret)
		UT_OUT("result: %d, errno: %d", ret, errno);
	else
		UT_OUT("result: 0");

	DONEW(NULL);
}
| 602 | 18.451613 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_persist_valgrind/pmem2_persist_valgrind.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_persist_valgrind.c -- pmem2_persist_valgrind tests
*/
#include "out.h"
#include "unittest.h"
#include "ut_pmem2_utils.h"
#define DATA "XXXXXXXX"
#define STRIDE_SIZE 4096
/*
 * test_ctx -- essential parameters used by test
 */
struct test_ctx {
	int fd;			/* descriptor of the file under test */
	struct pmem2_map *map;	/* mapping created by test_init */
};
/*
 * test_init -- prepare resources required for testing: open the file given
 * as the first test argument and map it with page granularity into ctx
 *
 * Returns the number of consumed command-line arguments (always 1).
 */
static int
test_init(const struct test_case *tc, int argc, char *argv[],
	struct test_ctx *ctx)
{
	if (argc < 1)
		UT_FATAL("usage: %s <file>", tc->name);

	char *file = argv[0];
	ctx->fd = OPEN(file, O_RDWR);

	struct pmem2_source *src;
	int ret = pmem2_source_from_fd(&src, ctx->fd);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	struct pmem2_config *cfg;
	/* fill pmem2_config in minimal scope */
	ret = pmem2_config_new(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	ret = pmem2_config_set_required_store_granularity(
			cfg, PMEM2_GRANULARITY_PAGE);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	/* execute pmem2_map and validate the result */
	ret = pmem2_map(cfg, src, &ctx->map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(ctx->map, NULL);

	/* the mapping must cover the whole source */
	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	UT_ASSERTeq(pmem2_map_get_size(ctx->map), size);

	/* NOTE(review): 'src' is never deleted -- possible leak; confirm */
	pmem2_config_delete(&cfg);

	/* the function returns the number of consumed arguments */
	return 1;
}
/*
 * test_fini -- cleanup the test resources acquired by test_init
 * (unmap the file and close its descriptor)
 */
static void
test_fini(struct test_ctx *ctx)
{
	pmem2_unmap(&ctx->map);
	CLOSE(ctx->fd);
}
/*
 * data_write -- stamp the DATA pattern into the buffer every 'stride'
 * bytes, stopping before a chunk would cross the end of the buffer
 */
static void
data_write(void *addr, size_t size, size_t stride)
{
	uintptr_t base = (uintptr_t)addr;
	size_t off;

	for (off = 0; off + sizeof(DATA) <= size; off += stride)
		memcpy((void *)(base + off), DATA, sizeof(DATA));
}
/*
* data_persist -- persist data in a range of mapped memory with defined stride
*/
static void
data_persist(struct pmem2_map *map, size_t len, size_t stride)
{
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn p_func = pmem2_get_persist_fn(map);
for (size_t offset = 0; offset + len <= map_size;
offset += stride) {
p_func(addr + offset, len);
}
}
/*
 * test_persist_continuous_range -- write the whole mapping back to back,
 * then persist it with a single call covering the entire range
 */
static int
test_persist_continuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	data_write(addr, map_size, sizeof(DATA) /* stride */);
	/* stride == map_size => exactly one persist of the full range */
	data_persist(ctx.map, map_size, map_size /* stride */);

	test_fini(&ctx);

	return ret;
}
/*
 * test_persist_discontinuous_range -- write DATA every STRIDE_SIZE bytes
 * across the mapping, then persist each written chunk individually
 */
static int
test_persist_discontinuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	data_write(addr, map_size, STRIDE_SIZE);
	data_persist(ctx.map, sizeof(DATA), STRIDE_SIZE);

	test_fini(&ctx);

	return ret;
}
/*
 * test_persist_discontinuous_range_partially -- write DATA every
 * STRIDE_SIZE bytes but persist only every other chunk, leaving half of
 * the writes unflushed
 */
static int
test_persist_discontinuous_range_partially(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	data_write(addr, map_size, STRIDE_SIZE);
	/* persist only a half of the writes */
	data_persist(ctx.map, sizeof(DATA), 2 * STRIDE_SIZE);

	test_fini(&ctx);

	return ret;
}
/*
 * test_persist_nonpmem_data -- persist data in a range of memory mapped
 * with plain mmap() rather than through pmem2
 *
 * Fix: the mmap() result was never checked; on failure MAP_FAILED would
 * be passed to data_write() and dereferenced. The mapping is now
 * validated before use.
 */
static int
test_persist_nonpmem_data(const struct test_case *tc, int argc, char *argv[])
{
	struct test_ctx ctx = {0};
	/* a pmem2_map is still needed to obtain the persist function */
	int ret = test_init(tc, argc, argv, &ctx);

	size_t size = pmem2_map_get_size(ctx.map);

	int flags = MAP_SHARED;
	int proto = PROT_READ | PROT_WRITE;

	char *addr = mmap(NULL, size, proto, flags, ctx.fd, 0);
	if (addr == MAP_FAILED)
		UT_FATAL("!mmap");

	data_write(addr, size, sizeof(DATA) /* stride */);

	pmem2_persist_fn p_func = pmem2_get_persist_fn(ctx.map);
	p_func(addr, size);

	munmap(addr, size);
	test_fini(&ctx);

	return ret;
}
/*
 * test_cases -- available test cases; each name here may be passed on the
 * command line and is dispatched by TEST_CASE_PROCESS() in main()
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_persist_continuous_range),
	TEST_CASE(test_persist_discontinuous_range),
	TEST_CASE(test_persist_discontinuous_range_partially),
	TEST_CASE(test_persist_nonpmem_data),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize logging and run the test cases named on the
 * command line
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_persist_valgrind");
	out_init("pmem2_persist_valgrind", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0,
			0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 5,072 | 22.37788 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_persist_valgrind/TESTS.py
|
#!../env.py
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
import testframework as t
from testframework import granularity as g
import futils
import os
# All test cases in pmem2_persist_valgrind use Valgrind, which is not available
# on Windows systems.
@t.windows_exclude
@t.require_valgrind_enabled('pmemcheck')
# XXX In the match file, there are two possible numbers of errors. It varies
# from compiler to compiler. There should be only one number when pmemcheck
# will be fixed. Please also remove the below requirement after pmemcheck fix.
# https://github.com/pmem/valgrind/pull/76
@g.require_granularity(g.CL_OR_LESS)
class PMEM2_PERSIST(t.Test):
    """Base class: run one pmem2_persist_valgrind test case (named by the
    subclass's test_case attribute) on a fresh 2 MiB sparse file."""
    test_type = t.Medium
    available_granularity = None

    def run(self, ctx):
        filepath = ctx.create_holey_file(2 * t.MiB, 'testfile')
        ctx.exec('pmem2_persist_valgrind', self.test_case, filepath)
class TEST0(PMEM2_PERSIST):
    """persist continuous data in a range of pmem"""
    test_case = "test_persist_continuous_range"


class TEST1(PMEM2_PERSIST):
    """persist discontinuous data in a range of pmem"""
    test_case = "test_persist_discontinuous_range"


class TEST2(PMEM2_PERSIST):
    """persist part of discontinuous data in a range of pmem"""
    test_case = "test_persist_discontinuous_range_partially"

    def run(self, ctx):
        # NOTE(review): overrides the base run with a smaller (16 KiB) file
        # -- presumably to bound the pmemcheck error count; confirm
        filepath = ctx.create_holey_file(16 * t.KiB, 'testfile')
        ctx.exec('pmem2_persist_valgrind', self.test_case, filepath)

        # surface the tail of the pmemcheck log in the test output
        pmemecheck_log = os.path.join(
            os.getcwd(), 'pmem2_persist_valgrind', 'pmemcheck2.log')
        futils.tail(pmemecheck_log, 2)


class TEST3(PMEM2_PERSIST):
    """persist data in a range of the memory mapped by mmap()"""
    test_case = "test_persist_nonpmem_data"
| 1,773 | 31.254545 | 79 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alloc_class/obj_ctl_alloc_class.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* obj_ctl_alloc_class.c -- tests for the ctl entry points: heap.alloc_class
*/
#include <sys/resource.h>
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class"
static void
basic(const char *path)
{
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
int ret;
PMEMoid oid;
size_t usable_size;
struct pobj_alloc_class_desc alloc_class_128;
alloc_class_128.header_type = POBJ_HEADER_NONE;
alloc_class_128.unit_size = 128;
alloc_class_128.units_per_block = 1000;
alloc_class_128.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
&alloc_class_128);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_129;
alloc_class_129.header_type = POBJ_HEADER_COMPACT;
alloc_class_129.unit_size = 1024;
alloc_class_129.units_per_block = 1000;
alloc_class_129.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
&alloc_class_129);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_128_r;
ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc",
&alloc_class_128_r);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(alloc_class_128.header_type, alloc_class_128_r.header_type);
UT_ASSERTeq(alloc_class_128.unit_size, alloc_class_128_r.unit_size);
UT_ASSERT(alloc_class_128.units_per_block <=
alloc_class_128_r.units_per_block);
/*
* One unit from alloc class 128 - 128 bytes unit size, minimal headers.
*/
ret = pmemobj_xalloc(pop, &oid, 128, 0, POBJ_CLASS_ID(128), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_free(&oid);
/*
* Reserve as above.
*/
struct pobj_action act;
oid = pmemobj_xreserve(pop, &act, 128, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid));
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_cancel(pop, &act, 1);
/*
* One unit from alloc class 128 - 128 bytes unit size, minimal headers,
* but request size 1 byte.
*/
ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(128), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_free(&oid);
/*
* Two units from alloc class 129 -
* 1024 bytes unit size, compact headers.
*/
ret = pmemobj_xalloc(pop, &oid, 1024 + 1,
0, POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (1024 * 2) - 16); /* 2 units minus hdr */
pmemobj_free(&oid);
/*
* 64 units from alloc class 129
* - 1024 bytes unit size, compact headers.
*/
ret = pmemobj_xalloc(pop, &oid, (1024 * 64) - 16,
0, POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (1024 * 64) - 16);
pmemobj_free(&oid);
/*
* 65 units from alloc class 129 -
* 1024 bytes unit size, compact headers.
* Should fail, as it would require two bitmap modifications.
*/
ret = pmemobj_xalloc(pop, &oid, 1024 * 64 + 1, 0,
POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, -1);
/*
* Nonexistent alloc class.
*/
ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(130), NULL, NULL);
UT_ASSERTeq(ret, -1);
struct pobj_alloc_class_desc alloc_class_new;
alloc_class_new.header_type = POBJ_HEADER_NONE;
alloc_class_new.unit_size = 777;
alloc_class_new.units_per_block = 200;
alloc_class_new.class_id = 0;
alloc_class_new.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_fail;
alloc_class_fail.header_type = POBJ_HEADER_NONE;
alloc_class_fail.unit_size = 777;
alloc_class_fail.units_per_block = 200;
alloc_class_fail.class_id = 0;
alloc_class_fail.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_fail);
UT_ASSERTeq(ret, -1);
ret = pmemobj_ctl_set(pop, "heap.alloc_class.200.desc",
&alloc_class_fail);
UT_ASSERTeq(ret, -1);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 777);
struct pobj_alloc_class_desc alloc_class_new_huge;
alloc_class_new_huge.header_type = POBJ_HEADER_NONE;
alloc_class_new_huge.unit_size = (2 << 23);
alloc_class_new_huge.units_per_block = 1;
alloc_class_new_huge.class_id = 0;
alloc_class_new_huge.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_huge);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new_huge.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (2 << 23));
struct pobj_alloc_class_desc alloc_class_new_max;
alloc_class_new_max.header_type = POBJ_HEADER_COMPACT;
alloc_class_new_max.unit_size = PMEMOBJ_MAX_ALLOC_SIZE;
alloc_class_new_max.units_per_block = 1024;
alloc_class_new_max.class_id = 0;
alloc_class_new_max.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_max);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new_max.class_id), NULL, NULL);
UT_ASSERTne(ret, 0);
struct pobj_alloc_class_desc alloc_class_new_loop;
alloc_class_new_loop.header_type = POBJ_HEADER_COMPACT;
alloc_class_new_loop.unit_size = 16384;
alloc_class_new_loop.units_per_block = 63;
alloc_class_new_loop.class_id = 0;
alloc_class_new_loop.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_loop);
UT_ASSERTeq(ret, 0);
size_t s = (63 * 16384) - 16;
ret = pmemobj_xalloc(pop, &oid, s + 1, 0,
POBJ_CLASS_ID(alloc_class_new_loop.class_id), NULL, NULL);
UT_ASSERTne(ret, 0);
struct pobj_alloc_class_desc alloc_class_tiny;
alloc_class_tiny.header_type = POBJ_HEADER_NONE;
alloc_class_tiny.unit_size = 7;
alloc_class_tiny.units_per_block = 1;
alloc_class_tiny.class_id = 0;
alloc_class_tiny.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_tiny);
UT_ASSERTeq(ret, 0);
UT_ASSERT(alloc_class_tiny.units_per_block > 1);
for (int i = 0; i < 1000; ++i) {
ret = pmemobj_xalloc(pop, &oid, 7, 0,
POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
}
pmemobj_close(pop);
}
/*
 * many -- (internal) register a tiny allocation class whose run holds
 * UINT16_MAX + 1 units and allocate every unit, verifying each allocated
 * unit is backed by fresh (zero-filled) memory.
 */
static void
many(const char *path)
{
	PMEMobjpool *pop;

	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* one more unit than fits in a 16-bit counter */
	unsigned nunits = UINT16_MAX + 1;

	struct pobj_alloc_class_desc alloc_class_tiny;
	alloc_class_tiny.header_type = POBJ_HEADER_NONE;
	alloc_class_tiny.unit_size = 8;
	alloc_class_tiny.units_per_block = nunits;
	alloc_class_tiny.class_id = 0;
	alloc_class_tiny.alignment = 0;
	int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_tiny);
	UT_ASSERTeq(ret, 0);

	PMEMoid oid;
	uint64_t *counterp = NULL;
	for (size_t i = 0; i < nunits; ++i) {
		ret = pmemobj_xalloc(pop, &oid, 8, 0,
			POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
		/* previously unchecked -- a failed alloc would deref junk */
		UT_ASSERTeq(ret, 0);
		counterp = pmemobj_direct(oid);
		UT_ASSERTne(counterp, NULL);
		(*counterp)++;
		/*
		 * This works only because this is a fresh pool in a new file
		 * and so the counter must be initially zero.
		 * This might have to be fixed if that ever changes.
		 */
		UT_ASSERTeq(*counterp, 1);
	}

	pmemobj_close(pop);
}
/*
 * main -- entry point: dispatch to the basic ('b') or many ('m') scenario.
 *
 * usage: obj_ctl_alloc_class file-name b|m
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alloc_class");

	if (argc != 3)
		UT_FATAL("usage: %s file-name b|m", argv[0]);

	const char *path = argv[1];

	if (argv[2][0] == 'b')
		basic(path);
	else if (argv[2][0] == 'm')
		many(path);
	else
		/* previously an unknown op silently ran no scenario at all */
		UT_FATAL("unknown operation '%c' (expected b or m)",
			argv[2][0]);

	DONE(NULL);
}
| 7,857 | 26.865248 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/traces_pmem/traces_pmem.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* traces_pmem.c -- unit test traces for libraries pmem
*/
#include "unittest.h"
/*
 * main -- verify that each PMDK library accepts its own compiled-in
 * version pair (the *_check_version calls return NULL on a match, so the
 * negation asserts success); the test harness compares the trace output.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "traces_pmem");

	/* libpmem */
	UT_ASSERT(!pmem_check_version(PMEM_MAJOR_VERSION,
			PMEM_MINOR_VERSION));
	/* libpmemblk */
	UT_ASSERT(!pmemblk_check_version(PMEMBLK_MAJOR_VERSION,
			PMEMBLK_MINOR_VERSION));
	/* libpmemlog */
	UT_ASSERT(!pmemlog_check_version(PMEMLOG_MAJOR_VERSION,
			PMEMLOG_MINOR_VERSION));
	/* libpmemobj */
	UT_ASSERT(!pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
			PMEMOBJ_MINOR_VERSION));

	DONE(NULL);
}
| 596 | 21.961538 | 56 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_debug/obj_debug.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_debug.c -- unit test for debug features
*
* usage: obj_debug file operation [op_index]:...
*
* operations are 'f' or 'l' or 'r' or 'a' or 'n' or 's'
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <sys/param.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "layout_obj_debug"
TOID_DECLARE_ROOT(struct root);
TOID_DECLARE(struct tobj, 0);
TOID_DECLARE(struct int3_s, 1);
struct root {
POBJ_LIST_HEAD(listhead, struct tobj) lhead, lhead2;
uint32_t val;
};
struct tobj {
POBJ_LIST_ENTRY(struct tobj) next;
};
struct int3_s {
uint32_t i1;
uint32_t i2;
uint32_t i3;
};
typedef void (*func)(PMEMobjpool *pop, void *sync, void *cond);
/*
 * test_FOREACH -- (internal) run every POBJ_FOREACH-family iteration macro
 * outside a transaction, inside a transaction, and after it, over a pool
 * containing a single list element; exercises the debug checks only --
 * the loop bodies are intentionally empty.
 */
static void
test_FOREACH(const char *path)
{
	PMEMobjpool *pop = NULL;
	PMEMoid varoid, nvaroid;
	TOID(struct root) root;
	TOID(struct tobj) var, nvar;

/* all iteration flavors bundled so they can be replayed in each context */
#define COMMANDS_FOREACH()\
	do {\
	POBJ_FOREACH(pop, varoid) {}\
	POBJ_FOREACH_SAFE(pop, varoid, nvaroid) {}\
	POBJ_FOREACH_TYPE(pop, var) {}\
	POBJ_FOREACH_SAFE_TYPE(pop, var, nvar) {}\
	POBJ_LIST_FOREACH(var, &D_RW(root)->lhead, next) {}\
	POBJ_LIST_FOREACH_REVERSE(var, &D_RW(root)->lhead, next) {}\
	} while (0)

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));

	/* one element so the list iterations have something to walk */
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,
			sizeof(struct tobj), NULL, NULL);

	COMMANDS_FOREACH();
	TX_BEGIN(pop) {
		COMMANDS_FOREACH();
	} TX_ONABORT {
		UT_ASSERT(0); /* the transaction must not abort */
	} TX_END
	COMMANDS_FOREACH();

	pmemobj_close(pop);
}
/*
 * test_lists -- (internal) exercise the POBJ_LIST_* operations (insert new,
 * insert after, move between lists, remove, free) outside, inside, and
 * after a transaction.
 */
static void
test_lists(const char *path)
{
	PMEMobjpool *pop = NULL;
	TOID(struct root) root;
	TOID(struct tobj) elm;

/* full insert/move/remove cycle bundled for replay in each context */
#define COMMANDS_LISTS()\
	do {\
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,\
			sizeof(struct tobj), NULL, NULL);\
	POBJ_NEW(pop, &elm, struct tobj, NULL, NULL);\
	POBJ_LIST_INSERT_AFTER(pop, &D_RW(root)->lhead,\
			POBJ_LIST_FIRST(&D_RW(root)->lhead), elm, next);\
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->lhead,\
			&D_RW(root)->lhead2, elm, next, next);\
	POBJ_LIST_REMOVE(pop, &D_RW(root)->lhead2, elm, next);\
	POBJ_FREE(&elm);\
	} while (0)

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));

	COMMANDS_LISTS();
	TX_BEGIN(pop) {
		COMMANDS_LISTS();
	} TX_ONABORT {
		UT_ASSERT(0); /* the transaction must not abort */
	} TX_END
	COMMANDS_LISTS();

	pmemobj_close(pop);
}
/*
 * int3_constructor -- (internal) object constructor: copy the three
 * uint32_t fields from the argument into the freshly allocated object
 * and persist them; always reports success.
 */
static int
int3_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct int3_s *dst = (struct int3_s *)ptr;
	const struct int3_s *src = (const struct int3_s *)arg;

	dst->i1 = src->i1;
	dst->i2 = src->i2;
	dst->i3 = src->i3;
	pmemobj_persist(pop, dst, sizeof(*dst));

	return 0;
}
/*
 * test_alloc_construct -- (internal) call the non-transactional
 * pmemobj_alloc (with a constructor) from inside an open transaction;
 * the debug build is expected to flag this usage.
 * Note: the allocation size is sizeof(PMEMoid), which is large enough
 * for the struct int3_s the constructor writes.
 */
static void
test_alloc_construct(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TX_BEGIN(pop) {
		struct int3_s args = { 1, 2, 3 };
		PMEMoid allocation;
		/* non-transactional alloc inside TX -- the case under test */
		pmemobj_alloc(pop, &allocation, sizeof(allocation), 1,
				int3_constructor, &args);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	pmemobj_close(pop);
}
/*
 * test_double_free -- (internal) free the same object twice on purpose.
 * NOTE(review): the second pmemobj_free is the scenario under test
 * (presumably expected to be caught by the debug runtime) -- the pool is
 * deliberately never closed because the test is not expected to get there.
 */
static void
test_double_free(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	PMEMoid oid, oid2;
	int err = pmemobj_zalloc(pop, &oid, 100, 0);
	UT_ASSERTeq(err, 0);
	UT_ASSERT(!OID_IS_NULL(oid));

	/* keep a copy -- pmemobj_free() zeroes the oid it is given */
	oid2 = oid;

	pmemobj_free(&oid);
	pmemobj_free(&oid2); /* intentional double free */
}
/*
 * test_constr -- (internal) constructor that itself allocates, recursively
 * registering itself as the constructor; used to verify that allocating
 * from within a constructor is detected. The ptr/arg parameters are
 * intentionally unused.
 */
static int
test_constr(PMEMobjpool *pop, void *ptr, void *arg)
{
	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);

	return 0;
}
/*
 * test_alloc_in_constructor -- (internal) trigger an allocation from inside
 * an object constructor (via test_constr). NOTE(review): the pool is never
 * closed -- presumably the debug runtime aborts inside the nested alloc;
 * confirm against the test's expected output.
 */
static void
test_alloc_in_constructor(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
}
/*
 * The wrappers below adapt each libpmemobj synchronization entry point to
 * the common `func` signature (pop, sync, cond) so test_sync_pop_check()
 * can dispatch to any of them by index. Each one simply casts the generic
 * sync/cond pointers to the concrete PMEMmutex/PMEMrwlock/PMEMcond type.
 */

/* PMEMmutex operations */
static void
test_mutex_lock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_lock(pop, (PMEMmutex *)sync);
}

static void
test_mutex_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_unlock(pop, (PMEMmutex *)sync);
}

static void
test_mutex_trylock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_trylock(pop, (PMEMmutex *)sync);
}

static void
test_mutex_timedlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_timedlock(pop, (PMEMmutex *)sync, NULL);
}

static void
test_mutex_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_zero(pop, (PMEMmutex *)sync);
}

/* PMEMrwlock operations */
static void
test_rwlock_rdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)sync);
}

static void
test_rwlock_wrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)sync);
}

static void
test_rwlock_timedrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedrdlock(pop, (PMEMrwlock *)sync, NULL);
}

static void
test_rwlock_timedwrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedwrlock(pop, (PMEMrwlock *)sync, NULL);
}

static void
test_rwlock_tryrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_tryrdlock(pop, (PMEMrwlock *)sync);
}

static void
test_rwlock_trywrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_trywrlock(pop, (PMEMrwlock *)sync);
}

static void
test_rwlock_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_unlock(pop, (PMEMrwlock *)sync);
}

static void
test_rwlock_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_zero(pop, (PMEMrwlock *)sync);
}

/* PMEMcond operations */
static void
test_cond_wait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_wait(pop, (PMEMcond *)cond, (PMEMmutex *)sync);
}

static void
test_cond_signal(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_signal(pop, (PMEMcond *)cond);
}

static void
test_cond_broadcast(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_broadcast(pop, (PMEMcond *)cond);
}

static void
test_cond_timedwait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_timedwait(pop, (PMEMcond *)cond, (PMEMmutex *)sync, NULL);
}

static void
test_cond_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_zero(pop, (PMEMcond *)cond);
}
/*
 * test_sync_pop_check -- (internal) invoke one synchronization wrapper,
 * selected by op_index, with a deliberately bogus pool pointer (0x1) and
 * stack-allocated lock objects -- the sync primitives are expected to
 * detect that the pool/lock do not belong together.
 */
static void
test_sync_pop_check(unsigned long op_index)
{
	/* invalid on purpose -- never dereferenced as a real pool */
	PMEMobjpool *pop = (PMEMobjpool *)(uintptr_t)0x1;

	/* dispatch table; order defines the meaning of op_index */
	func to_test[] = {
		test_mutex_lock, test_mutex_unlock, test_mutex_trylock,
		test_mutex_timedlock, test_mutex_zero, test_rwlock_rdlock,
		test_rwlock_wrlock, test_rwlock_timedrdlock,
		test_rwlock_timedwrlock, test_rwlock_tryrdlock,
		test_rwlock_trywrlock, test_rwlock_unlock, test_rwlock_zero,
		test_cond_wait, test_cond_signal, test_cond_broadcast,
		test_cond_timedwait, test_cond_zero
	};

	if (op_index >= (sizeof(to_test) / sizeof(to_test[0])))
		UT_FATAL("Invalid op_index provided");

	/* locks live on the stack, i.e. outside any pool */
	PMEMmutex stack_sync;
	PMEMcond stack_cond;

	to_test[op_index](pop, &stack_sync, &stack_cond);
}
/*
 * main -- entry point: dispatch the selected debug scenario.
 *
 * usage: obj_debug file-name op:f|l|r|a|p|n|s [op_index]
 *
 * NOTE(review): 'r' passes the strchr() filter below but has no matching
 * switch case, so it is silently a no-op -- looks vestigial; confirm
 * against the test scripts.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_debug");

	if (argc < 3)
		UT_FATAL("usage: %s file-name op:f|l|r|a|s [op_index]",
			argv[0]);

	const char *path = argv[1];

	/* the op must be exactly one character from the accepted set */
	if (strchr("flrapns", argv[2][0]) == NULL || argv[2][1] != '\0')
		UT_FATAL("op must be f or l or r or a or p or n or s");

	unsigned long op_index;
	char *tailptr;
	switch (argv[2][0]) {
	case 'f': /* FOREACH macros */
		test_FOREACH(path);
		break;
	case 'l': /* list operations */
		test_lists(path);
		break;
	case 'a': /* alloc with constructor inside TX */
		test_alloc_construct(path);
		break;
	case 'p': /* intentional double free */
		test_double_free(path);
		break;
	case 'n': /* alloc from within a constructor */
		test_alloc_in_constructor(path);
		break;
	case 's': /* sync primitive with bogus pool; needs op_index */
		if (argc != 4)
			UT_FATAL("Provide an op_index with option s");
		op_index = strtoul(argv[3], &tailptr, 10);
		if (tailptr[0] != '\0')
			UT_FATAL("Wrong op_index format");
		test_sync_pop_check(op_index);
		break;
	}

	DONE(NULL);
}
| 8,098 | 20.771505 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmalloc_rand_mt/obj_pmalloc_rand_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* obj_pmalloc_mt.c -- multithreaded test of allocator
*/
#include <stdint.h>
#include "file.h"
#include "rand.h"
#include "unittest.h"
#define RRAND(seed, max, min) (rnd64_r(&(seed)) % ((max) - (min)) + (min))
static size_t object_size;
static unsigned nobjects;
static unsigned iterations = 1000000;
static unsigned seed;
/*
 * test_worker -- (internal) worker thread: random alloc/free workload.
 *
 * Each iteration picks a random slot; with probability proportional to the
 * current fill ratio it frees the object in that slot (if any), otherwise
 * it allocates a random-sized object there (if the slot is empty). This
 * keeps the pool hovering around a steady-state fill level.
 */
static void *
test_worker(void *arg)
{
	PMEMobjpool *pop = arg;
	PMEMoid *objects = ZALLOC(sizeof(PMEMoid) * nobjects);
	unsigned fill = 0; /* number of currently allocated slots */
	int ret;
	rng_t myseed;
	randomize_r(&myseed, seed);

	for (unsigned i = 0; i < iterations; ++i) {
		/* percentage of occupied slots drives the free/alloc bias */
		unsigned fill_ratio = (fill * 100) / nobjects;
		unsigned pos = RRAND(myseed, nobjects, 0);
		size_t size = RRAND(myseed, object_size, 64);

		if (RRAND(myseed, 100, 0) < fill_ratio) {
			if (!OID_IS_NULL(objects[pos])) {
				pmemobj_free(&objects[pos]);
				objects[pos] = OID_NULL;
				fill--;
			}
		} else {
			if (OID_IS_NULL(objects[pos])) {
				ret = pmemobj_alloc(pop, &objects[pos],
					size, 0, NULL, NULL);
				UT_ASSERTeq(ret, 0);
				fill++;
			}
		}
	}

	FREE(objects);

	return NULL;
}
/*
 * main -- entry point: parse arguments, create or open the pool, and run
 * the random alloc/free workload on the requested number of threads.
 *
 * usage: obj_pmalloc_rand_mt file threads# objects# object-size
 *        [iterations (def: 1000000)] [seed (def: 0)]
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmalloc_rand_mt");

	if (argc < 5 || argc > 7)
		UT_FATAL("usage: %s [file] "
			"[threads #] [objects #] [object size] "
			"[iterations (def: 1000000)] [seed (def: time)]",
			argv[0]);

	unsigned nthreads = ATOU(argv[2]);
	nobjects = ATOU(argv[3]);
	object_size = ATOUL(argv[4]);
	if (argc > 5)
		iterations = ATOU(argv[5]);
	if (argc > 6)
		seed = ATOU(argv[6]);
	else
		seed = 0; /* NOTE(review): usage says "def: time", actual default is 0 */

	PMEMobjpool *pop;

	/* reuse an existing pool file if present, otherwise create one */
	int exists = util_file_exists(argv[1]);
	if (exists < 0)
		UT_FATAL("!util_file_exists");

	if (!exists) {
		/* size the pool to fit the worst case of all threads full */
		pop = pmemobj_create(argv[1], "TEST",
		(PMEMOBJ_MIN_POOL * 10) + (nthreads * nobjects * object_size),
		0666);

		if (pop == NULL)
			UT_FATAL("!pmemobj_create");
	} else {
		pop = pmemobj_open(argv[1], "TEST");

		if (pop == NULL)
			UT_FATAL("!pmemobj_open");
	}

	os_thread_t *threads = MALLOC(sizeof(os_thread_t) * nthreads);

	/* all workers share the same pool */
	for (unsigned i = 0; i < nthreads; ++i) {
		THREAD_CREATE(&threads[i], NULL, test_worker, pop);
	}

	for (unsigned i = 0; i < nthreads; ++i) {
		THREAD_JOIN(&threads[i], NULL);
	}

	FREE(threads);

	pmemobj_close(pop);

	DONE(NULL);
}
| 2,308 | 19.254386 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_config/pmem2_config.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem_config.c -- pmem2_config unittests
*/
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "config.h"
#include "out.h"
#include "source.h"
/*
* test_cfg_create_and_delete_valid - test pmem2_config allocation
*/
static int
test_cfg_create_and_delete_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg;
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(cfg, NULL);
ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_cfg_alloc_enomem - test pmem2_config allocation with error injection
*/
static int
test_alloc_cfg_enomem(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config *cfg;
if (!core_fault_injection_enabled()) {
return 0;
}
core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc");
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_delete_null_config - test pmem2_delete on NULL config
*/
static int
test_delete_null_config(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg = NULL;
/* should not crash */
int ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_config_set_granularity_valid - check valid granularity values
*/
static int
test_config_set_granularity_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check default granularity */
enum pmem2_granularity g =
(enum pmem2_granularity)PMEM2_GRANULARITY_INVALID;
UT_ASSERTeq(cfg.requested_max_granularity, g);
/* change default granularity */
int ret = -1;
g = PMEM2_GRANULARITY_BYTE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
/* set granularity once more */
ret = -1;
g = PMEM2_GRANULARITY_PAGE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
return 0;
}
/*
* test_config_set_granularity_invalid - check invalid granularity values
*/
static int
test_config_set_granularity_invalid(const struct test_case *tc, int argc,
char *argv[])
{
/* pass invalid granularity */
int ret = 0;
enum pmem2_granularity g_inval = 999;
struct pmem2_config cfg;
pmem2_config_init(&cfg);
ret = pmem2_config_set_required_store_granularity(&cfg, g_inval);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
return 0;
}
/*
* test_set_offset_too_large - setting offset which is too large
*/
/*
 * test_set_offset_too_large - setting offset which is too large
 */
static int
test_set_offset_too_large(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* initialize like the sibling tests do, instead of using raw stack */
	pmem2_config_init(&cfg);

	/* let's try to set the offset which is too large */
	size_t offset = (size_t)INT64_MAX + 1;
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_OFFSET_OUT_OF_RANGE);

	return 0;
}
/*
* test_set_offset_success - setting a valid offset
*/
static int
test_set_offset_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the offset */
size_t offset = Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.offset, offset);
return 0;
}
/*
* test_set_length_success - setting a valid length
*/
static int
test_set_length_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the length, can be any length */
size_t length = Ut_mmap_align;
int ret = pmem2_config_set_length(&cfg, length);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.length, length);
return 0;
}
/*
* test_set_offset_max - setting maximum possible offset
*/
static int
test_set_offset_max(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set maximum possible offset */
size_t offset = (INT64_MAX / Ut_mmap_align) * Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_sharing_valid - setting valid sharing
*/
static int
test_set_sharing_valid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check sharing default value */
UT_ASSERTeq(cfg.sharing, PMEM2_SHARED);
int ret = pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.sharing, PMEM2_PRIVATE);
return 0;
}
/*
* test_set_sharing_invalid - setting invalid sharing
*/
static int
test_set_sharing_invalid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
unsigned invalid_sharing = 777;
int ret = pmem2_config_set_sharing(&cfg, invalid_sharing);
UT_ASSERTeq(ret, PMEM2_E_INVALID_SHARING_VALUE);
return 0;
}
/*
* test_validate_unaligned_addr - setting unaligned addr and validating it
*/
static int
test_validate_unaligned_addr(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_validate_unaligned_addr <file>");
/* needed for source alignment */
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fd);
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* let's set addr which is unaligned */
cfg.addr = (char *)1;
int ret = pmem2_config_validate_addr_alignment(&cfg, src);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_UNALIGNED);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_set_wrong_addr_req_type - setting wrong addr request type
*/
static int
test_set_wrong_addr_req_type(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen invalid addr request type */
enum pmem2_address_request_type request_type = 999;
int ret = pmem2_config_set_address(&cfg, NULL, request_type);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE);
return 0;
}
/*
* test_null_addr_noreplace - setting null addr when request type
* PMEM2_ADDRESS_FIXED_NOREPLACE is used
*/
static int
test_null_addr_noreplace(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_address(
&cfg, NULL, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_NULL);
return 0;
}
/*
* test_clear_address - using pmem2_config_clear_address func
*/
static int
test_clear_address(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen value of address and addr request type */
void *addr = (void *)(1024 * 1024);
int ret = pmem2_config_set_address(
&cfg, addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(cfg.addr, NULL);
UT_ASSERTne(cfg.addr_request, PMEM2_ADDRESS_ANY);
pmem2_config_clear_address(&cfg);
UT_ASSERTeq(cfg.addr, NULL);
UT_ASSERTeq(cfg.addr_request, PMEM2_ADDRESS_ANY);
return 0;
}
/*
* test_set_valid_prot_flag -- set valid protection flag
*/
static int
test_set_valid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_READ);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_WRITE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_NONE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg,
PMEM2_PROT_WRITE | PMEM2_PROT_READ | PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_invalid_prot_flag -- set invalid protection flag
*/
static int
test_set_invalid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PROT_WRITE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_PROT_FLAG);
UT_ASSERTeq(cfg.protection_flag, PMEM2_PROT_READ | PMEM2_PROT_WRITE);
return 0;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_cfg_create_and_delete_valid),
TEST_CASE(test_alloc_cfg_enomem),
TEST_CASE(test_delete_null_config),
TEST_CASE(test_config_set_granularity_valid),
TEST_CASE(test_config_set_granularity_invalid),
TEST_CASE(test_set_offset_too_large),
TEST_CASE(test_set_offset_success),
TEST_CASE(test_set_length_success),
TEST_CASE(test_set_offset_max),
TEST_CASE(test_set_sharing_valid),
TEST_CASE(test_set_sharing_invalid),
TEST_CASE(test_validate_unaligned_addr),
TEST_CASE(test_set_wrong_addr_req_type),
TEST_CASE(test_null_addr_noreplace),
TEST_CASE(test_clear_address),
TEST_CASE(test_set_valid_prot_flag),
TEST_CASE(test_set_invalid_prot_flag),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- entry point: initialize logging, then run the test cases
 * named on the command line from the test_cases table.
 */
int
main(int argc, char **argv)
{
	START(argc, argv, "pmem2_config");

	util_init();
	out_init("pmem2_config", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();

	DONE(NULL);
}
| 9,397 | 22.792405 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_config/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
@g.require_granularity(g.ANY)
class Pmem2Config(t.Test):
test_type = t.Short
def run(self, ctx):
filepath = ctx.create_holey_file(16 * t.MiB, 'testfile1')
ctx.exec('pmem2_config', self.test_case, filepath)
@g.no_testdir()
class Pmem2ConfigNoDir(t.Test):
test_type = t.Short
def run(self, ctx):
ctx.exec('pmem2_config', self.test_case)
class TEST0(Pmem2ConfigNoDir):
"""allocation and dealocation of pmem2_config"""
test_case = "test_cfg_create_and_delete_valid"
class TEST1(Pmem2ConfigNoDir):
"""allocation of pmem2_config in case of missing memory in system"""
test_case = "test_alloc_cfg_enomem"
class TEST2(Pmem2ConfigNoDir):
"""deleting null pmem2_config"""
test_case = "test_delete_null_config"
class TEST3(Pmem2ConfigNoDir):
"""set valid granularity in the config"""
test_case = "test_config_set_granularity_valid"
class TEST4(Pmem2ConfigNoDir):
"""set invalid granularity in the config"""
test_case = "test_config_set_granularity_invalid"
class TEST5(Pmem2ConfigNoDir):
"""setting offset which is too large"""
test_case = "test_set_offset_too_large"
class TEST6(Pmem2ConfigNoDir):
"""setting a valid offset"""
test_case = "test_set_offset_success"
class TEST7(Pmem2ConfigNoDir):
"""setting a valid length"""
test_case = "test_set_length_success"
class TEST8(Pmem2ConfigNoDir):
"""setting maximum possible offset"""
test_case = "test_set_offset_max"
class TEST9(Pmem2ConfigNoDir):
"""setting a valid sharing"""
test_case = "test_set_sharing_valid"
class TEST10(Pmem2ConfigNoDir):
"""setting a invalid sharing"""
test_case = "test_set_sharing_invalid"
class TEST11(Pmem2Config):
"""setting unaligned addr and validating it"""
test_case = "test_validate_unaligned_addr"
class TEST12(Pmem2ConfigNoDir):
"""setting wrong addr request type"""
test_case = "test_set_wrong_addr_req_type"
class TEST13(Pmem2ConfigNoDir):
"""
setting null addr when request type PMEM2_ADDRESS_FIXED_NOREPLACE
is used
"""
test_case = "test_null_addr_noreplace"
class TEST14(Pmem2ConfigNoDir):
"""
using pmem2_config_clear_address func
"""
test_case = "test_clear_address"
class TEST15(Pmem2ConfigNoDir):
"""
setting a valid protection flags
"""
test_case = "test_set_valid_prot_flag"
class TEST16(Pmem2ConfigNoDir):
"""
setting a invalid protection flags
"""
test_case = "test_set_invalid_prot_flag"
| 2,697 | 21.483333 | 72 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_trunc/pmem_map_file_trunc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem_map_file_trunc.c -- test for mapping specially crafted files,
* which used to confuse Windows libc to truncate it by 1 byte
*
* See https://github.com/pmem/pmdk/pull/3728 for full description.
*
* usage: pmem_map_file_trunc file
*/
#include "unittest.h"
#define EXPECTED_SIZE (4 * 1024)
/*
* so called "Ctrl-Z" or EOF character
* https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen
*/
#define FILL_CHAR 0x1a
/*
 * main -- create a file of EXPECTED_SIZE whose last byte is the Ctrl-Z/EOF
 * character, then verify twice (after create and after re-map) that the
 * file was not truncated by one byte and the fill byte survived.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_map_file_trunc");

	if (argc < 2)
		UT_FATAL("not enough args");

	size_t mapped;
	int ispmem;
	char *p;
	os_stat_t st;

	/* create the file and plant the EOF character as its last byte */
	p = pmem_map_file(argv[1], EXPECTED_SIZE, PMEM_FILE_CREATE, 0644,
		&mapped, &ispmem);
	UT_ASSERT(p);
	UT_ASSERTeq(mapped, EXPECTED_SIZE);

	p[EXPECTED_SIZE - 1] = FILL_CHAR;
	pmem_persist(&p[EXPECTED_SIZE - 1], 1);

	pmem_unmap(p, EXPECTED_SIZE);

	/* size must be intact after the first unmap */
	STAT(argv[1], &st);
	UT_ASSERTeq(st.st_size, EXPECTED_SIZE);

	/* re-map the existing file and check the planted byte survived */
	p = pmem_map_file(argv[1], 0, 0, 0644, &mapped, &ispmem);
	UT_ASSERT(p);
	UT_ASSERTeq(mapped, EXPECTED_SIZE);
	UT_ASSERTeq(p[EXPECTED_SIZE - 1], FILL_CHAR);

	pmem_unmap(p, EXPECTED_SIZE);

	/* and the size must still be intact after the second unmap */
	STAT(argv[1], &st);
	UT_ASSERTeq(st.st_size, EXPECTED_SIZE);

	DONE(NULL);
}
| 1,302 | 20.716667 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_trunc/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
import testframework as t
import os
@t.require_build(['debug', 'release'])
class TEST0(t.Test):
test_type = t.Short
def run(self, ctx):
ctx.exec('pmem_map_file_trunc', os.path.join(ctx.testdir, 'testfile'))
| 316 | 18.8125 | 78 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_backup/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# libpmempool_backup/config.sh -- test configuration
#
# Extend timeout for TEST0, as it may take more than a minute
# when run on a non-pmem file system.
CONF_TIMEOUT[0]='10m'
| 280 | 19.071429 | 61 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_backup/common.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2018, Intel Corporation
#
#
# libpmempool_backup/common.sh -- functions for libpmempool_backup unittest
#
set -e
POOLSET=$DIR/pool.set
BACKUP=_backup
REPLICA=_replica
POOL_PART=$DIR/pool.part
OUT=out${UNITTEST_NUM}.log
OUT_TEMP=out${UNITTEST_NUM}_temp.log
DIFF=diff${UNITTEST_NUM}.log
rm -f $LOG $DIFF $OUT_TEMP && touch $LOG $DIFF $OUT_TEMP
# params for blk, log and obj pools
POOL_TYPES=( blk log obj )
POOL_CREATE_PARAMS=( "--write-layout 512" "" "--layout test_layout" )
POOL_CHECK_PARAMS=( "-smgB" "-s" "-soOaAbZH -l -C" )
POOL_OBJ=2
# create_poolset_variation -- create one from the tested poolset variation
# usage: create_poolset_variation <variation-id> [<suffix>]
#
function create_poolset_variation() {
local sfx=""
local variation=$1
shift
if [ $# -gt 0 ]; then
sfx=$1
fi
case "$variation"
in
1)
# valid poolset file
create_poolset $POOLSET$sfx \
20M:${POOL_PART}1$sfx:x \
20M:${POOL_PART}2$sfx:x \
20M:${POOL_PART}3$sfx:x \
20M:${POOL_PART}4$sfx:x
;;
2)
# valid poolset file with replica
create_poolset $POOLSET$sfx \
20M:${POOL_PART}1$sfx:x \
20M:${POOL_PART}2$sfx:x \
20M:${POOL_PART}3$sfx:x \
20M:${POOL_PART}4$sfx:x \
r 80M:${POOL_PART}${REPLICA}$sfx:x
;;
3)
# other number of parts
create_poolset $POOLSET$sfx \
20M:${POOL_PART}1$sfx:x \
20M:${POOL_PART}2$sfx:x \
40M:${POOL_PART}3$sfx:x
;;
4)
# no poolset
# return without check_file
return
;;
5)
# empty
create_poolset $POOLSET$sfx
;;
6)
# other size of part
create_poolset $POOLSET$sfx \
20M:${POOL_PART}1$sfx:x \
20M:${POOL_PART}2$sfx:x \
20M:${POOL_PART}3$sfx:x \
21M:${POOL_PART}4$sfx:x
;;
esac
check_file $POOLSET$sfx
}
#
# backup_and_compare -- perform backup and compare backup result with original
# if compare parameters are provided
# usage: backup_and_compare <poolset> <type> [<compare-params>]
#
function backup_and_compare () {
	local poolset=$1
	local type=$2
	shift 2

	# backup: run the libpmempool API test binary in backup mode
	# (-b destination, -t pool type, -r 1 repair flag) on the poolset
	expect_normal_exit ../libpmempool_api/libpmempool_test$EXESUFFIX \
		-b $poolset$BACKUP -t $type -r 1 $poolset
	cat $OUT >> $OUT_TEMP

	# compare: only when compare parameters were passed by the caller;
	# diff output is accumulated in $DIFF for the test's final check
	if [ $# -gt 0 ]; then
		compare_replicas "$1" $poolset $poolset$BACKUP >> $DIFF
	fi
}
ALL_POOL_PARTS="${POOL_PART}1 ${POOL_PART}2 ${POOL_PART}3 ${POOL_PART}4 \
${POOL_PART}${REPLICA}"
ALL_POOL_BACKUP_PARTS="${POOL_PART}1$BACKUP ${POOL_PART}2$BACKUP \
${POOL_PART}3$BACKUP ${POOL_PART}4$BACKUP \
${POOL_PART}${BACKUP}${REPLICA}"
#
# backup_cleanup -- perform cleanup between test cases
#
function backup_cleanup() {
rm -f $POOLSET$BACKUP $ALL_POOL_PARTS $ALL_POOL_BACKUP_PARTS
}
| 2,691 | 21.621849 | 78 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_include/blk_include.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* blk_include.c -- include test for libpmemblk
*
* this is only a compilation test - do not run this program
*/
#include <libpmemblk.h>
int
main(int argc, char *argv[])
{
	/* compile-only test: successfully including <libpmemblk.h> is the point */
	return 0;
}
| 275 | 15.235294 | 60 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_alloc/obj_alloc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_alloc.c -- unit test for pmemobj_alloc and pmemobj_zalloc
*/
#include "unittest.h"
#include "heap.h"
#include <limits.h>
POBJ_LAYOUT_BEGIN(alloc);
POBJ_LAYOUT_ROOT(alloc, struct root);
POBJ_LAYOUT_TOID(alloc, struct object);
POBJ_LAYOUT_END(alloc);
struct object {
size_t value;
char data[];
};
struct root {
TOID(struct object) obj;
char data[CHUNKSIZE - sizeof(TOID(struct object))];
};
/*
 * check_int -- decode a size argument from the command line:
 * 'S' -> SIZE_MAX, 'B' -> SIZE_MAX - 1, 'O' -> sizeof(struct object),
 * anything else is parsed as an unsigned number
 */
static uint64_t
check_int(const char *size_str)
{
	char selector = *size_str;

	if (selector == 'S')
		return SIZE_MAX;
	if (selector == 'B')
		return SIZE_MAX - 1;
	if (selector == 'O')
		return sizeof(struct object);

	return ATOULL(size_str);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_alloc");
	const char *path;
	size_t size;
	uint64_t type_num;
	int is_oid_null;
	uint64_t flags;
	int expected_return_code;
	int expected_errno;
	int ret;
	if (argc < 8)
		UT_FATAL("usage: %s path size type_num is_oid_null flags "
				"expected_return_code expected_errno ...", argv[0]);
	PMEMobjpool *pop = NULL;
	PMEMoid *oidp;
	path = argv[1];
	/*
	 * NOTE(review): the layout name passed here is "basic" while the
	 * POBJ_LAYOUT_BEGIN block above declares layout "alloc" -- harmless
	 * for pool creation, but verify the mismatch is intentional.
	 */
	pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
			0, S_IWUSR | S_IRUSR);
	if (pop == NULL) {
		UT_FATAL("!pmemobj_create: %s", path);
	}
	/* each test case consumes 7 consecutive command-line arguments */
	for (int i = 1; i + 6 < argc; i += 7) {
		size = (size_t)check_int(argv[i + 1]);
		type_num = check_int(argv[i + 2]);
		is_oid_null = ATOI(argv[i + 3]);
		flags = ATOULL(argv[i + 4]);
		expected_return_code = ATOI(argv[i + 5]);
		expected_errno = ATOI(argv[i + 6]);
		UT_OUT("%s %zu %lu %d %lu %d %d", path, size, type_num,
				is_oid_null, flags, expected_return_code,
				expected_errno);
		TOID(struct root) root = POBJ_ROOT(pop, struct root);
		oidp = &D_RW(root)->obj.oid;
		/* optionally exercise the NULL-oidp-target path */
		if (is_oid_null) {
			TOID_ASSIGN(root, OID_NULL);
			oidp = &root.oid;
		}
		ret = pmemobj_xalloc(
			pop, oidp, size, type_num, flags, NULL, NULL);
		UT_ASSERTeq(ret, expected_return_code);
		if (expected_errno != 0) {
			UT_ASSERTeq(errno, expected_errno);
		}
		if (ret == 0) {
			/* usable size may exceed the requested size */
			UT_OUT("alloc: %zu, size: %zu", size,
					pmemobj_alloc_usable_size(D_RW(root)->obj.oid));
			if (is_oid_null == 0) {
				UT_ASSERT(!TOID_IS_NULL(D_RW(root)->obj));
				UT_ASSERT(pmemobj_alloc_usable_size(
					D_RW(root)->obj.oid) >= size);
			}
		}
		pmemobj_free(&D_RW(root)->obj.oid);
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
		UT_OUT("free");
	}
	pmemobj_close(pop);
	DONE(NULL);
}
| 2,483 | 19.195122 | 64 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_ravl/util_ravl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_ravl.c -- unit test for ravl tree
*/
#include <stdint.h>
#include <stdlib.h>
#include "ravl.h"
#include "util.h"
#include "unittest.h"
#include "fault_injection.h"
/*
 * cmpkey -- compare two pointer-sized keys for the ravl tree
 *
 * Returns <0, 0 or >0. Uses direct comparisons instead of subtraction:
 * (l - r) can overflow intptr_t (undefined behavior) and the cast to int
 * truncates the difference, which can flip the sign for keys more than
 * INT_MAX apart. (CERT C INT32-C.)
 */
static int
cmpkey(const void *lhs, const void *rhs)
{
	intptr_t l = (intptr_t)lhs;
	intptr_t r = (intptr_t)rhs;

	return (l > r) - (l < r);
}
/*
 * test_misc -- exercise insert, every find predicate (including misses at
 * both ends of the key range) and removal of every element
 */
static void
test_misc(void)
{
	struct ravl *r = ravl_new(cmpkey);
	struct ravl_node *n = NULL;
	/* insert keys 1..10 in shuffled order */
	ravl_insert(r, (void *)3);
	ravl_insert(r, (void *)6);
	ravl_insert(r, (void *)1);
	ravl_insert(r, (void *)7);
	ravl_insert(r, (void *)9);
	ravl_insert(r, (void *)5);
	ravl_insert(r, (void *)8);
	ravl_insert(r, (void *)2);
	ravl_insert(r, (void *)4);
	ravl_insert(r, (void *)10);
	/* lookups that must miss: keys outside [1, 10] */
	n = ravl_find(r, (void *)11, RAVL_PREDICATE_EQUAL);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)10, RAVL_PREDICATE_GREATER);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)11, RAVL_PREDICATE_GREATER);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)11,
		RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)1, RAVL_PREDICATE_LESS);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)0, RAVL_PREDICATE_LESS_EQUAL);
	UT_ASSERTeq(n, NULL);
	/* nearest-neighbour lookups around existing key 9 */
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_GREATER);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)10);
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_LESS);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)8);
	/* combined predicates prefer the exact match */
	n = ravl_find(r, (void *)9,
		RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)9);
	n = ravl_find(r, (void *)9,
		RAVL_PREDICATE_LESS | RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)9);
	/* far-out keys clamp to the extreme elements */
	n = ravl_find(r, (void *)100, RAVL_PREDICATE_LESS);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)10);
	n = ravl_find(r, (void *)0, RAVL_PREDICATE_GREATER);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)1);
	/* remove every element, in an order unrelated to insertion */
	n = ravl_find(r, (void *)3, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)10, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)6, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)7, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)1, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)5, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)8, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)2, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)4, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	ravl_delete(r);
}
/*
 * test_predicate -- verify that GREATER/LESS lookups return the nearest
 * neighbours of a key that is absent from the tree
 */
static void
test_predicate(void)
{
	struct ravl *tree = ravl_new(cmpkey);
	const uintptr_t keys[] = {10, 5, 7};

	for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); ++i)
		ravl_insert(tree, (void *)keys[i]);

	/* 6 is absent: its neighbours are 7 (above) and 5 (below) */
	struct ravl_node *hit = ravl_find(tree, (void *)6,
			RAVL_PREDICATE_GREATER);
	UT_ASSERTne(hit, NULL);
	UT_ASSERTeq(ravl_data(hit), (void *)7);

	hit = ravl_find(tree, (void *)6, RAVL_PREDICATE_LESS);
	UT_ASSERTne(hit, NULL);
	UT_ASSERTeq(ravl_data(hit), (void *)5);

	ravl_delete(tree);
}
/*
 * test_stress -- hammer the tree with a million pseudo-random insertions
 * (srand is never called, so the key sequence is deterministic per libc)
 */
static void
test_stress(void)
{
	enum { STRESS_INSERTS = 1000000 };
	struct ravl *tree = ravl_new(cmpkey);
	int i = 0;

	while (i++ < STRESS_INSERTS)
		ravl_insert(tree, (void *)(uintptr_t)rand());

	ravl_delete(tree);
}
struct foo {
	int a;
	int b;
	int c;
};

/*
 * cmpfoo -- order struct foo objects by the sum of their fields
 *
 * Sums are computed in 64-bit and compared directly: adding three ints
 * and subtracting the two sums in plain int can overflow, which is
 * undefined behavior (CERT C INT32-C). Callers (the ravl comparator
 * contract) only rely on the sign of the result.
 */
static int
cmpfoo(const void *lhs, const void *rhs)
{
	const struct foo *l = lhs;
	const struct foo *r = rhs;
	int64_t ls = (int64_t)l->a + l->b + l->c;
	int64_t rs = (int64_t)r->a + r->b + r->c;

	return (ls > rs) - (ls < rs);
}
/*
 * test_emplace -- verify copy-in semantics of a sized tree: elements are
 * stored by value and come back in comparator (field-sum) order
 */
static void
test_emplace(void)
{
	struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
	struct foo a = {1, 2, 3};
	struct foo b = {2, 3, 4};
	struct foo z = {0, 0, 0};
	ravl_emplace_copy(r, &a);
	ravl_emplace_copy(r, &b);
	/* smallest element greater than z must be a copy of 'a' */
	struct ravl_node *n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
	struct foo *fn = ravl_data(n);
	UT_ASSERTeq(fn->a, a.a);
	UT_ASSERTeq(fn->b, a.b);
	UT_ASSERTeq(fn->c, a.c);
	ravl_remove(r, n);
	/* after removing 'a', the next-greater element is the copy of 'b' */
	n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
	fn = ravl_data(n);
	UT_ASSERTeq(fn->a, b.a);
	UT_ASSERTeq(fn->b, b.b);
	UT_ASSERTeq(fn->c, b.c);
	ravl_remove(r, n);
	ravl_delete(r);
}
/*
 * test_fault_injection_ravl_sized -- verify that ravl_new_sized() fails
 * cleanly (NULL + ENOMEM) when its internal allocation is made to fail.
 *
 * Fix: declare the parameter list as (void) -- an empty () in C means
 * "unspecified arguments", not "no arguments".
 */
static void
test_fault_injection_ravl_sized(void)
{
	if (!core_fault_injection_enabled())
		return;

	core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_sized");

	struct ravl *r = ravl_new_sized(NULL, 0);
	UT_ASSERTeq(r, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_fault_injection_ravl_node -- verify that ravl_emplace_copy() fails
 * cleanly (non-zero + ENOMEM) when the node allocation is made to fail.
 *
 * Fixes: (void) parameter list (empty () means unspecified arguments in C)
 * and the successfully-created tree is now released instead of leaked.
 */
static void
test_fault_injection_ravl_node(void)
{
	if (!core_fault_injection_enabled())
		return;

	struct foo a = {1, 2, 3};
	struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
	UT_ASSERTne(r, NULL);

	core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_node");

	int ret = ravl_emplace_copy(r, &a);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOMEM);

	/* the tree itself was allocated successfully -- do not leak it */
	ravl_delete(r);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_ravl");
	test_predicate();
	test_misc();
	test_stress();
	test_emplace();
	/* fault-injection tests are no-ops unless the build enables them */
	test_fault_injection_ravl_sized();
	test_fault_injection_ravl_node();
	DONE(NULL);
}
| 5,271 | 20.34413 | 64 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mocks_windows.h -- redefinitions of pthread functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_sync test.
* It would replace default implementation with mocked functions defined
* in obj_sync.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
/* route lock initializers to the __wrap_ mocks defined in obj_sync.c */
#define os_mutex_init __wrap_os_mutex_init
#define os_rwlock_init __wrap_os_rwlock_init
#define os_cond_init __wrap_os_cond_init
#endif
| 2,265 | 41.754717 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/obj_sync.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_sync.c -- unit test for PMEM-resident locks
*/
#include "obj.h"
#include "sync.h"
#include "unittest.h"
#include "sys_util.h"
#include "util.h"
#include "os.h"
#define MAX_THREAD_NUM 200
#define DATA_SIZE 128
#define LOCKED_MUTEX 1
#define NANO_PER_ONE 1000000000LL
#define TIMEOUT (NANO_PER_ONE / 1000LL)
#define WORKER_RUNS 10
#define MAX_OPENS 5
#define FATAL_USAGE() UT_FATAL("usage: obj_sync [mrc] <num_threads> <runs>\n")
/* posix thread worker typedef */
typedef void *(*worker)(void *);
/* the mock pmemobj pool */
static PMEMobjpool Mock_pop;
/* the tested object containing persistent synchronization primitives */
static struct mock_obj {
PMEMmutex mutex;
PMEMmutex mutex_locked;
PMEMcond cond;
PMEMrwlock rwlock;
int check_data;
uint8_t data[DATA_SIZE];
} *Test_obj;
/* mocked pool lookup: every address is claimed to belong to Mock_pop */
PMEMobjpool *
pmemobj_pool_by_ptr(const void *arg)
{
	return &Mock_pop;
}
/*
 * mock_open_pool -- (internal) simulate pool opening
 */
static void
mock_open_pool(PMEMobjpool *pop)
{
	/* bumps run_id by 2 -- presumably valid run_ids keep a fixed parity
	 * used by the lock-revalidation logic; TODO confirm */
	util_fetch_and_add64(&pop->run_id, 2);
}
/*
 * mutex_write_worker -- (internal) fill the shared data buffer with this
 * thread's id byte while holding the PMEM-resident mutex
 */
static void *
mutex_write_worker(void *arg)
{
	int fill = (int)(uintptr_t)arg;
	unsigned iter = 0;

	while (iter++ < WORKER_RUNS) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
			UT_ERR("pmemobj_mutex_lock");
			break;
		}
		memset(Test_obj->data, fill, DATA_SIZE);
		if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
			UT_ERR("pmemobj_mutex_unlock");
	}

	return NULL;
}
/*
 * mutex_check_worker -- (internal) check consistency with mutex
 */
static void *
mutex_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
			UT_ERR("pmemobj_mutex_lock");
			return NULL;
		}
		/* writers fill the whole buffer under the lock, so every
		 * byte must equal the first one */
		uint8_t val = Test_obj->data[0];
		for (int i = 1; i < DATA_SIZE; i++)
			UT_ASSERTeq(Test_obj->data[i], val);
		memset(Test_obj->data, 0, DATA_SIZE);
		if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
			UT_ERR("pmemobj_mutex_unlock");
	}
	return NULL;
}
/*
 * cond_write_worker -- (internal) write data with cond variable
 */
static void *
cond_write_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
			return NULL;
		memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
		/* announce fresh data to any checker blocked in cond_wait */
		Test_obj->check_data = 1;
		if (pmemobj_cond_signal(&Mock_pop, &Test_obj->cond))
			UT_ERR("pmemobj_cond_signal");
		pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	}
	return NULL;
}
/*
 * cond_check_worker -- (internal) check consistency with cond variable
 */
static void *
cond_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
			return NULL;
		/* standard predicate loop: guard against spurious wakeups */
		while (Test_obj->check_data != 1) {
			if (pmemobj_cond_wait(&Mock_pop, &Test_obj->cond,
					&Test_obj->mutex))
				UT_ERR("pmemobj_cond_wait");
		}
		/* the buffer must be uniformly filled by one writer */
		uint8_t val = Test_obj->data[0];
		for (int i = 1; i < DATA_SIZE; i++)
			UT_ASSERTeq(Test_obj->data[i], val);
		memset(Test_obj->data, 0, DATA_SIZE);
		pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	}
	return NULL;
}
/*
 * rwlock_write_worker -- (internal) fill the shared data buffer with this
 * thread's id byte while holding the rwlock in write mode
 */
static void *
rwlock_write_worker(void *arg)
{
	int fill = (int)(uintptr_t)arg;
	unsigned iter = 0;

	while (iter++ < WORKER_RUNS) {
		if (pmemobj_rwlock_wrlock(&Mock_pop, &Test_obj->rwlock)) {
			UT_ERR("pmemobj_rwlock_wrlock");
			break;
		}
		memset(Test_obj->data, fill, DATA_SIZE);
		if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
			UT_ERR("pmemobj_rwlock_unlock");
	}

	return NULL;
}
/*
 * rwlock_check_worker -- (internal) check consistency with rwlock
 */
static void *
rwlock_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		/* read lock is enough: checkers never modify the buffer */
		if (pmemobj_rwlock_rdlock(&Mock_pop, &Test_obj->rwlock)) {
			UT_ERR("pmemobj_rwlock_rdlock");
			return NULL;
		}
		uint8_t val = Test_obj->data[0];
		for (int i = 1; i < DATA_SIZE; i++)
			UT_ASSERTeq(Test_obj->data[i], val);
		if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
			UT_ERR("pmemobj_rwlock_unlock");
	}
	return NULL;
}
/*
 * timed_write_worker -- (internal) intentionally a no-op; only the checker
 * side exercises timed locking
 */
static void *
timed_write_worker(void *arg)
{
	(void) arg;

	return NULL;
}
/*
 * timed_check_worker -- (internal) check consistency with mutex
 */
static void *
timed_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		/* odd thread ids contend on the permanently-locked mutex */
		int mutex_id = (int)(uintptr_t)arg % 2;
		PMEMmutex *mtx = mutex_id == LOCKED_MUTEX ?
			&Test_obj->mutex_locked : &Test_obj->mutex;
		struct timespec t1, t2, abs_time;
		os_clock_gettime(CLOCK_REALTIME, &t1);
		abs_time = t1;
		/* build an absolute deadline TIMEOUT ns in the future */
		abs_time.tv_nsec += TIMEOUT;
		if (abs_time.tv_nsec >= NANO_PER_ONE) {
			abs_time.tv_sec++;
			abs_time.tv_nsec -= NANO_PER_ONE;
		}
		int ret = pmemobj_mutex_timedlock(&Mock_pop, mtx, &abs_time);
		os_clock_gettime(CLOCK_REALTIME, &t2);
		if (mutex_id == LOCKED_MUTEX) {
			/* must time out, and only after the full timeout
			 * elapsed; note this path exits after one run */
			UT_ASSERTeq(ret, ETIMEDOUT);
			uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec) *
				NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
			UT_ASSERT(diff >= TIMEOUT);
			return NULL;
		}
		if (ret == 0) {
			/* acquired in time -- release and loop again */
			UT_ASSERTne(mutex_id, LOCKED_MUTEX);
			pmemobj_mutex_unlock(&Mock_pop, mtx);
		} else if (ret == ETIMEDOUT) {
			uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec)
				* NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
			UT_ASSERT(diff >= TIMEOUT);
		} else {
			errno = ret;
			UT_ERR("!pmemobj_mutex_timedlock");
		}
	}
	return NULL;
}
/*
 * cleanup -- (internal) clean up after each run
 *
 * Destroys only the primitives used by the given test type; the internal
 * OS lock objects are reached by casting the PMEM wrappers to their
 * *_internal counterparts.
 */
static void
cleanup(char test_type)
{
	switch (test_type) {
	case 'm':
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		break;
	case 'r':
		util_rwlock_destroy(&((PMEMrwlock_internal *)
			&(Test_obj->rwlock))->PMEMrwlock_lock);
		break;
	case 'c':
		/* cond test uses both a mutex and a condition variable */
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		util_cond_destroy(&((PMEMcond_internal *)
			&(Test_obj->cond))->PMEMcond_cond);
		break;
	case 't':
		/* timed test uses both the free and the held mutex */
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex_locked))->PMEMmutex_lock);
		break;
	default:
		FATAL_USAGE();
	}
}
/* persist hook for the mock pool: nothing is actually persistent here */
static int
obj_sync_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	/* no-op */
	return 0;
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_sync");
	util_init();
	if (argc < 4)
		FATAL_USAGE();
	worker writer;
	worker checker;
	/* select the writer/checker pair from the test-type letter */
	char test_type = argv[1][0];
	switch (test_type) {
	case 'm':
		writer = mutex_write_worker;
		checker = mutex_check_worker;
		break;
	case 'r':
		writer = rwlock_write_worker;
		checker = rwlock_check_worker;
		break;
	case 'c':
		writer = cond_write_worker;
		checker = cond_check_worker;
		break;
	case 't':
		writer = timed_write_worker;
		checker = timed_check_worker;
		break;
	default:
		FATAL_USAGE();
	}
	unsigned long num_threads = strtoul(argv[2], NULL, 10);
	if (num_threads > MAX_THREAD_NUM)
		UT_FATAL("Do not use more than %d threads.\n", MAX_THREAD_NUM);
	unsigned long opens = strtoul(argv[3], NULL, 10);
	if (opens > MAX_OPENS)
		UT_FATAL("Do not use more than %d runs.\n", MAX_OPENS);
	os_thread_t *write_threads
		= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
	os_thread_t *check_threads
		= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
	/* first pool open */
	mock_open_pool(&Mock_pop);
	Mock_pop.p_ops.persist = obj_sync_persist;
	Mock_pop.p_ops.base = &Mock_pop;
	Test_obj = (struct mock_obj *)MALLOC(sizeof(struct mock_obj));
	/* zero-initialize the test object */
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex);
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex_locked);
	pmemobj_cond_zero(&Mock_pop, &Test_obj->cond);
	pmemobj_rwlock_zero(&Mock_pop, &Test_obj->rwlock);
	Test_obj->check_data = 0;
	memset(&Test_obj->data, 0, DATA_SIZE);
	/* each iteration simulates a pool re-open with fresh workers */
	for (unsigned long run = 0; run < opens; run++) {
		if (test_type == 't') {
			/* hold mutex_locked so odd checkers must time out */
			pmemobj_mutex_lock(&Mock_pop,
					&Test_obj->mutex_locked);
		}
		for (unsigned i = 0; i < num_threads; i++) {
			THREAD_CREATE(&write_threads[i], NULL, writer,
				(void *)(uintptr_t)i);
			THREAD_CREATE(&check_threads[i], NULL, checker,
				(void *)(uintptr_t)i);
		}
		for (unsigned i = 0; i < num_threads; i++) {
			THREAD_JOIN(&write_threads[i], NULL);
			THREAD_JOIN(&check_threads[i], NULL);
		}
		if (test_type == 't') {
			pmemobj_mutex_unlock(&Mock_pop,
					&Test_obj->mutex_locked);
		}
		/* up the run_id counter and cleanup */
		mock_open_pool(&Mock_pop);
		cleanup(test_type);
	}
	FREE(check_threads);
	FREE(write_threads);
	FREE(Test_obj);
	DONE(NULL);
}
| 8,776 | 21.97644 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/mocks_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* mock-windows.c -- redefinitions of locks function
*/
#include "os.h"
#include "unittest.h"
/*
 * Mocks of the os_* lock initializers (Windows build): run 1 of each mock
 * returns -1 to exercise libpmemobj's init-failure paths; all other runs
 * fall through to the real implementation.
 */
FUNC_MOCK(os_mutex_init, int,
	os_mutex_t *__restrict mutex)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(os_mutex_init, mutex)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
FUNC_MOCK(os_rwlock_init, int,
	os_rwlock_t *__restrict rwlock)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(os_rwlock_init, rwlock)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
FUNC_MOCK(os_cond_init, int,
	os_cond_t *__restrict cond)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(os_cond_init, cond)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
| 687 | 17.594595 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/mocks_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- redefinitions of lock functions (Posix implementation)
*/
#include <pthread.h>
#include "util.h"
#include "os.h"
#include "unittest.h"
/*
 * Mocks of the pthread lock initializers (Posix build): run 1 of each mock
 * returns -1 to exercise libpmemobj's init-failure paths; all other runs
 * delegate to the real pthread function.
 */
FUNC_MOCK(pthread_mutex_init, int,
	pthread_mutex_t *__restrict mutex,
	const pthread_mutexattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_mutex_init, mutex, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
FUNC_MOCK(pthread_rwlock_init, int,
	pthread_rwlock_t *__restrict rwlock,
	const pthread_rwlockattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_rwlock_init, rwlock, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
FUNC_MOCK(pthread_cond_init, int,
	pthread_cond_t *__restrict cond,
	const pthread_condattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_cond_init, cond, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
| 950 | 22.775 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/out_err_mt_win/out_err_mt_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* out_err_mt_win.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
/*
 * print_errors -- dump 'msg' plus the last error message reported by each
 * pmem library, via the wide-char (W) API variants
 */
static void
print_errors(const wchar_t *msg)
{
	UT_OUT("%S", msg);
	UT_OUT("PMEM: %S", pmem_errormsgW());
	UT_OUT("PMEMOBJ: %S", pmemobj_errormsgW());
	UT_OUT("PMEMLOG: %S", pmemlog_errormsgW());
	UT_OUT("PMEMBLK: %S", pmemblk_errormsgW());
	UT_OUT("PMEMPOOL: %S", pmempool_errormsgW());
}
/*
 * check_one_version_msg -- (internal) parse a single
 * "<lib> major version mismatch (need %d, found %d)" message with the
 * given format and verify both numbers
 */
static void
check_one_version_msg(const wchar_t *msg, const wchar_t *fmt,
	int need, int found)
{
	int err_need;
	int err_found;
	int ret = swscanf(msg, fmt, &err_need, &err_found);

	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, need);
	UT_ASSERTeq(err_found, found);
}

/*
 * check_errors -- verify that every library reported a version-mismatch
 * error for the requested (bogus) major version 'ver'
 *
 * Refactored: the five identical copy-pasted check stanzas now share one
 * helper; behavior is unchanged.
 */
static void
check_errors(int ver)
{
	check_one_version_msg(pmem_errormsgW(),
		L"libpmem major version mismatch (need %d, found %d)",
		ver, PMEM_MAJOR_VERSION);
	check_one_version_msg(pmemobj_errormsgW(),
		L"libpmemobj major version mismatch (need %d, found %d)",
		ver, PMEMOBJ_MAJOR_VERSION);
	check_one_version_msg(pmemlog_errormsgW(),
		L"libpmemlog major version mismatch (need %d, found %d)",
		ver, PMEMLOG_MAJOR_VERSION);
	check_one_version_msg(pmemblk_errormsgW(),
		L"libpmemblk major version mismatch (need %d, found %d)",
		ver, PMEMBLK_MAJOR_VERSION);
	check_one_version_msg(pmempool_errormsgW(),
		L"libpmempool major version mismatch (need %d, found %d)",
		ver, PMEMPOOL_MAJOR_VERSION);
}
/*
 * do_test -- worker: request a bogus major version from every library and
 * verify the (thread-local) error messages report it
 */
static void *
do_test(void *arg)
{
	int ver = *(int *)arg;
	pmem_check_version(ver, 0);
	pmemobj_check_version(ver, 0);
	pmemlog_check_version(ver, 0);
	pmemblk_check_version(ver, 0);
	pmempool_check_version(ver, 0);
	check_errors(ver);
	return NULL;
}
/*
 * run_mt_test -- start NUM_THREADS workers, each with a distinct bogus
 * version number, and wait for all of them to finish
 */
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t thread[NUM_THREADS];
	int ver[NUM_THREADS];
	for (int i = 0; i < NUM_THREADS; ++i) {
		/* distinct versions ensure per-thread error messages differ */
		ver[i] = 10000 + i;
		THREAD_CREATE(&thread[i], NULL, worker, &ver[i]);
	}
	for (int i = 0; i < NUM_THREADS; ++i) {
		THREAD_JOIN(&thread[i], NULL);
	}
}
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "out_err_mt_win");
	if (argc != 6)
		UT_FATAL("usage: %S file1 file2 file3 file4 dir",
			argv[0]);
	print_errors(L"start");
	PMEMobjpool *pop = pmemobj_createW(argv[1], L"test",
		PMEMOBJ_MIN_POOL, 0666);
	PMEMlogpool *plp = pmemlog_createW(argv[2],
		PMEMLOG_MIN_POOL, 0666);
	PMEMblkpool *pbp = pmemblk_createW(argv[3],
		128, PMEMBLK_MIN_POOL, 0666);
	util_init();
	/* request impossible major versions so every library sets an error */
	pmem_check_version(10000, 0);
	pmemobj_check_version(10001, 0);
	pmemlog_check_version(10002, 0);
	pmemblk_check_version(10003, 0);
	pmempool_check_version(10006, 0);
	print_errors(L"version check");
	void *ptr = NULL;
	/*
	 * We are testing library error reporting and we don't want this test
	 * to fail under memcheck.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	pmem_msync(ptr, 1);
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	print_errors(L"pmem_msync");
	int ret;
	PMEMoid oid;
	/* size 0 is invalid -- the alloc must fail and set an error */
	ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
	UT_ASSERTeq(ret, -1);
	print_errors(L"pmemobj_alloc");
	pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
	print_errors(L"pmemlog_append");
	/* one past the last valid block number -- must fail */
	size_t nblock = pmemblk_nblock(pbp);
	pmemblk_set_error(pbp, nblock + 1);
	print_errors(L"pmemblk_set_error");
	run_mt_test(do_test);
	pmemobj_close(pop);
	pmemlog_close(plp);
	pmemblk_close(pbp);
	PMEMpoolcheck *ppc;
	struct pmempool_check_args args = {0, };
	/* deliberately pass a too-small size to force init failure */
	ppc = pmempool_check_init(&args, sizeof(args) / 2);
	UT_ASSERTeq(ppc, NULL);
	print_errors(L"pmempool_check_init");
	DONEW(NULL);
}
| 3,844 | 22.30303 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_oid_thread/obj_oid_thread.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_oid_thread.c -- unit test for the reverse direct operation
*/
#include "unittest.h"
#include "lane.h"
#include "obj.h"
#include "sys_util.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock;
static os_cond_t cond;
static int flag = 1;
static PMEMoid thread_oid;
/*
 * test_worker -- (internal) test worker thread
 *
 * Phase 1 (pools open): resolve thread_oid to a direct pointer and check
 * that pmemobj_oid() maps it back, then signal main (flag = 0) and wait.
 * Phase 2 (main has closed all pools and set flag = 1): the same pointer
 * must no longer resolve to any OID.
 */
static void *
test_worker(void *arg)
{
	util_mutex_lock(&lock);
	/* before pool is closed */
	void *direct = pmemobj_direct(thread_oid);
	UT_ASSERT(OID_EQUALS(thread_oid, pmemobj_oid(direct)));
	flag = 0;
	os_cond_signal(&cond);
	util_mutex_unlock(&lock);
	util_mutex_lock(&lock);
	while (flag == 0)
		os_cond_wait(&cond, &lock);
	/* after pool is closed */
	UT_ASSERT(OID_IS_NULL(pmemobj_oid(direct)));
	util_mutex_unlock(&lock);
	return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_oid_thread");
if (argc != 3)
UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);
util_mutex_init(&lock);
util_cond_init(&cond);
unsigned npools = ATOU(argv[2]);
const char *dir = argv[1];
int r;
PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMoid *));
size_t length = strlen(dir) + MAX_PATH_LEN;
char *path = MALLOC(length);
for (unsigned i = 0; i < npools; ++i) {
int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
dir, i);
if (ret < 0 || ret >= length)
UT_FATAL("snprintf: %d", ret);
pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pops[i] == NULL)
UT_FATAL("!pmemobj_create");
}
/* Address outside the pmemobj pool */
void *allocated_memory = MALLOC(sizeof(int));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(allocated_memory)));
PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(NULL)));
oids[0] = OID_NULL;
for (unsigned i = 0; i < npools; ++i) {
uint64_t off = pops[i]->heap_offset;
oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
UT_ASSERT(OID_EQUALS(oids[i],
pmemobj_oid(pmemobj_direct(oids[i]))));
r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(OID_EQUALS(tmpoids[i],
pmemobj_oid(pmemobj_direct(tmpoids[i]))));
}
r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(!OID_IS_NULL(pmemobj_oid(pmemobj_direct(thread_oid))));
util_mutex_lock(&lock);
os_thread_t t;
THREAD_CREATE(&t, NULL, test_worker, NULL);
/* wait for the thread to perform the first direct */
while (flag != 0)
os_cond_wait(&cond, &lock);
for (unsigned i = 0; i < npools; ++i) {
pmemobj_free(&tmpoids[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(tmpoids[i]))));
pmemobj_close(pops[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(oids[i]))));
}
/* signal the waiting thread */
flag = 1;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
THREAD_JOIN(&t, NULL);
FREE(path);
FREE(tmpoids);
FREE(oids);
FREE(pops);
FREE(allocated_memory);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
DONE(NULL);
}
| 3,186 | 21.602837 | 66 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_file_open/util_file_open.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* util_file_open.c -- unit test for util_file_open()
*
* usage: util_file_open minlen path [path]...
*/
#include "unittest.h"
#include "file.h"
/*
 * main -- for each path argument, try util_file_open() with the given
 * minimum length and report either the failure or the detected file size.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_file_open");
	if (argc < 3)
		UT_FATAL("usage: %s minlen path...", argv[0]);
	/*
	 * Fix: the end-pointer returned by strtoul was stored in an unused
	 * local ('fname') -- pass NULL and drop the dead variable.
	 */
	size_t minsize = strtoul(argv[1], NULL, 0);
	for (int arg = 2; arg < argc; arg++) {
		size_t size = 0;
		int fd = util_file_open(argv[arg], &size, minsize, O_RDWR);
		if (fd == -1)
			UT_OUT("!%s: util_file_open", argv[arg]);
		else {
			UT_OUT("%s: open, len %zu", argv[arg], size);
			os_close(fd);
		}
	}
	DONE(NULL);
}
| 722 | 18.026316 | 61 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libvmmalloc/README.md
|
This library has been moved to a
[separate repository](https://github.com/pmem/vmem).
| 86 | 28 | 52 |
md
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.